From 84f7ac1f1175dc4a2a1e25795e1f52b05cf51ad4 Mon Sep 17 00:00:00 2001 From: tianshapjq Date: Tue, 12 Dec 2017 14:30:24 +0800 Subject: [PATCH 001/307] small nit in the annotations --- pkg/kubelet/container/os.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/container/os.go b/pkg/kubelet/container/os.go index 6126063b308..bd27ae9f079 100644 --- a/pkg/kubelet/container/os.go +++ b/pkg/kubelet/container/os.go @@ -43,7 +43,7 @@ type OSInterface interface { // RealOS is used to dispatch the real system level operations. type RealOS struct{} -// MkDir will will call os.Mkdir to create a directory. +// MkdirAll will call os.MkdirAll to create a directory. func (RealOS) MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } From 9c591b0c94e723b6a5150e952f385d402252ca3d Mon Sep 17 00:00:00 2001 From: Anshul Sharma Date: Tue, 16 Jan 2018 14:51:52 +0530 Subject: [PATCH 002/307] Replace error string with const --- pkg/cloudprovider/providers/aws/aws_loadbalancer.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 438b5617f19..3fc603438d4 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -1375,8 +1375,7 @@ func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescrip if err != nil { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { - case "PolicyNotFound": - // TODO change from string to `elb.ErrCodePolicyNotFoundException` once the AWS SDK is updated + case elb.ErrCodePolicyNotFoundException: default: return fmt.Errorf("error describing security policies on load balancer: %q", err) } From 284d08bf7032a94b568a95d6de1e13d8c0ac5bc2 Mon Sep 17 00:00:00 2001 From: root <837829664@qq.com> Date: Wed, 28 Feb 2018 19:11:35 +0800 Subject: [PATCH 003/307] fix persist typo --- .../src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go index 1fe52c5241d..9c3ea0ab8d5 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go @@ -279,7 +279,7 @@ func (p *oidcAuthProvider) idToken() (string, error) { // Persist new config and if successful, update the in memory config. 
if err = p.persister.Persist(newCfg); err != nil { - return "", fmt.Errorf("could not perist new tokens: %v", err) + return "", fmt.Errorf("could not persist new tokens: %v", err) } p.cfg = newCfg From bdc509131cc6c970780934a3f96677c8d5e35513 Mon Sep 17 00:00:00 2001 From: hangaoshuai Date: Thu, 19 Apr 2018 10:02:28 +0800 Subject: [PATCH 004/307] check error when parse field failed --- pkg/kubectl/BUILD | 1 + pkg/kubectl/sorter.go | 9 ++++-- pkg/kubectl/sorter_test.go | 65 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 3 deletions(-) diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index f6cd4a553f9..883d0825118 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -62,6 +62,7 @@ go_test( "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", diff --git a/pkg/kubectl/sorter.go b/pkg/kubectl/sorter.go index f1330e3582b..fbdf8040df7 100644 --- a/pkg/kubectl/sorter.go +++ b/pkg/kubectl/sorter.go @@ -277,13 +277,16 @@ func (r *RuntimeSort) Less(i, j int) bool { iObj := r.objs[i] jObj := r.objs[j] - parser := jsonpath.New("sorting").AllowMissingKeys(true) - parser.Parse(r.field) - var iValues [][]reflect.Value var jValues [][]reflect.Value var err error + parser := jsonpath.New("sorting").AllowMissingKeys(true) + err = parser.Parse(r.field) + if err != nil { + panic(err) + } + if unstructured, ok := iObj.(*unstructured.Unstructured); ok { iValues, err = parser.FindResults(unstructured.Object) } else { diff --git a/pkg/kubectl/sorter_test.go b/pkg/kubectl/sorter_test.go index 7f6b0ad5c4d..7c2e5ee0a09 100644 --- a/pkg/kubectl/sorter_test.go +++ b/pkg/kubectl/sorter_test.go @@ -22,6 +22,7 @@ import ( "testing" api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -408,3 +409,67 @@ func TestSortingPrinter(t *testing.T) { } } } + +func TestRuntimeSortLess(t *testing.T) { + var testobj runtime.Object + + testobj = &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "b", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "c", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "a", + }, + }, + }, + } + + testobjs, err := meta.ExtractList(testobj) + if err != nil { + t.Fatalf("ExtractList testobj got unexpected error: %v", err) + } + + testfield := "{.metadata.name}" + testruntimeSort := NewRuntimeSort(testfield, testobjs) + tests := []struct { + name string + runtimeSort *RuntimeSort + i int + j int + expectResult bool + expectErr bool + }{ + { + name: "test less true", + runtimeSort: testruntimeSort, + i: 0, + j: 1, + expectResult: true, + }, + { + name: "test less false", + runtimeSort: testruntimeSort, + i: 1, + j: 2, + expectResult: false, + }, + } + + for i, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := test.runtimeSort.Less(test.i, test.j) + if result != test.expectResult { + t.Errorf("case[%d]:%s Expected result: %v, Got result: %v", i, test.name, test.expectResult, result) + } + }) + } +} From 0f3e1dcfc250626841f82c81a324cbfcb24cd5e5 Mon Sep 17 
00:00:00 2001 From: Tomoe Sugihara Date: Mon, 23 Apr 2018 15:38:20 +0900 Subject: [PATCH 005/307] Whitelist CronJob for kubectl apply --prune --- pkg/kubectl/cmd/apply.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index de24503d17a..a7048ad6768 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -536,6 +536,7 @@ func getRESTMappings(mapper meta.RESTMapper, pruneResources *[]pruneResource) (n {"", "v1", "Secret", true}, {"", "v1", "Service", true}, {"batch", "v1", "Job", true}, + {"batch", "v1beta1", "CronJob", true}, {"extensions", "v1beta1", "DaemonSet", true}, {"extensions", "v1beta1", "Deployment", true}, {"extensions", "v1beta1", "Ingress", true}, From 71e4449e1768f82e6c2187d94fc97361c6f49d89 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Mon, 30 Apr 2018 17:19:12 +0200 Subject: [PATCH 006/307] Return attach error to A/D controller. WaitForAttach runs on nodes, not in A/D controller. --- pkg/volume/csi/csi_attacher.go | 10 ++------- pkg/volume/csi/csi_attacher_test.go | 34 +++++++++++++++++++++++------ 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go index ae6a267c001..48bf74f2168 100644 --- a/pkg/volume/csi/csi_attacher.go +++ b/pkg/volume/csi/csi_attacher.go @@ -102,17 +102,11 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle)) } - // probe for attachment update here - // NOTE: any error from waiting for attachment is logged only. This is because - // the primary intent of the enclosing method is to create VolumeAttachment. - // DONOT return that error here as it is mitigated in attacher.WaitForAttach. 
- volAttachmentOK := true if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil { - volAttachmentOK = false - glog.Error(log("attacher.Attach attempted to wait for attachment to be ready, but failed with: %v", err)) + return "", err } - glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment verified=%t: attachment object [%s]", volAttachmentOK, attachID)) + glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID)) return attachID, nil } diff --git a/pkg/volume/csi/csi_attacher_test.go b/pkg/volume/csi/csi_attacher_test.go index e8c227e306b..f9224cbcc17 100644 --- a/pkg/volume/csi/csi_attacher_test.go +++ b/pkg/volume/csi/csi_attacher_test.go @@ -59,12 +59,13 @@ func makeTestAttachment(attachID, nodeName, pvName string) *storage.VolumeAttach func TestAttacherAttach(t *testing.T) { testCases := []struct { - name string - nodeName string - driverName string - volumeName string - attachID string - shouldFail bool + name string + nodeName string + driverName string + volumeName string + attachID string + injectAttacherError bool + shouldFail bool }{ { name: "test ok 1", @@ -104,6 +105,15 @@ func TestAttacherAttach(t *testing.T) { attachID: getAttachmentName("vol02", "driver02", "node02"), shouldFail: true, }, + { + name: "attacher error", + nodeName: "node02", + driverName: "driver02", + volumeName: "vol02", + attachID: getAttachmentName("vol02", "driver02", "node02"), + injectAttacherError: true, + shouldFail: true, + }, } // attacher loop @@ -127,6 +137,9 @@ func TestAttacherAttach(t *testing.T) { if !fail && err != nil { t.Errorf("expecting no failure, but got err: %v", err) } + if fail && err == nil { + t.Errorf("expecting failure, but got no err") + } if attachID != id && !fail { t.Errorf("expecting attachID %v, got %v", id, attachID) } @@ -154,7 +167,14 @@ func TestAttacherAttach(t *testing.T) { if attach == nil { t.Logf("attachment not found for id:%v", tc.attachID) } else { - attach.Status.Attached = true + if tc.injectAttacherError { + attach.Status.Attached = false + attach.Status.AttachError = &storage.VolumeError{ + Message: "attacher error", + } + } else { + attach.Status.Attached = true + } _, err = csiAttacher.k8s.StorageV1beta1().VolumeAttachments().Update(attach) if err != nil { t.Error(err) From 27da26754f97d1be384bee71ee0335291115a286 Mon Sep 17 00:00:00 2001 From: Zhen Wang Date: Wed, 2 May 2018 22:50:29 -0700 Subject: [PATCH 007/307] Use default seccomp profile for addons --- .../cluster-loadbalancing/glbc/default-svc-controller.yaml | 2 ++ .../addons/cluster-monitoring/google/heapster-controller.yaml | 1 + .../googleinfluxdb/heapster-controller-combined.yaml | 1 + .../cluster-monitoring/influxdb/heapster-controller.yaml | 1 + .../influxdb/influxdb-grafana-controller.yaml | 1 + .../cluster-monitoring/stackdriver/heapster-controller.yaml | 1 + .../cluster-monitoring/standalone/heapster-controller.yaml | 1 + cluster/addons/dashboard/dashboard-controller.yaml | 1 + .../dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml | 1 + cluster/addons/metadata-agent/stackdriver/metadata-agent.yaml | 4 ++++ cluster/addons/metrics-server/metrics-server-deployment.yaml | 1 + 11 files changed, 15 insertions(+) diff --git a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml index 99f8bf23d4d..0fbadc5e12b 100644 --- a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml +++ 
b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml @@ -17,6 +17,8 @@ spec: labels: k8s-app: glbc name: glbc + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: containers: - name: default-http-backend diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 4aa453e17ba..58b79eb5133 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -56,6 +56,7 @@ spec: version: v1.5.2 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical containers: diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 1ac542cc2a6..8ae5583e7f4 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -56,6 +56,7 @@ spec: version: v1.5.2 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical containers: diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 722c842cf6a..3710fcbfa39 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -56,6 +56,7 @@ spec: version: v1.5.2 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical containers: diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml index d494f33789f..dd0ebe652d4 100644 --- a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml @@ -21,6 +21,7 @@ spec: version: v4 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical tolerations: diff --git a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml index af0c8c535ed..946118ab874 100644 --- a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml @@ -43,6 +43,7 @@ spec: version: v1.5.2 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical containers: diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index 14b18d12dde..6722ec45f11 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -43,6 +43,7 @@ spec: version: v1.5.2 annotations: 
scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical containers: diff --git a/cluster/addons/dashboard/dashboard-controller.yaml b/cluster/addons/dashboard/dashboard-controller.yaml index 4988f590652..32f2b27dcdc 100644 --- a/cluster/addons/dashboard/dashboard-controller.yaml +++ b/cluster/addons/dashboard/dashboard-controller.yaml @@ -26,6 +26,7 @@ spec: k8s-app: kubernetes-dashboard annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical containers: diff --git a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml index 95494214a3c..5cc3e05c6f8 100644 --- a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml +++ b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml @@ -77,6 +77,7 @@ spec: k8s-app: kube-dns-autoscaler annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical containers: diff --git a/cluster/addons/metadata-agent/stackdriver/metadata-agent.yaml b/cluster/addons/metadata-agent/stackdriver/metadata-agent.yaml index 025ecb556c0..0d9ea98b6f7 100644 --- a/cluster/addons/metadata-agent/stackdriver/metadata-agent.yaml +++ b/cluster/addons/metadata-agent/stackdriver/metadata-agent.yaml @@ -40,6 +40,8 @@ spec: metadata: labels: app: metadata-agent + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: serviceAccountName: metadata-agent containers: @@ -103,6 +105,8 @@ spec: metadata: labels: app: metadata-agent-cluster-level + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: serviceAccountName: metadata-agent containers: diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index 3b6e5e66ff5..9190425f505 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -43,6 +43,7 @@ spec: version: v0.2.1 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical serviceAccountName: metrics-server From 64dee74bb7af69721a16fff5f32a481c79d74420 Mon Sep 17 00:00:00 2001 From: Lukas Grossar Date: Fri, 4 May 2018 15:49:07 +0200 Subject: [PATCH 008/307] Fix typo in volume_stats.go volumeStatsCollecotr -> volumeStatsCollector --- pkg/kubelet/metrics/collectors/volume_stats.go | 8 ++++---- pkg/kubelet/metrics/collectors/volume_stats_test.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/metrics/collectors/volume_stats.go b/pkg/kubelet/metrics/collectors/volume_stats.go index 7f80243dc97..e6f1cf36da8 100644 --- a/pkg/kubelet/metrics/collectors/volume_stats.go +++ b/pkg/kubelet/metrics/collectors/volume_stats.go @@ -58,17 +58,17 @@ var ( ) ) -type volumeStatsCollecotr struct { +type volumeStatsCollector struct { statsProvider serverstats.StatsProvider } // NewVolumeStatsCollector creates a volume stats prometheus collector. 
func NewVolumeStatsCollector(statsProvider serverstats.StatsProvider) prometheus.Collector { - return &volumeStatsCollecotr{statsProvider: statsProvider} + return &volumeStatsCollector{statsProvider: statsProvider} } // Describe implements the prometheus.Collector interface. -func (collector *volumeStatsCollecotr) Describe(ch chan<- *prometheus.Desc) { +func (collector *volumeStatsCollector) Describe(ch chan<- *prometheus.Desc) { ch <- volumeStatsCapacityBytesDesc ch <- volumeStatsAvailableBytesDesc ch <- volumeStatsUsedBytesDesc @@ -78,7 +78,7 @@ func (collector *volumeStatsCollecotr) Describe(ch chan<- *prometheus.Desc) { } // Collect implements the prometheus.Collector interface. -func (collector *volumeStatsCollecotr) Collect(ch chan<- prometheus.Metric) { +func (collector *volumeStatsCollector) Collect(ch chan<- prometheus.Metric) { podStats, err := collector.statsProvider.ListPodStats() if err != nil { return diff --git a/pkg/kubelet/metrics/collectors/volume_stats_test.go b/pkg/kubelet/metrics/collectors/volume_stats_test.go index 903169d00da..3e4d07e1deb 100644 --- a/pkg/kubelet/metrics/collectors/volume_stats_test.go +++ b/pkg/kubelet/metrics/collectors/volume_stats_test.go @@ -129,7 +129,7 @@ func TestVolumeStatsCollector(t *testing.T) { mockStatsProvider := new(statstest.StatsProvider) mockStatsProvider.On("ListPodStats").Return(podStats, nil) - if err := gatherAndCompare(&volumeStatsCollecotr{statsProvider: mockStatsProvider}, want, metrics); err != nil { + if err := gatherAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, want, metrics); err != nil { t.Errorf("unexpected collecting result:\n%s", err) } } From 294e00c66908d6979766aead408ce6d880061035 Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Thu, 3 May 2018 16:12:10 -0700 Subject: [PATCH 009/307] Add conversion to properly parse query parameter propagationPolicy --- .../pkg/apis/meta/v1/conversion.go | 12 +++++++ .../pkg/apis/meta/v1/conversion_test.go | 35 +++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index cd651bcd56d..c36fc655668 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -75,6 +75,8 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_unversioned_LabelSelector_to_map, Convert_Slice_string_To_Slice_int32, + + Convert_Slice_string_To_v1_DeletionPropagation, ) } @@ -304,3 +306,13 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio } return nil } + +// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy +func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error { + if len(*input) > 0 { + *out = DeletionPropagation((*input)[0]) + } else { + *out = "" + } + return nil +} diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion_test.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion_test.go index bc591584ef5..4ff57fd8dc7 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/conversion_test.go @@ -47,3 +47,38 @@ func TestMapToLabelSelectorRoundTrip(t *testing.T) { } } } + +func TestConvertSliceStringToDeletionPropagation(t *testing.T) { + tcs := []struct { + Input []string + Output 
v1.DeletionPropagation + }{ + { + Input: nil, + Output: "", + }, + { + Input: []string{}, + Output: "", + }, + { + Input: []string{"foo"}, + Output: "foo", + }, + { + Input: []string{"bar", "foo"}, + Output: "bar", + }, + } + + for _, tc := range tcs { + var dp v1.DeletionPropagation + if err := v1.Convert_Slice_string_To_v1_DeletionPropagation(&tc.Input, &dp, nil); err != nil { + t.Errorf("Convert_Slice_string_To_v1_DeletionPropagation(%#v): %v", tc.Input, err) + continue + } + if !apiequality.Semantic.DeepEqual(dp, tc.Output) { + t.Errorf("slice string to DeletionPropagation conversion failed: got %v; want %v", dp, tc.Output) + } + } +} From 906fd7529e3f7ea3cc1ff753b3d36115be061394 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Wed, 9 May 2018 09:52:37 +0200 Subject: [PATCH 010/307] Fix iSCSI and RBD UnmountDevice with mount containers. Google's configure-helper.sh script bind-mounts /var/lib/kubelet somewhere into /home/kubernetes and thus every mount that Kubernetes does is visible twice in /proc/mounts. iSCSI and RBD should not rely on counting on entries in /proc/mounts and unmount device when Kubernetes thinks it's unusued. Kubernetes tracks the mounts by itself and most of other volume plugins rely on it safely. --- pkg/volume/iscsi/iscsi_util.go | 11 +---------- pkg/volume/rbd/attacher.go | 18 +++++++----------- 2 files changed, 8 insertions(+), 21 deletions(-) diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index ad6382f308f..d649e351729 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -396,25 +396,16 @@ func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *I // DetachDisk unmounts and detaches a volume from node func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { - _, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath) - if err != nil { - glog.Errorf("iscsi detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err) - return err - } if pathExists, pathErr := volumeutil.PathExists(mntPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath) return nil } - if err = c.mounter.Unmount(mntPath); err != nil { + if err := c.mounter.Unmount(mntPath); err != nil { glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err) return err } - cnt-- - if cnt != 0 { - return nil - } // if device is no longer used, see if need to logout the target device, prefix, err := extractDeviceAndPrefix(mntPath) if err != nil { diff --git a/pkg/volume/rbd/attacher.go b/pkg/volume/rbd/attacher.go index 2e5960092e1..9a74a608c33 100644 --- a/pkg/volume/rbd/attacher.go +++ b/pkg/volume/rbd/attacher.go @@ -189,21 +189,17 @@ func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error { glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) return nil } - devicePath, cnt, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath) + devicePath, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath) if err != nil { return err } - if cnt > 1 { - return fmt.Errorf("rbd: more than 1 reference counts at %s", deviceMountPath) - } - if cnt == 1 { - // Unmount the device from the device mount point. 
- glog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath) - if err = detacher.mounter.Unmount(deviceMountPath); err != nil { - return err - } - glog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath) + // Unmount the device from the device mount point. + glog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath) + if err = detacher.mounter.Unmount(deviceMountPath); err != nil { + return err } + glog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath) + glog.V(4).Infof("rbd: detaching device %s", devicePath) err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath) if err != nil { From 27dcb1f362af327e105d42276e8bc88bcd043bc8 Mon Sep 17 00:00:00 2001 From: Chengfei Shang Date: Fri, 11 May 2018 14:39:07 +0800 Subject: [PATCH 011/307] fix typo: peirodically->periodically --- pkg/kubelet/logs/container_log_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/logs/container_log_manager.go b/pkg/kubelet/logs/container_log_manager.go index baedd6c4c4c..12ba95bd765 100644 --- a/pkg/kubelet/logs/container_log_manager.go +++ b/pkg/kubelet/logs/container_log_manager.go @@ -168,7 +168,7 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, maxSize s // Start the container log manager. func (c *containerLogManager) Start() { - // Start a goroutine peirodically does container log rotation. + // Start a goroutine periodically does container log rotation. go wait.Forever(func() { if err := c.rotateLogs(); err != nil { glog.Errorf("Failed to rotate container logs: %v", err) From f4d07083415bc3d247a0e6b36cf61ce7db32e8f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 15 May 2018 18:43:52 +0100 Subject: [PATCH 012/307] Disable the public cadvisor port by default --- cmd/kubelet/app/options/options.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 1c0e4080cae..ad9a91a169b 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -242,8 +242,8 @@ func NewKubeletFlags() *KubeletFlags { HostNetworkSources: []string{kubetypes.AllSource}, HostPIDSources: []string{kubetypes.AllSource}, HostIPCSources: []string{kubetypes.AllSource}, - // TODO(#56523): default CAdvisorPort to 0 (disabled) and deprecate it - CAdvisorPort: 4194, + // TODO(#56523:v1.12.0): Remove --cadvisor-port, it has been deprecated since v1.10 + CAdvisorPort: 0, // TODO(#58010:v1.13.0): Remove --allow-privileged, it is deprecated AllowPrivileged: true, } @@ -400,7 +400,7 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.StringVar(&f.BootstrapKubeconfig, "experimental-bootstrap-kubeconfig", f.BootstrapKubeconfig, "") fs.MarkDeprecated("experimental-bootstrap-kubeconfig", "Use --bootstrap-kubeconfig") fs.Int32Var(&f.CAdvisorPort, "cadvisor-port", f.CAdvisorPort, "The port of the localhost cAdvisor endpoint (set to 0 to disable)") - fs.MarkDeprecated("cadvisor-port", "The default will change to 0 (disabled) in 1.12, and the cadvisor port will be removed entirely in 1.13") + fs.MarkDeprecated("cadvisor-port", "The default will change to 0 (disabled) in 1.11, and the cadvisor port will be removed entirely in 1.12") fs.DurationVar(&f.MinimumGCAge.Duration, "minimum-container-ttl-duration", f.MinimumGCAge.Duration, "Minimum age for a finished container before it is garbage collected. 
Examples: '300ms', '10s' or '2h45m'") fs.MarkDeprecated("minimum-container-ttl-duration", "Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.") fs.Int32Var(&f.MaxPerPodContainerCount, "maximum-dead-containers-per-container", f.MaxPerPodContainerCount, "Maximum number of old instances to retain per container. Each container takes up some disk space.") From 582128a8374b2a7d40db8268568c01a73e5f3cc6 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Tue, 15 May 2018 22:15:23 +0200 Subject: [PATCH 013/307] Remove unnecessary roundtripping in get.go --- pkg/kubectl/cmd/get/get.go | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/pkg/kubectl/cmd/get/get.go b/pkg/kubectl/cmd/get/get.go index 8bed7793e67..5c4372893c1 100644 --- a/pkg/kubectl/cmd/get/get.go +++ b/pkg/kubectl/cmd/get/get.go @@ -343,7 +343,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e objs := make([]runtime.Object, len(infos)) for ix := range infos { if o.ServerPrint { - table, err := o.decodeIntoTable(cmdutil.InternalVersionJSONEncoder(), infos[ix].Object) + table, err := o.decodeIntoTable(infos[ix].Object) if err == nil { infos[ix].Object = table } else { @@ -589,34 +589,28 @@ func attemptToConvertToInternal(obj runtime.Object, converter runtime.ObjectConv return internalObject } -func (o *GetOptions) decodeIntoTable(encoder runtime.Encoder, obj runtime.Object) (runtime.Object, error) { +func (o *GetOptions) decodeIntoTable(obj runtime.Object) (runtime.Object, error) { if obj.GetObjectKind().GroupVersionKind().Kind != "Table" { return nil, fmt.Errorf("attempt to decode non-Table object into a v1beta1.Table") } - b, err := runtime.Encode(encoder, obj) - if err != nil { - return nil, err + unstr, ok := obj.(*unstructured.Unstructured) + if !ok { + return nil, fmt.Errorf("attempt to decode non-Unstructured object") } - table := &metav1beta1.Table{} - err = json.Unmarshal(b, table) - if err != nil { + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, table); err != nil { return nil, err } for i := range table.Rows { row := &table.Rows[i] if row.Object.Raw == nil || row.Object.Object != nil { - //if row already has Object.Object - //we don't change it continue } - converted, err := runtime.Decode(unstructured.UnstructuredJSONScheme, row.Object.Raw) if err != nil { - //if error happens, we just continue - continue + return nil, err } row.Object.Object = converted } From 5608a4e3f180799315ee9582f7407e9aba10b07c Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Wed, 16 May 2018 11:18:40 +0800 Subject: [PATCH 014/307] Revert enable PodPreset admission and also enable settings.k8s.io/v1alpha1 api resource --- cluster/gce/config-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index d7d511cf02a..6301efd3073 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -197,7 +197,7 @@ fi if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then MASTER_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true" fi -APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --vmodule=httplog=3 --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}" +APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --vmodule=httplog=3 
--runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1,settings.k8s.io/v1alpha1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}" CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}" SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}" KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}" @@ -336,7 +336,7 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then fi if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then - ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection" + ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection" if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy" fi From ca26a8b375522ebf1049964418b4f166c83c4a06 Mon Sep 17 00:00:00 2001 From: Kazuki Suda Date: Wed, 16 May 2018 18:27:14 +0900 Subject: [PATCH 015/307] Add initContainers into completion suggestions for kubectl logs/attach --- pkg/kubectl/cmd/cmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index a1f7498ca5b..8422219875a 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -148,7 +148,7 @@ __kubectl_get_resource_clusterrole() __kubectl_get_containers() { local template - template="{{ range .spec.containers }}{{ .name }} {{ end }}" + template="{{ range .spec.initContainers }}{{ .name }} {{end}}{{ range .spec.containers }}{{ .name }} {{ end }}" __kubectl_debug "${FUNCNAME} nouns are ${nouns[*]}" local len="${#nouns[@]}" From 703dc826bf4ebf15e498d47dc74eda598ce2b6e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 16 May 2018 10:39:07 +0100 Subject: [PATCH 016/307] Remove e2e test for cAdvisor running in the kubelet, as it's deprecated and gonna be removed --- test/e2e/network/proxy.go | 1 - 1 file changed, 1 deletion(-) diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index fe2f9f34452..b6be82183d9 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -73,7 +73,6 @@ var _ = SIGDescribe("Proxy", func() { subresource. */ framework.ConformanceIt("should proxy logs on node using proxy subresource ", func() { nodeProxyTest(f, prefix+"/nodes/", "/proxy/logs/") }) - It("should proxy to cadvisor using proxy subresource", func() { nodeProxyTest(f, prefix+"/nodes/", ":4194/proxy/containers/") }) // using the porter image to serve content, access the content // (of multiple pods?) from multiple (endpoints/services?) From 442b3fcf00117828081021ebd0aafd4787b922f6 Mon Sep 17 00:00:00 2001 From: Michael Gugino Date: Tue, 15 May 2018 18:05:36 -0400 Subject: [PATCH 017/307] Additional test coverage for kubectl/cmd/cp This commit adds some additional test coverage for the kubectl cp command. 
--- pkg/kubectl/cmd/cp_test.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/pkg/kubectl/cmd/cp_test.go b/pkg/kubectl/cmd/cp_test.go index 5b2fe9865a8..925383a7260 100644 --- a/pkg/kubectl/cmd/cp_test.go +++ b/pkg/kubectl/cmd/cp_test.go @@ -74,6 +74,10 @@ func TestExtractFileSpec(t *testing.T) { spec: "some:bad:spec", expectErr: true, }, + { + spec: "namespace/pod/invalid:/some/file", + expectErr: true, + }, } for _, test := range tests { spec, err := extractFileSpec(test.spec) @@ -579,3 +583,35 @@ func TestCopyToPod(t *testing.T) { }) } } + +func TestValidate(t *testing.T) { + tests := []struct { + name string + args []string + expectedErr bool + }{ + { + name: "Validate Succeed", + args: []string{"1", "2"}, + expectedErr: false, + }, + { + name: "Validate Fail", + args: []string{"1", "2", "3"}, + expectedErr: true, + }, + } + tf := cmdtesting.NewTestFactory() + ioStreams, _, _, _ := genericclioptions.NewTestIOStreams() + opts := NewCopyOptions(ioStreams) + cmd := NewCmdCp(tf, ioStreams) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := opts.Validate(cmd, test.args) + if (err != nil) != test.expectedErr { + t.Errorf("expected error: %v, saw: %v, error: %v", test.expectedErr, err != nil, err) + } + }) + } +} From 37e77f148dfab60461555fee5b6613ff49bc0124 Mon Sep 17 00:00:00 2001 From: "Bobby (Babak) Salamat" Date: Fri, 20 Apr 2018 19:34:39 -0700 Subject: [PATCH 018/307] Fix PDB preemption tests. --- test/integration/scheduler/preemption_test.go | 65 +++++++++++----- test/integration/scheduler/util.go | 78 +++++++++++++++++++ 2 files changed, 122 insertions(+), 21 deletions(-) diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index 8e2e4976b79..d5832a0bd86 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -27,7 +27,6 @@ import ( policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" @@ -601,6 +600,18 @@ func mkMinAvailablePDB(name, namespace string, uid types.UID, minAvailable int, } } +func addPodConditionReady(pod *v1.Pod) { + pod.Status = v1.PodStatus{ + Phase: v1.PodRunning, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + }, + } +} + // TestPDBInPreemption tests PodDisruptionBudget support in preemption. func TestPDBInPreemption(t *testing.T) { // Enable PodPriority feature gate. 
@@ -610,6 +621,8 @@ func TestPDBInPreemption(t *testing.T) { defer cleanupTest(t, context) cs := context.clientSet + initDisruptionController(context) + defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI)}, @@ -629,6 +642,7 @@ func TestPDBInPreemption(t *testing.T) { description string nodes []*nodeConfig pdbs []*policy.PodDisruptionBudget + pdbPodNum []int32 existingPods []*v1.Pod pod *v1.Pod preemptedPodIndexes map[int]struct{} @@ -639,6 +653,7 @@ func TestPDBInPreemption(t *testing.T) { pdbs: []*policy.PodDisruptionBudget{ mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}), }, + pdbPodNum: []int32{2}, existingPods: []*v1.Pod{ initPausePod(context.clientSet, &pausePodConfig{ Name: "low-pod1", @@ -681,6 +696,7 @@ func TestPDBInPreemption(t *testing.T) { pdbs: []*policy.PodDisruptionBudget{ mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}), }, + pdbPodNum: []int32{1}, existingPods: []*v1.Pod{ initPausePod(context.clientSet, &pausePodConfig{ Name: "low-pod1", @@ -720,6 +736,7 @@ func TestPDBInPreemption(t *testing.T) { mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}), mkMinAvailablePDB("pdb-2", context.ns.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}), }, + pdbPodNum: []int32{1, 5}, existingPods: []*v1.Pod{ initPausePod(context.clientSet, &pausePodConfig{ Name: "low-pod1", @@ -783,38 +800,22 @@ func TestPDBInPreemption(t *testing.T) { Priority: &highPriority, Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)}, + v1.ResourceMemory: *resource.NewQuantity(400, resource.BinarySI)}, }, }), - preemptedPodIndexes: map[int]struct{}{0: {}, 1: {}}, + // The third node is chosen because PDB is not violated for node 3 and the victims have lower priority than node-2. + preemptedPodIndexes: map[int]struct{}{4: {}, 5: {}, 6: {}}, }, } for _, test := range tests { + t.Logf("================ Running test: %v\n", test.description) for _, nodeConf := range test.nodes { _, err := createNode(cs, nodeConf.name, nodeConf.res) if err != nil { t.Fatalf("Error creating node %v: %v", nodeConf.name, err) } } - // Create PDBs. - for _, pdb := range test.pdbs { - _, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb) - if err != nil { - t.Fatalf("Failed to create PDB: %v", err) - } - } - // Wait for PDBs to show up in the scheduler's cache. - if err := wait.Poll(time.Second, 15*time.Second, func() (bool, error) { - cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything()) - if err != nil { - t.Errorf("Error while polling for PDB: %v", err) - return false, err - } - return len(cachedPDBs) == len(test.pdbs), err - }); err != nil { - t.Fatalf("Not all PDBs were added to the cache: %v", err) - } pods := make([]*v1.Pod, len(test.existingPods)) var err error @@ -823,7 +824,29 @@ func TestPDBInPreemption(t *testing.T) { if pods[i], err = runPausePod(cs, p); err != nil { t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err) } + // Add pod condition ready so that PDB is updated. 
+ addPodConditionReady(p) + if _, err := context.clientSet.CoreV1().Pods(context.ns.Name).UpdateStatus(p); err != nil { + t.Fatal(err) + } } + // Wait for Pods to be stable in scheduler cache. + if err := waitCachedPodsStable(context, test.existingPods); err != nil { + t.Fatalf("Not all pods are stable in the cache: %v", err) + } + + // Create PDBs. + for _, pdb := range test.pdbs { + _, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb) + if err != nil { + t.Fatalf("Failed to create PDB: %v", err) + } + } + // Wait for PDBs to show up in the scheduler's cache and become stable. + if err := waitCachedPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil { + t.Fatalf("Not all pdbs are stable in the cache: %v", err) + } + // Create the "pod". preemptor, err := createPausePod(cs, test.pod) if err != nil { diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index d42803c1786..b7035855471 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -24,9 +24,11 @@ import ( "time" "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" @@ -42,6 +44,8 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/api/legacyscheme" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/controller/disruption" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler" _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" @@ -194,6 +198,7 @@ func initTestSchedulerWithOptions( // set setPodInformer if provided. if setPodInformer { go podInformer.Informer().Run(context.schedulerConfig.StopEverything) + controller.WaitForCacheSync("scheduler", context.schedulerConfig.StopEverything, podInformer.Informer().HasSynced) } eventBroadcaster := record.NewBroadcaster() @@ -218,6 +223,26 @@ func initTestSchedulerWithOptions( return context } +// initDisruptionController initializes and runs a Disruption Controller to properly +// update PodDisuptionBudget objects. +func initDisruptionController(context *TestContext) *disruption.DisruptionController { + informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour) + + dc := disruption.NewDisruptionController( + informers.Core().V1().Pods(), + informers.Policy().V1beta1().PodDisruptionBudgets(), + informers.Core().V1().ReplicationControllers(), + informers.Extensions().V1beta1().ReplicaSets(), + informers.Extensions().V1beta1().Deployments(), + informers.Apps().V1beta1().StatefulSets(), + context.clientSet) + + informers.Start(context.schedulerConfig.StopEverything) + informers.WaitForCacheSync(context.schedulerConfig.StopEverything) + go dc.Run(context.schedulerConfig.StopEverything) + return dc +} + // initTest initializes a test environment and creates master and scheduler with default // configuration. func initTest(t *testing.T, nsPrefix string) *TestContext { @@ -514,6 +539,59 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error { return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second) } +// waitCachedPDBsStable waits for PDBs in scheduler cache to have "CurrentHealthy" status equal to +// the expected values. 
+func waitCachedPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error { + return wait.Poll(time.Second, 60*time.Second, func() (bool, error) { + cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything()) + if err != nil { + return false, err + } + if len(cachedPDBs) != len(pdbs) { + return false, nil + } + for i, pdb := range pdbs { + found := false + for _, cpdb := range cachedPDBs { + if pdb.Name == cpdb.Name && pdb.Namespace == cpdb.Namespace { + found = true + if cpdb.Status.CurrentHealthy != pdbPodNum[i] { + return false, nil + } + } + } + if !found { + return false, nil + } + } + return true, nil + }) +} + +// waitCachedPodsStable waits until scheduler cache has the given pods. +func waitCachedPodsStable(context *TestContext, pods []*v1.Pod) error { + return wait.Poll(time.Second, 30*time.Second, func() (bool, error) { + cachedPods, err := context.scheduler.Config().SchedulerCache.List(labels.Everything()) + if err != nil { + return false, err + } + if len(pods) != len(cachedPods) { + return false, nil + } + for _, p := range pods { + actualPod, err1 := context.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{}) + if err1 != nil { + return false, err1 + } + cachedPod, err2 := context.scheduler.Config().SchedulerCache.GetPod(actualPod) + if err2 != nil || cachedPod == nil { + return false, err2 + } + } + return true, nil + }) +} + // deletePod deletes the given pod in the given namespace. func deletePod(cs clientset.Interface, podName string, nsName string) error { return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0)) From ff8b70c4094f8d2cc818ce7e88eb7735bd43ae61 Mon Sep 17 00:00:00 2001 From: Weibin Lin Date: Thu, 17 May 2018 14:33:47 +0800 Subject: [PATCH 019/307] Init ipvsInterface only when ipvs modules are present --- cmd/kube-proxy/app/server_others.go | 4 +++- pkg/proxy/ipvs/proxier.go | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go index 1ab70b1ab52..38806696d05 100644 --- a/cmd/kube-proxy/app/server_others.go +++ b/cmd/kube-proxy/app/server_others.go @@ -92,9 +92,11 @@ func newProxyServer( dbus = utildbus.New() iptInterface = utiliptables.New(execer, dbus, protocol) - ipvsInterface = utilipvs.New(execer) kernelHandler = ipvs.NewLinuxKernelHandler() ipsetInterface = utilipset.New(execer) + if canUse, _ := ipvs.CanUseIPVSProxier(kernelHandler, ipsetInterface); canUse { + ipvsInterface = utilipvs.New(execer) + } // We omit creation of pretty much everything if we run in cleanup mode if cleanupAndExit { diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index c920caf0813..3b45ce076a5 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -547,6 +547,9 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool // CleanupLeftovers clean up all ipvs and iptables rules created by ipvs Proxier. func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset utilipset.Interface, cleanupIPVS bool) (encounteredError bool) { + if canUse, _ := CanUseIPVSProxier(NewLinuxKernelHandler(), ipset); !canUse { + return false + } if cleanupIPVS { // Return immediately when ipvs interface is nil - Probably initialization failed in somewhere. 
if ipvs == nil { From 068844aeb1e2c4e00eedb964e03cab3a8ac36638 Mon Sep 17 00:00:00 2001 From: wojtekt Date: Fri, 11 May 2018 11:40:22 +0200 Subject: [PATCH 020/307] WatchingSecretManager --- pkg/kubelet/util/manager/BUILD | 14 +- .../util/manager/watch_based_manager.go | 194 ++++++++++++++++++ .../util/manager/watch_based_manager_test.go | 184 +++++++++++++++++ 3 files changed, 391 insertions(+), 1 deletion(-) create mode 100644 pkg/kubelet/util/manager/watch_based_manager.go create mode 100644 pkg/kubelet/util/manager/watch_based_manager_test.go diff --git a/pkg/kubelet/util/manager/BUILD b/pkg/kubelet/util/manager/BUILD index 60005033ded..e789de207c2 100644 --- a/pkg/kubelet/util/manager/BUILD +++ b/pkg/kubelet/util/manager/BUILD @@ -5,6 +5,7 @@ go_library( srcs = [ "cache_based_manager.go", "manager.go", + "watch_based_manager.go", ], importpath = "k8s.io/kubernetes/pkg/kubelet/util/manager", visibility = ["//visibility:public"], @@ -13,19 +14,28 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["cache_based_manager_test.go"], + srcs = [ + "cache_based_manager_test.go", + "watch_based_manager_test.go", + ], embed = [":go_default_library"], deps = [ "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/core/v1:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -33,6 +43,8 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", diff --git a/pkg/kubelet/util/manager/watch_based_manager.go b/pkg/kubelet/util/manager/watch_based_manager.go new file mode 100644 index 00000000000..719f1f3b1d1 --- /dev/null +++ b/pkg/kubelet/util/manager/watch_based_manager.go @@ -0,0 +1,194 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: We did some scalability tests and using watchBasedManager +// seems to help with apiserver performance at scale visibly. +// No issues we also observed at the scale of ~200k watchers with a +// single apiserver. +// However, we need to perform more extensive testing before we +// enable this in production setups. + +package manager + +import ( + "fmt" + "sync" + "time" + + "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" +) + +type listObjectFunc func(string, metav1.ListOptions) (runtime.Object, error) +type watchObjectFunc func(string, metav1.ListOptions) (watch.Interface, error) +type newObjectFunc func() runtime.Object + +// objectCacheItem is a single item stored in objectCache. +type objectCacheItem struct { + refCount int + store cache.Store + hasSynced func() (bool, error) + stopCh chan struct{} +} + +// objectCache is a local cache of objects propagated via +// individual watches. +type objectCache struct { + listObject listObjectFunc + watchObject watchObjectFunc + newObject newObjectFunc + groupResource schema.GroupResource + + lock sync.Mutex + items map[objectKey]*objectCacheItem +} + +// NewObjectCache returns a new watch-based instance of Store interface. +func NewObjectCache(listObject listObjectFunc, watchObject watchObjectFunc, newObject newObjectFunc, groupResource schema.GroupResource) Store { + return &objectCache{ + listObject: listObject, + watchObject: watchObject, + newObject: newObject, + groupResource: groupResource, + items: make(map[objectKey]*objectCacheItem), + } +} + +func (c *objectCache) newStore() cache.Store { + // TODO: We may consider created a dedicated store keeping just a single + // item, instead of using a generic store implementation for this purpose. + // However, simple benchmarks show that memory overhead in that case is + // decrease from ~600B to ~300B per object. So we are not optimizing it + // until we will see a good reason for that. + return cache.NewStore(cache.MetaNamespaceKeyFunc) +} + +func (c *objectCache) newReflector(namespace, name string) *objectCacheItem { + fieldSelector := fields.Set{"metadata.name": name}.AsSelector().String() + listFunc := func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return c.listObject(namespace, options) + } + watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return c.watchObject(namespace, options) + } + store := c.newStore() + reflector := cache.NewNamedReflector( + fmt.Sprintf("object-%q/%q", namespace, name), + &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}, + c.newObject(), + store, + 0, + ) + stopCh := make(chan struct{}) + go reflector.Run(stopCh) + return &objectCacheItem{ + refCount: 0, + store: store, + hasSynced: func() (bool, error) { return reflector.LastSyncResourceVersion() != "", nil }, + stopCh: stopCh, + } +} + +func (c *objectCache) AddReference(namespace, name string) { + key := objectKey{namespace: namespace, name: name} + + // AddReference is called from RegisterPod thus it needs to be efficient. 
+ // Thus, it is only increaisng refCount and in case of first registration + // of a given object it starts corresponding reflector. + // It's responsibility of the first Get operation to wait until the + // reflector propagated the store. + c.lock.Lock() + defer c.lock.Unlock() + item, exists := c.items[key] + if !exists { + item = c.newReflector(namespace, name) + c.items[key] = item + } + item.refCount++ +} + +func (c *objectCache) DeleteReference(namespace, name string) { + key := objectKey{namespace: namespace, name: name} + + c.lock.Lock() + defer c.lock.Unlock() + if item, ok := c.items[key]; ok { + item.refCount-- + if item.refCount == 0 { + // Stop the underlying reflector. + close(item.stopCh) + delete(c.items, key) + } + } +} + +// key returns key of an object with a given name and namespace. +// This has to be in-sync with cache.MetaNamespaceKeyFunc. +func (c *objectCache) key(namespace, name string) string { + if len(namespace) > 0 { + return namespace + "/" + name + } + return name +} + +func (c *objectCache) Get(namespace, name string) (runtime.Object, error) { + key := objectKey{namespace: namespace, name: name} + + c.lock.Lock() + item, exists := c.items[key] + c.lock.Unlock() + + if !exists { + return nil, fmt.Errorf("object %q/%q not registered", namespace, name) + } + if err := wait.PollImmediate(10*time.Millisecond, time.Second, item.hasSynced); err != nil { + return nil, fmt.Errorf("couldn't propagate object cache: %v", err) + } + + obj, exists, err := item.store.GetByKey(c.key(namespace, name)) + if err != nil { + return nil, err + } + if !exists { + return nil, apierrors.NewNotFound(c.groupResource, name) + } + if object, ok := obj.(runtime.Object); ok { + return object, nil + } + return nil, fmt.Errorf("unexpected object type: %v", obj) +} + +// NewWatchBasedManager creates a manager that keeps a cache of all objects +// necessary for registered pods. +// It implements the following logic: +// - whenever a pod is created or updated, we start individual watches for all +// referenced objects that aren't referenced from other registered pods +// - every GetObject() returns a value from local cache propagated via watches +func NewWatchBasedManager(listObject listObjectFunc, watchObject watchObjectFunc, newObject newObjectFunc, groupResource schema.GroupResource, getReferencedObjects func(*v1.Pod) sets.String) Manager { + objectStore := NewObjectCache(listObject, watchObject, newObject, groupResource) + return NewCacheBasedManager(objectStore, getReferencedObjects) +} diff --git a/pkg/kubelet/util/manager/watch_based_manager_test.go b/pkg/kubelet/util/manager/watch_based_manager_test.go new file mode 100644 index 00000000000..fc27a7374d6 --- /dev/null +++ b/pkg/kubelet/util/manager/watch_based_manager_test.go @@ -0,0 +1,184 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package manager + +import ( + "fmt" + "strings" + "testing" + "time" + + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" + + corev1 "k8s.io/kubernetes/pkg/apis/core/v1" + + "github.com/stretchr/testify/assert" +) + +func listSecret(fakeClient clientset.Interface) listObjectFunc { + return func(namespace string, opts metav1.ListOptions) (runtime.Object, error) { + return fakeClient.CoreV1().Secrets(namespace).List(opts) + } +} + +func watchSecret(fakeClient clientset.Interface) watchObjectFunc { + return func(namespace string, opts metav1.ListOptions) (watch.Interface, error) { + return fakeClient.CoreV1().Secrets(namespace).Watch(opts) + } +} + +func newSecretCache(fakeClient clientset.Interface) *objectCache { + return &objectCache{ + listObject: listSecret(fakeClient), + watchObject: watchSecret(fakeClient), + newObject: func() runtime.Object { return &v1.Secret{} }, + groupResource: corev1.Resource("secret"), + items: make(map[objectKey]*objectCacheItem), + } +} + +func TestSecretCache(t *testing.T) { + fakeClient := &fake.Clientset{} + + listReactor := func(a core.Action) (bool, runtime.Object, error) { + result := &v1.SecretList{ + ListMeta: metav1.ListMeta{ + ResourceVersion: "123", + }, + } + return true, result, nil + } + fakeClient.AddReactor("list", "secrets", listReactor) + fakeWatch := watch.NewFake() + fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil)) + + store := newSecretCache(fakeClient) + + store.AddReference("ns", "name") + _, err := store.Get("ns", "name") + if !apierrors.IsNotFound(err) { + t.Errorf("Expected NotFound error, got: %v", err) + } + + // Eventually we should be able to read added secret. + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "name", Namespace: "ns", ResourceVersion: "125"}, + } + fakeWatch.Add(secret) + getFn := func() (bool, error) { + object, err := store.Get("ns", "name") + if err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, err + } + secret := object.(*v1.Secret) + if secret == nil || secret.Name != "name" || secret.Namespace != "ns" { + return false, fmt.Errorf("unexpected secret: %v", secret) + } + return true, nil + } + if err := wait.PollImmediate(10*time.Millisecond, time.Second, getFn); err != nil { + t.Errorf("unexpected error: %v", err) + } + + // Eventually we should observer secret deletion. 
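+	// The fake watch delivers the Delete event asynchronously, so we poll
+	// Get until it reports NotFound.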
+ fakeWatch.Delete(secret) + getFn = func() (bool, error) { + _, err := store.Get("ns", "name") + if err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return false, err + } + return false, nil + } + if err := wait.PollImmediate(10*time.Millisecond, time.Second, getFn); err != nil { + t.Errorf("unexpected error: %v", err) + } + + store.DeleteReference("ns", "name") + _, err = store.Get("ns", "name") + if err == nil || !strings.Contains(err.Error(), "not registered") { + t.Errorf("unexpected error: %v", err) + } +} + +func TestSecretCacheMultipleRegistrations(t *testing.T) { + fakeClient := &fake.Clientset{} + + listReactor := func(a core.Action) (bool, runtime.Object, error) { + result := &v1.SecretList{ + ListMeta: metav1.ListMeta{ + ResourceVersion: "123", + }, + } + return true, result, nil + } + fakeClient.AddReactor("list", "secrets", listReactor) + fakeWatch := watch.NewFake() + fakeClient.AddWatchReactor("secrets", core.DefaultWatchReactor(fakeWatch, nil)) + + store := newSecretCache(fakeClient) + + store.AddReference("ns", "name") + // This should trigger List and Watch actions eventually. + actionsFn := func() (bool, error) { + actions := fakeClient.Actions() + if len(actions) > 2 { + return false, fmt.Errorf("too many actions: %v", actions) + } + if len(actions) < 2 { + return false, nil + } + if actions[0].GetVerb() != "list" || actions[1].GetVerb() != "watch" { + return false, fmt.Errorf("unexpected actions: %v", actions) + } + return true, nil + } + if err := wait.PollImmediate(10*time.Millisecond, time.Second, actionsFn); err != nil { + t.Errorf("unexpected error: %v", err) + } + + // Next registrations shouldn't trigger any new actions. + for i := 0; i < 20; i++ { + store.AddReference("ns", "name") + store.DeleteReference("ns", "name") + } + actions := fakeClient.Actions() + assert.Equal(t, 2, len(actions), "unexpected actions: %#v", actions) + + // Final delete also doesn't trigger any action. + store.DeleteReference("ns", "name") + _, err := store.Get("ns", "name") + if err == nil || !strings.Contains(err.Error(), "not registered") { + t.Errorf("unexpected error: %v", err) + } + actions = fakeClient.Actions() + assert.Equal(t, 2, len(actions), "unexpected actions: %#v", actions) +} From 598ca5accc410c3cb0fc307c4d6f961457768567 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 17 May 2018 13:36:37 +0200 Subject: [PATCH 021/307] Add GetSELinuxSupport to mounter. 
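
GetSELinuxSupport(pathname) reports whether the mount containing the given
path was mounted with the "seclabel" option, i.e. whether the filesystem
supports SELinux labels. The Linux implementations (Mounter and
NsenterMounter) locate the path's mount point in /proc/xxx/mountinfo and look
for "seclabel" among the mount options and super options; the Windows
implementation always returns false, and the fake/unsupported mounters return
a "not implemented" error.

A minimal sketch of a caller, for illustration only (the helper below is
hypothetical and not part of this change; it assumes
k8s.io/kubernetes/pkg/util/mount is imported as "mount"):

    // seLinuxApplicable reports whether SELinux relabeling makes sense for a
    // path, treating a failed check as "not supported".
    func seLinuxApplicable(mounter mount.Interface, path string) bool {
        supported, err := mounter.GetSELinuxSupport(path)
        if err != nil {
            return false
        }
        return supported
    }
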
--- .../cm/container_manager_linux_test.go | 4 + pkg/util/mount/exec_mount.go | 4 + pkg/util/mount/exec_mount_test.go | 4 + pkg/util/mount/exec_mount_unsupported.go | 4 + pkg/util/mount/fake.go | 4 + pkg/util/mount/mount.go | 3 + pkg/util/mount/mount_linux.go | 78 ++++++++++++++----- pkg/util/mount/mount_linux_test.go | 53 ++++++++++++- pkg/util/mount/mount_unsupported.go | 4 + pkg/util/mount/mount_windows.go | 5 ++ pkg/util/mount/nsenter_mount.go | 4 + pkg/util/mount/nsenter_mount_unsupported.go | 4 + pkg/util/removeall/removeall_test.go | 4 + pkg/volume/host_path/host_path_test.go | 4 + pkg/volume/volume.go | 1 + 15 files changed, 159 insertions(+), 21 deletions(-) diff --git a/pkg/kubelet/cm/container_manager_linux_test.go b/pkg/kubelet/cm/container_manager_linux_test.go index 2e9bb61b645..cd4b0460c6f 100644 --- a/pkg/kubelet/cm/container_manager_linux_test.go +++ b/pkg/kubelet/cm/container_manager_linux_test.go @@ -116,6 +116,10 @@ func (mi *fakeMountInterface) GetFSGroup(pathname string) (int64, error) { return -1, errors.New("not implemented") } +func (mi *fakeMountInterface) GetSELinuxSupport(pathname string) (bool, error) { + return false, errors.New("not implemented") +} + func fakeContainerMgrMountInt() mount.Interface { return &fakeMountInterface{ []mount.MountPoint{ diff --git a/pkg/util/mount/exec_mount.go b/pkg/util/mount/exec_mount.go index 3145d71dd5b..fcb948aa34e 100644 --- a/pkg/util/mount/exec_mount.go +++ b/pkg/util/mount/exec_mount.go @@ -159,3 +159,7 @@ func (m *execMounter) GetMountRefs(pathname string) ([]string, error) { func (m *execMounter) GetFSGroup(pathname string) (int64, error) { return m.wrappedMounter.GetFSGroup(pathname) } + +func (m *execMounter) GetSELinuxSupport(pathname string) (bool, error) { + return m.wrappedMounter.GetSELinuxSupport(pathname) +} diff --git a/pkg/util/mount/exec_mount_test.go b/pkg/util/mount/exec_mount_test.go index c15a9c16d94..b3af0a46fbb 100644 --- a/pkg/util/mount/exec_mount_test.go +++ b/pkg/util/mount/exec_mount_test.go @@ -172,3 +172,7 @@ func (fm *fakeMounter) GetMountRefs(pathname string) ([]string, error) { func (fm *fakeMounter) GetFSGroup(pathname string) (int64, error) { return -1, errors.New("not implemented") } + +func (fm *fakeMounter) GetSELinuxSupport(pathname string) (bool, error) { + return false, errors.New("not implemented") +} diff --git a/pkg/util/mount/exec_mount_unsupported.go b/pkg/util/mount/exec_mount_unsupported.go index 5274a3938a7..cbb5bbc1591 100644 --- a/pkg/util/mount/exec_mount_unsupported.go +++ b/pkg/util/mount/exec_mount_unsupported.go @@ -106,3 +106,7 @@ func (mounter *execMounter) GetMountRefs(pathname string) ([]string, error) { func (mounter *execMounter) GetFSGroup(pathname string) (int64, error) { return -1, errors.New("not implemented") } + +func (mounter *execMounter) GetSELinuxSupport(pathname string) (bool, error) { + return false, errors.New("not implemented") +} diff --git a/pkg/util/mount/fake.go b/pkg/util/mount/fake.go index ef79e731954..f82f669b2eb 100644 --- a/pkg/util/mount/fake.go +++ b/pkg/util/mount/fake.go @@ -228,3 +228,7 @@ func (f *FakeMounter) GetMountRefs(pathname string) ([]string, error) { func (f *FakeMounter) GetFSGroup(pathname string) (int64, error) { return -1, errors.New("GetFSGroup not implemented") } + +func (f *FakeMounter) GetSELinuxSupport(pathname string) (bool, error) { + return false, errors.New("GetSELinuxSupport not implemented") +} diff --git a/pkg/util/mount/mount.go b/pkg/util/mount/mount.go index a7a08fa82f4..0c59ca9ebb7 100644 --- 
a/pkg/util/mount/mount.go +++ b/pkg/util/mount/mount.go @@ -114,6 +114,9 @@ type Interface interface { GetMountRefs(pathname string) ([]string, error) // GetFSGroup returns FSGroup of the path. GetFSGroup(pathname string) (int64, error) + // GetSELinuxSupport returns true if given path is on a mount that supports + // SELinux. + GetSELinuxSupport(pathname string) (bool, error) } type Subpath struct { diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index 05bf54dae8e..dbb864cd479 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -591,25 +591,12 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { // isShared returns true, if given path is on a mount point that has shared // mount propagation. -func isShared(path string, filename string) (bool, error) { - infos, err := parseMountInfo(filename) +func isShared(mount string, mountInfoPath string) (bool, error) { + info, err := findMountInfo(mount, mountInfoPath) if err != nil { return false, err } - // process /proc/xxx/mountinfo in backward order and find the first mount - // point that is prefix of 'path' - that's the mount where path resides - var info *mountInfo - for i := len(infos) - 1; i >= 0; i-- { - if strings.HasPrefix(path, infos[i].mountPoint) { - info = &infos[i] - break - } - } - if info == nil { - return false, fmt.Errorf("cannot find mount point for %q", path) - } - // parse optional parameters for _, opt := range info.optional { if strings.HasPrefix(opt, "shared:") { @@ -624,6 +611,10 @@ type mountInfo struct { mountPoint string // list of "optional parameters", mount propagation is one of them optional []string + // mount options + mountOptions []string + // super options: per-superblock options. + superOptions []string } // parseMountInfo parses /proc/xxx/mountinfo. @@ -642,22 +633,46 @@ func parseMountInfo(filename string) ([]mountInfo, error) { } // See `man proc` for authoritative description of format of the file. fields := strings.Fields(line) - if len(fields) < 7 { - return nil, fmt.Errorf("wrong number of fields in (expected %d, got %d): %s", 8, len(fields), line) + if len(fields) < 10 { + return nil, fmt.Errorf("wrong number of fields in (expected %d, got %d): %s", 10, len(fields), line) } info := mountInfo{ - mountPoint: fields[4], - optional: []string{}, + mountPoint: fields[4], + mountOptions: strings.Split(fields[5], ","), + optional: []string{}, } // All fields until "-" are "optional fields". for i := 6; i < len(fields) && fields[i] != "-"; i++ { info.optional = append(info.optional, fields[i]) } + superOpts := fields[len(fields)-1] + info.superOptions = strings.Split(superOpts, ",") infos = append(infos, info) } return infos, nil } +func findMountInfo(path, mountInfoPath string) (mountInfo, error) { + infos, err := parseMountInfo(mountInfoPath) + if err != nil { + return mountInfo{}, err + } + + // process /proc/xxx/mountinfo in backward order and find the first mount + // point that is prefix of 'path' - that's the mount where path resides + var info *mountInfo + for i := len(infos) - 1; i >= 0; i-- { + if pathWithinBase(path, infos[i].mountPoint) { + info = &infos[i] + break + } + } + if info == nil { + return mountInfo{}, fmt.Errorf("cannot find mount point for %q", path) + } + return *info, nil +} + // doMakeRShared is common implementation of MakeRShared on Linux. It checks if // path is shared and bind-mounts it as rshared if needed. 
mountCmd and // mountArgs are expected to contain mount-like command, doMakeRShared will add @@ -686,6 +701,27 @@ func doMakeRShared(path string, mountInfoFilename string) error { return nil } +// getSELinuxSupport is common implementation of GetSELinuxSupport on Linux. +func getSELinuxSupport(path string, mountInfoFilename string) (bool, error) { + info, err := findMountInfo(path, mountInfoFilename) + if err != nil { + return false, err + } + + // "seclabel" can be both in mount options and super options. + for _, opt := range info.superOptions { + if opt == "seclabel" { + return true, nil + } + } + for _, opt := range info.mountOptions { + if opt == "seclabel" { + return true, nil + } + } + return false, nil +} + func (mounter *Mounter) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) { newHostPath, err = doBindSubPath(mounter, subPath, os.Getpid()) // There is no action when the container starts. Bind-mount will be cleaned @@ -934,6 +970,10 @@ func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { return getMountRefsByDev(mounter, realpath) } +func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) { + return getSELinuxSupport(pathname, procMountInfoPath) +} + func (mounter *Mounter) GetFSGroup(pathname string) (int64, error) { realpath, err := filepath.EvalSymlinks(pathname) if err != nil { diff --git a/pkg/util/mount/mount_linux_test.go b/pkg/util/mount/mount_linux_test.go index 7bec4e447ac..6028d608e97 100644 --- a/pkg/util/mount/mount_linux_test.go +++ b/pkg/util/mount/mount_linux_test.go @@ -1399,8 +1399,10 @@ func TestParseMountInfo(t *testing.T) { "simple bind mount", "/var/lib/kubelet", mountInfo{ - mountPoint: "/var/lib/kubelet", - optional: []string{"shared:30"}, + mountPoint: "/var/lib/kubelet", + optional: []string{"shared:30"}, + mountOptions: []string{"rw", "relatime"}, + superOptions: []string{"rw", "commit=30", "data=ordered"}, }, }, } @@ -1427,6 +1429,53 @@ func TestParseMountInfo(t *testing.T) { } } +func TestGetSELinuxSupport(t *testing.T) { + info := + `62 0 253:0 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered +78 62 0:41 / /tmp rw,nosuid,nodev shared:30 - tmpfs tmpfs rw,seclabel +83 63 0:44 / /var/lib/bar rw,relatime - tmpfs tmpfs rw +227 62 253:0 /var/lib/docker/devicemapper /var/lib/docker/devicemapper rw,relatime - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered +150 23 1:58 / /media/nfs_vol rw,relatime shared:89 - nfs4 172.18.4.223:/srv/nfs rw,vers=4.0,rsize=524288,wsize=524288,namlen=255,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=172.18.4.223,local_lock=none,addr=172.18.4.223 +` + tempDir, filename, err := writeFile(info) + if err != nil { + t.Fatalf("cannot create temporary file: %v", err) + } + defer os.RemoveAll(tempDir) + + tests := []struct { + name string + mountPoint string + expectedResult bool + }{ + { + "ext4 on /", + "/", + true, + }, + { + "tmpfs on /var/lib/bar", + "/var/lib/bar", + false, + }, + { + "nfsv4", + "/media/nfs_vol", + false, + }, + } + + for _, test := range tests { + out, err := getSELinuxSupport(test.mountPoint, filename) + if err != nil { + t.Errorf("Test %s failed with error: %s", test.name, err) + } + if test.expectedResult != out { + t.Errorf("Test %s failed: expected %v, got %v", test.name, test.expectedResult, out) + } + } +} + func TestSafeOpen(t *testing.T) { defaultPerm := os.FileMode(0750) diff --git a/pkg/util/mount/mount_unsupported.go b/pkg/util/mount/mount_unsupported.go index 
87b8e334825..6e268e0f43d 100644 --- a/pkg/util/mount/mount_unsupported.go +++ b/pkg/util/mount/mount_unsupported.go @@ -134,3 +134,7 @@ func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { func (mounter *Mounter) GetFSGroup(pathname string) (int64, error) { return -1, errors.New("not implemented") } + +func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) { + return false, errors.New("not implemented") +} diff --git a/pkg/util/mount/mount_windows.go b/pkg/util/mount/mount_windows.go index 0c10733e9de..f31f99bd66d 100644 --- a/pkg/util/mount/mount_windows.go +++ b/pkg/util/mount/mount_windows.go @@ -456,6 +456,11 @@ func (mounter *Mounter) GetFSGroup(pathname string) (int64, error) { return 0, nil } +func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) { + // Windows does not support SELinux. + return false, nil +} + // SafeMakeDir makes sure that the created directory does not escape given base directory mis-using symlinks. func (mounter *Mounter) SafeMakeDir(pathname string, base string, perm os.FileMode) error { return doSafeMakeDir(pathname, base, perm) diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index 17d730bf758..4c48d673254 100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -343,3 +343,7 @@ func (mounter *NsenterMounter) GetFSGroup(pathname string) (int64, error) { } return getFSGroup(kubeletpath) } + +func (mounter *NsenterMounter) GetSELinuxSupport(pathname string) (bool, error) { + return getSELinuxSupport(pathname, procMountInfoPath) +} diff --git a/pkg/util/mount/nsenter_mount_unsupported.go b/pkg/util/mount/nsenter_mount_unsupported.go index 787ba6b7197..8cf79de472c 100644 --- a/pkg/util/mount/nsenter_mount_unsupported.go +++ b/pkg/util/mount/nsenter_mount_unsupported.go @@ -106,3 +106,7 @@ func (*NsenterMounter) GetMountRefs(pathname string) ([]string, error) { func (*NsenterMounter) GetFSGroup(pathname string) (int64, error) { return -1, errors.New("not implemented") } + +func (*NsenterMounter) GetSELinuxSupport(pathname string) (bool, error) { + return false, errors.New("not implemented") +} diff --git a/pkg/util/removeall/removeall_test.go b/pkg/util/removeall/removeall_test.go index ad0efac8d1d..134326369ff 100644 --- a/pkg/util/removeall/removeall_test.go +++ b/pkg/util/removeall/removeall_test.go @@ -99,6 +99,10 @@ func (mounter *fakeMounter) GetFSGroup(pathname string) (int64, error) { return -1, errors.New("not implemented") } +func (mounter *fakeMounter) GetSELinuxSupport(pathname string) (bool, error) { + return false, errors.New("not implemented") +} + func (mounter *fakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { name := path.Base(file) if strings.HasPrefix(name, "mount") { diff --git a/pkg/volume/host_path/host_path_test.go b/pkg/volume/host_path/host_path_test.go index b48c0264449..199880c247e 100644 --- a/pkg/volume/host_path/host_path_test.go +++ b/pkg/volume/host_path/host_path_test.go @@ -397,6 +397,10 @@ func (fftc *fakeFileTypeChecker) GetFSGroup(pathname string) (int64, error) { return -1, errors.New("not implemented") } +func (fftc *fakeFileTypeChecker) GetSELinuxSupport(pathname string) (bool, error) { + return false, errors.New("not implemented") +} + func setUp() error { err := os.MkdirAll("/tmp/ExistingFolder", os.FileMode(0755)) if err != nil { diff --git a/pkg/volume/volume.go b/pkg/volume/volume.go index 471963556a8..161d84faf49 100644 --- a/pkg/volume/volume.go +++ b/pkg/volume/volume.go @@ -131,6 +131,7 
@@ type Mounter interface { // idempotent. SetUpAt(dir string, fsGroup *int64) error // GetAttributes returns the attributes of the mounter. + // This function is called after SetUp()/SetUpAt(). GetAttributes() Attributes } From a0b0c152c9e4146db0a3274ec94d8caf438c11f0 Mon Sep 17 00:00:00 2001 From: "Bobby (Babak) Salamat" Date: Sat, 21 Apr 2018 11:12:32 -0700 Subject: [PATCH 022/307] autogenerated files --- test/integration/scheduler/BUILD | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index b670e5bb42d..d39015f2eb4 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -89,6 +89,8 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/v1/pod:go_default_library", + "//pkg/controller:go_default_library", + "//pkg/controller/disruption:go_default_library", "//pkg/features:go_default_library", "//pkg/scheduler:go_default_library", "//pkg/scheduler/algorithmprovider:go_default_library", @@ -97,9 +99,11 @@ go_library( "//test/integration/framework:go_default_library", "//test/utils/image:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", From f47f5156002fba9de802ea7d06daee45561dd786 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 16 May 2018 15:21:43 +0800 Subject: [PATCH 023/307] construct a new CloudControllerManagerConfiguration struct for cloud-controller manager --- pkg/apis/componentconfig/types.go | 22 +++++ pkg/apis/componentconfig/v1alpha1/defaults.go | 80 ++++++++++++------- pkg/apis/componentconfig/v1alpha1/types.go | 22 +++++ 3 files changed, 93 insertions(+), 31 deletions(-) diff --git a/pkg/apis/componentconfig/types.go b/pkg/apis/componentconfig/types.go index e7460df3773..7ca6a224445 100644 --- a/pkg/apis/componentconfig/types.go +++ b/pkg/apis/componentconfig/types.go @@ -244,6 +244,28 @@ type KubeControllerManagerConfiguration struct { ExternalCloudVolumePlugin string } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type CloudControllerManagerConfiguration struct { + metav1.TypeMeta + + // CloudProviderConfiguration holds configuration for CloudProvider related features. + CloudProvider CloudProviderConfiguration + // DebuggingConfiguration holds configuration for Debugging related features. + Debugging DebuggingConfiguration + // GenericComponentConfiguration holds configuration for GenericComponent + // related features both in cloud controller manager and kube-controller manager. + GenericComponent GenericComponentConfiguration + // KubeCloudSharedConfiguration holds configuration for shared related features + // both in cloud controller manager and kube-controller manager. + KubeCloudShared KubeCloudSharedConfiguration + // ServiceControllerConfiguration holds configuration for ServiceController + // related features. 
+ ServiceController ServiceControllerConfiguration + // NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status + NodeStatusUpdateFrequency metav1.Duration +} + type CloudProviderConfiguration struct { // Name is the provider for cloud services. Name string diff --git a/pkg/apis/componentconfig/v1alpha1/defaults.go b/pkg/apis/componentconfig/v1alpha1/defaults.go index 925412c7138..225b279a03c 100644 --- a/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -33,15 +33,21 @@ func addDefaultingFuncs(scheme *kruntime.Scheme) error { return RegisterDefaults(scheme) } +func SetDefaults_CloudControllerManagerConfiguration(obj *CloudControllerManagerConfiguration) { + zero := metav1.Duration{} + if obj.ServiceController.ConcurrentServiceSyncs == 0 { + obj.ServiceController.ConcurrentServiceSyncs = 1 + } + if obj.NodeStatusUpdateFrequency == zero { + obj.NodeStatusUpdateFrequency = metav1.Duration{Duration: 5 * time.Minute} + } +} + func SetDefaults_KubeControllerManagerConfiguration(obj *KubeControllerManagerConfiguration) { zero := metav1.Duration{} if len(obj.Controllers) == 0 { obj.Controllers = []string{"*"} } - // Port - if obj.KubeCloudShared.Address == "" { - obj.KubeCloudShared.Address = "0.0.0.0" - } if obj.EndPointController.ConcurrentEndpointSyncs == 0 { obj.EndPointController.ConcurrentEndpointSyncs = 5 } @@ -72,9 +78,6 @@ func SetDefaults_KubeControllerManagerConfiguration(obj *KubeControllerManagerCo if obj.SAController.ConcurrentSATokenSyncs == 0 { obj.SAController.ConcurrentSATokenSyncs = 5 } - if obj.KubeCloudShared.RouteReconciliationPeriod == zero { - obj.KubeCloudShared.RouteReconciliationPeriod = metav1.Duration{Duration: 10 * time.Second} - } if obj.ResourceQuotaController.ResourceQuotaSyncPeriod == zero { obj.ResourceQuotaController.ResourceQuotaSyncPeriod = metav1.Duration{Duration: 5 * time.Minute} } @@ -99,9 +102,6 @@ func SetDefaults_KubeControllerManagerConfiguration(obj *KubeControllerManagerCo if obj.DeploymentController.DeploymentControllerSyncPeriod == zero { obj.DeploymentController.DeploymentControllerSyncPeriod = metav1.Duration{Duration: 30 * time.Second} } - if obj.GenericComponent.MinResyncPeriod == zero { - obj.GenericComponent.MinResyncPeriod = metav1.Duration{Duration: 12 * time.Hour} - } if obj.DeprecatedController.RegisterRetryCount == 0 { obj.DeprecatedController.RegisterRetryCount = 10 } @@ -114,33 +114,12 @@ func SetDefaults_KubeControllerManagerConfiguration(obj *KubeControllerManagerCo if obj.NodeLifecycleController.NodeStartupGracePeriod == zero { obj.NodeLifecycleController.NodeStartupGracePeriod = metav1.Duration{Duration: 60 * time.Second} } - if obj.KubeCloudShared.NodeMonitorPeriod == zero { - obj.KubeCloudShared.NodeMonitorPeriod = metav1.Duration{Duration: 5 * time.Second} - } - if obj.KubeCloudShared.ClusterName == "" { - obj.KubeCloudShared.ClusterName = "kubernetes" - } if obj.NodeIpamController.NodeCIDRMaskSize == 0 { obj.NodeIpamController.NodeCIDRMaskSize = 24 } - if obj.KubeCloudShared.ConfigureCloudRoutes == nil { - obj.KubeCloudShared.ConfigureCloudRoutes = utilpointer.BoolPtr(true) - } if obj.PodGCController.TerminatedPodGCThreshold == 0 { obj.PodGCController.TerminatedPodGCThreshold = 12500 } - if obj.GenericComponent.ContentType == "" { - obj.GenericComponent.ContentType = "application/vnd.kubernetes.protobuf" - } - if obj.GenericComponent.KubeAPIQPS == 0 { - obj.GenericComponent.KubeAPIQPS = 20.0 - } - if obj.GenericComponent.KubeAPIBurst == 0 { - 
obj.GenericComponent.KubeAPIBurst = 30 - } - if obj.GenericComponent.ControllerStartInterval == zero { - obj.GenericComponent.ControllerStartInterval = metav1.Duration{Duration: 0 * time.Second} - } if obj.GarbageCollectorController.EnableGarbageCollector == nil { obj.GarbageCollectorController.EnableGarbageCollector = utilpointer.BoolPtr(true) } @@ -167,6 +146,45 @@ func SetDefaults_KubeControllerManagerConfiguration(obj *KubeControllerManagerCo } } +func SetDefaults_GenericComponentConfiguration(obj *GenericComponentConfiguration) { + zero := metav1.Duration{} + if obj.MinResyncPeriod == zero { + obj.MinResyncPeriod = metav1.Duration{Duration: 12 * time.Hour} + } + if obj.ContentType == "" { + obj.ContentType = "application/vnd.kubernetes.protobuf" + } + if obj.KubeAPIQPS == 0 { + obj.KubeAPIQPS = 20.0 + } + if obj.KubeAPIBurst == 0 { + obj.KubeAPIBurst = 30 + } + if obj.ControllerStartInterval == zero { + obj.ControllerStartInterval = metav1.Duration{Duration: 0 * time.Second} + } +} + +func SetDefaults_KubeCloudSharedConfiguration(obj *KubeCloudSharedConfiguration) { + zero := metav1.Duration{} + // Port + if obj.Address == "" { + obj.Address = "0.0.0.0" + } + if obj.RouteReconciliationPeriod == zero { + obj.RouteReconciliationPeriod = metav1.Duration{Duration: 10 * time.Second} + } + if obj.NodeMonitorPeriod == zero { + obj.NodeMonitorPeriod = metav1.Duration{Duration: 5 * time.Second} + } + if obj.ClusterName == "" { + obj.ClusterName = "kubernetes" + } + if obj.ConfigureCloudRoutes == nil { + obj.ConfigureCloudRoutes = utilpointer.BoolPtr(true) + } +} + func SetDefaults_PersistentVolumeRecyclerConfiguration(obj *PersistentVolumeRecyclerConfiguration) { if obj.MaximumRetry == 0 { obj.MaximumRetry = 3 diff --git a/pkg/apis/componentconfig/v1alpha1/types.go b/pkg/apis/componentconfig/v1alpha1/types.go index ccf81061dc6..129f5d650c5 100644 --- a/pkg/apis/componentconfig/v1alpha1/types.go +++ b/pkg/apis/componentconfig/v1alpha1/types.go @@ -286,6 +286,28 @@ type KubeControllerManagerConfiguration struct { ExternalCloudVolumePlugin string `json:"externalCloudVolumePlugin"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type CloudControllerManagerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // CloudProviderConfiguration holds configuration for CloudProvider related features. + CloudProvider CloudProviderConfiguration + // DebuggingConfiguration holds configuration for Debugging related features. + Debugging DebuggingConfiguration + // GenericComponentConfiguration holds configuration for GenericComponent + // related features both in cloud controller manager and kube-controller manager. + GenericComponent GenericComponentConfiguration + // KubeCloudSharedConfiguration holds configuration for shared related features + // both in cloud controller manager and kube-controller manager. + KubeCloudShared KubeCloudSharedConfiguration + // ServiceControllerConfiguration holds configuration for ServiceController + // related features. + ServiceController ServiceControllerConfiguration + // NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status + NodeStatusUpdateFrequency metav1.Duration +} + type CloudProviderConfiguration struct { // Name is the provider for cloud services. 
Name string `json:"cloudProvider"` From 6ad56325cab2b38d64b234163705f8c7132b8e3d Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Tue, 8 May 2018 16:43:24 +0800 Subject: [PATCH 024/307] kube-proxy should not depend on kubectl --- cmd/kube-proxy/app/BUILD | 1 - cmd/kube-proxy/app/server.go | 11 +++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/cmd/kube-proxy/app/BUILD b/cmd/kube-proxy/app/BUILD index 3ae5ab8f044..4b40cbdedcd 100644 --- a/cmd/kube-proxy/app/BUILD +++ b/cmd/kube-proxy/app/BUILD @@ -64,7 +64,6 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubelet/qos:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/proxy:go_default_library", diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 60468d6990c..6161f15d9fe 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -51,7 +51,6 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/proxy" @@ -353,9 +352,13 @@ with the apiserver API to configure the proxy.`, glog.Fatalf("failed OS init: %v", err) } - cmdutil.CheckErr(opts.Complete()) - cmdutil.CheckErr(opts.Validate(args)) - cmdutil.CheckErr(opts.Run()) + if err := opts.Complete(); err != nil { + glog.Fatalf("failed complete: %v", err) + } + if err := opts.Validate(args); err != nil { + glog.Fatalf("failed validate: %v", err) + } + glog.Fatal(opts.Run()) }, } From f86af075264194b2ddbce8e6a71095b65e404ed4 Mon Sep 17 00:00:00 2001 From: Weibin Lin Date: Thu, 17 May 2018 17:10:25 +0800 Subject: [PATCH 025/307] Update ipvs docs --- check the prerequisite --- pkg/proxy/ipvs/README.md | 45 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/pkg/proxy/ipvs/README.md b/pkg/proxy/ipvs/README.md index e98c525504d..cd6fc1153ba 100644 --- a/pkg/proxy/ipvs/README.md +++ b/pkg/proxy/ipvs/README.md @@ -200,8 +200,7 @@ DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 /* default/nginx-s Currently, local-up scripts, GCE scripts and kubeadm support switching IPVS proxy mode via exporting environment variables or specifying flags. ### Prerequisite -Ensure the following kernel modules required by IPVS-based kube-proxy have been compiled into the node kernel (use `lsmod` to check): - +Ensure IPVS required kernel modules ```shell ip_vs ip_vs_rr @@ -209,6 +208,46 @@ ip_vs_wrr ip_vs_sh nf_conntrack_ipv4 ``` +1. have been compiled into the node kernel. Use + +`grep -e ipvs -e nf_conntrack_ipv4 /lib/modules/$(uname -r)/modules.builtin` + +and get results like the followings if compiled into kernel. 
+``` +kernel/net/ipv4/netfilter/nf_conntrack_ipv4.ko +kernel/net/netfilter/ipvs/ip_vs.ko +kernel/net/netfilter/ipvs/ip_vs_rr.ko +kernel/net/netfilter/ipvs/ip_vs_wrr.ko +kernel/net/netfilter/ipvs/ip_vs_lc.ko +kernel/net/netfilter/ipvs/ip_vs_wlc.ko +kernel/net/netfilter/ipvs/ip_vs_fo.ko +kernel/net/netfilter/ipvs/ip_vs_ovf.ko +kernel/net/netfilter/ipvs/ip_vs_lblc.ko +kernel/net/netfilter/ipvs/ip_vs_lblcr.ko +kernel/net/netfilter/ipvs/ip_vs_dh.ko +kernel/net/netfilter/ipvs/ip_vs_sh.ko +kernel/net/netfilter/ipvs/ip_vs_sed.ko +kernel/net/netfilter/ipvs/ip_vs_nq.ko +kernel/net/netfilter/ipvs/ip_vs_ftp.ko +``` + +OR + +2. have been loaded. +```shell +# load module +modprobe -- ip_vs +modprobe -- ip_vs_rr +modprobe -- ip_vs_wrr +modprobe -- ip_vs_sh +modprobe -- nf_conntrack_ipv4 + +# to check loaded modules, use +lsmod | grep -e ipvs -e nf_conntrack_ipv4 +# or +cut -f1 -d " " /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4 + ``` + Packages such as `ipset` should also be installed on the node before using IPVS mode. Kube-proxy will fall back to IPTABLES mode if those requirements are not met. @@ -219,7 +258,7 @@ Kube-proxy will run in iptables mode by default in a [local-up cluster](https:// To use IPVS mode, users should export the env `KUBE_PROXY_MODE=ipvs` to specify the ipvs mode before [starting the cluster](https://github.com/kubernetes/community/blob/master/contributors/devel/running-locally.md#starting-the-cluster): ```shell -#before running `hack/local-up-cluster.sh` +# before running `hack/local-up-cluster.sh` export KUBE_PROXY_MODE=ipvs ``` From f66bafd12cc6d7503f529dc4f2dce1367c14c9a4 Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Thu, 17 May 2018 11:13:41 +0800 Subject: [PATCH 026/307] remove kube-proxy and kube-scheduler from pkg_kubectl_cmd_util_CONSUMES_BAD group --- build/visible_to/BUILD | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/build/visible_to/BUILD b/build/visible_to/BUILD index 752599ee075..693d8ea1833 100644 --- a/build/visible_to/BUILD +++ b/build/visible_to/BUILD @@ -200,22 +200,12 @@ package_group( ], ) -package_group( - name = "pkg_kubectl_cmd_util_CONSUMERS_BAD", - includes = [ - ":KUBEADM_BAD", - ], - packages = [ - "//cmd/kube-proxy/app", - ], -) - package_group( name = "pkg_kubectl_cmd_util_CONSUMERS", includes = [ ":COMMON_generators", ":COMMON_testing", - ":pkg_kubectl_cmd_util_CONSUMERS_BAD", + ":KUBEADM_BAD", ], packages = [ "//cmd/kubectl", From 2a8d258f667fc05a94775175096f9bbe6ee4c4d9 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 16 May 2018 15:48:32 +0800 Subject: [PATCH 027/307] [cloud-controller manager]get rid of GenericControllerManagerOptions sub-struct --- .../app/options/options.go | 163 +++++++- .../app/options/options_test.go | 364 +++++------------- cmd/controller-manager/app/options/generic.go | 16 + .../app/options/kubecloudshared.go | 16 + 4 files changed, 278 insertions(+), 281 deletions(-) diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index cfde5f3b7e6..433d9935164 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -18,24 +18,49 @@ package options import ( "fmt" - "time" + "net" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" + apiserveroptions "k8s.io/apiserver/pkg/server/options" utilfeature "k8s.io/apiserver/pkg/util/feature" + 
"k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/record" cloudcontrollerconfig "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config" cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/componentconfig" + componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" "k8s.io/kubernetes/pkg/master/ports" - // add the kubernetes feature gates _ "k8s.io/kubernetes/pkg/features" + "github.com/golang/glog" "github.com/spf13/pflag" ) // CloudControllerManagerOptions is the main context object for the controller manager. type CloudControllerManagerOptions struct { - Generic *cmoptions.GenericControllerManagerOptions + CloudProvider *cmoptions.CloudProviderOptions + Debugging *cmoptions.DebuggingOptions + GenericComponent *cmoptions.GenericComponentConfigOptions + KubeCloudShared *cmoptions.KubeCloudSharedOptions + ServiceController *cmoptions.ServiceControllerOptions + + SecureServing *apiserveroptions.SecureServingOptions + // TODO: remove insecure serving mode + InsecureServing *cmoptions.InsecureServingOptions + Authentication *apiserveroptions.DelegatingAuthenticationOptions + Authorization *apiserveroptions.DelegatingAuthorizationOptions + + Master string + Kubeconfig string // NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status NodeStatusUpdateFrequency metav1.Duration @@ -43,37 +68,125 @@ type CloudControllerManagerOptions struct { // NewCloudControllerManagerOptions creates a new ExternalCMServer with a default config. func NewCloudControllerManagerOptions() *CloudControllerManagerOptions { - componentConfig := cmoptions.NewDefaultControllerManagerComponentConfig(ports.InsecureCloudControllerManagerPort) + componentConfig := NewDefaultComponentConfig(ports.InsecureCloudControllerManagerPort) s := CloudControllerManagerOptions{ - // The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'. - // Please make common changes there and put anything cloud specific here. 
- Generic: cmoptions.NewGenericControllerManagerOptions(componentConfig), - NodeStatusUpdateFrequency: metav1.Duration{Duration: 5 * time.Minute}, + CloudProvider: &cmoptions.CloudProviderOptions{}, + Debugging: &cmoptions.DebuggingOptions{}, + GenericComponent: cmoptions.NewGenericComponentConfigOptions(componentConfig.GenericComponent), + KubeCloudShared: cmoptions.NewKubeCloudSharedOptions(componentConfig.KubeCloudShared), + ServiceController: &cmoptions.ServiceControllerOptions{ + ConcurrentServiceSyncs: componentConfig.ServiceController.ConcurrentServiceSyncs, + }, + SecureServing: apiserveroptions.NewSecureServingOptions(), + InsecureServing: &cmoptions.InsecureServingOptions{ + BindAddress: net.ParseIP(componentConfig.KubeCloudShared.Address), + BindPort: int(componentConfig.KubeCloudShared.Port), + BindNetwork: "tcp", + }, + Authentication: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthenticationOptions() + Authorization: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthorizationOptions() + NodeStatusUpdateFrequency: componentConfig.NodeStatusUpdateFrequency, } - s.Generic.SecureServing.ServerCert.CertDirectory = "/var/run/kubernetes" - s.Generic.SecureServing.ServerCert.PairName = "cloud-controller-manager" + s.SecureServing.ServerCert.CertDirectory = "/var/run/kubernetes" + s.SecureServing.ServerCert.PairName = "cloud-controller-manager" + + // disable secure serving for now + // TODO: enable HTTPS by default + s.SecureServing.BindPort = 0 return &s } +// NewDefaultComponentConfig returns cloud-controller manager configuration object. +func NewDefaultComponentConfig(insecurePort int32) componentconfig.CloudControllerManagerConfiguration { + scheme := runtime.NewScheme() + componentconfigv1alpha1.AddToScheme(scheme) + componentconfig.AddToScheme(scheme) + + versioned := componentconfigv1alpha1.CloudControllerManagerConfiguration{} + scheme.Default(&versioned) + + internal := componentconfig.CloudControllerManagerConfiguration{} + scheme.Convert(&versioned, &internal, nil) + internal.KubeCloudShared.Port = insecurePort + return internal +} + // AddFlags adds flags for a specific ExternalCMServer to the specified FlagSet func (o *CloudControllerManagerOptions) AddFlags(fs *pflag.FlagSet) { - o.Generic.AddFlags(fs) + o.CloudProvider.AddFlags(fs) + o.Debugging.AddFlags(fs) + o.GenericComponent.AddFlags(fs) + o.KubeCloudShared.AddFlags(fs) + o.ServiceController.AddFlags(fs) + o.SecureServing.AddFlags(fs) + o.InsecureServing.AddFlags(fs) + o.Authentication.AddFlags(fs) + o.Authorization.AddFlags(fs) + + fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).") + fs.StringVar(&o.Kubeconfig, "kubeconfig", o.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") fs.DurationVar(&o.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", o.NodeStatusUpdateFrequency.Duration, "Specifies how often the controller updates nodes' status.") utilfeature.DefaultFeatureGate.AddFlag(fs) } // ApplyTo fills up cloud controller manager config with options. 
-func (o *CloudControllerManagerOptions) ApplyTo(c *cloudcontrollerconfig.Config) error { - if err := o.Generic.ApplyTo(&c.Generic, "cloud-controller-manager"); err != nil { +func (o *CloudControllerManagerOptions) ApplyTo(c *cloudcontrollerconfig.Config, userAgent string) error { + if err := o.CloudProvider.ApplyTo(&c.ComponentConfig.CloudProvider); err != nil { + return err + } + if err := o.Debugging.ApplyTo(&c.ComponentConfig.Debugging); err != nil { + return err + } + if err := o.GenericComponent.ApplyTo(&c.ComponentConfig.GenericComponent); err != nil { + return err + } + if err := o.KubeCloudShared.ApplyTo(&c.ComponentConfig.KubeCloudShared); err != nil { + return err + } + if err := o.ServiceController.ApplyTo(&c.ComponentConfig.ServiceController); err != nil { + return err + } + if err := o.SecureServing.ApplyTo(&c.SecureServing); err != nil { + return err + } + if err := o.InsecureServing.ApplyTo(&c.InsecureServing); err != nil { + return err + } + if err := o.Authentication.ApplyTo(&c.Authentication, c.SecureServing, nil); err != nil { + return err + } + if err := o.Authorization.ApplyTo(&c.Authorization); err != nil { return err } - c.Extra.NodeStatusUpdateFrequency = o.NodeStatusUpdateFrequency.Duration + // sync back to component config + // TODO: find more elegant way than synching back the values. + c.ComponentConfig.KubeCloudShared.Port = int32(o.InsecureServing.BindPort) + c.ComponentConfig.KubeCloudShared.Address = o.InsecureServing.BindAddress.String() + + var err error + c.Kubeconfig, err = clientcmd.BuildConfigFromFlags(o.Master, o.Kubeconfig) + if err != nil { + return err + } + c.Kubeconfig.ContentConfig.ContentType = o.GenericComponent.ContentType + c.Kubeconfig.QPS = o.GenericComponent.KubeAPIQPS + c.Kubeconfig.Burst = int(o.GenericComponent.KubeAPIBurst) + + c.Client, err = clientset.NewForConfig(restclient.AddUserAgent(c.Kubeconfig, userAgent)) + if err != nil { + return err + } + + c.LeaderElectionClient = clientset.NewForConfigOrDie(restclient.AddUserAgent(c.Kubeconfig, "leader-election")) + + c.EventRecorder = createRecorder(c.Client, userAgent) + c.ComponentConfig.NodeStatusUpdateFrequency = o.NodeStatusUpdateFrequency return nil } @@ -81,9 +194,18 @@ func (o *CloudControllerManagerOptions) ApplyTo(c *cloudcontrollerconfig.Config) // Validate is used to validate config before launching the cloud controller manager func (o *CloudControllerManagerOptions) Validate() error { errors := []error{} - errors = append(errors, o.Generic.Validate()...) - if len(o.Generic.CloudProvider.Name) == 0 { + errors = append(errors, o.CloudProvider.Validate()...) + errors = append(errors, o.Debugging.Validate()...) + errors = append(errors, o.GenericComponent.Validate()...) + errors = append(errors, o.KubeCloudShared.Validate()...) + errors = append(errors, o.ServiceController.Validate()...) + errors = append(errors, o.SecureServing.Validate()...) + errors = append(errors, o.InsecureServing.Validate()...) + errors = append(errors, o.Authentication.Validate()...) + errors = append(errors, o.Authorization.Validate()...) 
+ + if len(o.CloudProvider.Name) == 0 { errors = append(errors, fmt.Errorf("--cloud-provider cannot be empty")) } @@ -97,9 +219,16 @@ func (o CloudControllerManagerOptions) Config() (*cloudcontrollerconfig.Config, } c := &cloudcontrollerconfig.Config{} - if err := o.ApplyTo(c); err != nil { + if err := o.ApplyTo(c, "cloud-controller-manager"); err != nil { return nil, err } return c, nil } + +func createRecorder(kubeClient kubernetes.Interface, userAgent string) record.EventRecorder { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) + return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent}) +} diff --git a/cmd/cloud-controller-manager/app/options/options_test.go b/cmd/cloud-controller-manager/app/options/options_test.go index 71c14092e9e..fe2f62f0b35 100644 --- a/cmd/cloud-controller-manager/app/options/options_test.go +++ b/cmd/cloud-controller-manager/app/options/options_test.go @@ -35,139 +35,57 @@ func TestDefaultFlags(t *testing.T) { s := NewCloudControllerManagerOptions() expected := &CloudControllerManagerOptions{ - Generic: &cmoptions.GenericControllerManagerOptions{ - CloudProvider: &cmoptions.CloudProviderOptions{ - Name: "", - CloudConfigFile: "", - }, - Debugging: &cmoptions.DebuggingOptions{ - EnableContentionProfiling: false, - }, - GenericComponent: &cmoptions.GenericComponentConfigOptions{ - MinResyncPeriod: metav1.Duration{Duration: 12 * time.Hour}, - ContentType: "application/vnd.kubernetes.protobuf", - KubeAPIQPS: 20.0, - KubeAPIBurst: 30, - ControllerStartInterval: metav1.Duration{Duration: 0}, - LeaderElection: componentconfig.LeaderElectionConfiguration{ - ResourceLock: "endpoints", - LeaderElect: true, - LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, - RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, - RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, - }, - }, - KubeCloudShared: &cmoptions.KubeCloudSharedOptions{ - Port: 10253, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config - Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config - RouteReconciliationPeriod: metav1.Duration{Duration: 10 * time.Second}, - NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, - ClusterName: "kubernetes", - ClusterCIDR: "", - AllocateNodeCIDRs: false, - CIDRAllocatorType: "", - ConfigureCloudRoutes: true, - }, - AttachDetachController: &cmoptions.AttachDetachControllerOptions{ - ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 1 * time.Minute}, - }, - CSRSigningController: &cmoptions.CSRSigningControllerOptions{ - ClusterSigningCertFile: "/etc/kubernetes/ca/ca.pem", - ClusterSigningKeyFile: "/etc/kubernetes/ca/ca.key", - ClusterSigningDuration: metav1.Duration{Duration: 8760 * time.Hour}, - }, - DaemonSetController: &cmoptions.DaemonSetControllerOptions{ - ConcurrentDaemonSetSyncs: 2, - }, - DeploymentController: &cmoptions.DeploymentControllerOptions{ - ConcurrentDeploymentSyncs: 5, - DeploymentControllerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - }, - DeprecatedFlags: &cmoptions.DeprecatedControllerOptions{ - RegisterRetryCount: 10, - }, - EndPointController: &cmoptions.EndPointControllerOptions{ - ConcurrentEndpointSyncs: 5, - }, - GarbageCollectorController: &cmoptions.GarbageCollectorControllerOptions{ - EnableGarbageCollector: 
true, - ConcurrentGCSyncs: 20, - }, - HPAController: &cmoptions.HPAControllerOptions{ - HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 3 * time.Minute}, - HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute}, - HorizontalPodAutoscalerTolerance: 0.1, - HorizontalPodAutoscalerUseRESTClients: true, - }, - JobController: &cmoptions.JobControllerOptions{ - ConcurrentJobSyncs: 5, - }, - NamespaceController: &cmoptions.NamespaceControllerOptions{ - ConcurrentNamespaceSyncs: 10, - NamespaceSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, - }, - NodeIpamController: &cmoptions.NodeIpamControllerOptions{ - NodeCIDRMaskSize: 24, - }, - NodeLifecycleController: &cmoptions.NodeLifecycleControllerOptions{ - EnableTaintManager: true, - NodeMonitorGracePeriod: metav1.Duration{Duration: 40 * time.Second}, - NodeStartupGracePeriod: metav1.Duration{Duration: 1 * time.Minute}, - PodEvictionTimeout: metav1.Duration{Duration: 5 * time.Minute}, - }, - PersistentVolumeBinderController: &cmoptions.PersistentVolumeBinderControllerOptions{ - PVClaimBinderSyncPeriod: metav1.Duration{Duration: 15 * time.Second}, - VolumeConfiguration: componentconfig.VolumeConfiguration{ - EnableDynamicProvisioning: true, - EnableHostPathProvisioning: false, - FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", - PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ - MaximumRetry: 3, - MinimumTimeoutNFS: 300, - IncrementTimeoutNFS: 30, - MinimumTimeoutHostPath: 60, - IncrementTimeoutHostPath: 30, - }, - }, - }, - PodGCController: &cmoptions.PodGCControllerOptions{ - TerminatedPodGCThreshold: 12500, - }, - ReplicaSetController: &cmoptions.ReplicaSetControllerOptions{ - ConcurrentRSSyncs: 5, - }, - ReplicationController: &cmoptions.ReplicationControllerOptions{ - ConcurrentRCSyncs: 5, - }, - ResourceQuotaController: &cmoptions.ResourceQuotaControllerOptions{ - ResourceQuotaSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, - ConcurrentResourceQuotaSyncs: 5, - }, - SAController: &cmoptions.SAControllerOptions{ - ConcurrentSATokenSyncs: 5, - }, - ServiceController: &cmoptions.ServiceControllerOptions{ - ConcurrentServiceSyncs: 1, - }, - Controllers: []string{"*"}, - SecureServing: &apiserveroptions.SecureServingOptions{ - BindPort: 0, - BindAddress: net.ParseIP("0.0.0.0"), - ServerCert: apiserveroptions.GeneratableKeyCert{ - CertDirectory: "/var/run/kubernetes", - PairName: "cloud-controller-manager", - }, - HTTP2MaxStreamsPerConnection: 0, - }, - InsecureServing: &cmoptions.InsecureServingOptions{ - BindAddress: net.ParseIP("0.0.0.0"), - BindPort: int(10253), - BindNetwork: "tcp", - }, - Kubeconfig: "", - Master: "", + CloudProvider: &cmoptions.CloudProviderOptions{ + Name: "", + CloudConfigFile: "", }, + Debugging: &cmoptions.DebuggingOptions{ + EnableContentionProfiling: false, + }, + GenericComponent: &cmoptions.GenericComponentConfigOptions{ + MinResyncPeriod: metav1.Duration{Duration: 12 * time.Hour}, + ContentType: "application/vnd.kubernetes.protobuf", + KubeAPIQPS: 20.0, + KubeAPIBurst: 30, + ControllerStartInterval: metav1.Duration{Duration: 0}, + LeaderElection: componentconfig.LeaderElectionConfiguration{ + ResourceLock: "endpoints", + LeaderElect: true, + LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, + RetryPeriod: 
metav1.Duration{Duration: 2 * time.Second}, + }, + }, + KubeCloudShared: &cmoptions.KubeCloudSharedOptions{ + Port: 10253, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config + Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config + RouteReconciliationPeriod: metav1.Duration{Duration: 10 * time.Second}, + NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, + ClusterName: "kubernetes", + ClusterCIDR: "", + AllocateNodeCIDRs: false, + CIDRAllocatorType: "", + ConfigureCloudRoutes: true, + }, + ServiceController: &cmoptions.ServiceControllerOptions{ + ConcurrentServiceSyncs: 1, + }, + SecureServing: &apiserveroptions.SecureServingOptions{ + BindPort: 0, + BindAddress: net.ParseIP("0.0.0.0"), + ServerCert: apiserveroptions.GeneratableKeyCert{ + CertDirectory: "/var/run/kubernetes", + PairName: "cloud-controller-manager", + }, + HTTP2MaxStreamsPerConnection: 0, + }, + InsecureServing: &cmoptions.InsecureServingOptions{ + BindAddress: net.ParseIP("0.0.0.0"), + BindPort: int(10253), + BindNetwork: "tcp", + }, + Kubeconfig: "", + Master: "", NodeStatusUpdateFrequency: metav1.Duration{Duration: 5 * time.Minute}, } if !reflect.DeepEqual(expected, s) { @@ -216,139 +134,57 @@ func TestAddFlags(t *testing.T) { f.Parse(args) expected := &CloudControllerManagerOptions{ - Generic: &cmoptions.GenericControllerManagerOptions{ - CloudProvider: &cmoptions.CloudProviderOptions{ - Name: "gce", - CloudConfigFile: "/cloud-config", - }, - Debugging: &cmoptions.DebuggingOptions{ - EnableContentionProfiling: true, - }, - GenericComponent: &cmoptions.GenericComponentConfigOptions{ - MinResyncPeriod: metav1.Duration{Duration: 100 * time.Minute}, - ContentType: "application/vnd.kubernetes.protobuf", - KubeAPIQPS: 50.0, - KubeAPIBurst: 100, - ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute}, - LeaderElection: componentconfig.LeaderElectionConfiguration{ - ResourceLock: "configmap", - LeaderElect: false, - LeaseDuration: metav1.Duration{Duration: 30 * time.Second}, - RenewDeadline: metav1.Duration{Duration: 15 * time.Second}, - RetryPeriod: metav1.Duration{Duration: 5 * time.Second}, - }, - }, - KubeCloudShared: &cmoptions.KubeCloudSharedOptions{ - Port: 10253, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config - Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config - RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second}, - NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, - ClusterName: "k8s", - ClusterCIDR: "1.2.3.4/24", - AllocateNodeCIDRs: true, - CIDRAllocatorType: "RangeAllocator", - ConfigureCloudRoutes: false, - }, - AttachDetachController: &cmoptions.AttachDetachControllerOptions{ - ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 1 * time.Minute}, - }, - CSRSigningController: &cmoptions.CSRSigningControllerOptions{ - ClusterSigningCertFile: "/etc/kubernetes/ca/ca.pem", - ClusterSigningKeyFile: "/etc/kubernetes/ca/ca.key", - ClusterSigningDuration: metav1.Duration{Duration: 8760 * time.Hour}, - }, - DaemonSetController: &cmoptions.DaemonSetControllerOptions{ - ConcurrentDaemonSetSyncs: 2, - }, - DeploymentController: &cmoptions.DeploymentControllerOptions{ - ConcurrentDeploymentSyncs: 5, - DeploymentControllerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - }, - DeprecatedFlags: &cmoptions.DeprecatedControllerOptions{ - 
RegisterRetryCount: 10, - }, - EndPointController: &cmoptions.EndPointControllerOptions{ - ConcurrentEndpointSyncs: 5, - }, - GarbageCollectorController: &cmoptions.GarbageCollectorControllerOptions{ - ConcurrentGCSyncs: 20, - EnableGarbageCollector: true, - }, - HPAController: &cmoptions.HPAControllerOptions{ - HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 3 * time.Minute}, - HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute}, - HorizontalPodAutoscalerTolerance: 0.1, - HorizontalPodAutoscalerUseRESTClients: true, - }, - JobController: &cmoptions.JobControllerOptions{ - ConcurrentJobSyncs: 5, - }, - NamespaceController: &cmoptions.NamespaceControllerOptions{ - NamespaceSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, - ConcurrentNamespaceSyncs: 10, - }, - NodeIpamController: &cmoptions.NodeIpamControllerOptions{ - NodeCIDRMaskSize: 24, - }, - NodeLifecycleController: &cmoptions.NodeLifecycleControllerOptions{ - EnableTaintManager: true, - NodeMonitorGracePeriod: metav1.Duration{Duration: 40 * time.Second}, - NodeStartupGracePeriod: metav1.Duration{Duration: 1 * time.Minute}, - PodEvictionTimeout: metav1.Duration{Duration: 5 * time.Minute}, - }, - PersistentVolumeBinderController: &cmoptions.PersistentVolumeBinderControllerOptions{ - PVClaimBinderSyncPeriod: metav1.Duration{Duration: 15 * time.Second}, - VolumeConfiguration: componentconfig.VolumeConfiguration{ - EnableDynamicProvisioning: true, - EnableHostPathProvisioning: false, - FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/", - PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ - MaximumRetry: 3, - MinimumTimeoutNFS: 300, - IncrementTimeoutNFS: 30, - MinimumTimeoutHostPath: 60, - IncrementTimeoutHostPath: 30, - }, - }, - }, - PodGCController: &cmoptions.PodGCControllerOptions{ - TerminatedPodGCThreshold: 12500, - }, - ReplicaSetController: &cmoptions.ReplicaSetControllerOptions{ - ConcurrentRSSyncs: 5, - }, - ReplicationController: &cmoptions.ReplicationControllerOptions{ - ConcurrentRCSyncs: 5, - }, - ResourceQuotaController: &cmoptions.ResourceQuotaControllerOptions{ - ResourceQuotaSyncPeriod: metav1.Duration{Duration: 5 * time.Minute}, - ConcurrentResourceQuotaSyncs: 5, - }, - SAController: &cmoptions.SAControllerOptions{ - ConcurrentSATokenSyncs: 5, - }, - ServiceController: &cmoptions.ServiceControllerOptions{ - ConcurrentServiceSyncs: 1, - }, - Controllers: []string{"*"}, - SecureServing: &apiserveroptions.SecureServingOptions{ - BindPort: 10001, - BindAddress: net.ParseIP("192.168.4.21"), - ServerCert: apiserveroptions.GeneratableKeyCert{ - CertDirectory: "/a/b/c", - PairName: "cloud-controller-manager", - }, - HTTP2MaxStreamsPerConnection: 47, - }, - InsecureServing: &cmoptions.InsecureServingOptions{ - BindAddress: net.ParseIP("192.168.4.10"), - BindPort: int(10000), - BindNetwork: "tcp", - }, - Kubeconfig: "/kubeconfig", - Master: "192.168.4.20", + CloudProvider: &cmoptions.CloudProviderOptions{ + Name: "gce", + CloudConfigFile: "/cloud-config", }, + Debugging: &cmoptions.DebuggingOptions{ + EnableContentionProfiling: true, + }, + GenericComponent: &cmoptions.GenericComponentConfigOptions{ + MinResyncPeriod: metav1.Duration{Duration: 100 * time.Minute}, + ContentType: "application/vnd.kubernetes.protobuf", + KubeAPIQPS: 50.0, + KubeAPIBurst: 100, + ControllerStartInterval: metav1.Duration{Duration: 2 
* time.Minute}, + LeaderElection: componentconfig.LeaderElectionConfiguration{ + ResourceLock: "configmap", + LeaderElect: false, + LeaseDuration: metav1.Duration{Duration: 30 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 15 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + }, + KubeCloudShared: &cmoptions.KubeCloudSharedOptions{ + Port: 10253, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config + Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config + RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second}, + NodeMonitorPeriod: metav1.Duration{Duration: 5 * time.Second}, + ClusterName: "k8s", + ClusterCIDR: "1.2.3.4/24", + AllocateNodeCIDRs: true, + CIDRAllocatorType: "RangeAllocator", + ConfigureCloudRoutes: false, + }, + ServiceController: &cmoptions.ServiceControllerOptions{ + ConcurrentServiceSyncs: 1, + }, + SecureServing: &apiserveroptions.SecureServingOptions{ + BindPort: 10001, + BindAddress: net.ParseIP("192.168.4.21"), + ServerCert: apiserveroptions.GeneratableKeyCert{ + CertDirectory: "/a/b/c", + PairName: "cloud-controller-manager", + }, + HTTP2MaxStreamsPerConnection: 47, + }, + InsecureServing: &cmoptions.InsecureServingOptions{ + BindAddress: net.ParseIP("192.168.4.10"), + BindPort: int(10000), + BindNetwork: "tcp", + }, + Kubeconfig: "/kubeconfig", + Master: "192.168.4.20", NodeStatusUpdateFrequency: metav1.Duration{Duration: 10 * time.Minute}, } if !reflect.DeepEqual(expected, s) { diff --git a/cmd/controller-manager/app/options/generic.go b/cmd/controller-manager/app/options/generic.go index df9b19f8a39..d32b975691a 100644 --- a/cmd/controller-manager/app/options/generic.go +++ b/cmd/controller-manager/app/options/generic.go @@ -33,6 +33,22 @@ type GenericComponentConfigOptions struct { LeaderElection componentconfig.LeaderElectionConfiguration } +// NewGenericComponentConfigOptions returns generic configuration default values for both +// the kube-controller-manager and the cloud-controller-manager. Any common changes should +// be made here. Any individual changes should be made in that controller. +func NewGenericComponentConfigOptions(cfg componentconfig.GenericComponentConfiguration) *GenericComponentConfigOptions { + o := &GenericComponentConfigOptions{ + MinResyncPeriod: cfg.MinResyncPeriod, + ContentType: cfg.ContentType, + KubeAPIQPS: cfg.KubeAPIQPS, + KubeAPIBurst: cfg.KubeAPIBurst, + ControllerStartInterval: cfg.ControllerStartInterval, + LeaderElection: cfg.LeaderElection, + } + + return o +} + // AddFlags adds flags related to generic for controller manager to the specified FlagSet. func (o *GenericComponentConfigOptions) AddFlags(fs *pflag.FlagSet) { if o == nil { diff --git a/cmd/controller-manager/app/options/kubecloudshared.go b/cmd/controller-manager/app/options/kubecloudshared.go index 823500b3644..d9607bf54b5 100644 --- a/cmd/controller-manager/app/options/kubecloudshared.go +++ b/cmd/controller-manager/app/options/kubecloudshared.go @@ -40,6 +40,22 @@ type KubeCloudSharedOptions struct { NodeSyncPeriod metav1.Duration } +// NewKubeCloudSharedOptions returns common/default configuration values for both +// the kube-controller-manager and the cloud-controller-manager. Any common changes should +// be made here. Any individual changes should be made in that controller.
+func NewKubeCloudSharedOptions(cfg componentconfig.KubeCloudSharedConfiguration) *KubeCloudSharedOptions { + o := &KubeCloudSharedOptions{ + Port: cfg.Port, + Address: cfg.Address, + RouteReconciliationPeriod: cfg.RouteReconciliationPeriod, + NodeMonitorPeriod: cfg.NodeMonitorPeriod, + ClusterName: cfg.ClusterName, + ConfigureCloudRoutes: cfg.ConfigureCloudRoutes, + } + + return o +} + // AddFlags adds flags related to shared variable for controller manager to the specified FlagSet. func (o *KubeCloudSharedOptions) AddFlags(fs *pflag.FlagSet) { if o == nil { From bbb48fd0686cb2d964d1f2425f45d314b1133d6e Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 16 May 2018 16:16:31 +0800 Subject: [PATCH 028/307] [kube-controller manager]get rid of GenericControllerManagerOptions sub-struct --- .../app/options/csrsigningcontroller.go | 10 + cmd/controller-manager/app/options/options.go | 383 ------------------ .../app/options/options.go | 373 +++++++++++++++-- .../app/options/options_test.go | 310 +++++++------- 4 files changed, 500 insertions(+), 576 deletions(-) delete mode 100644 cmd/controller-manager/app/options/options.go diff --git a/cmd/controller-manager/app/options/csrsigningcontroller.go b/cmd/controller-manager/app/options/csrsigningcontroller.go index f307c503f71..bf55c9df89b 100644 --- a/cmd/controller-manager/app/options/csrsigningcontroller.go +++ b/cmd/controller-manager/app/options/csrsigningcontroller.go @@ -22,6 +22,16 @@ import ( "k8s.io/kubernetes/pkg/apis/componentconfig" ) +const ( + // These defaults are deprecated and exported so that we can warn if + // they are being used. + + // DefaultClusterSigningCertFile is deprecated. Do not use. + DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem" + // DefaultClusterSigningKeyFile is deprecated. Do not use. + DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key" +) + // CSRSigningControllerOptions holds the CSRSigningController options. type CSRSigningControllerOptions struct { ClusterSigningDuration metav1.Duration diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go deleted file mode 100644 index 567a795b87b..00000000000 --- a/cmd/controller-manager/app/options/options.go +++ /dev/null @@ -1,383 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package options - -import ( - "net" - - "github.com/golang/glog" - - "github.com/spf13/pflag" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - apiserveroptions "k8s.io/apiserver/pkg/server/options" - "k8s.io/client-go/kubernetes" - clientset "k8s.io/client-go/kubernetes" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/record" - genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/componentconfig" - componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" -) - -// GenericControllerManagerOptions is the common structure for a controller manager. It works with NewGenericControllerManagerOptions -// and AddDefaultControllerFlags to create the common components of kube-controller-manager and cloud-controller-manager. -type GenericControllerManagerOptions struct { - CloudProvider *CloudProviderOptions - Debugging *DebuggingOptions - GenericComponent *GenericComponentConfigOptions - KubeCloudShared *KubeCloudSharedOptions - - AttachDetachController *AttachDetachControllerOptions - CSRSigningController *CSRSigningControllerOptions - DaemonSetController *DaemonSetControllerOptions - DeploymentController *DeploymentControllerOptions - DeprecatedFlags *DeprecatedControllerOptions - EndPointController *EndPointControllerOptions - GarbageCollectorController *GarbageCollectorControllerOptions - HPAController *HPAControllerOptions - JobController *JobControllerOptions - NamespaceController *NamespaceControllerOptions - NodeIpamController *NodeIpamControllerOptions - NodeLifecycleController *NodeLifecycleControllerOptions - PersistentVolumeBinderController *PersistentVolumeBinderControllerOptions - PodGCController *PodGCControllerOptions - ReplicaSetController *ReplicaSetControllerOptions - ReplicationController *ReplicationControllerOptions - ResourceQuotaController *ResourceQuotaControllerOptions - SAController *SAControllerOptions - ServiceController *ServiceControllerOptions - - Controllers []string - ExternalCloudVolumePlugin string - - SecureServing *apiserveroptions.SecureServingOptions - // TODO: remove insecure serving mode - InsecureServing *InsecureServingOptions - Authentication *apiserveroptions.DelegatingAuthenticationOptions - Authorization *apiserveroptions.DelegatingAuthorizationOptions - - Master string - Kubeconfig string -} - -const ( - // These defaults are deprecated and exported so that we can warn if - // they are being used. - - // DefaultClusterSigningCertFile is deprecated. Do not use. - DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem" - // DefaultClusterSigningKeyFile is deprecated. Do not use. - DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key" -) - -// NewGenericControllerManagerOptions returns common/default configuration values for both -// the kube-controller-manager and the cloud-contoller-manager. Any common changes should -// be made here. Any individual changes should be made in that controller. 
-func NewGenericControllerManagerOptions(componentConfig componentconfig.KubeControllerManagerConfiguration) *GenericControllerManagerOptions { - o := &GenericControllerManagerOptions{ - CloudProvider: &CloudProviderOptions{}, - Debugging: &DebuggingOptions{}, - GenericComponent: &GenericComponentConfigOptions{ - MinResyncPeriod: componentConfig.GenericComponent.MinResyncPeriod, - ContentType: componentConfig.GenericComponent.ContentType, - KubeAPIQPS: componentConfig.GenericComponent.KubeAPIQPS, - KubeAPIBurst: componentConfig.GenericComponent.KubeAPIBurst, - ControllerStartInterval: componentConfig.GenericComponent.ControllerStartInterval, - LeaderElection: componentConfig.GenericComponent.LeaderElection, - }, - KubeCloudShared: &KubeCloudSharedOptions{ - Port: componentConfig.KubeCloudShared.Port, - Address: componentConfig.KubeCloudShared.Address, - RouteReconciliationPeriod: componentConfig.KubeCloudShared.RouteReconciliationPeriod, - NodeMonitorPeriod: componentConfig.KubeCloudShared.NodeMonitorPeriod, - ClusterName: componentConfig.KubeCloudShared.ClusterName, - ConfigureCloudRoutes: componentConfig.KubeCloudShared.ConfigureCloudRoutes, - }, - AttachDetachController: &AttachDetachControllerOptions{ - ReconcilerSyncLoopPeriod: componentConfig.AttachDetachController.ReconcilerSyncLoopPeriod, - }, - CSRSigningController: &CSRSigningControllerOptions{ - ClusterSigningCertFile: componentConfig.CSRSigningController.ClusterSigningCertFile, - ClusterSigningKeyFile: componentConfig.CSRSigningController.ClusterSigningKeyFile, - ClusterSigningDuration: componentConfig.CSRSigningController.ClusterSigningDuration, - }, - DaemonSetController: &DaemonSetControllerOptions{ - ConcurrentDaemonSetSyncs: componentConfig.DaemonSetController.ConcurrentDaemonSetSyncs, - }, - DeploymentController: &DeploymentControllerOptions{ - ConcurrentDeploymentSyncs: componentConfig.DeploymentController.ConcurrentDeploymentSyncs, - DeploymentControllerSyncPeriod: componentConfig.DeploymentController.DeploymentControllerSyncPeriod, - }, - DeprecatedFlags: &DeprecatedControllerOptions{ - RegisterRetryCount: componentConfig.DeprecatedController.RegisterRetryCount, - }, - EndPointController: &EndPointControllerOptions{ - ConcurrentEndpointSyncs: componentConfig.EndPointController.ConcurrentEndpointSyncs, - }, - GarbageCollectorController: &GarbageCollectorControllerOptions{ - ConcurrentGCSyncs: componentConfig.GarbageCollectorController.ConcurrentGCSyncs, - EnableGarbageCollector: componentConfig.GarbageCollectorController.EnableGarbageCollector, - }, - HPAController: &HPAControllerOptions{ - HorizontalPodAutoscalerSyncPeriod: componentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod, - HorizontalPodAutoscalerUpscaleForbiddenWindow: componentConfig.HPAController.HorizontalPodAutoscalerUpscaleForbiddenWindow, - HorizontalPodAutoscalerDownscaleForbiddenWindow: componentConfig.HPAController.HorizontalPodAutoscalerDownscaleForbiddenWindow, - HorizontalPodAutoscalerTolerance: componentConfig.HPAController.HorizontalPodAutoscalerTolerance, - HorizontalPodAutoscalerUseRESTClients: componentConfig.HPAController.HorizontalPodAutoscalerUseRESTClients, - }, - JobController: &JobControllerOptions{ - ConcurrentJobSyncs: componentConfig.JobController.ConcurrentJobSyncs, - }, - NamespaceController: &NamespaceControllerOptions{ - NamespaceSyncPeriod: componentConfig.NamespaceController.NamespaceSyncPeriod, - ConcurrentNamespaceSyncs: componentConfig.NamespaceController.ConcurrentNamespaceSyncs, - }, - NodeIpamController: 
&NodeIpamControllerOptions{ - NodeCIDRMaskSize: componentConfig.NodeIpamController.NodeCIDRMaskSize, - }, - NodeLifecycleController: &NodeLifecycleControllerOptions{ - EnableTaintManager: componentConfig.NodeLifecycleController.EnableTaintManager, - NodeMonitorGracePeriod: componentConfig.NodeLifecycleController.NodeMonitorGracePeriod, - NodeStartupGracePeriod: componentConfig.NodeLifecycleController.NodeStartupGracePeriod, - PodEvictionTimeout: componentConfig.NodeLifecycleController.PodEvictionTimeout, - }, - PersistentVolumeBinderController: &PersistentVolumeBinderControllerOptions{ - PVClaimBinderSyncPeriod: componentConfig.PersistentVolumeBinderController.PVClaimBinderSyncPeriod, - VolumeConfiguration: componentConfig.PersistentVolumeBinderController.VolumeConfiguration, - }, - PodGCController: &PodGCControllerOptions{ - TerminatedPodGCThreshold: componentConfig.PodGCController.TerminatedPodGCThreshold, - }, - ReplicaSetController: &ReplicaSetControllerOptions{ - ConcurrentRSSyncs: componentConfig.ReplicaSetController.ConcurrentRSSyncs, - }, - ReplicationController: &ReplicationControllerOptions{ - ConcurrentRCSyncs: componentConfig.ReplicationController.ConcurrentRCSyncs, - }, - ResourceQuotaController: &ResourceQuotaControllerOptions{ - ResourceQuotaSyncPeriod: componentConfig.ResourceQuotaController.ResourceQuotaSyncPeriod, - ConcurrentResourceQuotaSyncs: componentConfig.ResourceQuotaController.ConcurrentResourceQuotaSyncs, - }, - SAController: &SAControllerOptions{ - ConcurrentSATokenSyncs: componentConfig.SAController.ConcurrentSATokenSyncs, - }, - ServiceController: &ServiceControllerOptions{ - ConcurrentServiceSyncs: componentConfig.ServiceController.ConcurrentServiceSyncs, - }, - Controllers: componentConfig.Controllers, - SecureServing: apiserveroptions.NewSecureServingOptions(), - InsecureServing: &InsecureServingOptions{ - BindAddress: net.ParseIP(componentConfig.KubeCloudShared.Address), - BindPort: int(componentConfig.KubeCloudShared.Port), - BindNetwork: "tcp", - }, - Authentication: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthenticationOptions() - Authorization: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthorizationOptions() - } - - // disable secure serving for now - // TODO: enable HTTPS by default - o.SecureServing.BindPort = 0 - - return o -} - -// NewDefaultControllerManagerComponentConfig returns default kube-controller manager configuration object. -func NewDefaultControllerManagerComponentConfig(insecurePort int32) componentconfig.KubeControllerManagerConfiguration { - scheme := runtime.NewScheme() - componentconfigv1alpha1.AddToScheme(scheme) - versioned := componentconfigv1alpha1.KubeControllerManagerConfiguration{} - scheme.Default(&versioned) - internal := componentconfig.KubeControllerManagerConfiguration{} - scheme.Convert(&versioned, &internal, nil) - internal.KubeCloudShared.Port = insecurePort - return internal -} - -// AddFlags adds common/default flags for both the kube and cloud Controller Manager Server to the -// specified FlagSet. Any common changes should be made here. Any individual changes should be made in that controller. 
-func (o *GenericControllerManagerOptions) AddFlags(fs *pflag.FlagSet) { - - fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).") - fs.StringVar(&o.Kubeconfig, "kubeconfig", o.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") - o.CloudProvider.AddFlags(fs) - o.Debugging.AddFlags(fs) - o.GenericComponent.AddFlags(fs) - o.KubeCloudShared.AddFlags(fs) - o.ServiceController.AddFlags(fs) - o.SecureServing.AddFlags(fs) - o.InsecureServing.AddFlags(fs) - o.Authentication.AddFlags(fs) - o.Authorization.AddFlags(fs) -} - -// ApplyTo fills up controller manager config with options and userAgent -func (o *GenericControllerManagerOptions) ApplyTo(c *genericcontrollermanager.Config, userAgent string) error { - if err := o.CloudProvider.ApplyTo(&c.ComponentConfig.CloudProvider); err != nil { - return err - } - if err := o.Debugging.ApplyTo(&c.ComponentConfig.Debugging); err != nil { - return err - } - if err := o.GenericComponent.ApplyTo(&c.ComponentConfig.GenericComponent); err != nil { - return err - } - if err := o.KubeCloudShared.ApplyTo(&c.ComponentConfig.KubeCloudShared); err != nil { - return err - } - if err := o.AttachDetachController.ApplyTo(&c.ComponentConfig.AttachDetachController); err != nil { - return err - } - if err := o.CSRSigningController.ApplyTo(&c.ComponentConfig.CSRSigningController); err != nil { - return err - } - if err := o.DaemonSetController.ApplyTo(&c.ComponentConfig.DaemonSetController); err != nil { - return err - } - if err := o.DeploymentController.ApplyTo(&c.ComponentConfig.DeploymentController); err != nil { - return err - } - if err := o.DeprecatedFlags.ApplyTo(&c.ComponentConfig.DeprecatedController); err != nil { - return err - } - if err := o.EndPointController.ApplyTo(&c.ComponentConfig.EndPointController); err != nil { - return err - } - if err := o.GarbageCollectorController.ApplyTo(&c.ComponentConfig.GarbageCollectorController); err != nil { - return err - } - if err := o.HPAController.ApplyTo(&c.ComponentConfig.HPAController); err != nil { - return err - } - if err := o.JobController.ApplyTo(&c.ComponentConfig.JobController); err != nil { - return err - } - if err := o.NamespaceController.ApplyTo(&c.ComponentConfig.NamespaceController); err != nil { - return err - } - if err := o.NodeIpamController.ApplyTo(&c.ComponentConfig.NodeIpamController); err != nil { - return err - } - if err := o.NodeLifecycleController.ApplyTo(&c.ComponentConfig.NodeLifecycleController); err != nil { - return err - } - if err := o.PersistentVolumeBinderController.ApplyTo(&c.ComponentConfig.PersistentVolumeBinderController); err != nil { - return err - } - if err := o.PodGCController.ApplyTo(&c.ComponentConfig.PodGCController); err != nil { - return err - } - if err := o.ReplicaSetController.ApplyTo(&c.ComponentConfig.ReplicaSetController); err != nil { - return err - } - if err := o.ReplicationController.ApplyTo(&c.ComponentConfig.ReplicationController); err != nil { - return err - } - if err := o.ResourceQuotaController.ApplyTo(&c.ComponentConfig.ResourceQuotaController); err != nil { - return err - } - if err := o.SAController.ApplyTo(&c.ComponentConfig.SAController); err != nil { - return err - } - if err := o.ServiceController.ApplyTo(&c.ComponentConfig.ServiceController); err != nil { - return err - } - if err := o.SecureServing.ApplyTo(&c.SecureServing); err != nil { - return err - } - if err := o.InsecureServing.ApplyTo(&c.InsecureServing); err != nil { - return 
err - } - if err := o.Authentication.ApplyTo(&c.Authentication, c.SecureServing, nil); err != nil { - return err - } - if err := o.Authorization.ApplyTo(&c.Authorization); err != nil { - return err - } - - // sync back to component config - // TODO: find more elegant way than synching back the values. - c.ComponentConfig.KubeCloudShared.Port = int32(o.InsecureServing.BindPort) - c.ComponentConfig.KubeCloudShared.Address = o.InsecureServing.BindAddress.String() - - var err error - c.Kubeconfig, err = clientcmd.BuildConfigFromFlags(o.Master, o.Kubeconfig) - if err != nil { - return err - } - c.Kubeconfig.ContentConfig.ContentType = o.GenericComponent.ContentType - c.Kubeconfig.QPS = o.GenericComponent.KubeAPIQPS - c.Kubeconfig.Burst = int(o.GenericComponent.KubeAPIBurst) - - c.Client, err = clientset.NewForConfig(restclient.AddUserAgent(c.Kubeconfig, userAgent)) - if err != nil { - return err - } - - c.LeaderElectionClient = clientset.NewForConfigOrDie(restclient.AddUserAgent(c.Kubeconfig, "leader-election")) - - c.EventRecorder = createRecorder(c.Client, userAgent) - - return nil -} - -// Validate checks GenericControllerManagerOptions and return a slice of found errors. -func (o *GenericControllerManagerOptions) Validate() []error { - errors := []error{} - errors = append(errors, o.CloudProvider.Validate()...) - errors = append(errors, o.Debugging.Validate()...) - errors = append(errors, o.GenericComponent.Validate()...) - errors = append(errors, o.KubeCloudShared.Validate()...) - errors = append(errors, o.AttachDetachController.Validate()...) - errors = append(errors, o.CSRSigningController.Validate()...) - errors = append(errors, o.DaemonSetController.Validate()...) - errors = append(errors, o.DeploymentController.Validate()...) - errors = append(errors, o.DeprecatedFlags.Validate()...) - errors = append(errors, o.EndPointController.Validate()...) - errors = append(errors, o.GarbageCollectorController.Validate()...) - errors = append(errors, o.HPAController.Validate()...) - errors = append(errors, o.JobController.Validate()...) - errors = append(errors, o.NamespaceController.Validate()...) - errors = append(errors, o.NodeIpamController.Validate()...) - errors = append(errors, o.NodeLifecycleController.Validate()...) - errors = append(errors, o.PersistentVolumeBinderController.Validate()...) - errors = append(errors, o.PodGCController.Validate()...) - errors = append(errors, o.ReplicaSetController.Validate()...) - errors = append(errors, o.ReplicationController.Validate()...) - errors = append(errors, o.ResourceQuotaController.Validate()...) - errors = append(errors, o.SAController.Validate()...) - errors = append(errors, o.ServiceController.Validate()...) - errors = append(errors, o.SecureServing.Validate()...) - errors = append(errors, o.InsecureServing.Validate()...) - errors = append(errors, o.Authentication.Validate()...) - errors = append(errors, o.Authorization.Validate()...) 
- - // TODO: validate component config, master and kubeconfig - - return errors -} - -func createRecorder(kubeClient kubernetes.Interface, userAgent string) record.EventRecorder { - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) - return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent}) -} diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index b4bb98a79cb..2841dba49e4 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -20,77 +20,236 @@ package options import ( "fmt" + "net" "strings" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + apiserveroptions "k8s.io/apiserver/pkg/server/options" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/record" cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options" kubecontrollerconfig "k8s.io/kubernetes/cmd/kube-controller-manager/app/config" + "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/componentconfig" + componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" "k8s.io/kubernetes/pkg/controller/garbagecollector" "k8s.io/kubernetes/pkg/master/ports" - // add the kubernetes feature gates _ "k8s.io/kubernetes/pkg/features" + "github.com/golang/glog" "github.com/spf13/pflag" ) -// KubeControllerManagerOptions is the main context object for the controller manager. +// KubeControllerManagerOptions is the main context object for the kube-controller manager. 
type KubeControllerManagerOptions struct { - Generic *cmoptions.GenericControllerManagerOptions + CloudProvider *cmoptions.CloudProviderOptions + Debugging *cmoptions.DebuggingOptions + GenericComponent *cmoptions.GenericComponentConfigOptions + KubeCloudShared *cmoptions.KubeCloudSharedOptions + + AttachDetachController *cmoptions.AttachDetachControllerOptions + CSRSigningController *cmoptions.CSRSigningControllerOptions + DaemonSetController *cmoptions.DaemonSetControllerOptions + DeploymentController *cmoptions.DeploymentControllerOptions + DeprecatedFlags *cmoptions.DeprecatedControllerOptions + EndPointController *cmoptions.EndPointControllerOptions + GarbageCollectorController *cmoptions.GarbageCollectorControllerOptions + HPAController *cmoptions.HPAControllerOptions + JobController *cmoptions.JobControllerOptions + NamespaceController *cmoptions.NamespaceControllerOptions + NodeIpamController *cmoptions.NodeIpamControllerOptions + NodeLifecycleController *cmoptions.NodeLifecycleControllerOptions + PersistentVolumeBinderController *cmoptions.PersistentVolumeBinderControllerOptions + PodGCController *cmoptions.PodGCControllerOptions + ReplicaSetController *cmoptions.ReplicaSetControllerOptions + ReplicationController *cmoptions.ReplicationControllerOptions + ResourceQuotaController *cmoptions.ResourceQuotaControllerOptions + SAController *cmoptions.SAControllerOptions + ServiceController *cmoptions.ServiceControllerOptions + + Controllers []string + ExternalCloudVolumePlugin string + + SecureServing *apiserveroptions.SecureServingOptions + // TODO: remove insecure serving mode + InsecureServing *cmoptions.InsecureServingOptions + Authentication *apiserveroptions.DelegatingAuthenticationOptions + Authorization *apiserveroptions.DelegatingAuthorizationOptions + + Master string + Kubeconfig string } // NewKubeControllerManagerOptions creates a new KubeControllerManagerOptions with a default config. func NewKubeControllerManagerOptions() *KubeControllerManagerOptions { - componentConfig := cmoptions.NewDefaultControllerManagerComponentConfig(ports.InsecureKubeControllerManagerPort) + componentConfig := NewDefaultComponentConfig(ports.InsecureKubeControllerManagerPort) s := KubeControllerManagerOptions{ - // The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'. - // Please make common changes there but put anything kube-controller specific here. 
- Generic: cmoptions.NewGenericControllerManagerOptions(componentConfig), + CloudProvider: &cmoptions.CloudProviderOptions{}, + Debugging: &cmoptions.DebuggingOptions{}, + GenericComponent: cmoptions.NewGenericComponentConfigOptions(componentConfig.GenericComponent), + KubeCloudShared: cmoptions.NewKubeCloudSharedOptions(componentConfig.KubeCloudShared), + AttachDetachController: &cmoptions.AttachDetachControllerOptions{ + ReconcilerSyncLoopPeriod: componentConfig.AttachDetachController.ReconcilerSyncLoopPeriod, + }, + CSRSigningController: &cmoptions.CSRSigningControllerOptions{ + ClusterSigningCertFile: componentConfig.CSRSigningController.ClusterSigningCertFile, + ClusterSigningKeyFile: componentConfig.CSRSigningController.ClusterSigningKeyFile, + ClusterSigningDuration: componentConfig.CSRSigningController.ClusterSigningDuration, + }, + DaemonSetController: &cmoptions.DaemonSetControllerOptions{ + ConcurrentDaemonSetSyncs: componentConfig.DaemonSetController.ConcurrentDaemonSetSyncs, + }, + DeploymentController: &cmoptions.DeploymentControllerOptions{ + ConcurrentDeploymentSyncs: componentConfig.DeploymentController.ConcurrentDeploymentSyncs, + DeploymentControllerSyncPeriod: componentConfig.DeploymentController.DeploymentControllerSyncPeriod, + }, + DeprecatedFlags: &cmoptions.DeprecatedControllerOptions{ + RegisterRetryCount: componentConfig.DeprecatedController.RegisterRetryCount, + }, + EndPointController: &cmoptions.EndPointControllerOptions{ + ConcurrentEndpointSyncs: componentConfig.EndPointController.ConcurrentEndpointSyncs, + }, + GarbageCollectorController: &cmoptions.GarbageCollectorControllerOptions{ + ConcurrentGCSyncs: componentConfig.GarbageCollectorController.ConcurrentGCSyncs, + EnableGarbageCollector: componentConfig.GarbageCollectorController.EnableGarbageCollector, + }, + HPAController: &cmoptions.HPAControllerOptions{ + HorizontalPodAutoscalerSyncPeriod: componentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod, + HorizontalPodAutoscalerUpscaleForbiddenWindow: componentConfig.HPAController.HorizontalPodAutoscalerUpscaleForbiddenWindow, + HorizontalPodAutoscalerDownscaleForbiddenWindow: componentConfig.HPAController.HorizontalPodAutoscalerDownscaleForbiddenWindow, + HorizontalPodAutoscalerTolerance: componentConfig.HPAController.HorizontalPodAutoscalerTolerance, + HorizontalPodAutoscalerUseRESTClients: componentConfig.HPAController.HorizontalPodAutoscalerUseRESTClients, + }, + JobController: &cmoptions.JobControllerOptions{ + ConcurrentJobSyncs: componentConfig.JobController.ConcurrentJobSyncs, + }, + NamespaceController: &cmoptions.NamespaceControllerOptions{ + NamespaceSyncPeriod: componentConfig.NamespaceController.NamespaceSyncPeriod, + ConcurrentNamespaceSyncs: componentConfig.NamespaceController.ConcurrentNamespaceSyncs, + }, + NodeIpamController: &cmoptions.NodeIpamControllerOptions{ + NodeCIDRMaskSize: componentConfig.NodeIpamController.NodeCIDRMaskSize, + }, + NodeLifecycleController: &cmoptions.NodeLifecycleControllerOptions{ + EnableTaintManager: componentConfig.NodeLifecycleController.EnableTaintManager, + NodeMonitorGracePeriod: componentConfig.NodeLifecycleController.NodeMonitorGracePeriod, + NodeStartupGracePeriod: componentConfig.NodeLifecycleController.NodeStartupGracePeriod, + PodEvictionTimeout: componentConfig.NodeLifecycleController.PodEvictionTimeout, + }, + PersistentVolumeBinderController: &cmoptions.PersistentVolumeBinderControllerOptions{ + PVClaimBinderSyncPeriod: 
componentConfig.PersistentVolumeBinderController.PVClaimBinderSyncPeriod, + VolumeConfiguration: componentConfig.PersistentVolumeBinderController.VolumeConfiguration, + }, + PodGCController: &cmoptions.PodGCControllerOptions{ + TerminatedPodGCThreshold: componentConfig.PodGCController.TerminatedPodGCThreshold, + }, + ReplicaSetController: &cmoptions.ReplicaSetControllerOptions{ + ConcurrentRSSyncs: componentConfig.ReplicaSetController.ConcurrentRSSyncs, + }, + ReplicationController: &cmoptions.ReplicationControllerOptions{ + ConcurrentRCSyncs: componentConfig.ReplicationController.ConcurrentRCSyncs, + }, + ResourceQuotaController: &cmoptions.ResourceQuotaControllerOptions{ + ResourceQuotaSyncPeriod: componentConfig.ResourceQuotaController.ResourceQuotaSyncPeriod, + ConcurrentResourceQuotaSyncs: componentConfig.ResourceQuotaController.ConcurrentResourceQuotaSyncs, + }, + SAController: &cmoptions.SAControllerOptions{ + ConcurrentSATokenSyncs: componentConfig.SAController.ConcurrentSATokenSyncs, + }, + ServiceController: &cmoptions.ServiceControllerOptions{ + ConcurrentServiceSyncs: componentConfig.ServiceController.ConcurrentServiceSyncs, + }, + Controllers: componentConfig.Controllers, + SecureServing: apiserveroptions.NewSecureServingOptions(), + InsecureServing: &cmoptions.InsecureServingOptions{ + BindAddress: net.ParseIP(componentConfig.KubeCloudShared.Address), + BindPort: int(componentConfig.KubeCloudShared.Port), + BindNetwork: "tcp", + }, + Authentication: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthenticationOptions() + Authorization: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthorizationOptions() } - s.Generic.SecureServing.ServerCert.CertDirectory = "/var/run/kubernetes" - s.Generic.SecureServing.ServerCert.PairName = "kube-controller-manager" + s.SecureServing.ServerCert.CertDirectory = "/var/run/kubernetes" + s.SecureServing.ServerCert.PairName = "kube-controller-manager" + + // disable secure serving for now + // TODO: enable HTTPS by default + s.SecureServing.BindPort = 0 gcIgnoredResources := make([]componentconfig.GroupResource, 0, len(garbagecollector.DefaultIgnoredResources())) for r := range garbagecollector.DefaultIgnoredResources() { gcIgnoredResources = append(gcIgnoredResources, componentconfig.GroupResource{Group: r.Group, Resource: r.Resource}) } - s.Generic.GarbageCollectorController.GCIgnoredResources = gcIgnoredResources + s.GarbageCollectorController.GCIgnoredResources = gcIgnoredResources return &s } +// NewDefaultComponentConfig returns kube-controller manager configuration object. 
+func NewDefaultComponentConfig(insecurePort int32) componentconfig.KubeControllerManagerConfiguration { + scheme := runtime.NewScheme() + componentconfigv1alpha1.AddToScheme(scheme) + componentconfig.AddToScheme(scheme) + + versioned := componentconfigv1alpha1.KubeControllerManagerConfiguration{} + scheme.Default(&versioned) + + internal := componentconfig.KubeControllerManagerConfiguration{} + scheme.Convert(&versioned, &internal, nil) + internal.KubeCloudShared.Port = insecurePort + return internal +} + // AddFlags adds flags for a specific KubeControllerManagerOptions to the specified FlagSet func (s *KubeControllerManagerOptions) AddFlags(fs *pflag.FlagSet, allControllers []string, disabledByDefaultControllers []string) { - s.Generic.AddFlags(fs) - s.Generic.AttachDetachController.AddFlags(fs) - s.Generic.CSRSigningController.AddFlags(fs) - s.Generic.DeploymentController.AddFlags(fs) - s.Generic.DaemonSetController.AddFlags(fs) - s.Generic.DeprecatedFlags.AddFlags(fs) - s.Generic.EndPointController.AddFlags(fs) - s.Generic.GarbageCollectorController.AddFlags(fs) - s.Generic.HPAController.AddFlags(fs) - s.Generic.JobController.AddFlags(fs) - s.Generic.NamespaceController.AddFlags(fs) - s.Generic.NodeIpamController.AddFlags(fs) - s.Generic.NodeLifecycleController.AddFlags(fs) - s.Generic.PersistentVolumeBinderController.AddFlags(fs) - s.Generic.PodGCController.AddFlags(fs) - s.Generic.ReplicaSetController.AddFlags(fs) - s.Generic.ReplicationController.AddFlags(fs) - s.Generic.ResourceQuotaController.AddFlags(fs) - s.Generic.SAController.AddFlags(fs) + s.CloudProvider.AddFlags(fs) + s.Debugging.AddFlags(fs) + s.GenericComponent.AddFlags(fs) + s.KubeCloudShared.AddFlags(fs) + s.ServiceController.AddFlags(fs) - fs.StringSliceVar(&s.Generic.Controllers, "controllers", s.Generic.Controllers, fmt.Sprintf(""+ + s.SecureServing.AddFlags(fs) + s.InsecureServing.AddFlags(fs) + s.Authentication.AddFlags(fs) + s.Authorization.AddFlags(fs) + + s.AttachDetachController.AddFlags(fs) + s.CSRSigningController.AddFlags(fs) + s.DeploymentController.AddFlags(fs) + s.DaemonSetController.AddFlags(fs) + s.DeprecatedFlags.AddFlags(fs) + s.EndPointController.AddFlags(fs) + s.GarbageCollectorController.AddFlags(fs) + s.HPAController.AddFlags(fs) + s.JobController.AddFlags(fs) + s.NamespaceController.AddFlags(fs) + s.NodeIpamController.AddFlags(fs) + s.NodeLifecycleController.AddFlags(fs) + s.PersistentVolumeBinderController.AddFlags(fs) + s.PodGCController.AddFlags(fs) + s.ReplicaSetController.AddFlags(fs) + s.ReplicationController.AddFlags(fs) + s.ResourceQuotaController.AddFlags(fs) + s.SAController.AddFlags(fs) + + fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).") + fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") + fs.StringSliceVar(&s.Controllers, "controllers", s.Controllers, fmt.Sprintf(""+ "A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+ "named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s", strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", "))) - fs.StringVar(&s.Generic.ExternalCloudVolumePlugin, "external-cloud-volume-plugin", s.Generic.ExternalCloudVolumePlugin, "The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. 
Currently used to allow node and volume controllers to work for in tree cloud providers.") + fs.StringVar(&s.ExternalCloudVolumePlugin, "external-cloud-volume-plugin", s.ExternalCloudVolumePlugin, "The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. Currently used to allow node and volume controllers to work for in tree cloud providers.") var dummy string fs.MarkDeprecated("insecure-experimental-approve-all-kubelet-csrs-for-group", "This flag does nothing.") fs.StringVar(&dummy, "insecure-experimental-approve-all-kubelet-csrs-for-group", "", "This flag does nothing.") @@ -98,11 +257,114 @@ func (s *KubeControllerManagerOptions) AddFlags(fs *pflag.FlagSet, allController } // ApplyTo fills up controller manager config with options. -func (s *KubeControllerManagerOptions) ApplyTo(c *kubecontrollerconfig.Config) error { - err := s.Generic.ApplyTo(&c.Generic, "controller-manager") +func (s *KubeControllerManagerOptions) ApplyTo(c *kubecontrollerconfig.Config, userAgent string) error { + if err := s.CloudProvider.ApplyTo(&c.ComponentConfig.CloudProvider); err != nil { + return err + } + if err := s.Debugging.ApplyTo(&c.ComponentConfig.Debugging); err != nil { + return err + } + if err := s.GenericComponent.ApplyTo(&c.ComponentConfig.GenericComponent); err != nil { + return err + } + if err := s.KubeCloudShared.ApplyTo(&c.ComponentConfig.KubeCloudShared); err != nil { + return err + } + if err := s.AttachDetachController.ApplyTo(&c.ComponentConfig.AttachDetachController); err != nil { + return err + } + if err := s.CSRSigningController.ApplyTo(&c.ComponentConfig.CSRSigningController); err != nil { + return err + } + if err := s.DaemonSetController.ApplyTo(&c.ComponentConfig.DaemonSetController); err != nil { + return err + } + if err := s.DeploymentController.ApplyTo(&c.ComponentConfig.DeploymentController); err != nil { + return err + } + if err := s.DeprecatedFlags.ApplyTo(&c.ComponentConfig.DeprecatedController); err != nil { + return err + } + if err := s.EndPointController.ApplyTo(&c.ComponentConfig.EndPointController); err != nil { + return err + } + if err := s.GarbageCollectorController.ApplyTo(&c.ComponentConfig.GarbageCollectorController); err != nil { + return err + } + if err := s.HPAController.ApplyTo(&c.ComponentConfig.HPAController); err != nil { + return err + } + if err := s.JobController.ApplyTo(&c.ComponentConfig.JobController); err != nil { + return err + } + if err := s.NamespaceController.ApplyTo(&c.ComponentConfig.NamespaceController); err != nil { + return err + } + if err := s.NodeIpamController.ApplyTo(&c.ComponentConfig.NodeIpamController); err != nil { + return err + } + if err := s.NodeLifecycleController.ApplyTo(&c.ComponentConfig.NodeLifecycleController); err != nil { + return err + } + if err := s.PersistentVolumeBinderController.ApplyTo(&c.ComponentConfig.PersistentVolumeBinderController); err != nil { + return err + } + if err := s.PodGCController.ApplyTo(&c.ComponentConfig.PodGCController); err != nil { + return err + } + if err := s.ReplicaSetController.ApplyTo(&c.ComponentConfig.ReplicaSetController); err != nil { + return err + } + if err := s.ReplicationController.ApplyTo(&c.ComponentConfig.ReplicationController); err != nil { + return err + } + if err := s.ResourceQuotaController.ApplyTo(&c.ComponentConfig.ResourceQuotaController); err != nil { + return err + } + if err := s.SAController.ApplyTo(&c.ComponentConfig.SAController); err != nil { + return err + } + if err := 
s.ServiceController.ApplyTo(&c.ComponentConfig.ServiceController); err != nil { + return err + } + if err := s.SecureServing.ApplyTo(&c.SecureServing); err != nil { + return err + } + if err := s.InsecureServing.ApplyTo(&c.InsecureServing); err != nil { + return err + } + if err := s.Authentication.ApplyTo(&c.Authentication, c.SecureServing, nil); err != nil { + return err + } + if err := s.Authorization.ApplyTo(&c.Authorization); err != nil { + return err + } - c.Generic.ComponentConfig.Controllers = s.Generic.Controllers - c.Generic.ComponentConfig.ExternalCloudVolumePlugin = s.Generic.ExternalCloudVolumePlugin + // sync back to component config + // TODO: find more elegant way than synching back the values. + c.ComponentConfig.KubeCloudShared.Port = int32(s.InsecureServing.BindPort) + c.ComponentConfig.KubeCloudShared.Address = s.InsecureServing.BindAddress.String() + + var err error + c.Kubeconfig, err = clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig) + if err != nil { + return err + } + c.Kubeconfig.ContentConfig.ContentType = s.GenericComponent.ContentType + c.Kubeconfig.QPS = s.GenericComponent.KubeAPIQPS + c.Kubeconfig.Burst = int(s.GenericComponent.KubeAPIBurst) + + c.Client, err = clientset.NewForConfig(restclient.AddUserAgent(c.Kubeconfig, userAgent)) + if err != nil { + return err + } + + c.LeaderElectionClient = clientset.NewForConfigOrDie(restclient.AddUserAgent(c.Kubeconfig, "leader-election")) + + c.EventRecorder = createRecorder(c.Client, userAgent) + + c.ComponentConfig.Controllers = s.Controllers + c.ComponentConfig.ExternalCloudVolumePlugin = s.ExternalCloudVolumePlugin return err } @@ -111,8 +373,38 @@ func (s *KubeControllerManagerOptions) ApplyTo(c *kubecontrollerconfig.Config) e func (s *KubeControllerManagerOptions) Validate(allControllers []string, disabledByDefaultControllers []string) error { var errs []error + errs = append(errs, s.CloudProvider.Validate()...) + errs = append(errs, s.Debugging.Validate()...) + errs = append(errs, s.GenericComponent.Validate()...) + errs = append(errs, s.KubeCloudShared.Validate()...) + errs = append(errs, s.AttachDetachController.Validate()...) + errs = append(errs, s.CSRSigningController.Validate()...) + errs = append(errs, s.DaemonSetController.Validate()...) + errs = append(errs, s.DeploymentController.Validate()...) + errs = append(errs, s.DeprecatedFlags.Validate()...) + errs = append(errs, s.EndPointController.Validate()...) + errs = append(errs, s.GarbageCollectorController.Validate()...) + errs = append(errs, s.HPAController.Validate()...) + errs = append(errs, s.JobController.Validate()...) + errs = append(errs, s.NamespaceController.Validate()...) + errs = append(errs, s.NodeIpamController.Validate()...) + errs = append(errs, s.NodeLifecycleController.Validate()...) + errs = append(errs, s.PersistentVolumeBinderController.Validate()...) + errs = append(errs, s.PodGCController.Validate()...) + errs = append(errs, s.ReplicaSetController.Validate()...) + errs = append(errs, s.ReplicationController.Validate()...) + errs = append(errs, s.ResourceQuotaController.Validate()...) + errs = append(errs, s.SAController.Validate()...) + errs = append(errs, s.ServiceController.Validate()...) + errs = append(errs, s.SecureServing.Validate()...) + errs = append(errs, s.InsecureServing.Validate()...) + errs = append(errs, s.Authentication.Validate()...) + errs = append(errs, s.Authorization.Validate()...) + + // TODO: validate component config, master and kubeconfig + allControllersSet := sets.NewString(allControllers...) 
- for _, controller := range s.Generic.Controllers { + for _, controller := range s.Controllers { if controller == "*" { continue } @@ -135,9 +427,16 @@ func (s KubeControllerManagerOptions) Config(allControllers []string, disabledBy } c := &kubecontrollerconfig.Config{} - if err := s.ApplyTo(c); err != nil { + if err := s.ApplyTo(c, "kube-controller-manager"); err != nil { return nil, err } return c, nil } + +func createRecorder(kubeClient kubernetes.Interface, userAgent string) record.EventRecorder { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) + return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent}) +} diff --git a/cmd/kube-controller-manager/app/options/options_test.go b/cmd/kube-controller-manager/app/options/options_test.go index 2b4a86ee8f4..0e69b91b857 100644 --- a/cmd/kube-controller-manager/app/options/options_test.go +++ b/cmd/kube-controller-manager/app/options/options_test.go @@ -113,169 +113,167 @@ func TestAddFlags(t *testing.T) { f.Parse(args) // Sort GCIgnoredResources because it's built from a map, which means the // insertion order is random. - sort.Sort(sortedGCIgnoredResources(s.Generic.GarbageCollectorController.GCIgnoredResources)) + sort.Sort(sortedGCIgnoredResources(s.GarbageCollectorController.GCIgnoredResources)) expected := &KubeControllerManagerOptions{ - Generic: &cmoptions.GenericControllerManagerOptions{ - CloudProvider: &cmoptions.CloudProviderOptions{ - Name: "gce", - CloudConfigFile: "/cloud-config", - }, - Debugging: &cmoptions.DebuggingOptions{ - EnableProfiling: false, - EnableContentionProfiling: true, - }, - GenericComponent: &cmoptions.GenericComponentConfigOptions{ - MinResyncPeriod: metav1.Duration{Duration: 8 * time.Hour}, - ContentType: "application/json", - KubeAPIQPS: 50.0, - KubeAPIBurst: 100, - ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute}, - LeaderElection: componentconfig.LeaderElectionConfiguration{ - ResourceLock: "configmap", - LeaderElect: false, - LeaseDuration: metav1.Duration{Duration: 30 * time.Second}, - RenewDeadline: metav1.Duration{Duration: 15 * time.Second}, - RetryPeriod: metav1.Duration{Duration: 5 * time.Second}, - }, - }, - KubeCloudShared: &cmoptions.KubeCloudSharedOptions{ - Port: 10252, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config + AllocateNodeCIDRs: true, - Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config - UseServiceAccountCredentials: true, - RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second}, - NodeMonitorPeriod: metav1.Duration{Duration: 10 * time.Second}, - ClusterName: "k8s", - ClusterCIDR: "1.2.3.4/24", - AllocateNodeCIDRs: true, - CIDRAllocatorType: "CloudAllocator", - ConfigureCloudRoutes: false, - ServiceAccountKeyFile: "/service-account-private-key", - }, - AttachDetachController: &cmoptions.AttachDetachControllerOptions{ - ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 30 * time.Second}, - DisableAttachDetachReconcilerSync: true, - }, - CSRSigningController: &cmoptions.CSRSigningControllerOptions{ - ClusterSigningCertFile: "/cluster-signing-cert", - ClusterSigningKeyFile: "/cluster-signing-key", - ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour}, - }, - DaemonSetController: &cmoptions.DaemonSetControllerOptions{ - 
ConcurrentDaemonSetSyncs: 2, - }, - DeploymentController: &cmoptions.DeploymentControllerOptions{ - ConcurrentDeploymentSyncs: 10, - DeploymentControllerSyncPeriod: metav1.Duration{Duration: 45 * time.Second}, - }, - DeprecatedFlags: &cmoptions.DeprecatedControllerOptions{ - DeletingPodsQPS: 0.1, - RegisterRetryCount: 10, - }, - EndPointController: &cmoptions.EndPointControllerOptions{ - ConcurrentEndpointSyncs: 10, - }, - GarbageCollectorController: &cmoptions.GarbageCollectorControllerOptions{ - ConcurrentGCSyncs: 30, - GCIgnoredResources: []componentconfig.GroupResource{ - {Group: "extensions", Resource: "replicationcontrollers"}, - {Group: "", Resource: "bindings"}, - {Group: "", Resource: "componentstatuses"}, - {Group: "", Resource: "events"}, - {Group: "authentication.k8s.io", Resource: "tokenreviews"}, - {Group: "authorization.k8s.io", Resource: "subjectaccessreviews"}, - {Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}, - {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}, - {Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}, - {Group: "apiregistration.k8s.io", Resource: "apiservices"}, - {Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}, - }, - EnableGarbageCollector: false, - }, - HPAController: &cmoptions.HPAControllerOptions{ - HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second}, - HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute}, - HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute}, - HorizontalPodAutoscalerTolerance: 0.1, - HorizontalPodAutoscalerUseRESTClients: true, - }, - JobController: &cmoptions.JobControllerOptions{ - ConcurrentJobSyncs: 5, - }, - NamespaceController: &cmoptions.NamespaceControllerOptions{ - NamespaceSyncPeriod: metav1.Duration{Duration: 10 * time.Minute}, - ConcurrentNamespaceSyncs: 20, - }, - NodeIpamController: &cmoptions.NodeIpamControllerOptions{ - NodeCIDRMaskSize: 48, - }, - NodeLifecycleController: &cmoptions.NodeLifecycleControllerOptions{ - EnableTaintManager: false, - NodeEvictionRate: 0.2, - SecondaryNodeEvictionRate: 0.05, - NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second}, - NodeStartupGracePeriod: metav1.Duration{Duration: 30 * time.Second}, - PodEvictionTimeout: metav1.Duration{Duration: 2 * time.Minute}, - LargeClusterSizeThreshold: 100, - UnhealthyZoneThreshold: 0.6, - }, - PersistentVolumeBinderController: &cmoptions.PersistentVolumeBinderControllerOptions{ - PVClaimBinderSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, - VolumeConfiguration: componentconfig.VolumeConfiguration{ - EnableDynamicProvisioning: false, - EnableHostPathProvisioning: true, - FlexVolumePluginDir: "/flex-volume-plugin", - PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ - MaximumRetry: 3, - MinimumTimeoutNFS: 200, - IncrementTimeoutNFS: 45, - MinimumTimeoutHostPath: 45, - IncrementTimeoutHostPath: 45, - }, - }, - }, - PodGCController: &cmoptions.PodGCControllerOptions{ - TerminatedPodGCThreshold: 12000, - }, - ReplicaSetController: &cmoptions.ReplicaSetControllerOptions{ - ConcurrentRSSyncs: 10, - }, - ReplicationController: &cmoptions.ReplicationControllerOptions{ - ConcurrentRCSyncs: 10, - }, - ResourceQuotaController: &cmoptions.ResourceQuotaControllerOptions{ - ResourceQuotaSyncPeriod: metav1.Duration{Duration: 10 * time.Minute}, - ConcurrentResourceQuotaSyncs: 10, - }, - SAController: 
&cmoptions.SAControllerOptions{ - ConcurrentSATokenSyncs: 10, - }, - ServiceController: &cmoptions.ServiceControllerOptions{ - ConcurrentServiceSyncs: 2, - }, - Controllers: []string{"foo", "bar"}, - SecureServing: &apiserveroptions.SecureServingOptions{ - BindPort: 10001, - BindAddress: net.ParseIP("192.168.4.21"), - ServerCert: apiserveroptions.GeneratableKeyCert{ - CertDirectory: "/a/b/c", - PairName: "kube-controller-manager", - }, - HTTP2MaxStreamsPerConnection: 47, - }, - InsecureServing: &cmoptions.InsecureServingOptions{ - BindAddress: net.ParseIP("192.168.4.10"), - BindPort: int(10000), - BindNetwork: "tcp", - }, - Kubeconfig: "/kubeconfig", - Master: "192.168.4.20", + CloudProvider: &cmoptions.CloudProviderOptions{ + Name: "gce", + CloudConfigFile: "/cloud-config", }, + Debugging: &cmoptions.DebuggingOptions{ + EnableProfiling: false, + EnableContentionProfiling: true, + }, + GenericComponent: &cmoptions.GenericComponentConfigOptions{ + MinResyncPeriod: metav1.Duration{Duration: 8 * time.Hour}, + ContentType: "application/json", + KubeAPIQPS: 50.0, + KubeAPIBurst: 100, + ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute}, + LeaderElection: componentconfig.LeaderElectionConfiguration{ + ResourceLock: "configmap", + LeaderElect: false, + LeaseDuration: metav1.Duration{Duration: 30 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 15 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 5 * time.Second}, + }, + }, + KubeCloudShared: &cmoptions.KubeCloudSharedOptions{ + Port: 10252, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config + AllocateNodeCIDRs: true, + Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config + UseServiceAccountCredentials: true, + RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second}, + NodeMonitorPeriod: metav1.Duration{Duration: 10 * time.Second}, + ClusterName: "k8s", + ClusterCIDR: "1.2.3.4/24", + AllocateNodeCIDRs: true, + CIDRAllocatorType: "CloudAllocator", + ConfigureCloudRoutes: false, + ServiceAccountKeyFile: "/service-account-private-key", + }, + AttachDetachController: &cmoptions.AttachDetachControllerOptions{ + ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 30 * time.Second}, + DisableAttachDetachReconcilerSync: true, + }, + CSRSigningController: &cmoptions.CSRSigningControllerOptions{ + ClusterSigningCertFile: "/cluster-signing-cert", + ClusterSigningKeyFile: "/cluster-signing-key", + ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour}, + }, + DaemonSetController: &cmoptions.DaemonSetControllerOptions{ + ConcurrentDaemonSetSyncs: 2, + }, + DeploymentController: &cmoptions.DeploymentControllerOptions{ + ConcurrentDeploymentSyncs: 10, + DeploymentControllerSyncPeriod: metav1.Duration{Duration: 45 * time.Second}, + }, + DeprecatedFlags: &cmoptions.DeprecatedControllerOptions{ + DeletingPodsQPS: 0.1, + RegisterRetryCount: 10, + }, + EndPointController: &cmoptions.EndPointControllerOptions{ + ConcurrentEndpointSyncs: 10, + }, + GarbageCollectorController: &cmoptions.GarbageCollectorControllerOptions{ + ConcurrentGCSyncs: 30, + GCIgnoredResources: []componentconfig.GroupResource{ + {Group: "extensions", Resource: "replicationcontrollers"}, + {Group: "", Resource: "bindings"}, + {Group: "", Resource: "componentstatuses"}, + {Group: "", Resource: "events"}, + {Group: "authentication.k8s.io", Resource: "tokenreviews"}, + {Group: "authorization.k8s.io", Resource: 
"subjectaccessreviews"}, + {Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}, + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}, + {Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}, + {Group: "apiregistration.k8s.io", Resource: "apiservices"}, + {Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}, + }, + EnableGarbageCollector: false, + }, + HPAController: &cmoptions.HPAControllerOptions{ + HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second}, + HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute}, + HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute}, + HorizontalPodAutoscalerTolerance: 0.1, + HorizontalPodAutoscalerUseRESTClients: true, + }, + JobController: &cmoptions.JobControllerOptions{ + ConcurrentJobSyncs: 5, + }, + NamespaceController: &cmoptions.NamespaceControllerOptions{ + NamespaceSyncPeriod: metav1.Duration{Duration: 10 * time.Minute}, + ConcurrentNamespaceSyncs: 20, + }, + NodeIpamController: &cmoptions.NodeIpamControllerOptions{ + NodeCIDRMaskSize: 48, + }, + NodeLifecycleController: &cmoptions.NodeLifecycleControllerOptions{ + EnableTaintManager: false, + NodeEvictionRate: 0.2, + SecondaryNodeEvictionRate: 0.05, + NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second}, + NodeStartupGracePeriod: metav1.Duration{Duration: 30 * time.Second}, + PodEvictionTimeout: metav1.Duration{Duration: 2 * time.Minute}, + LargeClusterSizeThreshold: 100, + UnhealthyZoneThreshold: 0.6, + }, + PersistentVolumeBinderController: &cmoptions.PersistentVolumeBinderControllerOptions{ + PVClaimBinderSyncPeriod: metav1.Duration{Duration: 30 * time.Second}, + VolumeConfiguration: componentconfig.VolumeConfiguration{ + EnableDynamicProvisioning: false, + EnableHostPathProvisioning: true, + FlexVolumePluginDir: "/flex-volume-plugin", + PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{ + MaximumRetry: 3, + MinimumTimeoutNFS: 200, + IncrementTimeoutNFS: 45, + MinimumTimeoutHostPath: 45, + IncrementTimeoutHostPath: 45, + }, + }, + }, + PodGCController: &cmoptions.PodGCControllerOptions{ + TerminatedPodGCThreshold: 12000, + }, + ReplicaSetController: &cmoptions.ReplicaSetControllerOptions{ + ConcurrentRSSyncs: 10, + }, + ReplicationController: &cmoptions.ReplicationControllerOptions{ + ConcurrentRCSyncs: 10, + }, + ResourceQuotaController: &cmoptions.ResourceQuotaControllerOptions{ + ResourceQuotaSyncPeriod: metav1.Duration{Duration: 10 * time.Minute}, + ConcurrentResourceQuotaSyncs: 10, + }, + SAController: &cmoptions.SAControllerOptions{ + ConcurrentSATokenSyncs: 10, + }, + ServiceController: &cmoptions.ServiceControllerOptions{ + ConcurrentServiceSyncs: 2, + }, + Controllers: []string{"foo", "bar"}, + SecureServing: &apiserveroptions.SecureServingOptions{ + BindPort: 10001, + BindAddress: net.ParseIP("192.168.4.21"), + ServerCert: apiserveroptions.GeneratableKeyCert{ + CertDirectory: "/a/b/c", + PairName: "kube-controller-manager", + }, + HTTP2MaxStreamsPerConnection: 47, + }, + InsecureServing: &cmoptions.InsecureServingOptions{ + BindAddress: net.ParseIP("192.168.4.10"), + BindPort: int(10000), + BindNetwork: "tcp", + }, + Kubeconfig: "/kubeconfig", + Master: "192.168.4.20", } // Sort GCIgnoredResources because it's built from a map, which means the // insertion order is random. 
- sort.Sort(sortedGCIgnoredResources(expected.Generic.GarbageCollectorController.GCIgnoredResources)) + sort.Sort(sortedGCIgnoredResources(expected.GarbageCollectorController.GCIgnoredResources)) if !reflect.DeepEqual(expected, s) { t.Errorf("Got different run options than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(expected, s)) From 14f7b959ff3f2d6cdcc83e3035813b9496c6a152 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 16 May 2018 16:26:48 +0800 Subject: [PATCH 029/307] modify cloud-controller manager config struct to adapt option change --- .../app/config/config.go | 42 ++++++---- .../app/controllermanager.go | 78 +++++++++---------- 2 files changed, 65 insertions(+), 55 deletions(-) diff --git a/cmd/cloud-controller-manager/app/config/config.go b/cmd/cloud-controller-manager/app/config/config.go index ca31d87cd61..3d201e47546 100644 --- a/cmd/cloud-controller-manager/app/config/config.go +++ b/cmd/cloud-controller-manager/app/config/config.go @@ -17,25 +17,39 @@ limitations under the License. package app import ( - "time" - + apiserver "k8s.io/apiserver/pkg/server" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app" + "k8s.io/kubernetes/pkg/apis/componentconfig" ) -// ExtraConfig are part of Config, also can place your custom config here. -type ExtraConfig struct { - NodeStatusUpdateFrequency time.Duration -} - // Config is the main context object for the cloud controller manager. type Config struct { - Generic genericcontrollermanager.Config - Extra ExtraConfig + ComponentConfig componentconfig.CloudControllerManagerConfiguration + + SecureServing *apiserver.SecureServingInfo + // TODO: remove deprecated insecure serving + InsecureServing *genericcontrollermanager.InsecureServingInfo + Authentication apiserver.AuthenticationInfo + Authorization apiserver.AuthorizationInfo + + // the general kube client + Client *clientset.Clientset + + // the client only used for leader election + LeaderElectionClient *clientset.Clientset + + // the rest config for the master + Kubeconfig *restclient.Config + + // the event sink + EventRecorder record.EventRecorder } type completedConfig struct { - Generic genericcontrollermanager.CompletedConfig - Extra *ExtraConfig + *Config } // CompletedConfig same as Config, just to swap private object. @@ -46,10 +60,6 @@ type CompletedConfig struct { // Complete fills in any fields not set that are required to have valid data. It's mutating the receiver. func (c *Config) Complete() *CompletedConfig { - cc := completedConfig{ - c.Generic.Complete(), - &c.Extra, - } - + cc := completedConfig{c} return &CompletedConfig{&cc} } diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 8589ba35d5c..988bf5edf36 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -85,13 +85,13 @@ the cloud specific control loops shipped with Kubernetes.`, func resyncPeriod(c *cloudcontrollerconfig.CompletedConfig) func() time.Duration { return func() time.Duration { factor := rand.Float64() + 1 - return time.Duration(float64(c.Generic.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor) + return time.Duration(float64(c.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor) } } // Run runs the ExternalCMServer. This should never exit. 
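The resyncPeriod helper above multiplies the configured MinResyncPeriod by a random factor in [1, 2) so that the informer resyncs of different controller processes do not all line up at the same instant. A standalone sketch of the same idea, using an illustrative duration rather than the component's default:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredResync returns a duration between min and 2*min, mirroring the
// factor := rand.Float64() + 1 pattern used by resyncPeriod above.
func jitteredResync(min time.Duration) time.Duration {
	factor := rand.Float64() + 1
	return time.Duration(float64(min.Nanoseconds()) * factor)
}

func main() {
	fmt.Println(jitteredResync(12 * time.Hour)) // somewhere between 12h and 24h
}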
func Run(c *cloudcontrollerconfig.CompletedConfig) error { - cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider.Name, c.Generic.ComponentConfig.CloudProvider.CloudConfigFile) + cloud, err := cloudprovider.InitCloudProvider(c.ComponentConfig.CloudProvider.Name, c.ComponentConfig.CloudProvider.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } @@ -100,7 +100,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { } if cloud.HasClusterID() == false { - if c.Generic.ComponentConfig.KubeCloudShared.AllowUntaggedCloud == true { + if c.ComponentConfig.KubeCloudShared.AllowUntaggedCloud == true { glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues") } else { glog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option") @@ -109,38 +109,38 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { // setup /configz endpoint if cz, err := configz.New("componentconfig"); err == nil { - cz.Set(c.Generic.ComponentConfig) + cz.Set(c.ComponentConfig) } else { glog.Errorf("unable to register configz: %c", err) } // Start the controller manager HTTP server stopCh := make(chan struct{}) - if c.Generic.SecureServing != nil { - handler := genericcontrollermanager.NewBaseHandler(&c.Generic) - handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Generic) - if err := c.Generic.SecureServing.Serve(handler, 0, stopCh); err != nil { + if c.SecureServing != nil { + handler := genericcontrollermanager.NewBaseHandler(&c.ComponentConfig.Debugging) + handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Authorization, &c.Authentication) + if err := c.SecureServing.Serve(handler, 0, stopCh); err != nil { return err } } - if c.Generic.InsecureServing != nil { - handler := genericcontrollermanager.NewBaseHandler(&c.Generic) - handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Generic) - if err := c.Generic.InsecureServing.Serve(handler, 0, stopCh); err != nil { + if c.InsecureServing != nil { + handler := genericcontrollermanager.NewBaseHandler(&c.ComponentConfig.Debugging) + handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Authorization, &c.Authentication) + if err := c.InsecureServing.Serve(handler, 0, stopCh); err != nil { return err } } run := func(stop <-chan struct{}) { rootClientBuilder := controller.SimpleControllerClientBuilder{ - ClientConfig: c.Generic.Kubeconfig, + ClientConfig: c.Kubeconfig, } var clientBuilder controller.ControllerClientBuilder - if c.Generic.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials { + if c.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials { clientBuilder = controller.SAControllerClientBuilder{ - ClientConfig: restclient.AnonymousClientConfig(c.Generic.Kubeconfig), - CoreClient: c.Generic.Client.CoreV1(), - AuthenticationClient: c.Generic.Client.AuthenticationV1(), + ClientConfig: restclient.AnonymousClientConfig(c.Kubeconfig), + CoreClient: c.Client.CoreV1(), + AuthenticationClient: c.Client.AuthenticationV1(), Namespace: "kube-system", } } else { @@ -152,7 +152,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { } } - if !c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaderElect { + if !c.ComponentConfig.GenericComponent.LeaderElection.LeaderElect { run(nil) panic("unreachable") } @@ 
-166,13 +166,13 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { id = id + "_" + string(uuid.NewUUID()) // Lock required for leader election - rl, err := resourcelock.New(c.Generic.ComponentConfig.GenericComponent.LeaderElection.ResourceLock, + rl, err := resourcelock.New(c.ComponentConfig.GenericComponent.LeaderElection.ResourceLock, "kube-system", "cloud-controller-manager", - c.Generic.LeaderElectionClient.CoreV1(), + c.LeaderElectionClient.CoreV1(), resourcelock.ResourceLockConfig{ Identity: id, - EventRecorder: c.Generic.EventRecorder, + EventRecorder: c.EventRecorder, }) if err != nil { glog.Fatalf("error creating lock: %v", err) @@ -181,9 +181,9 @@ func Run(c *cloudcontrollerconfig.CompletedConfig) error { // Try and become the leader and start cloud controller manager loops leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{ Lock: rl, - LeaseDuration: c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration, - RenewDeadline: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration, - RetryPeriod: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration, + LeaseDuration: c.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration, + RenewDeadline: c.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration, + RetryPeriod: c.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: run, OnStoppedLeading: func() { @@ -213,16 +213,16 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, rootClientBuilde nodeController := cloudcontrollers.NewCloudNodeController( sharedInformers.Core().V1().Nodes(), client("cloud-node-controller"), cloud, - c.Generic.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration, - c.Extra.NodeStatusUpdateFrequency) + c.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration, + c.ComponentConfig.NodeStatusUpdateFrequency.Duration) nodeController.Run(stop) - time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) + time.Sleep(wait.Jitter(c.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) // Start the PersistentVolumeLabelController pvlController := cloudcontrollers.NewPersistentVolumeLabelController(client("pvl-controller"), cloud) go pvlController.Run(5, stop) - time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) + time.Sleep(wait.Jitter(c.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) // Start the service controller serviceController, err := servicecontroller.New( @@ -230,34 +230,34 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, rootClientBuilde client("service-controller"), sharedInformers.Core().V1().Services(), sharedInformers.Core().V1().Nodes(), - c.Generic.ComponentConfig.KubeCloudShared.ClusterName, + c.ComponentConfig.KubeCloudShared.ClusterName, ) if err != nil { glog.Errorf("Failed to start service controller: %v", err) } else { - go serviceController.Run(stop, int(c.Generic.ComponentConfig.ServiceController.ConcurrentServiceSyncs)) - time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) + go serviceController.Run(stop, int(c.ComponentConfig.ServiceController.ConcurrentServiceSyncs)) + 
time.Sleep(wait.Jitter(c.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) } // If CIDRs should be allocated for pods and set on the CloudProvider, then start the route controller - if c.Generic.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs && c.Generic.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes { + if c.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs && c.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes { if routes, ok := cloud.Routes(); !ok { glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.") } else { var clusterCIDR *net.IPNet - if len(strings.TrimSpace(c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 { - _, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR) + if len(strings.TrimSpace(c.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 { + _, clusterCIDR, err = net.ParseCIDR(c.ComponentConfig.KubeCloudShared.ClusterCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.KubeCloudShared.ClusterCIDR, err) + glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.ComponentConfig.KubeCloudShared.ClusterCIDR, err) } } - routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR) - go routeController.Run(stop, c.Generic.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration) - time.Sleep(wait.Jitter(c.Generic.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) + routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR) + go routeController.Run(stop, c.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration) + time.Sleep(wait.Jitter(c.ComponentConfig.GenericComponent.ControllerStartInterval.Duration, ControllerStartJitter)) } } else { - glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, c.Generic.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes) + glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, c.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes) } // If apiserver is not running we should wait for some time and fail only then. This is particularly From f21475ac9553fd33a38f5c30e496fbfdde2bb7a4 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 16 May 2018 16:45:30 +0800 Subject: [PATCH 030/307] modify kube-controller manager config struct to adapt option change --- cmd/controller-manager/app/config.go | 64 ------------------- cmd/controller-manager/app/serve.go | 14 ++-- .../app/config/config.go | 42 +++++++----- .../app/controllermanager.go | 52 +++++++-------- 4 files changed, 60 insertions(+), 112 deletions(-) delete mode 100644 cmd/controller-manager/app/config.go diff --git a/cmd/controller-manager/app/config.go b/cmd/controller-manager/app/config.go deleted file mode 100644 index 6a2fc592e12..00000000000 --- a/cmd/controller-manager/app/config.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package app - -import ( - apiserver "k8s.io/apiserver/pkg/server" - clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/apis/componentconfig" -) - -// Config is the main context object for the controller manager. -type Config struct { - ComponentConfig componentconfig.KubeControllerManagerConfiguration - - SecureServing *apiserver.SecureServingInfo - // TODO: remove deprecated insecure serving - InsecureServing *InsecureServingInfo - Authentication apiserver.AuthenticationInfo - Authorization apiserver.AuthorizationInfo - - // the general kube client - Client *clientset.Clientset - - // the client only used for leader election - LeaderElectionClient *clientset.Clientset - - // the rest config for the master - Kubeconfig *restclient.Config - - // the event sink - EventRecorder record.EventRecorder -} - -type completedConfig struct { - *Config -} - -// CompletedConfig same as Config, just to swap private object. -type CompletedConfig struct { - // Embed a private pointer that cannot be instantiated outside of this package. - *completedConfig -} - -// Complete fills in any fields not set that are required to have valid data. It's mutating the receiver. -func (c *Config) Complete() CompletedConfig { - cc := completedConfig{c} - return CompletedConfig{&cc} -} diff --git a/cmd/controller-manager/app/serve.go b/cmd/controller-manager/app/serve.go index 0b7251067da..305a3ba9d3b 100644 --- a/cmd/controller-manager/app/serve.go +++ b/cmd/controller-manager/app/serve.go @@ -24,21 +24,23 @@ import ( genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" apirequest "k8s.io/apiserver/pkg/endpoints/request" + apiserver "k8s.io/apiserver/pkg/server" genericfilters "k8s.io/apiserver/pkg/server/filters" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/mux" "k8s.io/apiserver/pkg/server/routes" "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/componentconfig" "k8s.io/kubernetes/pkg/util/configz" ) // BuildHandlerChain builds a handler chain with a base handler and CompletedConfig. 
-func BuildHandlerChain(apiHandler http.Handler, c *CompletedConfig) http.Handler { +func BuildHandlerChain(apiHandler http.Handler, authorizationInfo *apiserver.AuthorizationInfo, authenticationInfo *apiserver.AuthenticationInfo) http.Handler { requestInfoResolver := &apirequest.RequestInfoFactory{} failedHandler := genericapifilters.Unauthorized(legacyscheme.Codecs, false) - handler := genericapifilters.WithAuthorization(apiHandler, c.Authorization.Authorizer, legacyscheme.Codecs) - handler = genericapifilters.WithAuthentication(handler, c.Authentication.Authenticator, failedHandler) + handler := genericapifilters.WithAuthorization(apiHandler, authorizationInfo.Authorizer, legacyscheme.Codecs) + handler = genericapifilters.WithAuthentication(handler, authenticationInfo.Authenticator, failedHandler) handler = genericapifilters.WithRequestInfo(handler, requestInfoResolver) handler = genericfilters.WithPanicRecovery(handler) @@ -46,12 +48,12 @@ func BuildHandlerChain(apiHandler http.Handler, c *CompletedConfig) http.Handler } // NewBaseHandler takes in CompletedConfig and returns a handler. -func NewBaseHandler(c *CompletedConfig) http.Handler { +func NewBaseHandler(c *componentconfig.DebuggingConfiguration) http.Handler { mux := mux.NewPathRecorderMux("controller-manager") healthz.InstallHandler(mux) - if c.ComponentConfig.Debugging.EnableProfiling { + if c.EnableProfiling { routes.Profiling{}.Install(mux) - if c.ComponentConfig.Debugging.EnableContentionProfiling { + if c.EnableContentionProfiling { goruntime.SetBlockProfileRate(1) } } diff --git a/cmd/kube-controller-manager/app/config/config.go b/cmd/kube-controller-manager/app/config/config.go index 4eb9c3ff812..4713e9dfe2e 100644 --- a/cmd/kube-controller-manager/app/config/config.go +++ b/cmd/kube-controller-manager/app/config/config.go @@ -17,25 +17,39 @@ limitations under the License. package config import ( - "time" - + apiserver "k8s.io/apiserver/pkg/server" + clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app" + "k8s.io/kubernetes/pkg/apis/componentconfig" ) -// ExtraConfig are part of Config, also can place your custom config here. -type ExtraConfig struct { - NodeStatusUpdateFrequency time.Duration -} - // Config is the main context object for the controller manager. type Config struct { - Generic genericcontrollermanager.Config - Extra ExtraConfig + ComponentConfig componentconfig.KubeControllerManagerConfiguration + + SecureServing *apiserver.SecureServingInfo + // TODO: remove deprecated insecure serving + InsecureServing *genericcontrollermanager.InsecureServingInfo + Authentication apiserver.AuthenticationInfo + Authorization apiserver.AuthorizationInfo + + // the general kube client + Client *clientset.Clientset + + // the client only used for leader election + LeaderElectionClient *clientset.Clientset + + // the rest config for the master + Kubeconfig *restclient.Config + + // the event sink + EventRecorder record.EventRecorder } type completedConfig struct { - Generic genericcontrollermanager.CompletedConfig - Extra *ExtraConfig + *Config } // CompletedConfig same as Config, just to swap private object. @@ -46,10 +60,6 @@ type CompletedConfig struct { // Complete fills in any fields not set that are required to have valid data. It's mutating the receiver. 
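With the new BuildHandlerChain signature above, the authorization filter is applied first and panic recovery last, which makes panic recovery the outermost wrapper: it sees each request first, while authorization runs closest to the API handler. A toy chain, not the apiserver filter code, that makes this ordering visible:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// wrap is a stand-in middleware used only to show wrapping order; each real
// filter (WithAuthorization, WithAuthentication, WithRequestInfo,
// WithPanicRecovery) composes the same way.
func wrap(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("enter", name)
		next.ServeHTTP(w, r)
	})
}

func main() {
	var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("api handler")
	})
	h = wrap("authorization", h) // applied first, runs last before the handler
	h = wrap("authentication", h)
	h = wrap("request-info", h)
	h = wrap("panic-recovery", h) // applied last, runs first on every request
	h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/healthz", nil))
	// Prints: panic-recovery, request-info, authentication, authorization, api handler
}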
func (c *Config) Complete() *CompletedConfig { - cc := completedConfig{ - c.Generic.Complete(), - &c.Extra, - } - + cc := completedConfig{c} return &CompletedConfig{&cc} } diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 9830be09690..b03ba75667e 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -108,7 +108,7 @@ controller, and serviceaccounts controller.`, func ResyncPeriod(c *config.CompletedConfig) func() time.Duration { return func() time.Duration { factor := rand.Float64() + 1 - return time.Duration(float64(c.Generic.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor) + return time.Duration(float64(c.ComponentConfig.GenericComponent.MinResyncPeriod.Nanoseconds()) * factor) } } @@ -118,43 +118,43 @@ func Run(c *config.CompletedConfig) error { glog.Infof("Version: %+v", version.Get()) if cfgz, err := configz.New("componentconfig"); err == nil { - cfgz.Set(c.Generic.ComponentConfig) + cfgz.Set(c.ComponentConfig) } else { glog.Errorf("unable to register configz: %c", err) } // Start the controller manager HTTP server stopCh := make(chan struct{}) - if c.Generic.SecureServing != nil { - handler := genericcontrollermanager.NewBaseHandler(&c.Generic) - handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Generic) - if err := c.Generic.SecureServing.Serve(handler, 0, stopCh); err != nil { + if c.SecureServing != nil { + handler := genericcontrollermanager.NewBaseHandler(&c.ComponentConfig.Debugging) + handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Authorization, &c.Authentication) + if err := c.SecureServing.Serve(handler, 0, stopCh); err != nil { return err } } - if c.Generic.InsecureServing != nil { - handler := genericcontrollermanager.NewBaseHandler(&c.Generic) - handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Generic) - if err := c.Generic.InsecureServing.Serve(handler, 0, stopCh); err != nil { + if c.InsecureServing != nil { + handler := genericcontrollermanager.NewBaseHandler(&c.ComponentConfig.Debugging) + handler = genericcontrollermanager.BuildHandlerChain(handler, &c.Authorization, &c.Authentication) + if err := c.InsecureServing.Serve(handler, 0, stopCh); err != nil { return err } } run := func(stop <-chan struct{}) { rootClientBuilder := controller.SimpleControllerClientBuilder{ - ClientConfig: c.Generic.Kubeconfig, + ClientConfig: c.Kubeconfig, } var clientBuilder controller.ControllerClientBuilder - if c.Generic.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials { - if len(c.Generic.ComponentConfig.KubeCloudShared.ServiceAccountKeyFile) == 0 { + if c.ComponentConfig.KubeCloudShared.UseServiceAccountCredentials { + if len(c.ComponentConfig.KubeCloudShared.ServiceAccountKeyFile) == 0 { // It'c possible another controller process is creating the tokens for us. // If one isn't, we'll timeout and exit when our client builder is unable to create the tokens. 
glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file") } clientBuilder = controller.SAControllerClientBuilder{ - ClientConfig: restclient.AnonymousClientConfig(c.Generic.Kubeconfig), - CoreClient: c.Generic.Client.CoreV1(), - AuthenticationClient: c.Generic.Client.AuthenticationV1(), + ClientConfig: restclient.AnonymousClientConfig(c.Kubeconfig), + CoreClient: c.Client.CoreV1(), + AuthenticationClient: c.Client.AuthenticationV1(), Namespace: "kube-system", } } else { @@ -176,7 +176,7 @@ func Run(c *config.CompletedConfig) error { select {} } - if !c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaderElect { + if !c.ComponentConfig.GenericComponent.LeaderElection.LeaderElect { run(wait.NeverStop) panic("unreachable") } @@ -188,13 +188,13 @@ func Run(c *config.CompletedConfig) error { // add a uniquifier so that two processes on the same host don't accidentally both become active id = id + "_" + string(uuid.NewUUID()) - rl, err := resourcelock.New(c.Generic.ComponentConfig.GenericComponent.LeaderElection.ResourceLock, + rl, err := resourcelock.New(c.ComponentConfig.GenericComponent.LeaderElection.ResourceLock, "kube-system", "kube-controller-manager", - c.Generic.LeaderElectionClient.CoreV1(), + c.LeaderElectionClient.CoreV1(), resourcelock.ResourceLockConfig{ Identity: id, - EventRecorder: c.Generic.EventRecorder, + EventRecorder: c.EventRecorder, }) if err != nil { glog.Fatalf("error creating lock: %v", err) @@ -202,9 +202,9 @@ func Run(c *config.CompletedConfig) error { leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{ Lock: rl, - LeaseDuration: c.Generic.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration, - RenewDeadline: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration, - RetryPeriod: c.Generic.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration, + LeaseDuration: c.ComponentConfig.GenericComponent.LeaderElection.LeaseDuration.Duration, + RenewDeadline: c.ComponentConfig.GenericComponent.LeaderElection.RenewDeadline.Duration, + RetryPeriod: c.ComponentConfig.GenericComponent.LeaderElection.RetryPeriod.Duration, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: run, OnStoppedLeading: func() { @@ -409,8 +409,8 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien return ControllerContext{}, err } - cloud, loopMode, err := createCloudProvider(s.Generic.ComponentConfig.CloudProvider.Name, s.Generic.ComponentConfig.ExternalCloudVolumePlugin, - s.Generic.ComponentConfig.CloudProvider.CloudConfigFile, s.Generic.ComponentConfig.KubeCloudShared.AllowUntaggedCloud, sharedInformers) + cloud, loopMode, err := createCloudProvider(s.ComponentConfig.CloudProvider.Name, s.ComponentConfig.ExternalCloudVolumePlugin, + s.ComponentConfig.CloudProvider.CloudConfigFile, s.ComponentConfig.KubeCloudShared.AllowUntaggedCloud, sharedInformers) if err != nil { return ControllerContext{}, err } @@ -418,7 +418,7 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien ctx := ControllerContext{ ClientBuilder: clientBuilder, InformerFactory: sharedInformers, - ComponentConfig: s.Generic.ComponentConfig, + ComponentConfig: s.ComponentConfig, RESTMapper: restMapper, AvailableResources: availableResources, Cloud: cloud, From 3d20f1a99caaf3019d483d2c2c8fdb3d641661d2 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 16 May 2018 17:05:56 +0800 Subject: [PATCH 031/307] auto generated file --- 
cmd/cloud-controller-manager/app/config/BUILD | 9 +++- .../app/options/BUILD | 12 +++++ cmd/controller-manager/app/BUILD | 3 -- cmd/controller-manager/app/options/BUILD | 11 ---- cmd/kube-controller-manager/app/config/BUILD | 9 +++- cmd/kube-controller-manager/app/options/BUILD | 11 ++++ .../v1alpha1/zz_generated.conversion.go | 52 +++++++++++++++++++ .../v1alpha1/zz_generated.deepcopy.go | 31 +++++++++++ .../v1alpha1/zz_generated.defaults.go | 12 +++++ .../componentconfig/zz_generated.deepcopy.go | 31 +++++++++++ 10 files changed, 165 insertions(+), 16 deletions(-) diff --git a/cmd/cloud-controller-manager/app/config/BUILD b/cmd/cloud-controller-manager/app/config/BUILD index 5f4ce185b20..157c45435eb 100644 --- a/cmd/cloud-controller-manager/app/config/BUILD +++ b/cmd/cloud-controller-manager/app/config/BUILD @@ -5,7 +5,14 @@ go_library( srcs = ["config.go"], importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config", visibility = ["//visibility:public"], - deps = ["//cmd/controller-manager/app:go_default_library"], + deps = [ + "//cmd/controller-manager/app:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], ) filegroup( diff --git a/cmd/cloud-controller-manager/app/options/BUILD b/cmd/cloud-controller-manager/app/options/BUILD index 71b86717784..7e36ffa8d08 100644 --- a/cmd/cloud-controller-manager/app/options/BUILD +++ b/cmd/cloud-controller-manager/app/options/BUILD @@ -13,12 +13,24 @@ go_library( deps = [ "//cmd/cloud-controller-manager/app/config:go_default_library", "//cmd/controller-manager/app/options:go_default_library", + "//pkg/api/legacyscheme:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/componentconfig/v1alpha1:go_default_library", "//pkg/features:go_default_library", "//pkg/master/ports:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/cmd/controller-manager/app/BUILD b/cmd/controller-manager/app/BUILD index d36a9526ec5..c0bd8593975 100644 --- a/cmd/controller-manager/app/BUILD +++ b/cmd/controller-manager/app/BUILD @@ -3,7 +3,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "config.go", "helper.go", "insecure_serving.go", "serve.go", @@ -25,8 +24,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server/mux:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/routes:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - 
"//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/cmd/controller-manager/app/options/BUILD b/cmd/controller-manager/app/options/BUILD index ea9ff4417df..83b848891f1 100644 --- a/cmd/controller-manager/app/options/BUILD +++ b/cmd/controller-manager/app/options/BUILD @@ -20,7 +20,6 @@ go_library( "namespacecontroller.go", "nodeipamcontroller.go", "nodelifecyclecontroller.go", - "options.go", "persistentvolumebindercontroller.go", "podgccontroller.go", "replicasetcontroller.go", @@ -33,21 +32,11 @@ go_library( visibility = ["//visibility:public"], deps = [ "//cmd/controller-manager/app:go_default_library", - "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/componentconfig:go_default_library", - "//pkg/apis/componentconfig/v1alpha1:go_default_library", "//pkg/client/leaderelectionconfig:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/cmd/kube-controller-manager/app/config/BUILD b/cmd/kube-controller-manager/app/config/BUILD index a9c61b0abad..73a13e94f17 100644 --- a/cmd/kube-controller-manager/app/config/BUILD +++ b/cmd/kube-controller-manager/app/config/BUILD @@ -5,7 +5,14 @@ go_library( srcs = ["config.go"], importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/config", visibility = ["//visibility:public"], - deps = ["//cmd/controller-manager/app:go_default_library"], + deps = [ + "//cmd/controller-manager/app:go_default_library", + "//pkg/apis/componentconfig:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + ], ) filegroup( diff --git a/cmd/kube-controller-manager/app/options/BUILD b/cmd/kube-controller-manager/app/options/BUILD index 70d887cc7b0..3bba70af884 100644 --- a/cmd/kube-controller-manager/app/options/BUILD +++ b/cmd/kube-controller-manager/app/options/BUILD @@ -13,14 +13,25 @@ go_library( deps = [ "//cmd/controller-manager/app/options:go_default_library", "//cmd/kube-controller-manager/app/config:go_default_library", + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/componentconfig:go_default_library", + "//pkg/apis/componentconfig/v1alpha1:go_default_library", "//pkg/controller/garbagecollector:go_default_library", "//pkg/features:go_default_library", "//pkg/master/ports:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", 
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", ], ) diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go index c4b56fc563b..9e6cc33cb32 100644 --- a/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go @@ -43,6 +43,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_componentconfig_CSRSigningControllerConfiguration_To_v1alpha1_CSRSigningControllerConfiguration, Convert_v1alpha1_ClientConnectionConfiguration_To_componentconfig_ClientConnectionConfiguration, Convert_componentconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration, + Convert_v1alpha1_CloudControllerManagerConfiguration_To_componentconfig_CloudControllerManagerConfiguration, + Convert_componentconfig_CloudControllerManagerConfiguration_To_v1alpha1_CloudControllerManagerConfiguration, Convert_v1alpha1_CloudProviderConfiguration_To_componentconfig_CloudProviderConfiguration, Convert_componentconfig_CloudProviderConfiguration_To_v1alpha1_CloudProviderConfiguration, Convert_v1alpha1_DaemonSetControllerConfiguration_To_componentconfig_DaemonSetControllerConfiguration, @@ -184,6 +186,56 @@ func Convert_componentconfig_ClientConnectionConfiguration_To_v1alpha1_ClientCon return autoConvert_componentconfig_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(in, out, s) } +func autoConvert_v1alpha1_CloudControllerManagerConfiguration_To_componentconfig_CloudControllerManagerConfiguration(in *CloudControllerManagerConfiguration, out *componentconfig.CloudControllerManagerConfiguration, s conversion.Scope) error { + if err := Convert_v1alpha1_CloudProviderConfiguration_To_componentconfig_CloudProviderConfiguration(&in.CloudProvider, &out.CloudProvider, s); err != nil { + return err + } + if err := Convert_v1alpha1_DebuggingConfiguration_To_componentconfig_DebuggingConfiguration(&in.Debugging, &out.Debugging, s); err != nil { + return err + } + if err := Convert_v1alpha1_GenericComponentConfiguration_To_componentconfig_GenericComponentConfiguration(&in.GenericComponent, &out.GenericComponent, s); err != nil { + return err + } + if err := Convert_v1alpha1_KubeCloudSharedConfiguration_To_componentconfig_KubeCloudSharedConfiguration(&in.KubeCloudShared, &out.KubeCloudShared, s); err != nil { + return err + } + if err := Convert_v1alpha1_ServiceControllerConfiguration_To_componentconfig_ServiceControllerConfiguration(&in.ServiceController, &out.ServiceController, s); err != nil { + return err + } + out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + return nil +} + +// Convert_v1alpha1_CloudControllerManagerConfiguration_To_componentconfig_CloudControllerManagerConfiguration is an autogenerated conversion function. 
+func Convert_v1alpha1_CloudControllerManagerConfiguration_To_componentconfig_CloudControllerManagerConfiguration(in *CloudControllerManagerConfiguration, out *componentconfig.CloudControllerManagerConfiguration, s conversion.Scope) error { + return autoConvert_v1alpha1_CloudControllerManagerConfiguration_To_componentconfig_CloudControllerManagerConfiguration(in, out, s) +} + +func autoConvert_componentconfig_CloudControllerManagerConfiguration_To_v1alpha1_CloudControllerManagerConfiguration(in *componentconfig.CloudControllerManagerConfiguration, out *CloudControllerManagerConfiguration, s conversion.Scope) error { + if err := Convert_componentconfig_CloudProviderConfiguration_To_v1alpha1_CloudProviderConfiguration(&in.CloudProvider, &out.CloudProvider, s); err != nil { + return err + } + if err := Convert_componentconfig_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.Debugging, &out.Debugging, s); err != nil { + return err + } + if err := Convert_componentconfig_GenericComponentConfiguration_To_v1alpha1_GenericComponentConfiguration(&in.GenericComponent, &out.GenericComponent, s); err != nil { + return err + } + if err := Convert_componentconfig_KubeCloudSharedConfiguration_To_v1alpha1_KubeCloudSharedConfiguration(&in.KubeCloudShared, &out.KubeCloudShared, s); err != nil { + return err + } + if err := Convert_componentconfig_ServiceControllerConfiguration_To_v1alpha1_ServiceControllerConfiguration(&in.ServiceController, &out.ServiceController, s); err != nil { + return err + } + out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + return nil +} + +// Convert_componentconfig_CloudControllerManagerConfiguration_To_v1alpha1_CloudControllerManagerConfiguration is an autogenerated conversion function. +func Convert_componentconfig_CloudControllerManagerConfiguration_To_v1alpha1_CloudControllerManagerConfiguration(in *componentconfig.CloudControllerManagerConfiguration, out *CloudControllerManagerConfiguration, s conversion.Scope) error { + return autoConvert_componentconfig_CloudControllerManagerConfiguration_To_v1alpha1_CloudControllerManagerConfiguration(in, out, s) +} + func autoConvert_v1alpha1_CloudProviderConfiguration_To_componentconfig_CloudProviderConfiguration(in *CloudProviderConfiguration, out *componentconfig.CloudProviderConfiguration, s conversion.Scope) error { out.Name = in.Name out.CloudConfigFile = in.CloudConfigFile diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go index 0103c1a6e3e..45cf5d9afe0 100644 --- a/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go @@ -74,6 +74,37 @@ func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfigurati return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudControllerManagerConfiguration) DeepCopyInto(out *CloudControllerManagerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CloudProvider = in.CloudProvider + out.Debugging = in.Debugging + in.GenericComponent.DeepCopyInto(&out.GenericComponent) + in.KubeCloudShared.DeepCopyInto(&out.KubeCloudShared) + out.ServiceController = in.ServiceController + out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudControllerManagerConfiguration. 
+func (in *CloudControllerManagerConfiguration) DeepCopy() *CloudControllerManagerConfiguration { + if in == nil { + return nil + } + out := new(CloudControllerManagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudControllerManagerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CloudProviderConfiguration) DeepCopyInto(out *CloudProviderConfiguration) { *out = *in diff --git a/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go b/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go index 8c6ac4518be..75fd9b76226 100644 --- a/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/componentconfig/v1alpha1/zz_generated.defaults.go @@ -28,6 +28,9 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CloudControllerManagerConfiguration{}, func(obj interface{}) { + SetObjectDefaults_CloudControllerManagerConfiguration(obj.(*CloudControllerManagerConfiguration)) + }) scheme.AddTypeDefaultingFunc(&KubeControllerManagerConfiguration{}, func(obj interface{}) { SetObjectDefaults_KubeControllerManagerConfiguration(obj.(*KubeControllerManagerConfiguration)) }) @@ -35,9 +38,18 @@ func RegisterDefaults(scheme *runtime.Scheme) error { return nil } +func SetObjectDefaults_CloudControllerManagerConfiguration(in *CloudControllerManagerConfiguration) { + SetDefaults_CloudControllerManagerConfiguration(in) + SetDefaults_GenericComponentConfiguration(&in.GenericComponent) + SetDefaults_LeaderElectionConfiguration(&in.GenericComponent.LeaderElection) + SetDefaults_KubeCloudSharedConfiguration(&in.KubeCloudShared) +} + func SetObjectDefaults_KubeControllerManagerConfiguration(in *KubeControllerManagerConfiguration) { SetDefaults_KubeControllerManagerConfiguration(in) + SetDefaults_GenericComponentConfiguration(&in.GenericComponent) SetDefaults_LeaderElectionConfiguration(&in.GenericComponent.LeaderElection) + SetDefaults_KubeCloudSharedConfiguration(&in.KubeCloudShared) SetDefaults_VolumeConfiguration(&in.PersistentVolumeBinderController.VolumeConfiguration) SetDefaults_PersistentVolumeRecyclerConfiguration(&in.PersistentVolumeBinderController.VolumeConfiguration.PersistentVolumeRecyclerConfiguration) } diff --git a/pkg/apis/componentconfig/zz_generated.deepcopy.go b/pkg/apis/componentconfig/zz_generated.deepcopy.go index cf3a2c295f0..c8317d119b3 100644 --- a/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ b/pkg/apis/componentconfig/zz_generated.deepcopy.go @@ -74,6 +74,37 @@ func (in *ClientConnectionConfiguration) DeepCopy() *ClientConnectionConfigurati return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudControllerManagerConfiguration) DeepCopyInto(out *CloudControllerManagerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + out.CloudProvider = in.CloudProvider + out.Debugging = in.Debugging + out.GenericComponent = in.GenericComponent + out.KubeCloudShared = in.KubeCloudShared + out.ServiceController = in.ServiceController + out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudControllerManagerConfiguration. +func (in *CloudControllerManagerConfiguration) DeepCopy() *CloudControllerManagerConfiguration { + if in == nil { + return nil + } + out := new(CloudControllerManagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudControllerManagerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CloudProviderConfiguration) DeepCopyInto(out *CloudProviderConfiguration) { *out = *in From 55bc8d74a20e0fc018982d810ea5e7e711da2093 Mon Sep 17 00:00:00 2001 From: Yecheng Fu Date: Fri, 18 May 2018 16:55:00 +0800 Subject: [PATCH 032/307] Handle TERM signal to reduce pod terminating time. --- test/e2e/framework/deployment_util.go | 2 +- test/e2e/framework/pv_util.go | 4 ++-- test/e2e/framework/util.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go index bd9b50c1bb9..df8f68af86b 100644 --- a/test/e2e/framework/deployment_util.go +++ b/test/e2e/framework/deployment_util.go @@ -231,7 +231,7 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[ // name. A slice of BASH commands can be supplied as args to be run by the pod func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment { if len(command) == 0 { - command = "while true; do sleep 1; done" + command = "trap exit TERM; while true; do sleep 1; done" } zero := int64(0) deploymentName := "deployment-" + string(uuid.NewUUID()) diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go index 1952d8d8157..131806ad4ee 100644 --- a/test/e2e/framework/pv_util.go +++ b/test/e2e/framework/pv_util.go @@ -815,7 +815,7 @@ func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod { // name. A slice of BASH commands can be supplied as args to be run by the pod func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod { if len(command) == 0 { - command = "while true; do sleep 1; done" + command = "trap exit TERM; while true; do sleep 1; done" } podSpec := &v1.Pod{ TypeMeta: metav1.TypeMeta{ @@ -902,7 +902,7 @@ func MakeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.Pers // SELinux testing requires to pass HostIPC and HostPID as booleansi arguments. 
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod { if len(command) == 0 { - command = "while true; do sleep 1; done" + command = "trap exit TERM; while true; do sleep 1; done" } podName := "security-context-" + string(uuid.NewUUID()) if fsGroup == nil { diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 36d88ef2d21..2824e33271e 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3472,7 +3472,7 @@ func newExecPodSpec(ns, generateName string) *v1.Pod { { Name: "exec", Image: BusyBoxImage, - Command: []string{"sh", "-c", "while true; do sleep 5; done"}, + Command: []string{"sh", "-c", "trap exit TERM; while true; do sleep 5; done"}, }, }, }, From 5e9e3afb2e7f8623b9bb4fa9c9a0bd299e2a01ee Mon Sep 17 00:00:00 2001 From: WanLinghao Date: Tue, 15 May 2018 16:55:44 +0800 Subject: [PATCH 033/307] kubectl: add aggregation rule support to clusterrole --- hack/make-rules/test-cmd-util.sh | 2 + pkg/kubectl/cmd/create/BUILD | 1 + pkg/kubectl/cmd/create/create_clusterrole.go | 36 ++++++++-- .../cmd/create/create_clusterrole_test.go | 66 +++++++++++++++++++ 4 files changed, 100 insertions(+), 5 deletions(-) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index bba8d4656cc..c2d6e8392b2 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -3802,6 +3802,8 @@ run_clusterroles_tests() { kubectl create "${kube_flags[@]}" clusterrole url-reader --verb=get --non-resource-url=/logs/* --non-resource-url=/healthz/* kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:' kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:' + kubectl create "${kube_flags[@]}" clusterrole aggregation-reader --aggregation-rule="foo1=foo2" + kube::test::get_object_assert clusterrole/aggregation-reader "{{$id_field}}" 'aggregation-reader' # test `kubectl create clusterrolebinding` # test `kubectl set subject clusterrolebinding` diff --git a/pkg/kubectl/cmd/create/BUILD b/pkg/kubectl/cmd/create/BUILD index 131dc94981d..4b54ade35f8 100644 --- a/pkg/kubectl/cmd/create/BUILD +++ b/pkg/kubectl/cmd/create/BUILD @@ -45,6 +45,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", diff --git a/pkg/kubectl/cmd/create/create_clusterrole.go b/pkg/kubectl/cmd/create/create_clusterrole.go index e87e771f120..4f3002549fc 100644 --- a/pkg/kubectl/cmd/create/create_clusterrole.go +++ b/pkg/kubectl/cmd/create/create_clusterrole.go @@ -24,6 +24,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" @@ -48,7 +49,10 @@ var ( kubectl create clusterrole foo --verb=get,list,watch --resource=pods,pods/status # Create a ClusterRole name 
"foo" with NonResourceURL specified - kubectl create clusterrole "foo" --verb=get --non-resource-url=/logs/*`)) + kubectl create clusterrole "foo" --verb=get --non-resource-url=/logs/* + + # Create a ClusterRole name "monitoring" with AggregationRule specified + kubectl create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true"`)) // Valid nonResource verb list for validation. validNonResourceVerbs = []string{"*", "get", "post", "put", "delete", "patch", "head", "options"} @@ -57,12 +61,14 @@ var ( type CreateClusterRoleOptions struct { *CreateRoleOptions NonResourceURLs []string + AggregationRule map[string]string } // ClusterRole is a command to ease creating ClusterRoles. func NewCmdCreateClusterRole(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { c := &CreateClusterRoleOptions{ CreateRoleOptions: NewCreateRoleOptions(ioStreams), + AggregationRule: map[string]string{}, } cmd := &cobra.Command{ Use: "clusterrole NAME --verb=verb --resource=resource.group [--resource-name=resourcename] [--dry-run]", @@ -86,6 +92,7 @@ func NewCmdCreateClusterRole(f cmdutil.Factory, ioStreams genericclioptions.IOSt cmd.Flags().StringSliceVar(&c.NonResourceURLs, "non-resource-url", c.NonResourceURLs, "A partial url that user should have access to.") cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to") cmd.Flags().StringArrayVar(&c.ResourceNames, "resource-name", c.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items") + cmd.Flags().Var(utilflag.NewMapStringString(&c.AggregationRule), "aggregation-rule", "An aggregation label selector for combining ClusterRoles.") return cmd } @@ -108,6 +115,13 @@ func (c *CreateClusterRoleOptions) Validate() error { return fmt.Errorf("name must be specified") } + if len(c.AggregationRule) > 0 { + if len(c.NonResourceURLs) > 0 || len(c.Verbs) > 0 || len(c.Resources) > 0 || len(c.ResourceNames) > 0 { + return fmt.Errorf("aggregation rule must be specified without nonResourceURLs, verbs, resources or resourceNames") + } + return nil + } + // validate verbs. if len(c.Verbs) == 0 { return fmt.Errorf("at least one verb must be specified") @@ -162,11 +176,23 @@ func (c *CreateClusterRoleOptions) RunCreateRole() error { TypeMeta: metav1.TypeMeta{APIVersion: rbacv1.SchemeGroupVersion.String(), Kind: "ClusterRole"}, } clusterRole.Name = c.Name - rules, err := generateResourcePolicyRules(c.Mapper, c.Verbs, c.Resources, c.ResourceNames, c.NonResourceURLs) - if err != nil { - return err + + var err error + if len(c.AggregationRule) == 0 { + rules, err := generateResourcePolicyRules(c.Mapper, c.Verbs, c.Resources, c.ResourceNames, c.NonResourceURLs) + if err != nil { + return err + } + clusterRole.Rules = rules + } else { + clusterRole.AggregationRule = &rbacv1.AggregationRule{ + ClusterRoleSelectors: []metav1.LabelSelector{ + { + MatchLabels: c.AggregationRule, + }, + }, + } } - clusterRole.Rules = rules // Create ClusterRole. 
if !c.DryRun { diff --git a/pkg/kubectl/cmd/create/create_clusterrole_test.go b/pkg/kubectl/cmd/create/create_clusterrole_test.go index 031c9435e58..4dc876f9a9e 100644 --- a/pkg/kubectl/cmd/create/create_clusterrole_test.go +++ b/pkg/kubectl/cmd/create/create_clusterrole_test.go @@ -22,6 +22,7 @@ import ( rbac "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" restclient "k8s.io/client-go/rest" @@ -47,6 +48,7 @@ func TestCreateClusterRole(t *testing.T) { resources string nonResourceURL string resourceNames string + aggregationRule string expectedClusterRole *rbac.ClusterRole }{ "test-duplicate-resources": { @@ -130,6 +132,25 @@ func TestCreateClusterRole(t *testing.T) { }, }, }, + "test-aggregation-rules": { + aggregationRule: "foo1=foo2,foo3=foo4", + expectedClusterRole: &rbac.ClusterRole{ + TypeMeta: v1.TypeMeta{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "ClusterRole"}, + ObjectMeta: v1.ObjectMeta{ + Name: clusterRoleName, + }, + AggregationRule: &rbac.AggregationRule{ + ClusterRoleSelectors: []metav1.LabelSelector{ + { + MatchLabels: map[string]string{ + "foo1": "foo2", + "foo3": "foo4", + }, + }, + }, + }, + }, + }, } for name, test := range tests { @@ -140,6 +161,7 @@ func TestCreateClusterRole(t *testing.T) { cmd.Flags().Set("verb", test.verbs) cmd.Flags().Set("resource", test.resources) cmd.Flags().Set("non-resource-url", test.nonResourceURL) + cmd.Flags().Set("aggregation-rule", test.aggregationRule) if test.resourceNames != "" { cmd.Flags().Set("resource-name", test.resourceNames) } @@ -433,6 +455,50 @@ func TestClusterRoleValidate(t *testing.T) { }, expectErr: false, }, + "test-aggregation-rule-with-verb": { + clusterRoleOptions: &CreateClusterRoleOptions{ + CreateRoleOptions: &CreateRoleOptions{ + Name: "my-clusterrole", + Verbs: []string{"get"}, + }, + AggregationRule: map[string]string{"foo-key": "foo-vlue"}, + }, + expectErr: true, + }, + "test-aggregation-rule-with-resource": { + clusterRoleOptions: &CreateClusterRoleOptions{ + CreateRoleOptions: &CreateRoleOptions{ + Name: "my-clusterrole", + Resources: []ResourceOptions{ + { + Resource: "replicasets", + SubResource: "scale", + }, + }, + }, + AggregationRule: map[string]string{"foo-key": "foo-vlue"}, + }, + expectErr: true, + }, + "test-aggregation-rule-with-no-resource-url": { + clusterRoleOptions: &CreateClusterRoleOptions{ + CreateRoleOptions: &CreateRoleOptions{ + Name: "my-clusterrole", + }, + NonResourceURLs: []string{"/logs/"}, + AggregationRule: map[string]string{"foo-key": "foo-vlue"}, + }, + expectErr: true, + }, + "test-aggregation-rule": { + clusterRoleOptions: &CreateClusterRoleOptions{ + CreateRoleOptions: &CreateRoleOptions{ + Name: "my-clusterrole", + }, + AggregationRule: map[string]string{"foo-key": "foo-vlue"}, + }, + expectErr: false, + }, } for name, test := range tests { From 080d2dfe8f1e3cf6bad8ef2c5fecbe695ee9d90a Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 17 May 2018 13:48:45 +0200 Subject: [PATCH 034/307] Add SELinux support to CSI --- pkg/volume/csi/csi_mounter.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index 812b7de72e2..53908c76b93 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -252,10 +252,18 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { } func (c 
*csiMountMgr) GetAttributes() volume.Attributes { + mounter := c.plugin.host.GetMounter(c.plugin.GetPluginName()) + path := c.GetPath() + supportSelinux, err := mounter.GetSELinuxSupport(path) + if err != nil { + glog.V(2).Info(log("error checking for SELinux support: %s", err)) + // Best guess + supportSelinux = false + } return volume.Attributes{ ReadOnly: c.readOnly, Managed: !c.readOnly, - SupportsSELinux: false, + SupportsSELinux: supportSelinux, } } From 0fec56c946b2001420423f329a7d2dc8abd5f72d Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Tue, 15 May 2018 15:19:30 +0200 Subject: [PATCH 035/307] kubeadm: crictl reset commands fixes Signed-off-by: Antonio Murdaca --- cmd/kubeadm/app/cmd/reset.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubeadm/app/cmd/reset.go b/cmd/kubeadm/app/cmd/reset.go index 22d7a44126b..81d2cc83cc0 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -221,14 +221,14 @@ func resetWithCrictl(execer utilsexec.Interface, dockerCheck preflight.Checker, if strings.TrimSpace(s) == "" { continue } - params = []string{"-r", criSocketPath, "stop", s} + params = []string{"-r", criSocketPath, "stopp", s} glog.V(1).Infof("[reset] Executing command %s %s", crictlPath, strings.Join(params, " ")) if err := execer.Command(crictlPath, params...).Run(); err != nil { glog.Infof("[reset] failed to stop the running containers using crictl: %v. Trying to use docker instead", err) resetWithDocker(execer, dockerCheck) return } - params = []string{"-r", criSocketPath, "rm", s} + params = []string{"-r", criSocketPath, "rmp", s} glog.V(1).Infof("[reset] Executing command %s %s", crictlPath, strings.Join(params, " ")) if err := execer.Command(crictlPath, params...).Run(); err != nil { glog.Infof("[reset] failed to remove the running containers using crictl: %v. 
Trying to use docker instead", err) From 9219a762661758190d37a2edfcb8eba925d6f5e1 Mon Sep 17 00:00:00 2001 From: Guoliang Wang Date: Fri, 18 May 2018 22:20:42 +0800 Subject: [PATCH 036/307] remove unused code of (pkg/scheduler) --- .../priorities/selector_spreading_test.go | 1 - pkg/scheduler/core/BUILD | 1 - pkg/scheduler/core/equivalence_cache_test.go | 2 -- pkg/scheduler/core/extender_test.go | 18 +++++++----------- pkg/scheduler/schedulercache/cache_test.go | 1 - 5 files changed, 7 insertions(+), 16 deletions(-) diff --git a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index bf6fda25635..71b440755fc 100644 --- a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -409,7 +409,6 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { tests := []struct { pod *v1.Pod pods []*v1.Pod - nodes []string rcs []*v1.ReplicationController rss []*extensions.ReplicaSet services []*v1.Service diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD index c90aefbee40..8068659c199 100644 --- a/pkg/scheduler/core/BUILD +++ b/pkg/scheduler/core/BUILD @@ -27,7 +27,6 @@ go_test( "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/scheduler/core/equivalence_cache_test.go b/pkg/scheduler/core/equivalence_cache_test.go index 9654515c2b4..3b33917a14d 100644 --- a/pkg/scheduler/core/equivalence_cache_test.go +++ b/pkg/scheduler/core/equivalence_cache_test.go @@ -635,7 +635,6 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { tests := []struct { podName string nodeName string - predicateKey string equivalenceHashForUpdatePredicate uint64 cachedItem predicateItemType }{ @@ -707,7 +706,6 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { tests := []struct { podName string nodeName string - predicateKey string equivalenceHashForUpdatePredicate uint64 cachedItem predicateItemType }{ diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index 80ed8cc31f0..b0e7489312d 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -22,7 +22,6 @@ import ( "time" "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/scheduler/algorithm" @@ -117,7 +116,6 @@ type FakeExtender struct { // Cached node information for fake extender cachedNodeNameToInfo map[string]*schedulercache.NodeInfo - cachedPDBs []*policy.PodDisruptionBudget } func (f *FakeExtender) IsIgnorable() bool { @@ -332,15 +330,13 @@ var _ algorithm.SchedulerExtender = &FakeExtender{} func TestGenericSchedulerWithExtenders(t *testing.T) { tests := []struct { - name string - predicates map[string]algorithm.FitPredicate - prioritizers []algorithm.PriorityConfig - extenders []FakeExtender - extenderPredicates []fitPredicate - extenderPrioritizers []priorityConfig - nodes []string - expectedHost string - expectsErr bool + name string + predicates map[string]algorithm.FitPredicate + prioritizers []algorithm.PriorityConfig 
+ extenders []FakeExtender + nodes []string + expectedHost string + expectsErr bool }{ { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, diff --git a/pkg/scheduler/schedulercache/cache_test.go b/pkg/scheduler/schedulercache/cache_test.go index 748691ffefb..f3a560155ac 100644 --- a/pkg/scheduler/schedulercache/cache_test.go +++ b/pkg/scheduler/schedulercache/cache_test.go @@ -517,7 +517,6 @@ func TestUpdatePod(t *testing.T) { makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 8080, Protocol: "TCP"}}), } tests := []struct { - podsToAssume []*v1.Pod podsToAdd []*v1.Pod podsToUpdate []*v1.Pod From a3593b5aa69f6b654a5fa7a8ef26f83075c67ebe Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Fri, 18 May 2018 10:09:09 -0700 Subject: [PATCH 037/307] Graduate CRIContainerLogRotation to beta --- pkg/features/kube_features.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 3e4c6f269fd..f2287028919 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -248,7 +248,7 @@ const ( TokenRequest utilfeature.Feature = "TokenRequest" // owner: @Random-Liu - // alpha: v1.10 + // beta: v1.11 // // Enable container log rotation for cri container runtime CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation" @@ -322,7 +322,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha}, ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha}, TokenRequest: {Default: false, PreRelease: utilfeature.Alpha}, - CRIContainerLogRotation: {Default: false, PreRelease: utilfeature.Alpha}, + CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta}, RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, From ea057eb9b52e375fff857a04a787dc4c30cfa697 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Fri, 18 May 2018 14:19:26 -0400 Subject: [PATCH 038/307] Fix TestSchedulerWithVolumeBinding to avoid setting predicate ordering. It is causing data race condition as predicate ordering is changing global variable predicatesOrdering. Infact this test does not require any special predicate order and should work on default predicate ordering as far as VolumeScheduling feature is enabled. --- pkg/scheduler/scheduler_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index abd131708bc..ebdb5f19937 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -660,8 +660,6 @@ func makePredicateError(failReason string) error { } func TestSchedulerWithVolumeBinding(t *testing.T) { - order := []string{predicates.CheckVolumeBindingPred, predicates.GeneralPred} - predicates.SetPredicatesOrdering(order) findErr := fmt.Errorf("find err") assumeErr := fmt.Errorf("assume err") bindErr := fmt.Errorf("bind err") From 83509a092f388405b381ce8f099211d101c48865 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Tue, 15 May 2018 16:08:46 -0700 Subject: [PATCH 039/307] Refactor test utils that deal with Kubelet metrics for clarity I found these functions hard to understand, because the names did not accurately reflect their behavior. 
For example, GetKubeletMetrics assumed that all of the metrics passed in were measuring latency. The caller of GetKubeletMetrics was implicitly making this assumption, but it was not obvious at the call site. --- test/e2e/framework/kubelet_stats.go | 23 +++++++++++++---------- test/e2e_node/eviction_test.go | 6 +++--- test/e2e_node/util.go | 10 ++++++---- 3 files changed, 22 insertions(+), 17 deletions(-) diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go index 7bc06edde88..1304cbfcf59 100644 --- a/test/e2e/framework/kubelet_stats.go +++ b/test/e2e/framework/kubelet_stats.go @@ -97,10 +97,11 @@ func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletM return kubeletMetrics, nil } -// GetKubeletLatencyMetrics gets all latency related kubelet metrics. Note that the KubeletMetrcis -// passed in should not contain subsystem prefix. -func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics { - latencyMethods := sets.NewString( +// GetDefaultKubeletLatencyMetrics calls GetKubeletLatencyMetrics with a set of default metricNames +// identifying common latency metrics. +// Note that the KubeletMetrics passed in should not contain subsystem prefix. +func GetDefaultKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics { + latencyMetricNames := sets.NewString( kubeletmetrics.PodWorkerLatencyKey, kubeletmetrics.PodWorkerStartLatencyKey, kubeletmetrics.PodStartLatencyKey, @@ -109,13 +110,15 @@ func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics { kubeletmetrics.PodWorkerStartLatencyKey, kubeletmetrics.PLEGRelistLatencyKey, ) - return GetKubeletMetrics(ms, latencyMethods) + return GetKubeletLatencyMetrics(ms, latencyMetricNames) } -func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLatencyMetrics { +// GetKubeletLatencyMetrics filters ms to include only those contained in the metricNames set, +// then constructs a KubeletLatencyMetrics list based on the samples associated with those metrics. 
+func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics, filterMetricNames sets.String) KubeletLatencyMetrics { var latencyMetrics KubeletLatencyMetrics - for method, samples := range ms { - if !methods.Has(method) { + for name, samples := range ms { + if !filterMetricNames.Has(name) { continue } for _, sample := range samples { @@ -131,7 +134,7 @@ func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLa latencyMetrics = append(latencyMetrics, KubeletLatencyMetric{ Operation: operation, - Method: method, + Method: name, Quantile: quantile, Latency: time.Duration(int64(latency)) * time.Microsecond, }) @@ -265,7 +268,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration if err != nil { return KubeletLatencyMetrics{}, err } - latencyMetrics := GetKubeletLatencyMetrics(ms) + latencyMetrics := GetDefaultKubeletLatencyMetrics(ms) sort.Sort(latencyMetrics) var badMetrics KubeletLatencyMetrics logFunc("\nLatency metrics for node %v", nodeName) diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index fb59ca781f2..c93d54dbb32 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -411,7 +411,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe framework.Logf("Node does NOT have %s", expectedNodeCondition) } } - logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey) + logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey) logFunc() return verifyEvictionOrdering(f, testSpecs) }, pressureTimeout, evictionPollInterval).Should(BeNil()) @@ -425,7 +425,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition)) Eventually(func() error { logFunc() - logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey) + logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey) if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) { return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition) } @@ -438,7 +438,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe return fmt.Errorf("%s dissappeared and then reappeared", expectedNodeCondition) } logFunc() - logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey) + logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey) return verifyEvictionOrdering(f, testSpecs) }, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil()) }) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index 4df85bcdebb..52ec5a90f41 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -331,17 +331,19 @@ func getLocalNode(f *framework.Framework) *apiv1.Node { return &nodeList.Items[0] } -// logs prometheus metrics from the local kubelet. -func logKubeletMetrics(metricKeys ...string) { +// logKubeletLatencyMetrics logs KubeletLatencyMetrics computed from the Prometheus +// metrics exposed on the current node and identified by the metricNames. +// The Kubelet subsystem prefix is automatically prepended to these metric names. 
+func logKubeletLatencyMetrics(metricNames ...string) { metricSet := sets.NewString() - for _, key := range metricKeys { + for _, key := range metricNames { metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key) } metric, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName + ":10255") if err != nil { framework.Logf("Error getting kubelet metrics: %v", err) } else { - framework.Logf("Kubelet Metrics: %+v", framework.GetKubeletMetrics(metric, metricSet)) + framework.Logf("Kubelet Metrics: %+v", framework.GetKubeletLatencyMetrics(metric, metricSet)) } } From afa2a1cfe559400f67d8bd4826a257444e1be4e4 Mon Sep 17 00:00:00 2001 From: Guoliang Wang Date: Sat, 19 May 2018 08:09:39 +0800 Subject: [PATCH 040/307] Fixing wrong unit test naming --- .../podautoscaler/legacy_horizontal_test.go | 28 +++++------ .../legacy_replica_calculator_test.go | 46 +++++++++---------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/pkg/controller/podautoscaler/legacy_horizontal_test.go b/pkg/controller/podautoscaler/legacy_horizontal_test.go index 59d53447073..b6ffc18c312 100644 --- a/pkg/controller/podautoscaler/legacy_horizontal_test.go +++ b/pkg/controller/podautoscaler/legacy_horizontal_test.go @@ -525,7 +525,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) { tc.verifyResults(t) } -func LegacyTestScaleUp(t *testing.T) { +func TestLegacyScaleUp(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -540,7 +540,7 @@ func LegacyTestScaleUp(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleUpUnreadyLessScale(t *testing.T) { +func TestLegacyScaleUpUnreadyLessScale(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -557,7 +557,7 @@ func LegacyTestScaleUpUnreadyLessScale(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleUpUnreadyNoScale(t *testing.T) { +func TestLegacyScaleUpUnreadyNoScale(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -574,7 +574,7 @@ func LegacyTestScaleUpUnreadyNoScale(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleUpDeployment(t *testing.T) { +func TestLegacyScaleUpDeployment(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -594,7 +594,7 @@ func LegacyTestScaleUpDeployment(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleUpReplicaSet(t *testing.T) { +func TestLegacyScaleUpReplicaSet(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -614,7 +614,7 @@ func LegacyTestScaleUpReplicaSet(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleUpCM(t *testing.T) { +func TestLegacyScaleUpCM(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -636,7 +636,7 @@ func LegacyTestScaleUpCM(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleUpCMUnreadyLessScale(t *testing.T) { +func TestLegacyScaleUpCMUnreadyLessScale(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -659,7 +659,7 @@ func LegacyTestScaleUpCMUnreadyLessScale(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) { +func TestLegacyScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -682,7 +682,7 @@ func LegacyTestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleDown(t *testing.T) { +func TestLegacyScaleDown(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -697,7 +697,7 @@ func LegacyTestScaleDown(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleDownCM(t 
*testing.T) { +func TestLegacyScaleDownCM(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -719,7 +719,7 @@ func LegacyTestScaleDownCM(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleDownIgnoresUnreadyPods(t *testing.T) { +func TestLegacyScaleDownIgnoresUnreadyPods(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -841,7 +841,7 @@ func LegacyTestMaxReplicas(t *testing.T) { tc.runTest(t) } -func LegacyTestSuperfluousMetrics(t *testing.T) { +func TestLegacySuperfluousMetrics(t *testing.T) { tc := legacyTestCase{ minReplicas: 2, maxReplicas: 6, @@ -1023,7 +1023,7 @@ func LegacyTestComputedToleranceAlgImplementation(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleUpRCImmediately(t *testing.T) { +func TestLegacyScaleUpRCImmediately(t *testing.T) { time := metav1.Time{Time: time.Now()} tc := legacyTestCase{ minReplicas: 2, @@ -1039,7 +1039,7 @@ func LegacyTestScaleUpRCImmediately(t *testing.T) { tc.runTest(t) } -func LegacyTestScaleDownRCImmediately(t *testing.T) { +func TestLegacyScaleDownRCImmediately(t *testing.T) { time := metav1.Time{Time: time.Now()} tc := legacyTestCase{ minReplicas: 2, diff --git a/pkg/controller/podautoscaler/legacy_replica_calculator_test.go b/pkg/controller/podautoscaler/legacy_replica_calculator_test.go index 85cc30d35a3..7f3e872a502 100644 --- a/pkg/controller/podautoscaler/legacy_replica_calculator_test.go +++ b/pkg/controller/podautoscaler/legacy_replica_calculator_test.go @@ -227,7 +227,7 @@ func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) { } } -func LegacyTestReplicaCalcDisjointResourcesMetrics(t *testing.T) { +func TestLegacyReplicaCalcDisjointResourcesMetrics(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 1, expectedError: fmt.Errorf("no metrics returned matched known pods"), @@ -243,7 +243,7 @@ func LegacyTestReplicaCalcDisjointResourcesMetrics(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleUp(t *testing.T) { +func TestLegacyReplicaCalcScaleUp(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 5, @@ -260,7 +260,7 @@ func LegacyTestReplicaCalcScaleUp(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) { +func TestLegacyReplicaCalcScaleUpUnreadyLessScale(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 4, @@ -278,7 +278,7 @@ func LegacyTestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) { +func TestLegacyReplicaCalcScaleUpUnreadyNoScale(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 3, @@ -296,7 +296,7 @@ func LegacyTestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleUpCM(t *testing.T) { +func TestLegacyReplicaCalcScaleUpCM(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 4, @@ -310,7 +310,7 @@ func LegacyTestReplicaCalcScaleUpCM(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) { +func TestLegacyReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 4, @@ -325,7 +325,7 @@ func LegacyTestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) { +func TestLegacyReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) { tc := 
legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 3, @@ -340,7 +340,7 @@ func LegacyTestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleDown(t *testing.T) { +func TestLegacyReplicaCalcScaleDown(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 5, expectedReplicas: 3, @@ -357,7 +357,7 @@ func LegacyTestReplicaCalcScaleDown(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleDownCM(t *testing.T) { +func TestLegacyReplicaCalcScaleDownCM(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 5, expectedReplicas: 3, @@ -371,7 +371,7 @@ func LegacyTestReplicaCalcScaleDownCM(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) { +func TestLegacyReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 5, expectedReplicas: 2, @@ -389,7 +389,7 @@ func LegacyTestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcTolerance(t *testing.T) { +func TestLegacyReplicaCalcTolerance(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 3, @@ -406,7 +406,7 @@ func LegacyTestReplicaCalcTolerance(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcToleranceCM(t *testing.T) { +func TestLegacyReplicaCalcToleranceCM(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 3, @@ -420,7 +420,7 @@ func LegacyTestReplicaCalcToleranceCM(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcSuperfluousMetrics(t *testing.T) { +func TestLegacyReplicaCalcSuperfluousMetrics(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 4, expectedReplicas: 24, @@ -436,7 +436,7 @@ func LegacyTestReplicaCalcSuperfluousMetrics(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcMissingMetrics(t *testing.T) { +func TestLegacyReplicaCalcMissingMetrics(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 4, expectedReplicas: 3, @@ -453,7 +453,7 @@ func LegacyTestReplicaCalcMissingMetrics(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcEmptyMetrics(t *testing.T) { +func TestLegacyReplicaCalcEmptyMetrics(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 4, expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"), @@ -468,7 +468,7 @@ func LegacyTestReplicaCalcEmptyMetrics(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcEmptyCPURequest(t *testing.T) { +func TestLegacyReplicaCalcEmptyCPURequest(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 1, expectedError: fmt.Errorf("missing request for"), @@ -483,7 +483,7 @@ func LegacyTestReplicaCalcEmptyCPURequest(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) { +func TestLegacyReplicaCalcMissingMetricsNoChangeEq(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 2, expectedReplicas: 2, @@ -500,7 +500,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) { +func TestLegacyReplicaCalcMissingMetricsNoChangeGt(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 2, expectedReplicas: 2, @@ -517,7 +517,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) { +func 
TestLegacyReplicaCalcMissingMetricsNoChangeLt(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 2, expectedReplicas: 2, @@ -534,7 +534,7 @@ func LegacyTestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) { +func TestLegacyReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 3, @@ -552,7 +552,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) { +func TestLegacyReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 3, expectedReplicas: 4, @@ -570,7 +570,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) { tc.runTest(t) } -func LegacyTestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) { +func TestLegacyReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) { tc := legacyReplicaCalcTestCase{ currentReplicas: 4, expectedReplicas: 3, @@ -591,7 +591,7 @@ func LegacyTestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) { // TestComputedToleranceAlgImplementation is a regression test which // back-calculates a minimal percentage for downscaling based on a small percentage // increase in pod utilization which is calibrated against the tolerance value. -func LegacyTestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) { +func TestLegacyReplicaCalcComputedToleranceAlgImplementation(t *testing.T) { startPods := int32(10) // 150 mCPU per pod. From 923fb8ed391e783ede9f46073d64f754e1022bf0 Mon Sep 17 00:00:00 2001 From: Harry Zhang Date: Fri, 18 May 2018 17:17:03 -0700 Subject: [PATCH 041/307] remove knob of equiv class in perf test --- test/integration/scheduler_perf/util.go | 2 +- test/integration/util/util.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go index 20abd6f584d..13f71a6c1b8 100644 --- a/test/integration/scheduler_perf/util.go +++ b/test/integration/scheduler_perf/util.go @@ -40,7 +40,7 @@ func mustSetupScheduler() (scheduler.Configurator, util.ShutdownFunc) { QPS: 5000.0, Burst: 5000, }) - schedulerConfig, schedulerShutdown := util.StartScheduler(clientSet, true) + schedulerConfig, schedulerShutdown := util.StartScheduler(clientSet) shutdownFunc := func() { schedulerShutdown() diff --git a/test/integration/util/util.go b/test/integration/util/util.go index 9c061091677..705abbfc0c7 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -58,7 +58,7 @@ func StartApiserver() (string, ShutdownFunc) { // StartScheduler configures and starts a scheduler given a handle to the clientSet interface // and event broadcaster. It returns a handle to the configurator for the running scheduler // and the shutdown function to stop it. 
-func StartScheduler(clientSet clientset.Interface, enableEquivalenceCache bool) (scheduler.Configurator, ShutdownFunc) { +func StartScheduler(clientSet clientset.Interface) (scheduler.Configurator, ShutdownFunc) { informerFactory := informers.NewSharedInformerFactory(clientSet, 0) evtBroadcaster := record.NewBroadcaster() From cd13c41ddec7ed31931823fefcaba68daaf69061 Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Fri, 18 May 2018 18:22:15 -0700 Subject: [PATCH 042/307] Add GET PATCH support for two /status: apiservices/status under apiregistration.k8s.io certificatesigningrequests/status under certificates.k8s.io --- pkg/registry/certificates/certificates/storage/storage.go | 8 ++++++++ .../kube-aggregator/pkg/registry/apiservice/etcd/etcd.go | 8 +++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/pkg/registry/certificates/certificates/storage/storage.go b/pkg/registry/certificates/certificates/storage/storage.go index 507269a72ba..378c6f29a0b 100644 --- a/pkg/registry/certificates/certificates/storage/storage.go +++ b/pkg/registry/certificates/certificates/storage/storage.go @@ -19,6 +19,7 @@ package storage import ( "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" @@ -83,11 +84,18 @@ func (r *StatusREST) New() runtime.Object { return &certificates.CertificateSigningRequest{} } +// Get retrieves the object from the storage. It is required to support Patch. +func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { + return r.store.Get(ctx, name, options) +} + // Update alters the status subset of an object. func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) } +var _ = rest.Patcher(&StatusREST{}) + // ApprovalREST implements the REST endpoint for changing the approval state of a CSR. type ApprovalREST struct { store *genericregistry.Store diff --git a/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/etcd.go b/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/etcd.go index 8eca6be0a25..c7c49072a3b 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/etcd.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/etcd.go @@ -19,6 +19,7 @@ package etcd import ( "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" @@ -66,12 +67,17 @@ type StatusREST struct { store *genericregistry.Store } -var _ = rest.Updater(&StatusREST{}) +var _ = rest.Patcher(&StatusREST{}) func (r *StatusREST) New() runtime.Object { return &apiregistration.APIService{} } +// Get retrieves the object from the storage. It is required to support Patch. +func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { + return r.store.Get(ctx, name, options) +} + // Update alters the status subset of an object. 
func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) (runtime.Object, bool, error) { return r.store.Update(ctx, name, objInfo, createValidation, updateValidation) From 4e114fd65b57039b3f94edfb7565ef3fb6064be4 Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Fri, 18 May 2018 18:41:57 -0700 Subject: [PATCH 043/307] generated --- api/openapi-spec/swagger.json | 246 ++++++++++++++++ .../certificates.k8s.io_v1beta1.json | 88 ++++++ .../v1beta1/operations.html | 266 +++++++++++++++++- .../v1beta1/definitions.html | 2 +- .../scheduling.k8s.io/v1beta1/operations.html | 2 +- .../certificates/certificates/storage/BUILD | 1 + .../pkg/registry/apiservice/etcd/BUILD | 1 + 7 files changed, 589 insertions(+), 17 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index ce0f8e8a7aa..3bea4f40b8d 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -20984,6 +20984,41 @@ ] }, "/apis/apiregistration.k8s.io/v1/apiservices/{name}/status": { + "get": { + "description": "read status of the specified APIService", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "apiregistration_v1" + ], + "operationId": "readApiregistrationV1APIServiceStatus", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "apiregistration.k8s.io", + "kind": "APIService", + "version": "v1" + } + }, "put": { "description": "replace status of the specified APIService", "consumes": [ @@ -21035,6 +21070,53 @@ "version": "v1" } }, + "patch": { + "description": "partially update status of the specified APIService", + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "apiregistration_v1" + ], + "operationId": "patchApiregistrationV1APIServiceStatus", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "apiregistration.k8s.io", + "kind": "APIService", + "version": "v1" + } + }, "parameters": [ { "uniqueItems": true, @@ -21793,6 +21875,41 @@ ] }, "/apis/apiregistration.k8s.io/v1beta1/apiservices/{name}/status": { + "get": { + "description": "read status of the specified APIService", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "apiregistration_v1beta1" + ], + "operationId": "readApiregistrationV1beta1APIServiceStatus", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": 
"#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "apiregistration.k8s.io", + "kind": "APIService", + "version": "v1beta1" + } + }, "put": { "description": "replace status of the specified APIService", "consumes": [ @@ -21844,6 +21961,53 @@ "version": "v1beta1" } }, + "patch": { + "description": "partially update status of the specified APIService", + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "apiregistration_v1beta1" + ], + "operationId": "patchApiregistrationV1beta1APIServiceStatus", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIService" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "apiregistration.k8s.io", + "kind": "APIService", + "version": "v1beta1" + } + }, "parameters": [ { "uniqueItems": true, @@ -44754,6 +44918,41 @@ ] }, "/apis/certificates.k8s.io/v1beta1/certificatesigningrequests/{name}/status": { + "get": { + "description": "read status of the specified CertificateSigningRequest", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "certificates_v1beta1" + ], + "operationId": "readCertificatesV1beta1CertificateSigningRequestStatus", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.api.certificates.v1beta1.CertificateSigningRequest" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "certificates.k8s.io", + "kind": "CertificateSigningRequest", + "version": "v1beta1" + } + }, "put": { "description": "replace status of the specified CertificateSigningRequest", "consumes": [ @@ -44805,6 +45004,53 @@ "version": "v1beta1" } }, + "patch": { + "description": "partially update status of the specified CertificateSigningRequest", + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "certificates_v1beta1" + ], + "operationId": "patchCertificatesV1beta1CertificateSigningRequestStatus", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.api.certificates.v1beta1.CertificateSigningRequest" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "certificates.k8s.io", + "kind": "CertificateSigningRequest", + "version": "v1beta1" + } + 
}, "parameters": [ { "uniqueItems": true, diff --git a/api/swagger-spec/certificates.k8s.io_v1beta1.json b/api/swagger-spec/certificates.k8s.io_v1beta1.json index 01c34cfc030..45bfd314456 100644 --- a/api/swagger-spec/certificates.k8s.io_v1beta1.json +++ b/api/swagger-spec/certificates.k8s.io_v1beta1.json @@ -764,6 +764,45 @@ "path": "/apis/certificates.k8s.io/v1beta1/certificatesigningrequests/{name}/status", "description": "API at /apis/certificates.k8s.io/v1beta1", "operations": [ + { + "type": "v1beta1.CertificateSigningRequest", + "method": "GET", + "summary": "read status of the specified CertificateSigningRequest", + "nickname": "readCertificateSigningRequestStatus", + "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "pretty", + "description": "If 'true', then the output is pretty printed.", + "required": false, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "path", + "name": "name", + "description": "name of the CertificateSigningRequest", + "required": true, + "allowMultiple": false + } + ], + "responseMessages": [ + { + "code": 200, + "message": "OK", + "responseModel": "v1beta1.CertificateSigningRequest" + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "consumes": [ + "*/*" + ] + }, { "type": "v1beta1.CertificateSigningRequest", "method": "PUT", @@ -815,6 +854,55 @@ "consumes": [ "*/*" ] + }, + { + "type": "v1beta1.CertificateSigningRequest", + "method": "PATCH", + "summary": "partially update status of the specified CertificateSigningRequest", + "nickname": "patchCertificateSigningRequestStatus", + "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "pretty", + "description": "If 'true', then the output is pretty printed.", + "required": false, + "allowMultiple": false + }, + { + "type": "v1.Patch", + "paramType": "body", + "name": "body", + "description": "", + "required": true, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "path", + "name": "name", + "description": "name of the CertificateSigningRequest", + "required": true, + "allowMultiple": false + } + ], + "responseMessages": [ + { + "code": 200, + "message": "OK", + "responseModel": "v1beta1.CertificateSigningRequest" + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json" + ] } ] }, diff --git a/docs/api-reference/certificates.k8s.io/v1beta1/operations.html b/docs/api-reference/certificates.k8s.io/v1beta1/operations.html index fca601ec992..fff9fff02bf 100755 --- a/docs/api-reference/certificates.k8s.io/v1beta1/operations.html +++ b/docs/api-reference/certificates.k8s.io/v1beta1/operations.html @@ -1549,6 +1549,117 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
[Generated API-reference HTML churn elided. The diff to docs/api-reference/certificates.k8s.io/v1beta1/operations.html
adds two new operation sections, "read status of the specified CertificateSigningRequest"
(GET /apis/certificates.k8s.io/v1beta1/certificatesigningrequests/{name}/status) and
"partially update status of the specified CertificateSigningRequest" (PATCH on the same path), each with the standard
Parameters, Responses (200 -> v1beta1.CertificateSigningRequest), Consumes/Produces and Tags tables, along with many
single-line updates to the headings of the existing sections. The scheduling.k8s.io/v1beta1 definitions.html and
operations.html pages pick up small regenerated changes.]
diff --git a/docs/api-reference/scheduling.k8s.io/v1beta1/operations.html b/docs/api-reference/scheduling.k8s.io/v1beta1/operations.html index c70d1a259d2..8c9c1566c9d 100755 --- a/docs/api-reference/scheduling.k8s.io/v1beta1/operations.html +++ b/docs/api-reference/scheduling.k8s.io/v1beta1/operations.html @@ -1785,7 +1785,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
diff --git a/pkg/registry/certificates/certificates/storage/BUILD b/pkg/registry/certificates/certificates/storage/BUILD index 7889ede2652..35113cdd6ae 100644 --- a/pkg/registry/certificates/certificates/storage/BUILD +++ b/pkg/registry/certificates/certificates/storage/BUILD @@ -15,6 +15,7 @@ go_library( "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", "//pkg/registry/certificates/certificates:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", diff --git a/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/BUILD index ea90ecbf0e5..9e051a3830e 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd/BUILD @@ -10,6 +10,7 @@ go_library( srcs = ["etcd.go"], importpath = "k8s.io/kube-aggregator/pkg/registry/apiservice/etcd", deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", From eb6bd67446db0fc9323d817a1dcad28c1d67071f Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sat, 19 May 2018 16:52:00 -0400 Subject: [PATCH 044/307] Bump grpc max message size for docker service --- pkg/kubelet/dockershim/remote/docker_server.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/kubelet/dockershim/remote/docker_server.go b/pkg/kubelet/dockershim/remote/docker_server.go index 53f52526e94..5e3fb14544a 100644 --- a/pkg/kubelet/dockershim/remote/docker_server.go +++ b/pkg/kubelet/dockershim/remote/docker_server.go @@ -28,6 +28,10 @@ import ( "k8s.io/kubernetes/pkg/util/interrupt" ) +// maxMsgSize use 8MB as the default message size limit. +// grpc library default is 4MB +const maxMsgSize = 1024 * 1024 * 8 + // DockerServer is the grpc server of dockershim. type DockerServer struct { // endpoint is the endpoint to serve on. @@ -60,7 +64,10 @@ func (s *DockerServer) Start() error { return fmt.Errorf("failed to listen on %q: %v", s.endpoint, err) } // Create the grpc server and register runtime and image services. 
- s.server = grpc.NewServer() + s.server = grpc.NewServer( + grpc.MaxRecvMsgSize(maxMsgSize), + grpc.MaxSendMsgSize(maxMsgSize), + ) runtimeapi.RegisterRuntimeServiceServer(s.server, s.service) runtimeapi.RegisterImageServiceServer(s.server, s.service) go func() { From 73f4cd89af99e0b3f5e39d6c90ba90e47532c629 Mon Sep 17 00:00:00 2001 From: Ibrahim AshShohail Date: Sun, 20 May 2018 02:22:09 +0300 Subject: [PATCH 045/307] Fix error message in Equalities.DeepEqual Signed-off-by: Ibrahim AshShohail --- .../third_party/forked/golang/reflect/deep_equal.go | 2 +- third_party/forked/golang/reflect/deep_equal.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go index 9e45dbe1d21..7ed1d1cffec 100644 --- a/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go +++ b/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go @@ -44,7 +44,7 @@ func (e Equalities) AddFunc(eqFunc interface{}) error { return fmt.Errorf("expected func, got: %v", ft) } if ft.NumIn() != 2 { - return fmt.Errorf("expected three 'in' params, got: %v", ft) + return fmt.Errorf("expected two 'in' params, got: %v", ft) } if ft.NumOut() != 1 { return fmt.Errorf("expected one 'out' param, got: %v", ft) diff --git a/third_party/forked/golang/reflect/deep_equal.go b/third_party/forked/golang/reflect/deep_equal.go index 9e45dbe1d21..7ed1d1cffec 100644 --- a/third_party/forked/golang/reflect/deep_equal.go +++ b/third_party/forked/golang/reflect/deep_equal.go @@ -44,7 +44,7 @@ func (e Equalities) AddFunc(eqFunc interface{}) error { return fmt.Errorf("expected func, got: %v", ft) } if ft.NumIn() != 2 { - return fmt.Errorf("expected three 'in' params, got: %v", ft) + return fmt.Errorf("expected two 'in' params, got: %v", ft) } if ft.NumOut() != 1 { return fmt.Errorf("expected one 'out' param, got: %v", ft) From 647e90341ca08640ab8fb3d49edb8027faf4836f Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Thu, 3 May 2018 11:05:33 -0700 Subject: [PATCH 046/307] Kubelet config: Validate new config against future feature gates This fixes an issue with KubeletConfiguration validation, where the feature gates set by the new config were not taken into account. Also fixes a validation issue with dynamic Kubelet config, where flag precedence was not enforced prior to dynamic config validation in the controller; this prevented rejection of dynamic configs that don't merge well with values set via legacy flags. 
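
In short, the validation-side fix checks a KubeletConfiguration against a local copy of the feature gates with that
config's own gate overrides applied, rather than against the live global gates. A rough sketch (the helper name
validateAgainstOwnGates is invented for illustration only; the actual change is in
pkg/kubelet/apis/kubeletconfig/validation/validation.go in the diff below):

    // Sketch only: copy the process-wide gates, overlay the gates the new config
    // would set, and validate against that combined view without mutating global state.
    // (Assumes the utilfeature, features, kubeletconfig and fmt packages already
    // imported by that file.)
    func validateAgainstOwnGates(kc *kubeletconfig.KubeletConfiguration) error {
        localFeatureGate := utilfeature.DefaultFeatureGate.DeepCopy()
        if err := localFeatureGate.SetFromMap(kc.FeatureGates); err != nil {
            return err
        }
        if kc.ServerTLSBootstrap && !localFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
            return fmt.Errorf("ServerTLSBootstrap requires feature gate RotateKubeletServerCertificate")
        }
        return nil
    }
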
--- cmd/kubelet/app/BUILD | 1 + cmd/kubelet/app/server.go | 40 +++++++++---------- .../kubeletconfig/validation/validation.go | 7 +++- pkg/kubelet/kubeletconfig/controller.go | 33 ++++++++++++++- .../pkg/util/feature/feature_gate.go | 39 ++++++++++++++++++ 5 files changed, 95 insertions(+), 25 deletions(-) diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD index 1eadc2c6e90..af2fcc09ad5 100644 --- a/cmd/kubelet/app/BUILD +++ b/cmd/kubelet/app/BUILD @@ -84,6 +84,7 @@ go_library( "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library", + "//pkg/kubelet/apis/kubeletconfig/validation:go_default_library", "//pkg/kubelet/cadvisor:go_default_library", "//pkg/kubelet/certificate:go_default_library", "//pkg/kubelet/certificate/bootstrap:go_default_library", diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 7adfb77b7d2..2c0e47f99ed 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -68,6 +68,7 @@ import ( kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" + kubeletconfigvalidation "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation" "k8s.io/kubernetes/pkg/kubelet/cadvisor" kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate" "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap" @@ -198,43 +199,38 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API } } - // TODO(#63305): always validate the combination of the local config file and flags, this is the fallback - // when the dynamic config controller tells us to use local config (this can be fixed alongside other validation fixes). + // We always validate the local configuration (command line + config file). + // This is the default "last-known-good" config for dynamic config, and must always remain valid. + if err := kubeletconfigvalidation.ValidateKubeletConfiguration(kubeletConfig); err != nil { + glog.Fatal(err) + } // use dynamic kubelet config, if enabled var kubeletConfigController *dynamickubeletconfig.Controller if dynamicConfigDir := kubeletFlags.DynamicConfigDir.Value(); len(dynamicConfigDir) > 0 { var dynamicKubeletConfig *kubeletconfiginternal.KubeletConfiguration - dynamicKubeletConfig, kubeletConfigController, err = BootstrapKubeletConfigController(dynamicConfigDir) + dynamicKubeletConfig, kubeletConfigController, err = BootstrapKubeletConfigController(dynamicConfigDir, + func(kc *kubeletconfiginternal.KubeletConfiguration) error { + // Here, we enforce flag precedence inside the controller, prior to the controller's validation sequence, + // so that we get a complete validation at the same point where we can decide to reject dynamic config. + // This fixes the flag-precedence component of issue #63305. + // See issue #56171 for general details on flag precedence. + return kubeletConfigFlagPrecedence(kc, args) + }) if err != nil { glog.Fatal(err) } // If we should just use our existing, local config, the controller will return a nil config if dynamicKubeletConfig != nil { kubeletConfig = dynamicKubeletConfig - // We must enforce flag precedence by re-parsing the command line into the new object. - // This is necessary to preserve backwards-compatibility across binary upgrades. - // See issue #56171 for more details. 
- if err := kubeletConfigFlagPrecedence(kubeletConfig, args); err != nil { - glog.Fatal(err) - } - // update feature gates based on new config + // Note: flag precedence was already enforced in the controller, prior to validation, + // by our above transform function. Now we simply update feature gates from the new config. if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { glog.Fatal(err) } } } - // TODO(#63305): need to reconcile that validation performed inside the dynamic config controller - // will happen against currently set feature gates, rather than future adjustments from combination of files - // and flags. There's a potential scenario where a valid config (because it sets new gates) is considered - // invalid against current gates (at least until --feature-gates flag is removed). - // We should validate against the combination of current feature gates, overrides from feature gates in the file, - // and overrides from feature gates set via flags, rather than currently set feature gates. - // Once the --feature-gates flag is removed, we should strictly validate against the combination of current - // feature gates and feature gates in the file (always need to validate against the combo, because feature-gates - // can layer between the file and dynamic config right now - though maybe we should change this). - // construct a KubeletServer from kubeletFlags and kubeletConfig kubeletServer := &options.KubeletServer{ KubeletFlags: *kubeletFlags, @@ -1108,7 +1104,7 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) { } // BootstrapKubeletConfigController constructs and bootstrap a configuration controller -func BootstrapKubeletConfigController(dynamicConfigDir string) (*kubeletconfiginternal.KubeletConfiguration, *dynamickubeletconfig.Controller, error) { +func BootstrapKubeletConfigController(dynamicConfigDir string, transform dynamickubeletconfig.TransformFunc) (*kubeletconfiginternal.KubeletConfiguration, *dynamickubeletconfig.Controller, error) { if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { return nil, nil, fmt.Errorf("failed to bootstrap Kubelet config controller, you must enable the DynamicKubeletConfig feature gate") } @@ -1122,7 +1118,7 @@ func BootstrapKubeletConfigController(dynamicConfigDir string) (*kubeletconfigin return nil, nil, fmt.Errorf("failed to get absolute path for --dynamic-config-dir=%s", dynamicConfigDir) } // get the latest KubeletConfiguration checkpoint from disk, or return the default config if no valid checkpoints exist - c := dynamickubeletconfig.NewController(dir) + c := dynamickubeletconfig.NewController(dir, transform) kc, err := c.Bootstrap() if err != nil { return nil, nil, fmt.Errorf("failed to determine a valid configuration, error: %v", err) diff --git a/pkg/kubelet/apis/kubeletconfig/validation/validation.go b/pkg/kubelet/apis/kubeletconfig/validation/validation.go index e060ee46555..ab3bc4e14b4 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/validation.go +++ b/pkg/kubelet/apis/kubeletconfig/validation/validation.go @@ -31,6 +31,11 @@ import ( func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error { allErrors := []error{} + // Make a local copy of the global feature gates and combine it with the gates set by this configuration. + // This allows us to validate the config against the set of gates it will actually run against. 
+ localFeatureGate := utilfeature.DefaultFeatureGate.DeepCopy() + localFeatureGate.SetFromMap(kc.FeatureGates) + if !kc.CgroupsPerQOS && len(kc.EnforceNodeAllocatable) > 0 { allErrors = append(allErrors, fmt.Errorf("invalid configuration: EnforceNodeAllocatable (--enforce-node-allocatable) is not supported unless CgroupsPerQOS (--cgroups-per-qos) feature is turned on")) } @@ -88,7 +93,7 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error if kc.RegistryPullQPS < 0 { allErrors = append(allErrors, fmt.Errorf("invalid configuration: RegistryPullQPS (--registry-qps) %v must not be a negative number", kc.RegistryPullQPS)) } - if kc.ServerTLSBootstrap && !utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) { + if kc.ServerTLSBootstrap && !localFeatureGate.Enabled(features.RotateKubeletServerCertificate) { allErrors = append(allErrors, fmt.Errorf("invalid configuration: ServerTLSBootstrap %v requires feature gate RotateKubeletServerCertificate", kc.ServerTLSBootstrap)) } for _, val := range kc.EnforceNodeAllocatable { diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go index f665745ab26..85b2ae2dab2 100644 --- a/pkg/kubelet/kubeletconfig/controller.go +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -43,9 +43,22 @@ const ( configTrialDuration = 10 * time.Minute ) +// TransformFunc edits the KubeletConfiguration in-place, and returns an +// error if any of the transformations failed. +type TransformFunc func(kc *kubeletconfig.KubeletConfiguration) error + // Controller manages syncing dynamic Kubelet configurations // For more information, see the proposal: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/dynamic-kubelet-configuration.md type Controller struct { + // transform applies an arbitrary transformation to config after loading, and before validation. + // This can be used, for example, to include config from flags before the controller's validation step. + // If transform returns an error, loadConfig will fail, and an InternalError will be reported. + // Be wary if using this function as an extension point, in most cases the controller should + // probably just be natively extended to do what you need. Injecting flag precedence transformations + // is something of an exception because the caller of this controller (cmd/) is aware of flags, but this + // controller's tree (pkg/) is not. + transform TransformFunc + // pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server pendingConfigSource chan bool @@ -59,9 +72,17 @@ type Controller struct { checkpointStore store.Store } -// NewController constructs a new Controller object and returns it. Directory paths must be absolute. -func NewController(dynamicConfigDir string) *Controller { +// NewController constructs a new Controller object and returns it. The dynamicConfigDir +// path must be absolute. transform applies an arbitrary transformation to config after loading, and before validation. +// This can be used, for example, to include config from flags before the controller's validation step. +// If transform returns an error, loadConfig will fail, and an InternalError will be reported. +// Be wary if using this function as an extension point, in most cases the controller should +// probably just be natively extended to do what you need. 
Injecting flag precedence transformations +// is something of an exception because the caller of this controller (cmd/) is aware of flags, but this +// controller's tree (pkg/) is not. +func NewController(dynamicConfigDir string, transform TransformFunc) *Controller { return &Controller{ + transform: transform, // channels must have capacity at least 1, since we signal with non-blocking writes pendingConfigSource: make(chan bool, 1), configStatus: status.NewNodeConfigStatus(), @@ -71,6 +92,7 @@ func NewController(dynamicConfigDir string) *Controller { // Bootstrap attempts to return a valid KubeletConfiguration based on the configuration of the Controller, // or returns an error if no valid configuration could be produced. Bootstrap should be called synchronously before StartSync. +// If the pre-existing local configuration should be used, Bootstrap returns a nil config. func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { utillog.Infof("starting controller") @@ -194,6 +216,13 @@ func (cc *Controller) loadConfig(source checkpoint.RemoteConfigSource) (*kubelet if err != nil { return nil, status.LoadError, err } + // apply any required transformations to the KubeletConfiguration + if cc.transform != nil { + if err := cc.transform(kc); err != nil { + return nil, status.InternalError, err + } + } + // validate the result if err := validation.ValidateKubeletConfiguration(kc); err != nil { return nil, status.ValidateError, err } diff --git a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go index 30687712489..fe35adc664e 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -88,6 +88,10 @@ type FeatureGate interface { Add(features map[Feature]FeatureSpec) error // KnownFeatures returns a slice of strings describing the FeatureGate's known features. KnownFeatures() []string + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be + // set on the copy without mutating the original. This is useful for validating + // config against potential feature gate changes before committing those changes. + DeepCopy() FeatureGate } // featureGate implements FeatureGate as well as pflag.Value for flag parsing. @@ -284,6 +288,10 @@ func (f *featureGate) Enabled(key Feature) bool { // AddFlag adds a flag for setting global feature gates to the specified FlagSet. func (f *featureGate) AddFlag(fs *pflag.FlagSet) { f.lock.Lock() + // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead? + // Not all components expose a feature gates flag using this AddFlag method, and + // in the future, all components will completely stop exposing a feature gates flag, + // in favor of componentconfig. f.closed = true f.lock.Unlock() @@ -306,3 +314,34 @@ func (f *featureGate) KnownFeatures() []string { sort.Strings(known) return known } + +// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be +// set on the copy without mutating the original. This is useful for validating +// config against potential feature gate changes before committing those changes. +func (f *featureGate) DeepCopy() FeatureGate { + // Copy existing state. 
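	// Why the copy below is field-by-field rather than a plain struct copy: known and
	// enabled are pointers to atomic.Value, so a shallow copy of featureGate would still
	// share them, and Set/SetFromMap on the copy would be visible through the original
	// gate. Fresh maps plus fresh atomic.Values keep the copy fully independent.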
+ known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + // Store copied state in new atomics. + knownValue := &atomic.Value{} + knownValue.Store(known) + enabledValue := &atomic.Value{} + enabledValue.Store(enabled) + + // Construct a new featureGate around the copied state. + // Note that specialFeatures is treated as immutable by convention, + // and we maintain the value of f.closed across the copy. + return &featureGate{ + special: specialFeatures, + known: knownValue, + enabled: enabledValue, + closed: f.closed, + } +} From 188b2bfabd5e1c9c8ceb52b778e054acc33d52e2 Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Mon, 21 May 2018 10:33:49 +0800 Subject: [PATCH 047/307] remove one duplicated unit test --- pkg/kubectl/cmd/get/get_test.go | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/pkg/kubectl/cmd/get/get_test.go b/pkg/kubectl/cmd/get/get_test.go index e094463cdd9..05fc905673f 100644 --- a/pkg/kubectl/cmd/get/get_test.go +++ b/pkg/kubectl/cmd/get/get_test.go @@ -480,33 +480,6 @@ bar 0/0 0 } } -func TestGetAllListObjects(t *testing.T) { - pods, _, _ := testData() - - tf := cmdtesting.NewTestFactory() - defer tf.Cleanup() - codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) - - tf.UnstructuredClient = &fake.RESTClient{ - NegotiatedSerializer: unstructuredSerializer, - Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, - } - tf.Namespace = "test" - - streams, _, buf, _ := genericclioptions.NewTestIOStreams() - cmd := NewCmdGet("kubectl", tf, streams) - cmd.SetOutput(buf) - cmd.Run(cmd, []string{"pods"}) - - expected := `NAME READY STATUS RESTARTS AGE -foo 0/0 0 -bar 0/0 0 -` - if e, a := expected, buf.String(); e != a { - t.Errorf("expected %v, got %v", e, a) - } -} - func TestGetListComponentStatus(t *testing.T) { statuses := testComponentStatusData() From da23396e22fcc4f55000cf82309334f17bfbebac Mon Sep 17 00:00:00 2001 From: Tomoe Sugihara Date: Mon, 21 May 2018 11:38:41 +0900 Subject: [PATCH 048/307] Dump Stack when docker fails on healthcheck Send SIGUSR1 to dockerd to save stack dump of docker daemon in order to be able to investigate why docker daemon was unresposive to health check done by `docker ps`. See https://github.com/moby/moby/blob/master/daemon/daemon.go on how docker sets up a trap for SIGUSR1 with `setupDumpStackTrap()` --- cluster/gce/gci/health-monitor.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cluster/gce/gci/health-monitor.sh b/cluster/gce/gci/health-monitor.sh index cbf96dd3142..b2bca3d2a8b 100644 --- a/cluster/gce/gci/health-monitor.sh +++ b/cluster/gce/gci/health-monitor.sh @@ -50,6 +50,12 @@ function container_runtime_monitoring { while true; do if ! timeout 60 ${healthcheck_command} > /dev/null; then echo "Container runtime ${container_runtime_name} failed!" + if [[ "$container_runtime_name" == "docker" ]]; then + # Dump stack of docker daemon for investigation. + # Log fle name looks like goroutine-stacks-TIMESTAMP and will be saved to + # the exec root directory, which is /var/run/docker/ on Ubuntu and COS. + pkill -SIGUSR1 dockerd + fi systemctl kill --kill-who=main "${container_runtime_name}" # Wait for a while, as we don't want to kill it again before it is really up. 
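        # (For reference when investigating: the SIGUSR1 sent above makes dockerd write its
        # stack dump into its exec root, /var/run/docker/ on Ubuntu and COS, with names like
        # goroutine-stacks-<timestamp>, so e.g. `ls /var/run/docker/goroutine-stacks-*`
        # locates the dump for later analysis.)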
sleep 120 From e979b1698779b49002c3cffca70b05059773603d Mon Sep 17 00:00:00 2001 From: mbohlool Date: Tue, 6 Feb 2018 04:10:18 -0800 Subject: [PATCH 049/307] Fix cyclic dependency of apiserver test for OpenAPI test --- .../pkg/server/genericapiserver_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go index b68b30982e7..4b6e7b7ddad 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go @@ -31,7 +31,6 @@ import ( "testing" "time" - // "github.com/go-openapi/spec" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,6 +49,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" restclient "k8s.io/client-go/rest" + kubeopenapi "k8s.io/kube-openapi/pkg/common" ) const ( @@ -77,6 +77,12 @@ func init() { examplev1.AddToScheme(scheme) } +func testGetOpenAPIDefinitions(_ kubeopenapi.ReferenceCallback) map[string]kubeopenapi.OpenAPIDefinition { + return map[string]kubeopenapi.OpenAPIDefinition{ + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": {}, + } +} + // setUp is a convience function for setting up for (most) tests. func setUp(t *testing.T) (Config, *assert.Assertions) { config := NewConfig(codecs) @@ -89,14 +95,8 @@ func setUp(t *testing.T) (Config, *assert.Assertions) { t.Fatal("unable to create fake client set") } - // TODO restore this test, but right now, eliminate our cycle - // config.OpenAPIConfig = DefaultOpenAPIConfig(testGetOpenAPIDefinitions, runtime.NewScheme()) - // config.OpenAPIConfig.Info = &spec.Info{ - // InfoProps: spec.InfoProps{ - // Title: "Kubernetes", - // Version: "unversioned", - // }, - // } + config.OpenAPIConfig = DefaultOpenAPIConfig(testGetOpenAPIDefinitions, runtime.NewScheme()) + config.OpenAPIConfig.Info.Version = "unversioned" config.SwaggerConfig = DefaultSwaggerConfig() sharedInformers := informers.NewSharedInformerFactory(clientset, config.LoopbackClientConfig.Timeout) config.Complete(sharedInformers) From 9e94cf72a8218911ea5e90a50a3d403755edbb04 Mon Sep 17 00:00:00 2001 From: mbohlool Date: Mon, 12 Feb 2018 11:17:16 -0800 Subject: [PATCH 050/307] Update bazel --- staging/src/k8s.io/apiserver/pkg/server/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/staging/src/k8s.io/apiserver/pkg/server/BUILD b/staging/src/k8s.io/apiserver/pkg/server/BUILD index fc925bfe3b0..96dbb82a22b 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/BUILD @@ -33,6 +33,7 @@ go_test( "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) From 4f0020d1b4caec992460859ceb792f1b785a85fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sat, 19 May 2018 15:49:28 +0100 Subject: [PATCH 051/307] Don't support marshalling using the v1alpha1 version in kubeadm v1.11 --- .../app/util/config/masterconfig_test.go | 17 +--- ...defaulted_v1alpha2.yaml => defaulted.yaml} | 3 - .../defaulting/master/defaulted_v1alpha1.yaml | 78 ------------------- 3 files changed, 2 insertions(+), 96 deletions(-) rename cmd/kubeadm/app/util/config/testdata/defaulting/master/{defaulted_v1alpha2.yaml => defaulted.yaml} 
(98%) delete mode 100644 cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_v1alpha1.yaml diff --git a/cmd/kubeadm/app/util/config/masterconfig_test.go b/cmd/kubeadm/app/util/config/masterconfig_test.go index 8b3409399cb..ee46de4cd14 100644 --- a/cmd/kubeadm/app/util/config/masterconfig_test.go +++ b/cmd/kubeadm/app/util/config/masterconfig_test.go @@ -39,8 +39,7 @@ const ( master_v1alpha2YAML = "testdata/conversion/master/v1alpha2.yaml" master_internalYAML = "testdata/conversion/master/internal.yaml" master_incompleteYAML = "testdata/defaulting/master/incomplete.yaml" - master_defaultedv1alpha1YAML = "testdata/defaulting/master/defaulted_v1alpha1.yaml" - master_defaultedv1alpha2YAML = "testdata/defaulting/master/defaulted_v1alpha2.yaml" + master_defaultedYAML = "testdata/defaulting/master/defaulted.yaml" master_invalidYAML = "testdata/validation/invalid_mastercfg.yaml" master_beforeUpgradeYAML = "testdata/v1alpha1_upgrade/before.yaml" master_afterUpgradeYAML = "testdata/v1alpha1_upgrade/after.yaml" @@ -79,12 +78,6 @@ func TestConfigFileAndDefaultsToInternalConfig(t *testing.T) { out: master_internalYAML, groupVersion: kubeadm.SchemeGroupVersion, }, - { // v1alpha1 (faulty) -> internal -> v1alpha1 - name: "v1alpha1WithoutTypeMetaTov1alpha1", - in: master_v1alpha1WithoutTypeMetaYAML, - out: master_v1alpha1YAML, - groupVersion: v1alpha1.SchemeGroupVersion, - }, { // v1alpha2 -> internal name: "v1alpha2ToInternal", in: master_v1alpha2YAML, @@ -105,16 +98,10 @@ func TestConfigFileAndDefaultsToInternalConfig(t *testing.T) { }, // These tests are reading one file that has only a subset of the fields populated, loading it using ConfigFileAndDefaultsToInternalConfig, // and then marshals the internal object to the expected groupVersion - { // v1alpha1 (faulty) -> default -> validate -> internal -> v1alpha1 - name: "incompleteYAMLToDefaultedv1alpha1", - in: master_incompleteYAML, - out: master_defaultedv1alpha1YAML, - groupVersion: v1alpha1.SchemeGroupVersion, - }, { // v1alpha1 (faulty) -> default -> validate -> internal -> v1alpha2 name: "incompleteYAMLToDefaultedv1alpha2", in: master_incompleteYAML, - out: master_defaultedv1alpha2YAML, + out: master_defaultedYAML, groupVersion: v1alpha2.SchemeGroupVersion, }, { // v1alpha1 (faulty) -> validation should fail diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml similarity index 98% rename from cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_v1alpha2.yaml rename to cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml index ee133e25dc6..09506810936 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_v1alpha2.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml @@ -7,9 +7,6 @@ auditPolicy: logDir: /var/log/kubernetes/audit logMaxAge: 2 path: "" -authorizationModes: -- Node -- RBAC certificatesDir: /var/lib/kubernetes/pki clusterName: kubernetes criSocket: /var/run/criruntime.sock diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_v1alpha1.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_v1alpha1.yaml deleted file mode 100644 index e36204f99fb..00000000000 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted_v1alpha1.yaml +++ /dev/null @@ -1,78 +0,0 @@ -api: - advertiseAddress: 192.168.2.2 - bindPort: 6443 - controlPlaneEndpoint: "" -apiVersion: kubeadm.k8s.io/v1alpha1 
-auditPolicy: - logDir: /var/log/kubernetes/audit - logMaxAge: 2 - path: "" -authorizationModes: -- Node -- RBAC -certificatesDir: /var/lib/kubernetes/pki -cloudProvider: "" -clusterName: kubernetes -criSocket: /var/run/criruntime.sock -etcd: - caFile: "" - certFile: "" - dataDir: /var/lib/etcd - endpoints: null - image: "" - keyFile: "" -imageRepository: my-company.com -kind: MasterConfiguration -kubeProxy: - config: - bindAddress: 0.0.0.0 - clientConnection: - acceptContentTypes: "" - burst: 10 - contentType: application/vnd.kubernetes.protobuf - kubeconfig: /var/lib/kube-proxy/kubeconfig.conf - qps: 5 - clusterCIDR: "" - configSyncPeriod: 15m0s - conntrack: - max: null - maxPerCore: 32768 - min: 131072 - tcpCloseWaitTimeout: 1h0m0s - tcpEstablishedTimeout: 24h0m0s - enableProfiling: false - healthzBindAddress: 0.0.0.0:10256 - hostnameOverride: "" - iptables: - masqueradeAll: false - masqueradeBit: 14 - minSyncPeriod: 0s - syncPeriod: 30s - ipvs: - ExcludeCIDRs: null - minSyncPeriod: 0s - scheduler: "" - syncPeriod: 30s - metricsBindAddress: 127.0.0.1:10249 - mode: "" - nodePortAddresses: null - oomScoreAdj: -999 - portRange: "" - resourceContainer: /kube-proxy - udpIdleTimeout: 250ms -kubeletConfiguration: {} -kubernetesVersion: v1.10.2 -networking: - dnsDomain: cluster.global - podSubnet: "" - serviceSubnet: 10.196.0.0/12 -nodeName: master-1 -privilegedPods: false -token: s73ybu.6tw6wnqgp5z0wb77 -tokenGroups: -- system:bootstrappers:kubeadm:default-node-token -tokenTTL: 24h0m0s -tokenUsages: -- signing -- authentication -unifiedControlPlaneImage: "" From 5687f652db97504c5278732c69c6bbc968da5261 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 21 May 2018 08:49:12 +0300 Subject: [PATCH 052/307] kubeadm: Remove .AuthorizationModes in the v1alpha2 API --- cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go | 1 - cmd/kubeadm/app/apis/kubeadm/types.go | 4 --- .../app/apis/kubeadm/v1alpha1/conversion.go | 15 ++++++++ .../app/apis/kubeadm/v1alpha2/defaults.go | 7 ---- .../app/apis/kubeadm/v1alpha2/types.go | 4 --- .../app/apis/kubeadm/validation/validation.go | 31 ----------------- .../kubeadm/validation/validation_test.go | 34 ------------------- cmd/kubeadm/app/cmd/init.go | 1 - cmd/kubeadm/app/constants/constants.go | 5 --- .../app/phases/upgrade/staticpods_test.go | 6 ++-- cmd/kubeadm/app/preflight/checks.go | 11 ------ .../app/util/config/masterconfig_test.go | 2 +- .../testdata/conversion/master/internal.yaml | 6 ++-- .../testdata/conversion/master/v1alpha1.yaml | 1 + .../master/v1alpha1_without_TypeMeta.yaml | 1 + .../testdata/conversion/master/v1alpha2.yaml | 5 ++- 16 files changed, 25 insertions(+), 109 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index 89d8cd4fc82..a8e8c8ce447 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -39,7 +39,6 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { obj.API.AdvertiseAddress = "foo" obj.Networking.ServiceSubnet = "foo" obj.Networking.DNSDomain = "foo" - obj.AuthorizationModes = []string{"foo"} obj.CertificatesDir = "foo" obj.APIServerCertSANs = []string{"foo"} obj.Etcd.ServerCertSANs = []string{"foo"} diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index 8ffdfb2c497..f3b9df49138 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -45,10 +45,6 @@ type MasterConfiguration struct 
{ // NodeName is the name of the node that will host the k8s control plane. // Defaults to the hostname if not provided. NodeName string - // AuthorizationModes is a set of authorization modes used inside the cluster. - // If not specified, defaults to Node and RBAC, meaning both the node - // authorizer and RBAC are enabled. - AuthorizationModes []string // NoTaintMaster will, if set, suppress the tainting of the // master node allowing workloads to be run on it (e.g. in // single node configurations). diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go index 814ad8b0ed7..9baad9d1d42 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go @@ -17,6 +17,9 @@ limitations under the License. package v1alpha1 import ( + "reflect" + "strings" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -41,6 +44,7 @@ func Convert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in *Mas } UpgradeCloudProvider(in, out) + UpgradeAuthorizationModes(in, out) // We don't support migrating information from the .PrivilegedPods field which was removed in v1alpha2 return nil @@ -69,3 +73,14 @@ func UpgradeCloudProvider(in *MasterConfiguration, out *kubeadm.MasterConfigurat out.ControllerManagerExtraArgs["cloud-provider"] = in.CloudProvider } } + +func UpgradeAuthorizationModes(in *MasterConfiguration, out *kubeadm.MasterConfiguration) { + // If .AuthorizationModes was set to something else than the default, preserve the information via extraargs + if !reflect.DeepEqual(in.AuthorizationModes, strings.Split(DefaultAuthorizationModes, ",")) { + + if out.APIServerExtraArgs == nil { + out.APIServerExtraArgs = map[string]string{} + } + out.APIServerExtraArgs["authorization-mode"] = strings.Join(in.AuthorizationModes, ",") + } +} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go index ca5fe1cc748..266f0033a9b 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go @@ -18,7 +18,6 @@ package v1alpha2 import ( "net/url" - "strings" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,8 +41,6 @@ const ( DefaultKubernetesVersion = "stable-1.10" // DefaultAPIBindPort defines default API port DefaultAPIBindPort = 6443 - // DefaultAuthorizationModes defines default authorization modes - DefaultAuthorizationModes = "Node,RBAC" // DefaultCertificatesDir defines default certificate directory DefaultCertificatesDir = "/etc/kubernetes/pki" // DefaultImageRepository defines default image registry @@ -96,10 +93,6 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { obj.Networking.DNSDomain = DefaultServiceDNSDomain } - if len(obj.AuthorizationModes) == 0 { - obj.AuthorizationModes = strings.Split(DefaultAuthorizationModes, ",") - } - if obj.CertificatesDir == "" { obj.CertificatesDir = DefaultCertificatesDir } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go index 1a34dc7d8ae..dadaab24352 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go @@ -45,10 +45,6 @@ type MasterConfiguration struct { // NodeName is the name of the node that will host the k8s control plane. // Defaults to the hostname if not provided. 
NodeName string `json:"nodeName"` - // AuthorizationModes is a set of authorization modes used inside the cluster. - // If not specified, defaults to Node and RBAC, meaning both the node - // authorizer and RBAC are enabled. - AuthorizationModes []string `json:"authorizationModes,omitempty"` // NoTaintMaster will, if set, suppress the tainting of the // master node allowing workloads to be run on it (e.g. in // single node configurations). diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index a4ad6f04c58..a038a723591 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -37,7 +37,6 @@ import ( kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token" apivalidation "k8s.io/kubernetes/pkg/apis/core/validation" - authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" kubeletvalidation "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation" @@ -49,16 +48,9 @@ import ( "k8s.io/kubernetes/pkg/util/node" ) -// Describes the authorization modes that are enforced by kubeadm -var requiredAuthzModes = []string{ - authzmodes.ModeRBAC, - authzmodes.ModeNode, -} - // ValidateMasterConfiguration validates master configuration and collects all encountered errors func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList { allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateAuthorizationModes(c.AuthorizationModes, field.NewPath("authorizationModes"))...) allErrs = append(allErrs, ValidateNetworking(&c.Networking, field.NewPath("networking"))...) allErrs = append(allErrs, ValidateCertSANs(c.APIServerCertSANs, field.NewPath("apiServerCertSANs"))...) allErrs = append(allErrs, ValidateCertSANs(c.Etcd.ServerCertSANs, field.NewPath("etcd").Child("serverCertSANs"))...) 
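Taken together with UpgradeAuthorizationModes earlier in this patch and the testdata changes below, the migration story for a v1alpha1 config that customized its modes is roughly:

    # v1alpha1 (field removed in v1alpha2)
    authorizationModes:
    - Node
    - RBAC
    - Webhook

    # equivalent v1alpha2 result after conversion
    apiServerExtraArgs:
      authorization-mode: Node,RBAC,Webhook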
@@ -102,29 +94,6 @@ func ValidateNodeConfiguration(c *kubeadm.NodeConfiguration) field.ErrorList { return allErrs } -// ValidateAuthorizationModes validates authorization modes and collects all encountered errors -func ValidateAuthorizationModes(authzModes []string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - found := map[string]bool{} - for _, authzMode := range authzModes { - if !authzmodes.IsValidAuthorizationMode(authzMode) { - allErrs = append(allErrs, field.Invalid(fldPath, authzMode, "invalid authorization mode")) - } - - if found[authzMode] { - allErrs = append(allErrs, field.Invalid(fldPath, authzMode, "duplicate authorization mode")) - continue - } - found[authzMode] = true - } - for _, requiredMode := range requiredAuthzModes { - if !found[requiredMode] { - allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("authorization mode %s must be enabled", requiredMode))) - } - } - return allErrs -} - // ValidateDiscovery validates discovery related configuration and collects all encountered errors func ValidateDiscovery(c *kubeadm.NodeConfiguration) field.ErrorList { allErrs := field.ErrorList{} diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index a5427546775..8c51a354000 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -104,34 +104,6 @@ func TestValidateTokenGroups(t *testing.T) { } } -func TestValidateAuthorizationModes(t *testing.T) { - var tests = []struct { - s []string - f *field.Path - expected bool - }{ - {[]string{""}, nil, false}, - {[]string{"rBAC"}, nil, false}, // mode not supported - {[]string{"rBAC", "Webhook"}, nil, false}, // mode not supported - {[]string{"RBAC", "Webhook"}, nil, false}, // mode Node required - {[]string{"Node", "RBAC", "Webhook", "Webhook"}, nil, false}, // no duplicates allowed - {[]string{"not valid"}, nil, false}, // invalid mode - {[]string{"Node", "RBAC"}, nil, true}, // supported - {[]string{"RBAC", "Node"}, nil, true}, // supported - {[]string{"Node", "RBAC", "Webhook", "ABAC"}, nil, true}, // supported - } - for _, rt := range tests { - actual := ValidateAuthorizationModes(rt.s, rt.f) - if (len(actual) == 0) != rt.expected { - t.Errorf( - "failed ValidateAuthorizationModes:\n\texpected: %t\n\t actual: %t", - rt.expected, - (len(actual) == 0), - ) - } - } -} - func TestValidateNodeName(t *testing.T) { var tests = []struct { s string @@ -431,7 +403,6 @@ func TestValidateMasterConfiguration(t *testing.T) { AdvertiseAddress: "1.2.3.4", BindPort: 6443, }, - AuthorizationModes: []string{"Node", "RBAC"}, Networking: kubeadm.Networking{ ServiceSubnet: "10.96.0.1/12", DNSDomain: "cluster.local", @@ -445,7 +416,6 @@ func TestValidateMasterConfiguration(t *testing.T) { AdvertiseAddress: "1.2.3.4", BindPort: 6443, }, - AuthorizationModes: []string{"Node", "RBAC"}, Networking: kubeadm.Networking{ ServiceSubnet: "2001:db8::1/98", DNSDomain: "cluster.local", @@ -459,7 +429,6 @@ func TestValidateMasterConfiguration(t *testing.T) { AdvertiseAddress: "1.2.3.4", BindPort: 6443, }, - AuthorizationModes: []string{"Node", "RBAC"}, Networking: kubeadm.Networking{ ServiceSubnet: "10.96.0.1/12", DNSDomain: "cluster.local", @@ -473,7 +442,6 @@ func TestValidateMasterConfiguration(t *testing.T) { AdvertiseAddress: "1.2.3.4", BindPort: 6443, }, - AuthorizationModes: []string{"Node", "RBAC"}, Networking: kubeadm.Networking{ ServiceSubnet: "10.96.0.1/12", 
DNSDomain: "cluster.local", @@ -515,7 +483,6 @@ func TestValidateMasterConfiguration(t *testing.T) { }, }, }, - AuthorizationModes: []string{"Node", "RBAC"}, Networking: kubeadm.Networking{ ServiceSubnet: "10.96.0.1/12", DNSDomain: "cluster.local", @@ -557,7 +524,6 @@ func TestValidateMasterConfiguration(t *testing.T) { }, }, }, - AuthorizationModes: []string{"Node", "RBAC"}, Networking: kubeadm.Networking{ ServiceSubnet: "2001:db8::1/98", DNSDomain: "cluster.local", diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index a901b25a208..94655063933 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -252,7 +252,6 @@ func NewInit(cfgPath string, externalcfg *kubeadmapiv1alpha2.MasterConfiguration } glog.Infof("[init] using Kubernetes version: %s\n", cfg.KubernetesVersion) - glog.Infof("[init] using Authorization modes: %v\n", cfg.AuthorizationModes) glog.Infoln("[preflight] running pre-flight checks") diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 5137e198a32..ea53fade786 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -275,11 +275,6 @@ var ( Effect: v1.TaintEffectNoSchedule, } - // AuthorizationPolicyPath defines the supported location of authorization policy file - AuthorizationPolicyPath = filepath.Join(KubernetesDir, "abac_policy.json") - // AuthorizationWebhookConfigPath defines the supported location of webhook config file - AuthorizationWebhookConfigPath = filepath.Join(KubernetesDir, "webhook_authz.conf") - // DefaultTokenUsages specifies the default functions a token will get DefaultTokenUsages = bootstrapapi.KnownTokenUsages diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 127d3bb26d7..060f5e094be 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -46,14 +46,13 @@ const ( waitForPodsWithLabel = "wait-for-pods-with-label" testConfiguration = ` +apiVersion: kubeadm.k8s.io/v1alpha2 +kind: MasterConfiguration api: advertiseAddress: 1.2.3.4 bindPort: 6443 apiServerCertSANs: null apiServerExtraArgs: null -authorizationModes: -- Node -- RBAC certificatesDir: %s controllerManagerExtraArgs: null etcd: @@ -508,6 +507,7 @@ func getAPIServerHash(dir string) (string, error) { return fmt.Sprintf("%x", sha256.Sum256(fileBytes)), nil } +// TODO: Make this test function use the rest of the "official" API machinery helper funcs we have inside of kubeadm func getConfig(version, certsDir, etcdDataDir string) (*kubeadmapi.MasterConfiguration, error) { externalcfg := &kubeadmapiv1alpha2.MasterConfiguration{} internalcfg := &kubeadmapi.MasterConfiguration{} diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index a7e7312c363..a7f9df241dd 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -47,7 +47,6 @@ import ( kubeadmdefaults "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/pkg/apis/core/validation" - authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/util/initsystem" "k8s.io/kubernetes/pkg/util/procfs" @@ -889,16 +888,6 @@ func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfi ) } - // Check the config for authorization mode - 
for _, authzMode := range cfg.AuthorizationModes { - switch authzMode { - case authzmodes.ModeABAC: - checks = append(checks, FileExistingCheck{Path: kubeadmconstants.AuthorizationPolicyPath}) - case authzmodes.ModeWebhook: - checks = append(checks, FileExistingCheck{Path: kubeadmconstants.AuthorizationWebhookConfigPath}) - } - } - if ip := net.ParseIP(cfg.API.AdvertiseAddress); ip != nil { if ip.To4() == nil && ip.To16() != nil { checks = append(checks, diff --git a/cmd/kubeadm/app/util/config/masterconfig_test.go b/cmd/kubeadm/app/util/config/masterconfig_test.go index ee46de4cd14..997b4bd9c07 100644 --- a/cmd/kubeadm/app/util/config/masterconfig_test.go +++ b/cmd/kubeadm/app/util/config/masterconfig_test.go @@ -39,7 +39,7 @@ const ( master_v1alpha2YAML = "testdata/conversion/master/v1alpha2.yaml" master_internalYAML = "testdata/conversion/master/internal.yaml" master_incompleteYAML = "testdata/defaulting/master/incomplete.yaml" - master_defaultedYAML = "testdata/defaulting/master/defaulted.yaml" + master_defaultedYAML = "testdata/defaulting/master/defaulted.yaml" master_invalidYAML = "testdata/validation/invalid_mastercfg.yaml" master_beforeUpgradeYAML = "testdata/v1alpha1_upgrade/before.yaml" master_afterUpgradeYAML = "testdata/v1alpha1_upgrade/after.yaml" diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml index 04f70585496..04da36c1d2f 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml @@ -3,15 +3,13 @@ API: BindPort: 6443 ControlPlaneEndpoint: "" APIServerCertSANs: null -APIServerExtraArgs: null +APIServerExtraArgs: + authorization-mode: Node,RBAC,Webhook APIServerExtraVolumes: null AuditPolicyConfiguration: LogDir: /var/log/kubernetes/audit LogMaxAge: 2 Path: "" -AuthorizationModes: -- Node -- RBAC CIImageRepository: "" CRISocket: /var/run/dockershim.sock CertificatesDir: /etc/kubernetes/pki diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha1.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha1.yaml index 4edd30abbdb..75f36c4279f 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha1.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha1.yaml @@ -10,6 +10,7 @@ auditPolicy: authorizationModes: - Node - RBAC +- Webhook certificatesDir: /etc/kubernetes/pki cloudProvider: "" clusterName: kubernetes diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha1_without_TypeMeta.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha1_without_TypeMeta.yaml index 904c942bc41..e8065236cae 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha1_without_TypeMeta.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha1_without_TypeMeta.yaml @@ -10,6 +10,7 @@ auditPolicy: authorizationModes: - Node - RBAC +- Webhook certificatesDir: /etc/kubernetes/pki cloudProvider: "" clusterName: kubernetes diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml index 540c5a5392b..de6b2724910 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml @@ -2,14 +2,13 @@ api: advertiseAddress: 192.168.2.2 bindPort: 6443 controlPlaneEndpoint: "" +apiServerExtraArgs: + 
authorization-mode: Node,RBAC,Webhook apiVersion: kubeadm.k8s.io/v1alpha2 auditPolicy: logDir: /var/log/kubernetes/audit logMaxAge: 2 path: "" -authorizationModes: -- Node -- RBAC certificatesDir: /etc/kubernetes/pki clusterName: kubernetes criSocket: /var/run/dockershim.sock From 687fe22a6b3b059fc9ffa505e29e8e18ce3b6b71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 21 May 2018 08:49:58 +0300 Subject: [PATCH 053/307] autogenerated --- .../app/apis/kubeadm/v1alpha1/zz_generated.conversion.go | 3 +-- .../app/apis/kubeadm/v1alpha2/zz_generated.conversion.go | 2 -- .../app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go | 5 ----- cmd/kubeadm/app/apis/kubeadm/validation/BUILD | 1 - cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go | 5 ----- cmd/kubeadm/app/preflight/BUILD | 1 - 6 files changed, 1 insertion(+), 16 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go index 31a48329085..0d2ff1123e6 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go @@ -230,7 +230,7 @@ func autoConvert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in out.KubernetesVersion = in.KubernetesVersion // WARNING: in.CloudProvider requires manual conversion: does not exist in peer-type out.NodeName = in.NodeName - out.AuthorizationModes = *(*[]string)(unsafe.Pointer(&in.AuthorizationModes)) + // WARNING: in.AuthorizationModes requires manual conversion: does not exist in peer-type out.NoTaintMaster = in.NoTaintMaster // WARNING: in.PrivilegedPods requires manual conversion: does not exist in peer-type out.Token = in.Token @@ -275,7 +275,6 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in } out.KubernetesVersion = in.KubernetesVersion out.NodeName = in.NodeName - out.AuthorizationModes = *(*[]string)(unsafe.Pointer(&in.AuthorizationModes)) out.NoTaintMaster = in.NoTaintMaster out.Token = in.Token out.TokenTTL = (*meta_v1.Duration)(unsafe.Pointer(in.TokenTTL)) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go index 3a1f2c9a2cb..0f3cb7c24d3 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go @@ -233,7 +233,6 @@ func autoConvert_v1alpha2_MasterConfiguration_To_kubeadm_MasterConfiguration(in } out.KubernetesVersion = in.KubernetesVersion out.NodeName = in.NodeName - out.AuthorizationModes = *(*[]string)(unsafe.Pointer(&in.AuthorizationModes)) out.NoTaintMaster = in.NoTaintMaster out.Token = in.Token out.TokenTTL = (*meta_v1.Duration)(unsafe.Pointer(in.TokenTTL)) @@ -282,7 +281,6 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha2_MasterConfiguration(in } out.KubernetesVersion = in.KubernetesVersion out.NodeName = in.NodeName - out.AuthorizationModes = *(*[]string)(unsafe.Pointer(&in.AuthorizationModes)) out.NoTaintMaster = in.NoTaintMaster out.Token = in.Token out.TokenTTL = (*meta_v1.Duration)(unsafe.Pointer(in.TokenTTL)) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go index 40a9517ae99..108bfbd0dc3 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go @@ -181,11 
+181,6 @@ func (in *MasterConfiguration) DeepCopyInto(out *MasterConfiguration) { in.Etcd.DeepCopyInto(&out.Etcd) in.KubeletConfiguration.DeepCopyInto(&out.KubeletConfiguration) out.Networking = in.Networking - if in.AuthorizationModes != nil { - in, out := &in.AuthorizationModes, &out.AuthorizationModes - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.TokenTTL != nil { in, out := &in.TokenTTL, &out.TokenTTL if *in == nil { diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/BUILD b/cmd/kubeadm/app/apis/kubeadm/validation/BUILD index 5611e972b00..12cb119844f 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/validation/BUILD @@ -12,7 +12,6 @@ go_library( "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/token:go_default_library", "//pkg/apis/core/validation:go_default_library", - "//pkg/kubeapiserver/authorizer/modes:go_default_library", "//pkg/kubelet/apis/kubeletconfig:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/validation:go_default_library", diff --git a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go index 6e63771e975..0e080e0323d 100644 --- a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go @@ -181,11 +181,6 @@ func (in *MasterConfiguration) DeepCopyInto(out *MasterConfiguration) { in.Etcd.DeepCopyInto(&out.Etcd) in.KubeletConfiguration.DeepCopyInto(&out.KubeletConfiguration) out.Networking = in.Networking - if in.AuthorizationModes != nil { - in, out := &in.AuthorizationModes, &out.AuthorizationModes - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.TokenTTL != nil { in, out := &in.TokenTTL, &out.TokenTTL if *in == nil { diff --git a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index 35717029fd7..d4ddb62f37d 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -53,7 +53,6 @@ go_library( "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//pkg/apis/core/validation:go_default_library", - "//pkg/kubeapiserver/authorizer/modes:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/util/initsystem:go_default_library", "//pkg/util/procfs:go_default_library", From ddca1be88c2190ab3f9961e9ab2ede7dfad29e6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 21 May 2018 09:20:11 +0300 Subject: [PATCH 054/307] kubeadm: Remove .ImagePullPolicy --- cmd/kubeadm/app/apis/kubeadm/types.go | 3 --- cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go | 1 + .../kubeadm/v1alpha1/zz_generated.conversion.go | 3 +-- cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go | 2 -- .../kubeadm/v1alpha2/zz_generated.conversion.go | 2 -- cmd/kubeadm/app/cmd/init.go | 13 +++++-------- cmd/kubeadm/app/phases/controlplane/manifests.go | 6 +++--- cmd/kubeadm/app/phases/etcd/local.go | 2 +- .../config/testdata/conversion/master/internal.yaml | 1 - 9 files changed, 11 insertions(+), 22 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index 8ffdfb2c497..9fe9b0ba811 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -97,9 +97,6 @@ type MasterConfiguration struct { // CertificatesDir specifies where to store or look for all required certificates. 
CertificatesDir string - // ImagePullPolicy for control plane images. Can be Always, IfNotPresent or Never. - ImagePullPolicy v1.PullPolicy - // ImageRepository is the container registry to pull control plane images from. ImageRepository string diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go index 814ad8b0ed7..9d4033acd84 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go @@ -42,6 +42,7 @@ func Convert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in *Mas UpgradeCloudProvider(in, out) // We don't support migrating information from the .PrivilegedPods field which was removed in v1alpha2 + // We don't support migrating information from the .ImagePullPolicy field which was removed in v1alpha2 return nil } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go index 31a48329085..5b150e68042 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go @@ -247,7 +247,7 @@ func autoConvert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in out.APIServerCertSANs = *(*[]string)(unsafe.Pointer(&in.APIServerCertSANs)) out.CertificatesDir = in.CertificatesDir out.ImageRepository = in.ImageRepository - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) + // WARNING: in.ImagePullPolicy requires manual conversion: does not exist in peer-type out.UnifiedControlPlaneImage = in.UnifiedControlPlaneImage if err := Convert_v1alpha1_AuditPolicyConfiguration_To_kubeadm_AuditPolicyConfiguration(&in.AuditPolicyConfiguration, &out.AuditPolicyConfiguration, s); err != nil { return err @@ -290,7 +290,6 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in out.SchedulerExtraVolumes = *(*[]HostPathMount)(unsafe.Pointer(&in.SchedulerExtraVolumes)) out.APIServerCertSANs = *(*[]string)(unsafe.Pointer(&in.APIServerCertSANs)) out.CertificatesDir = in.CertificatesDir - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) out.ImageRepository = in.ImageRepository // INFO: in.CIImageRepository opted out of conversion generation out.UnifiedControlPlaneImage = in.UnifiedControlPlaneImage diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go index 1a34dc7d8ae..cef930625c4 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go @@ -98,8 +98,6 @@ type MasterConfiguration struct { // ImageRepository what container registry to pull control plane images from ImageRepository string `json:"imageRepository"` - // ImagePullPolicy that control plane images. Can be Always, IfNotPresent or Never. - ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` // UnifiedControlPlaneImage specifies if a specific container image should // be used for all control plane components. 
UnifiedControlPlaneImage string `json:"unifiedControlPlaneImage"` diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go index 3a1f2c9a2cb..9953540f873 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go @@ -249,7 +249,6 @@ func autoConvert_v1alpha2_MasterConfiguration_To_kubeadm_MasterConfiguration(in out.APIServerCertSANs = *(*[]string)(unsafe.Pointer(&in.APIServerCertSANs)) out.CertificatesDir = in.CertificatesDir out.ImageRepository = in.ImageRepository - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) out.UnifiedControlPlaneImage = in.UnifiedControlPlaneImage if err := Convert_v1alpha2_AuditPolicyConfiguration_To_kubeadm_AuditPolicyConfiguration(&in.AuditPolicyConfiguration, &out.AuditPolicyConfiguration, s); err != nil { return err @@ -297,7 +296,6 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha2_MasterConfiguration(in out.SchedulerExtraVolumes = *(*[]HostPathMount)(unsafe.Pointer(&in.SchedulerExtraVolumes)) out.APIServerCertSANs = *(*[]string)(unsafe.Pointer(&in.APIServerCertSANs)) out.CertificatesDir = in.CertificatesDir - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) out.ImageRepository = in.ImageRepository // INFO: in.CIImageRepository opted out of conversion generation out.UnifiedControlPlaneImage = in.UnifiedControlPlaneImage diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index a901b25a208..3464c5fdb53 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -31,7 +31,6 @@ import ( "github.com/spf13/cobra" flag "github.com/spf13/pflag" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -92,12 +91,13 @@ var ( This error is likely caused by: - The kubelet is not running - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled) - - Either there is no internet connection, or imagePullPolicy is set to "Never", - so the kubelet cannot pull or find the following control plane images: + - No internet connection is available so the kubelet cannot pull or find the following control plane images: - {{ .APIServerImage }} - {{ .ControllerManagerImage }} - {{ .SchedulerImage }} - {{ .EtcdImage }} (only if no external etcd endpoints are configured) + - You can check or miligate this in beforehand with "kubeadm config images pull" to make sure the images + are downloaded locally and cached. If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands: - 'systemctl status kubelet' @@ -538,12 +538,9 @@ func getWaiter(i *Init, client clientset.Interface) apiclient.Waiter { return dryrunutil.NewWaiter() } + // TODO: List images locally using `crictl` and pull in preflight checks if not available + // When we do that, we can always assume the images exist at this point and have a shorter timeout. 
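	// Until then, the mitigation quoted in the error text above applies: fetching the
	// images ahead of time keeps this generous timeout from being exercised, e.g.:
	//   kubeadm config images pull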
timeout := 30 * time.Minute - - // No need for a large timeout if we don't expect downloads - if i.cfg.ImagePullPolicy == v1.PullNever { - timeout = 60 * time.Second - } return apiclient.NewKubeWaiter(client, timeout, os.Stdout) } diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go index 5fd0aaea091..473472f50ee 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -76,7 +76,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version. kubeadmconstants.KubeAPIServer: staticpodutil.ComponentPod(v1.Container{ Name: kubeadmconstants.KubeAPIServer, Image: images.GetCoreImage(kubeadmconstants.KubeAPIServer, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage), - ImagePullPolicy: cfg.ImagePullPolicy, + ImagePullPolicy: v1.PullIfNotPresent, Command: getAPIServerCommand(cfg), VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer)), LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeAPIServer, int(cfg.API.BindPort), "/healthz", v1.URISchemeHTTPS), @@ -86,7 +86,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version. kubeadmconstants.KubeControllerManager: staticpodutil.ComponentPod(v1.Container{ Name: kubeadmconstants.KubeControllerManager, Image: images.GetCoreImage(kubeadmconstants.KubeControllerManager, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage), - ImagePullPolicy: cfg.ImagePullPolicy, + ImagePullPolicy: v1.PullIfNotPresent, Command: getControllerManagerCommand(cfg, k8sVersion), VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeControllerManager)), LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeControllerManager, 10252, "/healthz", v1.URISchemeHTTP), @@ -96,7 +96,7 @@ func GetStaticPodSpecs(cfg *kubeadmapi.MasterConfiguration, k8sVersion *version. 
kubeadmconstants.KubeScheduler: staticpodutil.ComponentPod(v1.Container{ Name: kubeadmconstants.KubeScheduler, Image: images.GetCoreImage(kubeadmconstants.KubeScheduler, cfg.GetControlPlaneImageRepository(), cfg.KubernetesVersion, cfg.UnifiedControlPlaneImage), - ImagePullPolicy: cfg.ImagePullPolicy, + ImagePullPolicy: v1.PullIfNotPresent, Command: getSchedulerCommand(cfg), VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeScheduler)), LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeScheduler, 10251, "/healthz", v1.URISchemeHTTP), diff --git a/cmd/kubeadm/app/phases/etcd/local.go b/cmd/kubeadm/app/phases/etcd/local.go index f705131c511..8af1bd8db34 100644 --- a/cmd/kubeadm/app/phases/etcd/local.go +++ b/cmd/kubeadm/app/phases/etcd/local.go @@ -61,7 +61,7 @@ func GetEtcdPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.Pod { Name: kubeadmconstants.Etcd, Command: getEtcdCommand(cfg), Image: images.GetCoreImage(kubeadmconstants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Image), - ImagePullPolicy: cfg.ImagePullPolicy, + ImagePullPolicy: v1.PullIfNotPresent, // Mount the etcd datadir path read-write so etcd can store data in a more persistent manner VolumeMounts: []v1.VolumeMount{ staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.DataDir, false), diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml index 04f70585496..b994ae72efa 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml @@ -29,7 +29,6 @@ Etcd: PeerCertSANs: null ServerCertSANs: null FeatureGates: null -ImagePullPolicy: "" ImageRepository: k8s.gcr.io KubeProxy: Config: From 8259dcbaa78a44bac9da6349c2a150430327e934 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 7 May 2018 09:14:40 +0000 Subject: [PATCH 055/307] add block device support for azure disk add plugin field for azure dataDisk struct add azure_dd_block_test fix comments fix comments --- pkg/volume/azure_dd/BUILD | 5 + pkg/volume/azure_dd/attacher.go | 10 +- pkg/volume/azure_dd/azure_common.go | 12 +- pkg/volume/azure_dd/azure_dd.go | 12 +- pkg/volume/azure_dd/azure_dd_block.go | 152 +++++++++++++++++++++ pkg/volume/azure_dd/azure_dd_block_test.go | 145 ++++++++++++++++++++ pkg/volume/azure_dd/azure_mounter.go | 4 +- pkg/volume/azure_dd/azure_provision.go | 2 +- 8 files changed, 323 insertions(+), 19 deletions(-) create mode 100644 pkg/volume/azure_dd/azure_dd_block.go create mode 100644 pkg/volume/azure_dd/azure_dd_block_test.go diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index 3ffe7075050..c4913bfba6e 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -12,6 +12,7 @@ go_library( "attacher.go", "azure_common.go", "azure_dd.go", + "azure_dd_block.go", "azure_mounter.go", "azure_provision.go", ] + select({ @@ -60,6 +61,7 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -89,6 +91,7 @@ go_test( name = "go_default_test", srcs = [ 
"azure_common_test.go", + "azure_dd_block_test.go", "azure_dd_test.go", ], embed = [":go_default_library"], @@ -97,6 +100,8 @@ go_test( "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ], ) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 06c67a57934..4f7b50f378a 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -57,7 +57,7 @@ var getLunMutex = keymutex.NewKeyMutex() // Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { - volumeSource, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { glog.Warningf("failed to get azure disk spec (%v)", err) return "", err @@ -114,7 +114,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty volumeSpecMap := make(map[string]*volume.Spec) volumeIDList := []string{} for _, spec := range specs { - volumeSource, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err) continue @@ -151,7 +151,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { var err error - volumeSource, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { return "", err } @@ -203,7 +203,7 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, // this is generalized for both managed and blob disks // we also prefix the hash with m/b based on disk kind func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { - volumeSource, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { return "", err } @@ -250,7 +250,7 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str } } - volumeSource, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { return err } diff --git a/pkg/volume/azure_dd/azure_common.go b/pkg/volume/azure_dd/azure_common.go index 54d05f5dabd..eeb95e6eb0a 100644 --- a/pkg/volume/azure_dd/azure_common.go +++ b/pkg/volume/azure_dd/azure_common.go @@ -46,6 +46,7 @@ type dataDisk struct { volumeName string diskName string podUID types.UID + plugin *azureDataDiskPlugin } var ( @@ -82,7 +83,7 @@ func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (s return pdPath, nil } -func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk { +func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost, plugin *azureDataDiskPlugin) *dataDisk { var metricProvider volume.MetricsProvider if podUID != "" { metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host)) @@ -93,19 +94,20 @@ func makeDataDisk(volumeName string, podUID types.UID, diskName string, host vol volumeName: volumeName, diskName: diskName, podUID: podUID, + plugin: plugin, } } -func 
getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) { +func getVolumeSource(spec *volume.Spec) (volumeSource *v1.AzureDiskVolumeSource, readOnly bool, err error) { if spec.Volume != nil && spec.Volume.AzureDisk != nil { - return spec.Volume.AzureDisk, nil + return spec.Volume.AzureDisk, spec.Volume.AzureDisk.ReadOnly != nil && *spec.Volume.AzureDisk.ReadOnly, nil } if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil { - return spec.PersistentVolume.Spec.AzureDisk, nil + return spec.PersistentVolume.Spec.AzureDisk, spec.ReadOnly, nil } - return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type") + return nil, false, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type") } func normalizeKind(kind string) (v1.AzureDataDiskKind, error) { diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 21249594852..ec7ab8debf5 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -81,7 +81,7 @@ func (plugin *azureDataDiskPlugin) GetPluginName() string { } func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) { - volumeSource, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { return "", err } @@ -140,12 +140,12 @@ func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { } func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { - volumeSource, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { return nil, err } - disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host) + disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host, plugin) return &azureDiskDeleter{ spec: spec, @@ -166,11 +166,11 @@ func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) } func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) { - volumeSource, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { return nil, err } - disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host) + disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host, plugin) return &azureDiskMounter{ plugin: plugin, @@ -181,7 +181,7 @@ func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, op } func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { - disk := makeDataDisk(volName, podUID, "", plugin.host) + disk := makeDataDisk(volName, podUID, "", plugin.host, plugin) return &azureDiskUnmounter{ plugin: plugin, diff --git a/pkg/volume/azure_dd/azure_dd_block.go b/pkg/volume/azure_dd/azure_dd_block.go new file mode 100644 index 00000000000..6a02e786235 --- /dev/null +++ b/pkg/volume/azure_dd/azure_dd_block.go @@ -0,0 +1,152 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure_dd + +import ( + "fmt" + "path/filepath" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/util/mount" + kstrings "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" +) + +var _ volume.VolumePlugin = &azureDataDiskPlugin{} +var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{} +var _ volume.BlockVolumePlugin = &azureDataDiskPlugin{} +var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{} +var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{} + +func (plugin *azureDataDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { + pluginDir := plugin.host.GetVolumeDevicePluginDir(azureDataDiskPluginName) + blkutil := volumepathhandler.NewBlockVolumePathHandler() + globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) + if err != nil { + return nil, err + } + glog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID) + + globalMapPath := filepath.Dir(globalMapPathUUID) + if len(globalMapPath) <= 1 { + return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) + } + + return getVolumeSpecFromGlobalMapPath(globalMapPath) +} + +func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { + // Get volume spec information from globalMapPath + // globalMapPath example: + // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} + // plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX + diskName := filepath.Base(globalMapPath) + if len(diskName) <= 1 { + return nil, fmt.Errorf("failed to get diskName from global path=%s", globalMapPath) + } + glog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName) + block := v1.PersistentVolumeBlock + pv := &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AzureDisk: &v1.AzureDiskVolumeSource{ + DiskName: diskName, + }, + }, + VolumeMode: &block, + }, + } + + return volume.NewSpecFromPersistentVolume(pv, true), nil +} + +// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification. +func (plugin *azureDataDiskPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) { + // If this is called via GenerateUnmapDeviceFunc(), pod is nil. + // Pass empty string as dummy uid since uid isn't used in the case. 
+ var uid types.UID + if pod != nil { + uid = pod.UID + } + + return plugin.newBlockVolumeMapperInternal(spec, uid, plugin.host.GetMounter(plugin.GetPluginName())) +} + +func (plugin *azureDataDiskPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.BlockVolumeMapper, error) { + volumeSource, readOnly, err := getVolumeSource(spec) + if err != nil { + return nil, err + } + + disk := makeDataDisk(spec.Name(), podUID, volumeSource.DiskName, plugin.host, plugin) + + return &azureDataDiskMapper{ + dataDisk: disk, + readOnly: readOnly, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) { + return plugin.newUnmapperInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName())) +} + +func (plugin *azureDataDiskPlugin) newUnmapperInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) { + disk := makeDataDisk(volName, podUID, "", plugin.host, plugin) + return &azureDataDiskUnmapper{dataDisk: disk}, nil +} + +func (c *azureDataDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { + return nil +} + +type azureDataDiskUnmapper struct { + *dataDisk +} + +var _ volume.BlockVolumeUnmapper = &azureDataDiskUnmapper{} + +type azureDataDiskMapper struct { + *dataDisk + readOnly bool +} + +var _ volume.BlockVolumeMapper = &azureDataDiskMapper{} + +func (b *azureDataDiskMapper) SetUpDevice() (string, error) { + return "", nil +} + +// GetGlobalMapPath returns global map path and error +// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID +// plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX +func (disk *dataDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) { + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + return "", err + } + return filepath.Join(disk.plugin.host.GetVolumeDevicePluginDir(azureDataDiskPluginName), string(volumeSource.DiskName)), nil +} + +// GetPodDeviceMapPath returns pod device map path and volume name +// path: pods/{podUid}/volumeDevices/kubernetes.io~azure +func (disk *dataDisk) GetPodDeviceMapPath() (string, string) { + name := azureDataDiskPluginName + return disk.plugin.host.GetPodVolumeDeviceDir(disk.podUID, kstrings.EscapeQualifiedNameForDisk(name)), disk.volumeName +} diff --git a/pkg/volume/azure_dd/azure_dd_block_test.go b/pkg/volume/azure_dd/azure_dd_block_test.go new file mode 100644 index 00000000000..127870f707d --- /dev/null +++ b/pkg/volume/azure_dd/azure_dd_block_test.go @@ -0,0 +1,145 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure_dd + +import ( + "os" + "path/filepath" + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utiltesting "k8s.io/client-go/util/testing" + "k8s.io/kubernetes/pkg/volume" + volumetest "k8s.io/kubernetes/pkg/volume/testing" +) + +const ( + testDiskName = "disk1" + testPVName = "pv1" + testGlobalPath = "plugins/kubernetes.io/azure-disk/volumeDevices/disk1" + testPodPath = "pods/poduid/volumeDevices/kubernetes.io~azure-disk" +) + +func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) { + // make our test path for fake GlobalMapPath + // /tmp symbolized our pluginDir + // /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/azure-disk/volumeDevices/disk1 + tmpVDir, err := utiltesting.MkTmpdir("azureDiskBlockTest") + if err != nil { + t.Fatalf("can't make a temp dir: %v", err) + } + //deferred clean up + defer os.RemoveAll(tmpVDir) + + expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath) + + //Bad Path + badspec, err := getVolumeSpecFromGlobalMapPath("") + if badspec != nil || err == nil { + t.Errorf("Expected not to get spec from GlobalMapPath but did") + } + + // Good Path + spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath) + if spec == nil || err != nil { + t.Fatalf("Failed to get spec from GlobalMapPath: %v", err) + } + if spec.PersistentVolume.Spec.AzureDisk.DiskName != testDiskName { + t.Errorf("Invalid pdName from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.AzureDisk.DiskName) + } + block := v1.PersistentVolumeBlock + specMode := spec.PersistentVolume.Spec.VolumeMode + if &specMode == nil { + t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", &specMode, block) + } + if *specMode != block { + t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block) + } +} + +func getTestVolume(readOnly bool, path string, isBlock bool) *volume.Spec { + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPVName, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AzureDisk: &v1.AzureDiskVolumeSource{ + DiskName: testDiskName, + }, + }, + }, + } + + if isBlock { + blockMode := v1.PersistentVolumeBlock + pv.Spec.VolumeMode = &blockMode + } + return volume.NewSpecFromPersistentVolume(pv, readOnly) +} + +func TestGetPodAndPluginMapPaths(t *testing.T) { + tmpVDir, err := utiltesting.MkTmpdir("azureDiskBlockTest") + if err != nil { + t.Fatalf("can't make a temp dir: %v", err) + } + //deferred clean up + defer os.RemoveAll(tmpVDir) + + expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath) + expectedPodPath := filepath.Join(tmpVDir, testPodPath) + + spec := getTestVolume(false, tmpVDir, true /*isBlock*/) + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil)) + plug, err := plugMgr.FindMapperPluginByName(azureDataDiskPluginName) + if err != nil { + os.RemoveAll(tmpVDir) + t.Fatalf("Can't find the plugin by name: %q", azureDataDiskPluginName) + } + if plug.GetPluginName() != azureDataDiskPluginName { + t.Fatalf("Wrong name: %s", plug.GetPluginName()) + } + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}} + mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{}) + if err != nil { + t.Fatalf("Failed to make a new Mounter: %v", err) + } + if mapper == nil { + t.Fatalf("Got a nil Mounter") + } + + //GetGlobalMapPath + gMapPath, err := 
mapper.GetGlobalMapPath(spec) + if err != nil || len(gMapPath) == 0 { + t.Fatalf("Invalid GlobalMapPath from spec: %s, error: %v", spec.PersistentVolume.Spec.AzureDisk.DiskName, err) + } + if gMapPath != expectedGlobalPath { + t.Errorf("Failed to get GlobalMapPath: %s, expected %s", gMapPath, expectedGlobalPath) + } + + //GetPodDeviceMapPath + gDevicePath, gVolName := mapper.GetPodDeviceMapPath() + if gDevicePath != expectedPodPath { + t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath) + } + if gVolName != testPVName { + t.Errorf("Got unexpected volNamne: %s, expected %s", gVolName, testPVName) + } +} diff --git a/pkg/volume/azure_dd/azure_mounter.go b/pkg/volume/azure_dd/azure_mounter.go index 514a6dcb108..d8b7ae50df6 100644 --- a/pkg/volume/azure_dd/azure_mounter.go +++ b/pkg/volume/azure_dd/azure_mounter.go @@ -44,7 +44,7 @@ var _ volume.Mounter = &azureDiskMounter{} func (m *azureDiskMounter) GetAttributes() volume.Attributes { readOnly := false - volumeSource, err := getVolumeSource(m.spec) + volumeSource, _, err := getVolumeSource(m.spec) if err != nil { glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err) } else if volumeSource.ReadOnly != nil { @@ -71,7 +71,7 @@ func (m *azureDiskMounter) GetPath() string { func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { mounter := m.plugin.host.GetMounter(m.plugin.GetPluginName()) - volumeSource, err := getVolumeSource(m.spec) + volumeSource, _, err := getVolumeSource(m.spec) if err != nil { glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name()) diff --git a/pkg/volume/azure_dd/azure_provision.go b/pkg/volume/azure_dd/azure_provision.go index 6888bfc4ee9..bf7eae37db4 100644 --- a/pkg/volume/azure_dd/azure_provision.go +++ b/pkg/volume/azure_dd/azure_provision.go @@ -46,7 +46,7 @@ func (d *azureDiskDeleter) GetPath() string { } func (d *azureDiskDeleter) Delete() error { - volumeSource, err := getVolumeSource(d.spec) + volumeSource, _, err := getVolumeSource(d.spec) if err != nil { return err } From 0b7b2de8c80f89c0cb147da6df4adaafbd69c0b7 Mon Sep 17 00:00:00 2001 From: Yonatan Kiron Date: Wed, 25 Apr 2018 17:43:06 +0300 Subject: [PATCH 056/307] Add optional flag of node port range --- hack/local-up-cluster.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index a3d5312d2f0..390d96a94cd 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -217,6 +217,7 @@ API_SECURE_PORT=${API_SECURE_PORT:-6443} API_HOST=${API_HOST:-localhost} API_HOST_IP=${API_HOST_IP:-"127.0.0.1"} ADVERTISE_ADDRESS=${ADVERTISE_ADDRESS:-""} +NODE_PORT_RANGE=${NODE_PORT_RANGE:-""} API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"} EXTERNAL_HOSTNAME=${EXTERNAL_HOSTNAME:-localhost} @@ -524,6 +525,10 @@ function start_apiserver { if [[ "${ADVERTISE_ADDRESS}" != "" ]] ; then advertise_address="--advertise-address=${ADVERTISE_ADDRESS}" fi + node_port_range="" + if [[ "${NODE_PORT_RANGE}" != "" ]] ; then + node_port_range="--service-node-port-range=${NODE_PORT_RANGE}" + fi # Create CA signers if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then @@ -565,6 +570,7 @@ function start_apiserver { ${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${swagger_arg} ${audit_arg} ${authorizer_arg} ${priv_arg} ${runtime_config} \ ${cloud_config_arg} \ ${advertise_address} \ + ${node_port_range} \ --v=${LOG_LEVEL} \ --vmodule="${LOG_SPEC}" \ --cert-dir="${CERT_DIR}" \ From 
4be6328126eb3473e5a3def7f87510e928a02171 Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Mon, 21 May 2018 17:29:12 +0800 Subject: [PATCH 057/307] remove unused gc code --- hack/.golint_failures | 1 - .../garbagecollector/metaonly/BUILD | 20 --- .../garbagecollector/metaonly/metaonly.go | 66 ------- .../metaonly/metaonly_test.go | 164 ------------------ 4 files changed, 251 deletions(-) delete mode 100644 pkg/controller/garbagecollector/metaonly/metaonly.go delete mode 100644 pkg/controller/garbagecollector/metaonly/metaonly_test.go diff --git a/hack/.golint_failures b/hack/.golint_failures index dfb01b7005c..de58733f6fd 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -103,7 +103,6 @@ pkg/controller/deployment/util pkg/controller/disruption pkg/controller/endpoint pkg/controller/garbagecollector -pkg/controller/garbagecollector/metaonly pkg/controller/job pkg/controller/namespace pkg/controller/namespace/deletion diff --git a/pkg/controller/garbagecollector/metaonly/BUILD b/pkg/controller/garbagecollector/metaonly/BUILD index d20f61e88b1..671a5e2b8e6 100644 --- a/pkg/controller/garbagecollector/metaonly/BUILD +++ b/pkg/controller/garbagecollector/metaonly/BUILD @@ -3,38 +3,18 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", - "go_test", ) go_library( name = "go_default_library", srcs = [ - "metaonly.go", "types.go", "zz_generated.deepcopy.go", ], importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", deps = [ - "//pkg/api/legacyscheme:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["metaonly_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core/install:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", ], ) diff --git a/pkg/controller/garbagecollector/metaonly/metaonly.go b/pkg/controller/garbagecollector/metaonly/metaonly.go deleted file mode 100644 index aec98ba3356..00000000000 --- a/pkg/controller/garbagecollector/metaonly/metaonly.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package metaonly - -import ( - "fmt" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/kubernetes/pkg/api/legacyscheme" -) - -type metaOnlyJSONScheme struct{} - -// This function can be extended to mapping different gvk to different MetadataOnlyObject, -// which embedded with different version of ObjectMeta. Currently the system -// only supports metav1.ObjectMeta. -func gvkToMetadataOnlyObject(gvk schema.GroupVersionKind) runtime.Object { - if strings.HasSuffix(gvk.Kind, "List") { - return &MetadataOnlyObjectList{} - } else { - return &MetadataOnlyObject{} - } -} - -func NewMetadataCodecFactory() serializer.CodecFactory { - // populating another scheme from legacyscheme.Scheme, registering every kind with - // MetadataOnlyObject (or MetadataOnlyObjectList). - scheme := runtime.NewScheme() - allTypes := legacyscheme.Scheme.AllKnownTypes() - for kind := range allTypes { - if kind.Version == runtime.APIVersionInternal { - continue - } - if kind == metav1.Unversioned.WithKind("Status") { - // this is added below as unversioned - continue - } - metaOnlyObject := gvkToMetadataOnlyObject(kind) - scheme.AddKnownTypeWithName(kind, metaOnlyObject) - } - scheme.AddUnversionedTypes(metav1.Unversioned, &metav1.Status{}) - return serializer.NewCodecFactory(scheme) -} - -// String converts a MetadataOnlyObject to a human-readable string. -func (metaOnly MetadataOnlyObject) String() string { - return fmt.Sprintf("%s/%s, name: %s, DeletionTimestamp:%v", metaOnly.TypeMeta.APIVersion, metaOnly.TypeMeta.Kind, metaOnly.ObjectMeta.Name, metaOnly.ObjectMeta.DeletionTimestamp) -} diff --git a/pkg/controller/garbagecollector/metaonly/metaonly_test.go b/pkg/controller/garbagecollector/metaonly/metaonly_test.go deleted file mode 100644 index 2bdf9c879d5..00000000000 --- a/pkg/controller/garbagecollector/metaonly/metaonly_test.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package metaonly - -import ( - "encoding/json" - "reflect" - "testing" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - _ "k8s.io/kubernetes/pkg/apis/core/install" -) - -func getPod() *v1.Pod { - return &v1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "pod", - OwnerReferences: []metav1.OwnerReference{ - {UID: "1234"}, - }, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "fake-name", - Image: "fakeimage", - }, - }, - }, - } -} - -func getPodJson(t *testing.T) []byte { - data, err := json.Marshal(getPod()) - if err != nil { - t.Fatal(err) - } - return data -} - -func getPodListJson(t *testing.T) []byte { - data, err := json.Marshal(&v1.PodList{ - TypeMeta: metav1.TypeMeta{ - Kind: "PodList", - APIVersion: "v1", - }, - Items: []v1.Pod{ - *getPod(), - *getPod(), - }, - }) - if err != nil { - t.Fatal(err) - } - return data -} - -func verfiyMetadata(description string, t *testing.T, in *MetadataOnlyObject) { - pod := getPod() - if e, a := pod.ObjectMeta, in.ObjectMeta; !reflect.DeepEqual(e, a) { - t.Errorf("%s: expected %#v, got %#v", description, e, a) - } -} - -func TestDecodeToMetadataOnlyObject(t *testing.T) { - data := getPodJson(t) - cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()} - info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON) - if !ok { - t.Fatalf("expected to get a JSON serializer") - } - codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"}) - // decode with into - into := &MetadataOnlyObject{} - ret, _, err := codec.Decode(data, nil, into) - if err != nil { - t.Fatal(err) - } - metaOnly, ok := ret.(*MetadataOnlyObject) - if !ok { - t.Fatalf("expected ret to be *runtime.MetadataOnlyObject") - } - verfiyMetadata("check returned metaonly with into", t, metaOnly) - verfiyMetadata("check into", t, into) - // decode without into - ret, _, err = codec.Decode(data, nil, nil) - if err != nil { - t.Fatal(err) - } - metaOnly, ok = ret.(*MetadataOnlyObject) - if !ok { - t.Fatalf("expected ret to be *runtime.MetadataOnlyObject") - } - verfiyMetadata("check returned metaonly without into", t, metaOnly) -} - -func verifyListMetadata(t *testing.T, metaOnlyList *MetadataOnlyObjectList) { - items, err := meta.ExtractList(metaOnlyList) - if err != nil { - t.Fatal(err) - } - for _, item := range items { - metaOnly, ok := item.(*MetadataOnlyObject) - if !ok { - t.Fatalf("expected item to be *MetadataOnlyObject") - } - verfiyMetadata("check list", t, metaOnly) - } -} - -func TestDecodeToMetadataOnlyObjectList(t *testing.T) { - data := getPodListJson(t) - cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()} - info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON) - if !ok { - t.Fatalf("expected to get a JSON serializer") - } - codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"}) - // decode with into - into := &MetadataOnlyObjectList{} - ret, _, err := codec.Decode(data, nil, into) - if err != nil { - t.Fatal(err) - } - metaOnlyList, ok := ret.(*MetadataOnlyObjectList) - if !ok { - t.Fatalf("expected ret to be *runtime.UnstructuredList") - } - verifyListMetadata(t, 
metaOnlyList) - verifyListMetadata(t, into) - // decode without into - ret, _, err = codec.Decode(data, nil, nil) - if err != nil { - t.Fatal(err) - } - metaOnlyList, ok = ret.(*MetadataOnlyObjectList) - if !ok { - t.Fatalf("expected ret to be *runtime.UnstructuredList") - } - verifyListMetadata(t, metaOnlyList) -} From 31bf75c116d774f69598dda0cf2f0e2c55d1283f Mon Sep 17 00:00:00 2001 From: Jacob Gillespie Date: Mon, 21 May 2018 09:16:36 -0500 Subject: [PATCH 058/307] Fix running e2e tests with completed kube-system pods --- test/e2e/framework/util.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index a3d99d92c20..73dc9de60ff 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -704,10 +704,8 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN case res && err == nil: nOk++ case pod.Status.Phase == v1.PodSucceeded: - Logf("The status of Pod %s is Succeeded which is unexpected", pod.ObjectMeta.Name) - badPods = append(badPods, pod) - // it doesn't make sense to wait for this pod - return false, errors.New("unexpected Succeeded pod state") + // pod status is succeeded, it doesn't make sense to wait for this pod + continue case pod.Status.Phase != v1.PodFailed: Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase) notReady++ From b5648c3f61ef168e6a1c9c7b682745e28145dc1e Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Mon, 16 Apr 2018 15:15:03 -0700 Subject: [PATCH 059/307] dynamic Kubelet config reconciles ConfigMap updates --- cmd/kubelet/app/server.go | 4 +- pkg/apis/core/validation/validation.go | 14 +- pkg/kubelet/kubeletconfig/checkpoint/BUILD | 5 + .../kubeletconfig/checkpoint/configmap.go | 12 +- .../checkpoint/configmap_test.go | 53 +- .../kubeletconfig/checkpoint/download.go | 149 ++- .../kubeletconfig/checkpoint/download_test.go | 104 ++- .../kubeletconfig/checkpoint/store/fsstore.go | 46 +- .../checkpoint/store/fsstore_test.go | 375 ++++---- .../kubeletconfig/checkpoint/store/store.go | 1 + pkg/kubelet/kubeletconfig/configsync.go | 181 ++-- pkg/kubelet/kubeletconfig/controller.go | 111 ++- pkg/kubelet/kubeletconfig/status/status.go | 9 +- pkg/kubelet/kubeletconfig/watch.go | 59 +- test/e2e_node/dynamic_kubelet_config_test.go | 867 ++++++++++++------ test/integration/auth/node_test.go | 7 +- 16 files changed, 1303 insertions(+), 694 deletions(-) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 7adfb77b7d2..f386400a608 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -619,7 +619,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 && kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce { - kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)) + if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil { + return err + } } if kubeDeps.Auth == nil { diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 6f0e3eaea7b..6a01b7f6254 100644 --- 
a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -4150,11 +4150,10 @@ func validateNodeConfigSourceSpec(source *core.NodeConfigSource, fldPath *field. // validation specific to Node.Spec.ConfigSource.ConfigMap func validateConfigMapNodeConfigSourceSpec(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - // TODO(#61643): Prevent ref.UID from being set here when we switch from requiring UID to respecting all ConfigMap updates - if string(source.UID) == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("uid"), "uid must be set in spec")) + // uid and resourceVersion must not be set in spec + if string(source.UID) != "" { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("uid"), "uid must not be set in spec")) } - // resourceVersion must not be set in spec if source.ResourceVersion != "" { allErrs = append(allErrs, field.Forbidden(fldPath.Child("resourceVersion"), "resourceVersion must not be set in spec")) } @@ -4196,12 +4195,13 @@ func validateNodeConfigSourceStatus(source *core.NodeConfigSource, fldPath *fiel // validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood).ConfigMap func validateConfigMapNodeConfigSourceStatus(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - + // uid and resourceVersion must be set in status if string(source.UID) == "" { allErrs = append(allErrs, field.Required(fldPath.Child("uid"), "uid must be set in status")) } - // TODO(#63221): require ResourceVersion in status when we start respecting ConfigMap mutations (the Kubelet isn't tracking it internally until - // that PR, which makes it difficult to report for now). + if source.ResourceVersion == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("resourceVersion"), "resourceVersion must be set in status")) + } return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...) 
} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/BUILD b/pkg/kubelet/kubeletconfig/checkpoint/BUILD index cb21adad996..a0b74462938 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/BUILD +++ b/pkg/kubelet/kubeletconfig/checkpoint/BUILD @@ -19,7 +19,9 @@ go_test( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) @@ -40,8 +42,11 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/pkg/kubelet/kubeletconfig/checkpoint/configmap.go b/pkg/kubelet/kubeletconfig/checkpoint/configmap.go index 2cdc348f37d..b79e67e75b3 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/configmap.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/configmap.go @@ -34,9 +34,11 @@ var _ Payload = (*configMapPayload)(nil) // NewConfigMapPayload constructs a Payload backed by a ConfigMap, which must have a non-empty UID func NewConfigMapPayload(cm *apiv1.ConfigMap) (Payload, error) { if cm == nil { - return nil, fmt.Errorf("ConfigMap must be non-nil to be a Payload") - } else if len(cm.ObjectMeta.UID) == 0 { - return nil, fmt.Errorf("ConfigMap must have a UID to be a Payload") + return nil, fmt.Errorf("ConfigMap must be non-nil") + } else if cm.ObjectMeta.UID == "" { + return nil, fmt.Errorf("ConfigMap must have a non-empty UID") + } else if cm.ObjectMeta.ResourceVersion == "" { + return nil, fmt.Errorf("ConfigMap must have a non-empty ResourceVersion") } return &configMapPayload{cm}, nil @@ -46,6 +48,10 @@ func (p *configMapPayload) UID() string { return string(p.cm.UID) } +func (p *configMapPayload) ResourceVersion() string { + return p.cm.ResourceVersion +} + func (p *configMapPayload) Files() map[string]string { return p.cm.Data } diff --git a/pkg/kubelet/kubeletconfig/checkpoint/configmap_test.go b/pkg/kubelet/kubeletconfig/checkpoint/configmap_test.go index d7770f05a42..db44fab3bca 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/configmap_test.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/configmap_test.go @@ -34,19 +34,46 @@ func TestNewConfigMapPayload(t *testing.T) { cm *apiv1.ConfigMap err string }{ - {"nil v1/ConfigMap", nil, "must be non-nil"}, - {"empty v1/ConfigMap", &apiv1.ConfigMap{}, "must have a UID"}, - {"populated v1/ConfigMap", - &apiv1.ConfigMap{ + { + desc: "nil", + cm: nil, + err: "ConfigMap must be non-nil", + }, + { + desc: "missing uid", + cm: &apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + ResourceVersion: "rv", + }, + }, + err: "ConfigMap must have a non-empty UID", + }, + { + desc: "missing resourceVersion", + cm: &apiv1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "name", UID: "uid", }, + }, + err: "ConfigMap must have a non-empty ResourceVersion", + }, + { + desc: "populated v1/ConfigMap", + cm: &apiv1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + UID: "uid", + 
ResourceVersion: "rv", + }, Data: map[string]string{ "key1": "value1", "key2": "value2", }, - }, ""}, + }, + err: "", + }, } for _, c := range cases { @@ -66,7 +93,7 @@ func TestNewConfigMapPayload(t *testing.T) { func TestConfigMapPayloadUID(t *testing.T) { const expect = "uid" - payload, err := NewConfigMapPayload(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: expect}}) + payload, err := NewConfigMapPayload(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: expect, ResourceVersion: "rv"}}) if err != nil { t.Fatalf("error constructing payload: %v", err) } @@ -76,6 +103,18 @@ func TestConfigMapPayloadUID(t *testing.T) { } } +func TestConfigMapPayloadResourceVersion(t *testing.T) { + const expect = "rv" + payload, err := NewConfigMapPayload(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: "uid", ResourceVersion: expect}}) + if err != nil { + t.Fatalf("error constructing payload: %v", err) + } + resourceVersion := payload.ResourceVersion() + if expect != resourceVersion { + t.Errorf("expect %q, but got %q", expect, resourceVersion) + } +} + func TestConfigMapPayloadFiles(t *testing.T) { cases := []struct { desc string @@ -96,7 +135,7 @@ func TestConfigMapPayloadFiles(t *testing.T) { } for _, c := range cases { t.Run(c.desc, func(t *testing.T) { - payload, err := NewConfigMapPayload(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: "uid"}, Data: c.data}) + payload, err := NewConfigMapPayload(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: "uid", ResourceVersion: "rv"}, Data: c.data}) if err != nil { t.Fatalf("error constructing payload: %v", err) } diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download.go b/pkg/kubelet/kubeletconfig/checkpoint/download.go index 3b4854c0a55..eb7059806e6 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/download.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/download.go @@ -18,12 +18,18 @@ package checkpoint import ( "fmt" + "math/rand" + "time" apiv1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" + kuberuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" @@ -35,8 +41,13 @@ import ( // Payload represents a local copy of a config source (payload) object type Payload interface { // UID returns a globally unique (space and time) identifier for the payload. + // The return value is guaranteed non-empty. UID() string + // ResourceVersion returns a resource version for the payload. + // The return value is guaranteed non-empty. + ResourceVersion() string + // Files returns a map of filenames to file contents. Files() map[string]string @@ -46,16 +57,29 @@ type Payload interface { // RemoteConfigSource represents a remote config source object that can be downloaded as a Checkpoint type RemoteConfigSource interface { - // UID returns a globally unique identifier of the source described by the remote config source object - UID() string // KubeletFilename returns the name of the Kubelet config file as it should appear in the keys of Payload.Files() KubeletFilename() string + // APIPath returns the API path to the remote resource, e.g. 
its SelfLink APIPath() string - // Download downloads the remote config source object returns a Payload backed by the object, - // or a sanitized failure reason and error if the download fails - Download(client clientset.Interface) (Payload, string, error) - // Encode returns a []byte representation of the NodeConfigSource behind the RemoteConfigSource + + // UID returns the globally unique identifier for the most recently downloaded payload targeted by the source. + UID() string + + // ResourceVersion returns the resource version of the most recently downloaded payload targeted by the source. + ResourceVersion() string + + // Download downloads the remote config source's target object and returns a Payload backed by the object, + // or a sanitized failure reason and error if the download fails. + // Download takes an optional store as an argument. If provided, Download will check this store for the + // target object prior to contacting the API server. + // Download updates the local UID and ResourceVersion tracked by this source, based on the downloaded payload. + Download(client clientset.Interface, store cache.Store) (Payload, string, error) + + // Informer returns an informer that can be used to detect changes to the remote config source + Informer(client clientset.Interface, handler cache.ResourceEventHandlerFuncs) cache.SharedInformer + + // Encode returns a []byte representation of the object behind the RemoteConfigSource Encode() ([]byte, error) // NodeConfigSource returns a copy of the underlying apiv1.NodeConfigSource object. @@ -104,7 +128,11 @@ func DecodeRemoteConfigSource(data []byte) (RemoteConfigSource, error) { // we use the v1.NodeConfigSource type on internal and external, so no need to convert to external here source, _, err := NewRemoteConfigSource(&cs.Source) - return source, err + if err != nil { + return nil, err + } + + return source, nil } // EqualRemoteConfigSources is a helper for comparing remote config sources by @@ -123,10 +151,6 @@ type remoteConfigMap struct { var _ RemoteConfigSource = (*remoteConfigMap)(nil) -func (r *remoteConfigMap) UID() string { - return string(r.source.ConfigMap.UID) -} - func (r *remoteConfigMap) KubeletFilename() string { return r.source.ConfigMap.KubeletConfigKey } @@ -138,32 +162,82 @@ func (r *remoteConfigMap) APIPath() string { return fmt.Sprintf(configMapAPIPathFmt, ref.Namespace, ref.Name) } -func (r *remoteConfigMap) Download(client clientset.Interface) (Payload, string, error) { - var reason string - uid := string(r.source.ConfigMap.UID) +func (r *remoteConfigMap) UID() string { + return string(r.source.ConfigMap.UID) +} - utillog.Infof("attempting to download ConfigMap with UID %q", uid) +func (r *remoteConfigMap) ResourceVersion() string { + return r.source.ConfigMap.ResourceVersion +} - // get the ConfigMap via namespace/name, there doesn't seem to be a way to get it by UID - cm, err := client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(r.source.ConfigMap.Name, metav1.GetOptions{}) - if err != nil { - return nil, status.DownloadError, fmt.Errorf("%s, error: %v", status.DownloadError, err) +func (r *remoteConfigMap) Download(client clientset.Interface, store cache.Store) (Payload, string, error) { + var ( + cm *apiv1.ConfigMap + err error + ) + // check the in-memory store for the ConfigMap, so we can skip unnecessary downloads + if store != nil { + utillog.Infof("checking in-memory store for %s", r.APIPath()) + cm, err = getConfigMapFromStore(store, r.source.ConfigMap.Namespace, r.source.ConfigMap.Name) + if 
err != nil { + // just log the error, we'll attempt a direct download instead + utillog.Errorf("failed to check in-memory store for %s, error: %v", r.APIPath(), err) + } else if cm != nil { + utillog.Infof("found %s in in-memory store, UID: %s, ResourceVersion: %s", r.APIPath(), cm.UID, cm.ResourceVersion) + } else { + utillog.Infof("did not find %s in in-memory store", r.APIPath()) + } } - - // ensure that UID matches the UID on the source - if r.source.ConfigMap.UID != cm.UID { - reason = fmt.Sprintf(status.UIDMismatchErrorFmt, r.source.ConfigMap.UID, r.APIPath(), cm.UID) - return nil, reason, fmt.Errorf(reason) - } - + // if we didn't find the ConfigMap in the in-memory store, download it from the API server + if cm == nil { + utillog.Infof("attempting to download %s", r.APIPath()) + cm, err = client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Get(r.source.ConfigMap.Name, metav1.GetOptions{}) + if err != nil { + return nil, status.DownloadError, fmt.Errorf("%s, error: %v", status.DownloadError, err) + } + utillog.Infof("successfully downloaded %s, UID: %s, ResourceVersion: %s", r.APIPath(), cm.UID, cm.ResourceVersion) + } // Assert: Now we have a non-nil ConfigMap + // construct Payload from the ConfigMap payload, err := NewConfigMapPayload(cm) if err != nil { - reason = fmt.Sprintf("invalid downloaded object") - return nil, reason, fmt.Errorf("%s, error: %v", reason, err) + // We only expect an error here if ObjectMeta is lacking UID or ResourceVersion. This should + // never happen on objects in the informer's store, or objects downloaded from the API server + // directly, so we report InternalError. + return nil, status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err) + } + // update internal UID and ResourceVersion based on latest ConfigMap + r.source.ConfigMap.UID = cm.UID + r.source.ConfigMap.ResourceVersion = cm.ResourceVersion + return payload, "", nil +} + +func (r *remoteConfigMap) Informer(client clientset.Interface, handler cache.ResourceEventHandlerFuncs) cache.SharedInformer { + // select ConfigMap by name + fieldselector := fields.OneTermEqualSelector("metadata.name", r.source.ConfigMap.Name) + + // add some randomness to resync period, which can help avoid controllers falling into lock-step + minResyncPeriod := 15 * time.Minute + factor := rand.Float64() + 1 + resyncPeriod := time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor) + + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (kuberuntime.Object, error) { + return client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).List(metav1.ListOptions{ + FieldSelector: fieldselector.String(), + }) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return client.CoreV1().ConfigMaps(r.source.ConfigMap.Namespace).Watch(metav1.ListOptions{ + FieldSelector: fieldselector.String(), + ResourceVersion: options.ResourceVersion, + }) + }, } - utillog.Infof("successfully downloaded ConfigMap with UID %q", uid) - return payload, "", nil + informer := cache.NewSharedInformer(lw, &apiv1.ConfigMap{}, resyncPeriod) + informer.AddEventHandler(handler) + + return informer } func (r *remoteConfigMap) Encode() ([]byte, error) { @@ -182,3 +256,18 @@ func (r *remoteConfigMap) Encode() ([]byte, error) { func (r *remoteConfigMap) NodeConfigSource() *apiv1.NodeConfigSource { return r.source.DeepCopy() } + +func getConfigMapFromStore(store cache.Store, namespace, name string) (*apiv1.ConfigMap, error) { + key := fmt.Sprintf("%s/%s", namespace, name) + obj, ok, err 
:= store.GetByKey(key) + if err != nil || !ok { + return nil, err + } + cm, ok := obj.(*apiv1.ConfigMap) + if !ok { + err := fmt.Errorf("failed to cast object %s from informer's store to ConfigMap", key) + utillog.Errorf(err.Error()) + return nil, err + } + return cm, nil +} diff --git a/pkg/kubelet/kubeletconfig/checkpoint/download_test.go b/pkg/kubelet/kubeletconfig/checkpoint/download_test.go index 39f394100cf..df92ee9f66d 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/download_test.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/download_test.go @@ -25,7 +25,9 @@ import ( apiv1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" fakeclient "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" utiltest "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/test" ) @@ -119,75 +121,93 @@ func TestRemoteConfigMapAPIPath(t *testing.T) { func TestRemoteConfigMapDownload(t *testing.T) { cm := &apiv1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: "name", - Namespace: "namespace", - UID: "uid", + Name: "name", + Namespace: "namespace", + UID: "uid", + ResourceVersion: "1", }} - client := fakeclient.NewSimpleClientset(cm) - payload, err := NewConfigMapPayload(cm) + + source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Name: "name", + Namespace: "namespace", + KubeletConfigKey: "kubelet", + }} + + expectPayload, err := NewConfigMapPayload(cm) if err != nil { t.Fatalf("error constructing payload: %v", err) } - makeSource := func(source *apiv1.NodeConfigSource) RemoteConfigSource { - s, _, err := NewRemoteConfigSource(source) - if err != nil { - t.Fatalf("error constructing remote config source %v", err) - } - return s + missingStore := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + hasStore := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + if err := hasStore.Add(cm); err != nil { + t.Fatalf("unexpected error constructing hasStore") } + missingClient := fakeclient.NewSimpleClientset() + hasClient := fakeclient.NewSimpleClientset(cm) + cases := []struct { desc string - source RemoteConfigSource - expect Payload + client clientset.Interface + store cache.Store err string }{ { - desc: "object doesn't exist", - source: makeSource(&apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - Name: "bogus", - Namespace: "namespace", - UID: "bogus", - KubeletConfigKey: "kubelet", - }}), - expect: nil, + desc: "nil store, object does not exist in API server", + client: missingClient, err: "not found", }, { - desc: "UID is incorrect for namespace/name", - source: makeSource(&apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - Name: "name", - Namespace: "namespace", - UID: "bogus", - KubeletConfigKey: "kubelet", - }}), - expect: nil, - err: "does not match", + desc: "nil store, object exists in API server", + client: hasClient, }, { - desc: "object exists and reference is correct", - source: makeSource(&apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - Name: "name", - Namespace: "namespace", - UID: "uid", - KubeletConfigKey: "kubelet", - }}), - expect: payload, - err: "", + desc: "object exists in store and API server", + store: hasStore, + client: hasClient, + }, + { + desc: "object exists in store, but does not exist in API server", + store: hasStore, + client: missingClient, + }, + { + desc: "object does not exist in store, but exists in API server", + store: missingStore, + client: 
hasClient, + }, + { + desc: "object does not exist in store or API server", + client: missingClient, + store: missingStore, + err: "not found", }, } for _, c := range cases { t.Run(c.desc, func(t *testing.T) { - payload, _, err := c.source.Download(client) + // deep copy so we can always check the UID/ResourceVersion are set after Download + s, _, err := NewRemoteConfigSource(source.DeepCopy()) + if err != nil { + t.Fatalf("error constructing remote config source %v", err) + } + // attempt download + p, _, err := s.Download(c.client, c.store) utiltest.ExpectError(t, err, c.err) if err != nil { return } // downloaded object should match the expected - if !apiequality.Semantic.DeepEqual(c.expect.object(), payload.object()) { - t.Errorf("case %q, expect Checkpoint %s but got %s", c.desc, spew.Sdump(c.expect), spew.Sdump(payload)) + if !apiequality.Semantic.DeepEqual(expectPayload.object(), p.object()) { + t.Errorf("expect Checkpoint %s but got %s", spew.Sdump(expectPayload), spew.Sdump(p)) + } + // source UID and ResourceVersion should be updated by Download + if p.UID() != s.UID() { + t.Errorf("expect UID to be updated by Download to match payload: %s, but got source UID: %s", p.UID(), s.UID()) + } + if p.ResourceVersion() != s.ResourceVersion() { + t.Errorf("expect ResourceVersion to be updated by Download to match payload: %s, but got source ResourceVersion: %s", p.ResourceVersion(), s.ResourceVersion()) } }) } diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go b/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go index 8fcc99e62a7..94cd42fea67 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore.go @@ -75,32 +75,46 @@ func (s *fsStore) Initialize() error { return utilfiles.EnsureDir(s.fs, filepath.Join(s.dir, checkpointsDir)) } -func (s *fsStore) Exists(c checkpoint.RemoteConfigSource) (bool, error) { +func (s *fsStore) Exists(source checkpoint.RemoteConfigSource) (bool, error) { + const errfmt = "failed to determine whether checkpoint exists for source %s, UID: %s, ResourceVersion: %s exists, error: %v" + if len(source.UID()) == 0 { + return false, fmt.Errorf(errfmt, source.APIPath(), source.UID(), source.ResourceVersion(), "empty UID is ambiguous") + } + if len(source.ResourceVersion()) == 0 { + return false, fmt.Errorf(errfmt, source.APIPath(), source.UID(), source.ResourceVersion(), "empty ResourceVersion is ambiguous") + } + // we check whether the directory was created for the resource - uid := c.UID() - ok, err := utilfiles.DirExists(s.fs, s.checkpointPath(uid)) + ok, err := utilfiles.DirExists(s.fs, s.checkpointPath(source.UID(), source.ResourceVersion())) if err != nil { - return false, fmt.Errorf("failed to determine whether checkpoint %q exists, error: %v", uid, err) + return false, fmt.Errorf(errfmt, source.APIPath(), source.UID(), source.ResourceVersion(), err) } return ok, nil } -func (s *fsStore) Save(c checkpoint.Payload) error { +func (s *fsStore) Save(payload checkpoint.Payload) error { + // Note: Payload interface guarantees UID() and ResourceVersion() to be non-empty + path := s.checkpointPath(payload.UID(), payload.ResourceVersion()) + // ensure the parent dir (checkpoints/uid) exists, since ReplaceDir requires the parent of the replacee + // to exist, and we checkpoint as checkpoints/uid/resourceVersion/files-from-configmap + if err := utilfiles.EnsureDir(s.fs, filepath.Dir(path)); err != nil { + return err + } // save the checkpoint's files in the appropriate checkpoint dir - 
return utilfiles.ReplaceDir(s.fs, s.checkpointPath(c.UID()), c.Files()) + return utilfiles.ReplaceDir(s.fs, path, payload.Files()) } func (s *fsStore) Load(source checkpoint.RemoteConfigSource) (*kubeletconfig.KubeletConfiguration, error) { - sourceFmt := fmt.Sprintf("%s:%s", source.APIPath(), source.UID()) + sourceFmt := fmt.Sprintf("%s, UID: %s, ResourceVersion: %s", source.APIPath(), source.UID(), source.ResourceVersion()) // check if a checkpoint exists for the source if ok, err := s.Exists(source); err != nil { - return nil, fmt.Errorf("failed to determine if a checkpoint exists for source %s", sourceFmt) + return nil, err } else if !ok { return nil, fmt.Errorf("no checkpoint for source %s", sourceFmt) } // load the kubelet config file - utillog.Infof("loading kubelet configuration checkpoint for source %s", sourceFmt) - loader, err := configfiles.NewFsLoader(s.fs, filepath.Join(s.checkpointPath(source.UID()), source.KubeletFilename())) + utillog.Infof("loading Kubelet configuration checkpoint for source %s", sourceFmt) + loader, err := configfiles.NewFsLoader(s.fs, filepath.Join(s.checkpointPath(source.UID(), source.ResourceVersion()), source.KubeletFilename())) if err != nil { return nil, err } @@ -140,8 +154,8 @@ func (s *fsStore) Reset() (bool, error) { return reset(s) } -func (s *fsStore) checkpointPath(uid string) string { - return filepath.Join(s.dir, checkpointsDir, uid) +func (s *fsStore) checkpointPath(uid, resourceVersion string) string { + return filepath.Join(s.dir, checkpointsDir, uid, resourceVersion) } func (s *fsStore) metaPath(name string) string { @@ -163,6 +177,14 @@ func writeRemoteConfigSource(fs utilfs.Filesystem, path string, source checkpoin if source == nil { return utilfiles.ReplaceFile(fs, path, []byte{}) } + // check that UID and ResourceVersion are non-empty, + // error to save reference if the checkpoint can't be fully resolved + if source.UID() == "" { + return fmt.Errorf("failed to write RemoteConfigSource, empty UID is ambiguous") + } + if source.ResourceVersion() == "" { + return fmt.Errorf("failed to write RemoteConfigSource, empty ResourceVersion is ambiguous") + } // encode the source and save it to the file data, err := source.Encode() if err != nil { diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore_test.go b/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore_test.go index cd73f7d35c6..27414a10d0b 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore_test.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/fsstore_test.go @@ -83,8 +83,8 @@ func TestFsStoreInitialize(t *testing.T) { } // check that checkpoints dir exists - if _, err := store.fs.Stat(store.checkpointPath("")); err != nil { - t.Fatalf("expect %q to exist, but stat failed with error: %v", store.checkpointPath(""), err) + if _, err := store.fs.Stat(filepath.Join(store.dir, checkpointsDir)); err != nil { + t.Fatalf("expect %q to exist, but stat failed with error: %v", filepath.Join(store.dir, checkpointsDir), err) } // check that assignedFile exists @@ -105,21 +105,29 @@ func TestFsStoreExists(t *testing.T) { } // checkpoint a payload - const uid = "uid" - p, err := checkpoint.NewConfigMapPayload(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: uid}}) + const ( + uid = "uid" + resourceVersion = "1" + ) + p, err := checkpoint.NewConfigMapPayload(&apiv1.ConfigMap{ObjectMeta: metav1.ObjectMeta{UID: uid, ResourceVersion: resourceVersion}}) if err != nil { - t.Fatalf("could not construct checkpoint, error: %v", err) + t.Fatalf("could not construct Payload, error: 
%v", err) + } + if err := store.Save(p); err != nil { + t.Fatalf("unexpected error: %v", err) } - store.Save(p) cases := []struct { - desc string - uid types.UID - expect bool - err string + desc string + uid types.UID + resourceVersion string + expect bool + err string }{ - {"exists", uid, true, ""}, - {"does not exist", "bogus-uid", false, ""}, + {"exists", uid, resourceVersion, true, ""}, + {"does not exist", "bogus-uid", "bogus-resourceVersion", false, ""}, + {"ambiguous UID", "", "bogus-resourceVersion", false, "empty UID is ambiguous"}, + {"ambiguous ResourceVersion", "bogus-uid", "", false, "empty ResourceVersion is ambiguous"}, } for _, c := range cases { @@ -129,10 +137,11 @@ func TestFsStoreExists(t *testing.T) { Name: "name", Namespace: "namespace", UID: c.uid, + ResourceVersion: c.resourceVersion, KubeletConfigKey: "kubelet", }}) if err != nil { - t.Fatalf("error constructing remote config source: %v", err) + t.Fatalf("unexpected error: %v", err) } ok, err := store.Exists(source) utiltest.ExpectError(t, err, c.err) @@ -160,38 +169,44 @@ func TestFsStoreSave(t *testing.T) { return s }() + const ( + uid = "uid" + resourceVersion = "1" + ) + cases := []struct { - desc string - files map[string]string - err string + desc string + uid types.UID + resourceVersion string + files map[string]string + err string }{ - {"valid payload", map[string]string{"foo": "foocontent", "bar": "barcontent"}, ""}, - {"empty key name", map[string]string{"": "foocontent"}, "must not be empty"}, - {"key name is not a base file name (foo/bar)", map[string]string{"foo/bar": "foocontent"}, "only base names are allowed"}, - {"key name is not a base file name (/foo)", map[string]string{"/bar": "foocontent"}, "only base names are allowed"}, - {"used .", map[string]string{".": "foocontent"}, "may not be '.' or '..'"}, - {"used ..", map[string]string{"..": "foocontent"}, "may not be '.' or '..'"}, - {"length violation", map[string]string{nameTooLong: "foocontent"}, "must be less than 255 characters"}, + {"valid payload", uid, resourceVersion, map[string]string{"foo": "foocontent", "bar": "barcontent"}, ""}, + {"empty key name", uid, resourceVersion, map[string]string{"": "foocontent"}, "must not be empty"}, + {"key name is not a base file name (foo/bar)", uid, resourceVersion, map[string]string{"foo/bar": "foocontent"}, "only base names are allowed"}, + {"key name is not a base file name (/foo)", uid, resourceVersion, map[string]string{"/bar": "foocontent"}, "only base names are allowed"}, + {"used .", uid, resourceVersion, map[string]string{".": "foocontent"}, "may not be '.' or '..'"}, + {"used ..", uid, resourceVersion, map[string]string{"..": "foocontent"}, "may not be '.' 
or '..'"}, + {"length violation", uid, resourceVersion, map[string]string{nameTooLong: "foocontent"}, "must be less than 255 characters"}, } for _, c := range cases { t.Run(c.desc, func(t *testing.T) { // construct the payload p, err := checkpoint.NewConfigMapPayload(&apiv1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{UID: "uid"}, + ObjectMeta: metav1.ObjectMeta{UID: c.uid, ResourceVersion: c.resourceVersion}, Data: c.files, }) - if err != nil { - t.Fatalf("error constructing payload: %v", err) + // if no error, save the payload, otherwise skip straight to error handler + if err == nil { + err = store.Save(p) } - // save the payload - err = store.Save(p) utiltest.ExpectError(t, err, c.err) if err != nil { return } // read the saved checkpoint - m, err := mapFromCheckpoint(store, p.UID()) + m, err := mapFromCheckpoint(store, p.UID(), p.ResourceVersion()) if err != nil { t.Fatalf("error loading checkpoint to map: %v", err) } @@ -220,11 +235,12 @@ func TestFsStoreLoad(t *testing.T) { } // construct a payload that contains the kubeletconfig const ( - uid = "uid" - kubeletKey = "kubelet" + uid = "uid" + resourceVersion = "1" + kubeletKey = "kubelet" ) p, err := checkpoint.NewConfigMapPayload(&apiv1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{UID: types.UID(uid)}, + ObjectMeta: metav1.ObjectMeta{UID: types.UID(uid), ResourceVersion: resourceVersion}, Data: map[string]string{ kubeletKey: string(data), }, @@ -239,12 +255,15 @@ func TestFsStoreLoad(t *testing.T) { } cases := []struct { - desc string - uid types.UID - err string + desc string + uid types.UID + resourceVersion string + err string }{ - {"checkpoint exists", uid, ""}, - {"checkpoint does not exist", "bogus-uid", "no checkpoint for source"}, + {"checkpoint exists", uid, resourceVersion, ""}, + {"checkpoint does not exist", "bogus-uid", "bogus-resourceVersion", "no checkpoint for source"}, + {"ambiguous UID", "", "bogus-resourceVersion", "empty UID is ambiguous"}, + {"ambiguous ResourceVersion", "bogus-uid", "", "empty ResourceVersion is ambiguous"}, } for _, c := range cases { t.Run(c.desc, func(t *testing.T) { @@ -253,10 +272,11 @@ func TestFsStoreLoad(t *testing.T) { Name: "name", Namespace: "namespace", UID: c.uid, + ResourceVersion: c.resourceVersion, KubeletConfigKey: kubeletKey, }}) if err != nil { - t.Fatalf("error constructing remote config source: %v", err) + t.Fatalf("unexpected error: %v", err) } loaded, err := store.Load(source) utiltest.ExpectError(t, err, c.err) @@ -389,35 +409,80 @@ func TestFsStoreSetAssigned(t *testing.T) { t.Fatalf("error constructing store: %v", err) } - const uid = "uid" - expect := fmt.Sprintf(`apiVersion: kubelet.config.k8s.io/v1beta1 + cases := []struct { + desc string + source *apiv1.NodeConfigSource + expect string + err string + }{ + { + desc: "nil source", + expect: "", // empty file + }, + { + desc: "non-nil source", + source: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Name: "name", + Namespace: "namespace", + UID: "uid", + ResourceVersion: "1", + KubeletConfigKey: "kubelet", + }}, + expect: `apiVersion: kubelet.config.k8s.io/v1beta1 kind: SerializedNodeConfigSource source: configMap: kubeletConfigKey: kubelet name: name namespace: namespace - uid: %s -`, uid) - source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - Name: "name", - Namespace: "namespace", - UID: types.UID(uid), - KubeletConfigKey: "kubelet", - }}) - if err != nil { - t.Fatalf("unexpected error: %v", err) + resourceVersion: "1" + uid: 
uid +`, + }, + { + desc: "missing UID", + source: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Name: "name", + Namespace: "namespace", + ResourceVersion: "1", + KubeletConfigKey: "kubelet", + }}, + err: "failed to write RemoteConfigSource, empty UID is ambiguous", + }, + { + desc: "missing ResourceVersion", + source: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Name: "name", + Namespace: "namespace", + UID: "uid", + KubeletConfigKey: "kubelet", + }}, + err: "failed to write RemoteConfigSource, empty ResourceVersion is ambiguous", + }, } - // save the assigned source - if err := store.SetAssigned(source); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // check that the source saved as we would expect - data := readTestSourceFile(t, store, assignedFile) - if expect != string(data) { - t.Errorf("expect assigned source file to contain %q, but got %q", expect, string(data)) + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + var source checkpoint.RemoteConfigSource + if c.source != nil { + s, _, err := checkpoint.NewRemoteConfigSource(c.source) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + source = s + } + // save the assigned source + err = store.SetAssigned(source) + utiltest.ExpectError(t, err, c.err) + if err != nil { + return + } + // check that the source saved as we would expect + data := readTestSourceFile(t, store, assignedFile) + if c.expect != string(data) { + t.Errorf("expect assigned source file to contain %q, but got %q", c.expect, string(data)) + } + }) } } @@ -427,35 +492,80 @@ func TestFsStoreSetLastKnownGood(t *testing.T) { t.Fatalf("error constructing store: %v", err) } - const uid = "uid" - expect := fmt.Sprintf(`apiVersion: kubelet.config.k8s.io/v1beta1 + cases := []struct { + desc string + source *apiv1.NodeConfigSource + expect string + err string + }{ + { + desc: "nil source", + expect: "", // empty file + }, + { + desc: "non-nil source", + source: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Name: "name", + Namespace: "namespace", + UID: "uid", + ResourceVersion: "1", + KubeletConfigKey: "kubelet", + }}, + expect: `apiVersion: kubelet.config.k8s.io/v1beta1 kind: SerializedNodeConfigSource source: configMap: kubeletConfigKey: kubelet name: name namespace: namespace - uid: %s -`, uid) - source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - Name: "name", - Namespace: "namespace", - UID: types.UID(uid), - KubeletConfigKey: "kubelet", - }}) - if err != nil { - t.Fatalf("unexpected error: %v", err) + resourceVersion: "1" + uid: uid +`, + }, + { + desc: "missing UID", + source: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Name: "name", + Namespace: "namespace", + ResourceVersion: "1", + KubeletConfigKey: "kubelet", + }}, + err: "failed to write RemoteConfigSource, empty UID is ambiguous", + }, + { + desc: "missing ResourceVersion", + source: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Name: "name", + Namespace: "namespace", + UID: "uid", + KubeletConfigKey: "kubelet", + }}, + err: "failed to write RemoteConfigSource, empty ResourceVersion is ambiguous", + }, } - // save the last known good source - if err := store.SetLastKnownGood(source); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // check that the source saved as we would expect - data := readTestSourceFile(t, store, lastKnownGoodFile) - if expect != string(data) { 
- t.Errorf("expect last-known-good source file to contain %q, but got %q", expect, string(data)) + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + var source checkpoint.RemoteConfigSource + if c.source != nil { + s, _, err := checkpoint.NewRemoteConfigSource(c.source) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + source = s + } + // save the assigned source + err = store.SetLastKnownGood(source) + utiltest.ExpectError(t, err, c.err) + if err != nil { + return + } + // check that the source saved as we would expect + data := readTestSourceFile(t, store, lastKnownGoodFile) + if c.expect != string(data) { + t.Errorf("expect assigned source file to contain %q, but got %q", c.expect, string(data)) + } + }) } } @@ -536,107 +646,8 @@ func TestFsStoreReset(t *testing.T) { } } -func TestFsStoreReadRemoteConfigSource(t *testing.T) { - store, err := newInitializedFakeFsStore() - if err != nil { - t.Fatalf("error constructing store: %v", err) - } - - source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ - ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - Name: "name", - Namespace: "namespace", - UID: "uid", - KubeletConfigKey: "kubelet", - }}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - cases := []struct { - desc string - expect checkpoint.RemoteConfigSource - err string - }{ - {"default source", nil, ""}, - {"non-default source", source, ""}, - } - - const name = "some-source-file" - for _, c := range cases { - t.Run(c.desc, func(t *testing.T) { - saveTestSourceFile(t, store, name, c.expect) - source, err := readRemoteConfigSource(store.fs, store.metaPath(name)) - utiltest.ExpectError(t, err, c.err) - if err != nil { - return - } - if !checkpoint.EqualRemoteConfigSources(c.expect, source) { - t.Errorf("case %q, expect %q but got %q", spew.Sdump(c.expect), spew.Sdump(c.expect), spew.Sdump(source)) - } - }) - } -} - -func TestFsStoreWriteRemoteConfigSource(t *testing.T) { - store, err := newInitializedFakeFsStore() - if err != nil { - t.Fatalf("error constructing store: %v", err) - } - - source, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - Name: "name", - Namespace: "namespace", - UID: "uid", - KubeletConfigKey: "kubelet", - }}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - cases := []struct { - desc string - source checkpoint.RemoteConfigSource - }{ - {"nil source", nil}, - {"non-nil source", source}, - } - - const name = "some-source-file" - for _, c := range cases { - t.Run(c.desc, func(t *testing.T) { - // set the source file - err := writeRemoteConfigSource(store.fs, store.metaPath(name), c.source) - if err != nil { - t.Fatalf("unable to set source file, error: %v", err) - } - // read back the file - data := readTestSourceFile(t, store, name) - str := string(data) - - if c.source != nil { - // expect the contents to match the encoding of the source - data, err := c.source.Encode() - expect := string(data) - if err != nil { - t.Fatalf("couldn't encode source, error: %v", err) - } - if expect != str { - t.Errorf("case %q, expect %q but got %q", spew.Sdump(c.source), expect, str) - } - } else { - // expect empty file - expect := "" - if expect != str { - t.Errorf("case %q, expect %q but got %q", spew.Sdump(c.source), expect, str) - } - } - }) - } -} - -func mapFromCheckpoint(store *fsStore, uid string) (map[string]string, error) { - files, err := store.fs.ReadDir(store.checkpointPath(uid)) +func mapFromCheckpoint(store *fsStore, uid, 
resourceVersion string) (map[string]string, error) { + files, err := store.fs.ReadDir(store.checkpointPath(uid, resourceVersion)) if err != nil { return nil, err } @@ -647,7 +658,7 @@ func mapFromCheckpoint(store *fsStore, uid string) (map[string]string, error) { return nil, fmt.Errorf("expect only regular files in checkpoint dir %q", uid) } // read the file contents and build the map - data, err := store.fs.ReadFile(filepath.Join(store.checkpointPath(uid), f.Name())) + data, err := store.fs.ReadFile(filepath.Join(store.checkpointPath(uid, resourceVersion), f.Name())) if err != nil { return nil, err } diff --git a/pkg/kubelet/kubeletconfig/checkpoint/store/store.go b/pkg/kubelet/kubeletconfig/checkpoint/store/store.go index cd4602876df..0ceb34a6007 100644 --- a/pkg/kubelet/kubeletconfig/checkpoint/store/store.go +++ b/pkg/kubelet/kubeletconfig/checkpoint/store/store.go @@ -30,6 +30,7 @@ type Store interface { Initialize() error // Exists returns true if the object referenced by `source` has been checkpointed. + // The source must be unambiguous - e.g. if referencing an API object it must specify both uid and resourceVersion. Exists(source checkpoint.RemoteConfigSource) (bool, error) // Save Kubelet config payloads to the storage layer. It must be possible to unmarshal the payload to a KubeletConfiguration. // The following payload types are supported: diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go index 8662e568d53..cb92fb7e654 100644 --- a/pkg/kubelet/kubeletconfig/configsync.go +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -37,10 +37,10 @@ import ( const ( // KubeletConfigChangedEventReason identifies an event as a change of Kubelet configuration KubeletConfigChangedEventReason = "KubeletConfigChanged" - // EventMessageFmt is the message format for Kubelet config change events - EventMessageFmt = "Kubelet will restart to use: %s" - // LocalConfigMessage is the text to apply to EventMessageFmt when the Kubelet has been configured to use its local config (init or defaults) - LocalConfigMessage = "local config" + // LocalEventMessage is sent when the Kubelet restarts to use local config + LocalEventMessage = "Kubelet restarting to use local config" + // RemoteEventMessageFmt is sent when the Kubelet restarts to use a remote config + RemoteEventMessageFmt = "Kubelet restarting to use %s, UID: %s, ResourceVersion: %s, KubeletConfigKey: %s" ) // pokeConfiSourceWorker tells the worker thread that syncs config sources that work needs to be done @@ -69,111 +69,116 @@ func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v } }() - node, err := latestNode(cc.informer.GetStore(), nodeName) + // get the latest Node.Spec.ConfigSource from the informer + source, err := latestNodeConfigSource(cc.nodeInformer.GetStore(), nodeName) if err != nil { cc.configStatus.SetErrorOverride(fmt.Sprintf(status.SyncErrorFmt, status.InternalError)) syncerr = fmt.Errorf("%s, error: %v", status.InternalError, err) return } - // check the Node and download any new config - if updated, cur, reason, err := cc.doSyncConfigSource(client, node.Spec.ConfigSource); err != nil { - cc.configStatus.SetErrorOverride(fmt.Sprintf(status.SyncErrorFmt, reason)) + // a nil source simply means we reset to local defaults + if source == nil { + utillog.Infof("Node.Spec.ConfigSource is empty, will reset assigned and last-known-good to defaults") + if updated, reason, err := cc.resetConfig(); err != nil { + reason = fmt.Sprintf(status.SyncErrorFmt, reason) + 
cc.configStatus.SetErrorOverride(reason) + syncerr = fmt.Errorf("%s, error: %v", reason, err) + return + } else if updated { + restartForNewConfig(eventClient, nodeName, nil) + } + return + } + + // a non-nil source means we should attempt to download the config, and checkpoint it if necessary + utillog.Infof("Node.Spec.ConfigSource is non-empty, will checkpoint source and update config if necessary") + + // TODO(mtaufen): It would be nice if we could check the payload's metadata before (re)downloading the whole payload + // we at least try pulling the latest configmap out of the local informer store. + + // construct the interface that can dynamically dispatch the correct Download, etc. methods for the given source type + remote, reason, err := checkpoint.NewRemoteConfigSource(source) + if err != nil { + reason = fmt.Sprintf(status.SyncErrorFmt, reason) + cc.configStatus.SetErrorOverride(reason) + syncerr = fmt.Errorf("%s, error: %v", reason, err) + return + } + + // "download" source, either from informer's in-memory store or directly from the API server, if the informer doesn't have a copy + payload, reason, err := cc.downloadConfigPayload(client, remote) + if err != nil { + reason = fmt.Sprintf(status.SyncErrorFmt, reason) + cc.configStatus.SetErrorOverride(reason) + syncerr = fmt.Errorf("%s, error: %v", reason, err) + return + } + + // save a checkpoint for the payload, if one does not already exist + if reason, err := cc.saveConfigCheckpoint(remote, payload); err != nil { + reason = fmt.Sprintf(status.SyncErrorFmt, reason) + cc.configStatus.SetErrorOverride(reason) + syncerr = fmt.Errorf("%s, error: %v", reason, err) + return + } + + // update the local, persistent record of assigned config + if updated, reason, err := cc.setAssignedConfig(remote); err != nil { + reason = fmt.Sprintf(status.SyncErrorFmt, reason) + cc.configStatus.SetErrorOverride(reason) syncerr = fmt.Errorf("%s, error: %v", reason, err) return } else if updated { - path := LocalConfigMessage - if cur != nil { - path = cur.APIPath() - } - // we directly log and send the event, instead of using the event recorder, - // because the event recorder won't flush its queue before we exit (we'd lose the event) - event := eventf(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, EventMessageFmt, path) - glog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) - if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(event); err != nil { - utillog.Errorf("failed to send event, error: %v", err) - } - os.Exit(0) + restartForNewConfig(eventClient, nodeName, remote) } // If we get here: - // - there is no need to restart to update the current config + // - there is no need to restart to use new config // - there was no error trying to sync configuration // - if, previously, there was an error trying to sync configuration, we need to clear that error from the status cc.configStatus.SetErrorOverride("") } -// doSyncConfigSource checkpoints and sets the store's current config to the new config or resets config, -// depending on the `source`, and returns whether the current config in the checkpoint store was updated as a result -func (cc *Controller) doSyncConfigSource(client clientset.Interface, source *apiv1.NodeConfigSource) (bool, checkpoint.RemoteConfigSource, string, error) { - if source == nil { - utillog.Infof("Node.Spec.ConfigSource is empty, will reset current and last-known-good to defaults") - updated, reason, err := cc.resetConfig() - if 
err != nil { - return false, nil, reason, err - } - return updated, nil, "", nil +// Note: source has up-to-date uid and resourceVersion after calling downloadConfigPayload. +func (cc *Controller) downloadConfigPayload(client clientset.Interface, source checkpoint.RemoteConfigSource) (checkpoint.Payload, string, error) { + var store cache.Store + if cc.remoteConfigSourceInformer != nil { + store = cc.remoteConfigSourceInformer.GetStore() } - - // if the NodeConfigSource is non-nil, download the config - utillog.Infof("Node.Spec.ConfigSource is non-empty, will checkpoint source and update config if necessary") - remote, reason, err := checkpoint.NewRemoteConfigSource(source) - if err != nil { - return false, nil, reason, err - } - reason, err = cc.checkpointConfigSource(client, remote) - if err != nil { - return false, nil, reason, err - } - updated, reason, err := cc.setAssignedConfig(remote) - if err != nil { - return false, nil, reason, err - } - return updated, remote, "", nil + return source.Download(client, store) } -// checkpointConfigSource downloads and checkpoints the object referred to by `source` if the checkpoint does not already exist, -// if a failure occurs, returns a sanitized failure reason and an error -func (cc *Controller) checkpointConfigSource(client clientset.Interface, source checkpoint.RemoteConfigSource) (string, error) { - // if the checkpoint already exists, skip downloading - if ok, err := cc.checkpointStore.Exists(source); err != nil { +func (cc *Controller) saveConfigCheckpoint(source checkpoint.RemoteConfigSource, payload checkpoint.Payload) (string, error) { + ok, err := cc.checkpointStore.Exists(source) + if err != nil { return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err) - } else if ok { - // TODO(mtaufen): update this to include ResourceVersion in #63221 - utillog.Infof("checkpoint already exists for object %s with UID %s, skipping download", source.APIPath(), source.UID()) + } + if ok { + utillog.Infof("checkpoint already exists for %s, UID: %s, ResourceVersion: %s", source.APIPath(), payload.UID(), payload.ResourceVersion()) return "", nil } - - // download - payload, reason, err := source.Download(client) - if err != nil { - return reason, fmt.Errorf("%s, error: %v", reason, err) - } - - // save - err = cc.checkpointStore.Save(payload) - if err != nil { + if err := cc.checkpointStore.Save(payload); err != nil { return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err) } - return "", nil } // setAssignedConfig updates the assigned checkpoint config in the store. -// Returns whether the current config changed as a result, or a sanitized failure reason and an error. +// Returns whether the assigned config changed as a result, or a sanitized failure reason and an error. func (cc *Controller) setAssignedConfig(source checkpoint.RemoteConfigSource) (bool, string, error) { - current, err := cc.checkpointStore.Assigned() + assigned, err := cc.checkpointStore.Assigned() if err != nil { return false, status.InternalError, err } if err := cc.checkpointStore.SetAssigned(source); err != nil { return false, status.InternalError, err } - return !checkpoint.EqualRemoteConfigSources(current, source), "", nil + return !checkpoint.EqualRemoteConfigSources(assigned, source), "", nil } -// resetConfig resets the current and last-known-good checkpoints in the checkpoint store to their default values and -// returns whether the current checkpoint changed as a result, or a sanitized failure reason and an error. 
+// resetConfig resets the assigned and last-known-good checkpoints in the checkpoint store to their default values and +// returns whether the assigned checkpoint changed as a result, or a sanitized failure reason and an error. func (cc *Controller) resetConfig() (bool, string, error) { updated, err := cc.checkpointStore.Reset() if err != nil { @@ -182,8 +187,26 @@ func (cc *Controller) resetConfig() (bool, string, error) { return updated, "", nil } -// latestNode returns the most recent Node with `nodeName` from `store` -func latestNode(store cache.Store, nodeName string) (*apiv1.Node, error) { +// restartForNewConfig presumes the Kubelet is managed by a babysitter, e.g. systemd +// It will send an event before exiting. +func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, source checkpoint.RemoteConfigSource) { + message := LocalEventMessage + if source != nil { + message = fmt.Sprintf(RemoteEventMessageFmt, source.APIPath(), source.UID(), source.ResourceVersion(), source.KubeletFilename()) + } + // we directly log and send the event, instead of using the event recorder, + // because the event recorder won't flush its queue before we exit (we'd lose the event) + event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message) + glog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) + if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(event); err != nil { + utillog.Errorf("failed to send event, error: %v", err) + } + utillog.Infof(message) + os.Exit(0) +} + +// latestNodeConfigSource returns a copy of the most recent NodeConfigSource from the Node with `nodeName` in `store` +func latestNodeConfigSource(store cache.Store, nodeName string) (*apiv1.NodeConfigSource, error) { obj, ok, err := store.GetByKey(nodeName) if err != nil { err := fmt.Errorf("failed to retrieve Node %q from informer's store, error: %v", nodeName, err) @@ -200,13 +223,11 @@ func latestNode(store cache.Store, nodeName string) (*apiv1.Node, error) { utillog.Errorf(err.Error()) return nil, err } - return node, nil -} - -// eventf constructs and returns an event containing a formatted message -// similar to k8s.io/client-go/tools/record/event.go -func eventf(nodeName, eventType, reason, messageFmt string, args ...interface{}) *apiv1.Event { - return makeEvent(nodeName, eventType, reason, fmt.Sprintf(messageFmt, args...)) + // Copy the source, so anyone who modifies it after here doesn't mess up the informer's store! + // This was previously the cause of a bug that made the Kubelet frequently resync config; Download updated + // the UID and ResourceVersion on the NodeConfigSource, but the pointer was still drilling all the way + // into the informer's copy! 
+ return node.Spec.ConfigSource.DeepCopy(), nil } // makeEvent constructs an event diff --git a/pkg/kubelet/kubeletconfig/controller.go b/pkg/kubelet/kubeletconfig/controller.go index f665745ab26..098a3ed6ceb 100644 --- a/pkg/kubelet/kubeletconfig/controller.go +++ b/pkg/kubelet/kubeletconfig/controller.go @@ -21,6 +21,7 @@ import ( "path/filepath" "time" + apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" @@ -52,8 +53,11 @@ type Controller struct { // configStatus manages the status we report on the Node object configStatus status.NodeConfigStatus - // informer is the informer that watches the Node object - informer cache.SharedInformer + // nodeInformer is the informer that watches the Node object + nodeInformer cache.SharedInformer + + // remoteConfigSourceInformer is the informer that watches the assigned config source + remoteConfigSourceInformer cache.SharedInformer // checkpointStore persists config source checkpoints to a storage layer checkpointStore store.Store @@ -139,51 +143,80 @@ func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) { return nil, err } - // update the active source to the non-nil last-known-good source + // set status to indicate the active source is the non-nil last-known-good source cc.configStatus.SetActive(lastKnownGoodSource.NodeConfigSource()) return lastKnownGoodConfig, nil } -// StartSync launches the controller's sync loops if `client` is non-nil and `nodeName` is non-empty. -// It will always start the Node condition reporting loop, and will also start the dynamic conifg sync loops -// if dynamic config is enabled on the controller. If `nodeName` is empty but `client` is non-nil, an error is logged. -func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.EventsGetter, nodeName string) { +// StartSync tells the controller to start the goroutines that sync status/config to/from the API server. +// The clients must be non-nil, and the nodeName must be non-empty. +func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.EventsGetter, nodeName string) error { + const errFmt = "cannot start Kubelet config sync: %s" if client == nil { - utillog.Infof("nil client, will not start sync loops") - return - } else if len(nodeName) == 0 { - utillog.Errorf("cannot start sync loops with empty nodeName") - return + return fmt.Errorf(errFmt, "nil client") + } + if eventClient == nil { + return fmt.Errorf(errFmt, "nil event client") + } + if nodeName == "" { + return fmt.Errorf(errFmt, "empty nodeName") } - // start the status sync loop - go utilpanic.HandlePanic(func() { - utillog.Infof("starting status sync loop") - wait.JitterUntil(func() { - cc.configStatus.Sync(client, nodeName) - }, 10*time.Second, 0.2, true, wait.NeverStop) - })() - - cc.informer = newSharedNodeInformer(client, nodeName, - cc.onAddNodeEvent, cc.onUpdateNodeEvent, cc.onDeleteNodeEvent) - // start the informer loop // Rather than use utilruntime.HandleCrash, which doesn't actually crash in the Kubelet, // we use HandlePanic to manually call the panic handlers and then crash. // We have a better chance of recovering normal operation if we just restart the Kubelet in the event // of a Go runtime error. 
- go utilpanic.HandlePanic(func() { - utillog.Infof("starting Node informer sync loop") - cc.informer.Run(wait.NeverStop) - })() + // NOTE(mtaufen): utilpanic.HandlePanic returns a function and you have to call it for your thing to run! + // This was EVIL to debug (difficult to see missing `()`). + // The code now uses `go name()` instead of `go utilpanic.HandlePanic(func(){...})()` to avoid confusion. - // start the config source sync loop - go utilpanic.HandlePanic(func() { - utillog.Infof("starting config source sync loop") + // status sync worker + statusSyncLoopFunc := utilpanic.HandlePanic(func() { + utillog.Infof("starting status sync loop") + wait.JitterUntil(func() { + cc.configStatus.Sync(client, nodeName) + }, 10*time.Second, 0.2, true, wait.NeverStop) + }) + // remote config source informer, if we have a remote source to watch + assignedSource, err := cc.checkpointStore.Assigned() + if err != nil { + return fmt.Errorf(errFmt, err) + } else if assignedSource == nil { + utillog.Infof("local source is assigned, will not start remote config source informer") + } else { + cc.remoteConfigSourceInformer = assignedSource.Informer(client, cache.ResourceEventHandlerFuncs{ + AddFunc: cc.onAddRemoteConfigSourceEvent, + UpdateFunc: cc.onUpdateRemoteConfigSourceEvent, + DeleteFunc: cc.onDeleteRemoteConfigSourceEvent, + }, + ) + } + remoteConfigSourceInformerFunc := utilpanic.HandlePanic(func() { + if cc.remoteConfigSourceInformer != nil { + utillog.Infof("starting remote config source informer") + cc.remoteConfigSourceInformer.Run(wait.NeverStop) + } + }) + // node informer + cc.nodeInformer = newSharedNodeInformer(client, nodeName, + cc.onAddNodeEvent, cc.onUpdateNodeEvent, cc.onDeleteNodeEvent) + nodeInformerFunc := utilpanic.HandlePanic(func() { + utillog.Infof("starting Node informer") + cc.nodeInformer.Run(wait.NeverStop) + }) + // config sync worker + configSyncLoopFunc := utilpanic.HandlePanic(func() { + utillog.Infof("starting Kubelet config sync loop") wait.JitterUntil(func() { cc.syncConfigSource(client, eventClient, nodeName) }, 10*time.Second, 0.2, true, wait.NeverStop) - })() + }) + go statusSyncLoopFunc() + go remoteConfigSourceInformerFunc() + go nodeInformerFunc() + go configSyncLoopFunc() + return nil } // loadConfig loads Kubelet config from a checkpoint @@ -213,7 +246,6 @@ func (cc *Controller) checkTrial(duration time.Duration) { if trial, err := cc.inTrial(duration); err != nil { utillog.Errorf("failed to check trial period for assigned config, error: %v", err) } else if !trial { - utillog.Infof("assigned config passed trial period, will set as last-known-good") if err := cc.graduateAssignedToLastKnownGood(); err != nil { utillog.Errorf("failed to set last-known-good to assigned config, error: %v", err) } @@ -236,17 +268,28 @@ func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) { // graduateAssignedToLastKnownGood sets the last-known-good in the checkpointStore // to the same value as the assigned config maintained by the checkpointStore func (cc *Controller) graduateAssignedToLastKnownGood() error { - // get the assigned config + // get assigned assigned, err := cc.checkpointStore.Assigned() if err != nil { return err } - // update the last-known-good config + // get last-known-good + lastKnownGood, err := cc.checkpointStore.LastKnownGood() + if err != nil { + return err + } + // if the sources are equal, no need to change + if assigned == lastKnownGood || + assigned != nil && lastKnownGood != nil && apiequality.Semantic.DeepEqual(assigned, 
lastKnownGood) { + return nil + } + // update last-known-good err = cc.checkpointStore.SetLastKnownGood(assigned) if err != nil { return err } // update the status to reflect the new last-known-good config cc.configStatus.SetLastKnownGood(assigned.NodeConfigSource()) + utillog.Infof("updated last-known-good config to %s, UID: %s, ResourceVersion: %s", assigned.APIPath(), assigned.UID(), assigned.ResourceVersion()) return nil } diff --git a/pkg/kubelet/kubeletconfig/status/status.go b/pkg/kubelet/kubeletconfig/status/status.go index f683ea212b7..ae8cdec069c 100644 --- a/pkg/kubelet/kubeletconfig/status/status.go +++ b/pkg/kubelet/kubeletconfig/status/status.go @@ -80,9 +80,12 @@ type nodeConfigStatus struct { // NewNodeConfigStatus returns a new NodeConfigStatus interface func NewNodeConfigStatus() NodeConfigStatus { + // channels must have capacity at least 1, since we signal with non-blocking writes + syncCh := make(chan bool, 1) + // prime new status managers to sync with the API server on the first call to Sync + syncCh <- true return &nodeConfigStatus{ - // channels must have capacity at least 1, since we signal with non-blocking writes - syncCh: make(chan bool, 1), + syncCh: syncCh, } } @@ -142,6 +145,8 @@ func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) { return } + utillog.Infof("updating Node.Status.Config") + // grab the lock s.mux.Lock() defer s.mux.Unlock() diff --git a/pkg/kubelet/kubeletconfig/watch.go b/pkg/kubelet/kubeletconfig/watch.go index 09b22233e07..879fb747c7c 100644 --- a/pkg/kubelet/kubeletconfig/watch.go +++ b/pkg/kubelet/kubeletconfig/watch.go @@ -86,6 +86,7 @@ func (cc *Controller) onUpdateNodeEvent(oldObj interface{}, newObj interface{}) } if oldObj == nil { // Node was just added, need to sync + utillog.Infof("initial Node watch event") cc.pokeConfigSourceWorker() return } @@ -95,31 +96,59 @@ func (cc *Controller) onUpdateNodeEvent(oldObj interface{}, newObj interface{}) return } if !apiequality.Semantic.DeepEqual(oldNode.Spec.ConfigSource, newNode.Spec.ConfigSource) { + utillog.Infof("Node.Spec.ConfigSource was updated") cc.pokeConfigSourceWorker() } } -// onDeleteNodeEvent logs a message if the Node was deleted and may log errors -// if an unexpected DeletedFinalStateUnknown was received. +// onDeleteNodeEvent logs a message if the Node was deleted // We allow the sync-loop to continue, because it is possible that the Kubelet detected // a Node with unexpected externalID and is attempting to delete and re-create the Node // (see pkg/kubelet/kubelet_node_status.go), or that someone accidentally deleted the Node // (the Kubelet will re-create it). func (cc *Controller) onDeleteNodeEvent(deletedObj interface{}) { - node, ok := deletedObj.(*apiv1.Node) + // For this case, we just log the event. + // We don't want to poke the worker, because a temporary deletion isn't worth reporting an error for. + // If the Node is deleted because the VM is being deleted, then the Kubelet has nothing to do. 
+ utillog.Infof("Node was deleted") +} + +// onAddRemoteConfigSourceEvent calls onUpdateConfigMapEvent with the new object and a nil old object +func (cc *Controller) onAddRemoteConfigSourceEvent(newObj interface{}) { + cc.onUpdateRemoteConfigSourceEvent(nil, newObj) +} + +// onUpdateRemoteConfigSourceEvent checks whether the configSource changed between oldObj and newObj, +// and pokes the sync worker if there was a change +func (cc *Controller) onUpdateRemoteConfigSourceEvent(oldObj interface{}, newObj interface{}) { + // since ConfigMap is currently the only source type, we handle that here + newConfigMap, ok := newObj.(*apiv1.ConfigMap) if !ok { - tombstone, ok := deletedObj.(cache.DeletedFinalStateUnknown) - if !ok { - utillog.Errorf("couldn't cast deleted object to DeletedFinalStateUnknown, object: %+v", deletedObj) - return - } - node, ok = tombstone.Obj.(*apiv1.Node) - if !ok { - utillog.Errorf("received DeletedFinalStateUnknown object but it did not contain a Node, object: %+v", deletedObj) - return - } - utillog.Infof("Node was deleted (DeletedFinalStateUnknown), sync-loop will continue because the Kubelet might recreate the Node, node: %+v", node) + utillog.Errorf("failed to cast new object to ConfigMap, couldn't handle event") return } - utillog.Infof("Node was deleted, sync-loop will continue because the Kubelet might recreate the Node, node: %+v", node) + if oldObj == nil { + // ConfigMap was just added, need to sync + utillog.Infof("initial ConfigMap watch event") + cc.pokeConfigSourceWorker() + return + } + oldConfigMap, ok := oldObj.(*apiv1.ConfigMap) + if !ok { + utillog.Errorf("failed to cast old object to ConfigMap, couldn't handle event") + return + } + if !apiequality.Semantic.DeepEqual(oldConfigMap, newConfigMap) { + utillog.Infof("assigned ConfigMap was updated") + cc.pokeConfigSourceWorker() + } +} + +// onDeleteRemoteConfigSourceEvent logs a message if the ConfigMap was deleted and pokes the sync worker +func (cc *Controller) onDeleteRemoteConfigSourceEvent(deletedObj interface{}) { + // If the ConfigMap we're watching is deleted, we log the event and poke the sync worker. + // This requires a sync, because if the Node is still configured to use the deleted ConfigMap, + // the Kubelet should report a DownloadError. + utillog.Infof("assigned ConfigMap was deleted") + cc.pokeConfigSourceWorker() } diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index eee41be6e56..3d709b99d10 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -18,7 +18,6 @@ package e2e_node import ( "fmt" - "reflect" "strings" "time" @@ -26,6 +25,7 @@ import ( apiv1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig" @@ -37,18 +37,23 @@ import ( . "github.com/onsi/gomega" ) -type configStateStatus struct { - apiv1.NodeConfigStatus +const itDescription = "status and events should match expectations" - SkipActive bool - SkipAssigned bool - SkipLkg bool +type expectNodeConfigStatus struct { + lastKnownGood *apiv1.NodeConfigSource + err string + // If true, expect Status.Config.Active == Status.Config.LastKnownGood, + // otherwise expect Status.Config.Active == Status.Config.Assigned. 
+ lkgActive bool + // If true, skip checking Status.Config.LastKnownGood == this.lastKnownGood in the status. + skipLkg bool } -type configState struct { +type nodeConfigTestCase struct { desc string configSource *apiv1.NodeConfigSource - expectConfigStatus *configStateStatus + configMap *apiv1.ConfigMap + expectConfigStatus expectNodeConfigStatus expectConfig *kubeletconfig.KubeletConfiguration // whether to expect this substring in an error returned from the API server when updating the config source apierr string @@ -60,63 +65,77 @@ type configState struct { } // This test is marked [Disruptive] because the Kubelet restarts several times during this test. -var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKubeletConfig] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disruptive]", func() { f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test") - var originalKC *kubeletconfig.KubeletConfiguration - var originalConfigMap *apiv1.ConfigMap + var beforeNode *apiv1.Node + var beforeConfigMap *apiv1.ConfigMap + var beforeKC *kubeletconfig.KubeletConfiguration + var localKC *kubeletconfig.KubeletConfiguration // Dummy context to prevent framework's AfterEach from cleaning up before this test's AfterEach can run Context("", func() { BeforeEach(func() { - var err error - if originalConfigMap == nil { - originalKC, err = getCurrentKubeletConfig() - framework.ExpectNoError(err) - originalConfigMap = newKubeletConfigMap("original-values", originalKC) - originalConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(originalConfigMap) - framework.ExpectNoError(err) - } // make sure Dynamic Kubelet Configuration feature is enabled on the Kubelet we are about to test enabled, err := isKubeletConfigEnabled(f) framework.ExpectNoError(err) if !enabled { framework.ExpectNoError(fmt.Errorf("The Dynamic Kubelet Configuration feature is not enabled.\n" + - "Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" + + "Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet and API server to enable this feature.\n" + "For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")) } + // record before state so we can restore it after the test + if beforeNode == nil { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + beforeNode = node + } + if source := beforeNode.Spec.ConfigSource; source != nil { + if source.ConfigMap != nil { + cm, err := f.ClientSet.CoreV1().ConfigMaps(source.ConfigMap.Namespace).Get(source.ConfigMap.Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + beforeConfigMap = cm + } + } + if beforeKC == nil { + kc, err := getCurrentKubeletConfig() + framework.ExpectNoError(err) + beforeKC = kc + } + // reset the node's assigned/active/last-known-good config by setting the source to nil, + // so each test starts from a clean-slate + (&nodeConfigTestCase{ + desc: "reset via nil config source", + configSource: nil, + }).run(f, setConfigSourceFunc, false, 0) + // record local KC so we can check it during tests that roll back to nil last-known-good + if localKC == nil { + kc, err := getCurrentKubeletConfig() + framework.ExpectNoError(err) + localKC = kc + } }) AfterEach(func() { - // Set the config back to the original values before moving on. 
- // We care that the values are the same, not where they come from, so it - // should be fine to reset the values using a remote config, even if they - // were initially set via the locally provisioned configuration. - // This is the same strategy several other e2e node tests use. - - source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: originalConfigMap.UID, - Namespace: originalConfigMap.Namespace, - Name: originalConfigMap.Name, - KubeletConfigKey: "kubelet", - }} - setAndTestKubeletConfigState(f, &configState{desc: "reset to original values", - configSource: source, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Active: source, - Assigned: source, - }, - SkipLkg: true, - }, - expectConfig: originalKC, - }, false) + // clean-slate the Node again (prevents last-known-good from any tests from leaking through) + (&nodeConfigTestCase{ + desc: "reset via nil config source", + configSource: nil, + }).run(f, setConfigSourceFunc, false, 0) + // restore the values from before the test before moving on + restore := &nodeConfigTestCase{ + desc: "restore values from before test", + configSource: beforeNode.Spec.ConfigSource, + configMap: beforeConfigMap, + expectConfig: beforeKC, + } + restore.run(f, setConfigSourceFunc, false, 0) }) - Context("When changing NodeConfigSources", func() { - It("the Kubelet should report the appropriate status and configz", func() { + Context("update Node.Spec.ConfigSource: state transitions:", func() { + It(itDescription, func() { var err error - // we base the "correct" configmap off of the current configuration - correctKC := originalKC.DeepCopy() + // we base the "correct" configmap off of the configuration from before the test + correctKC := beforeKC.DeepCopy() correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-correct", correctKC) correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap) framework.ExpectNoError(err) @@ -131,42 +150,34 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube failParseConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failParseConfigMap) framework.ExpectNoError(err) - // fail to validate, we make a copy and set an invalid KubeAPIQPS on kc before serializing + // fail to validate, we make a copy of correct and set an invalid KubeAPIQPS on kc before serializing invalidKC := correctKC.DeepCopy() - invalidKC.KubeAPIQPS = -1 failValidateConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-fail-validate", invalidKC) failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failValidateConfigMap) framework.ExpectNoError(err) correctSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: correctConfigMap.UID, Namespace: correctConfigMap.Namespace, Name: correctConfigMap.Name, KubeletConfigKey: "kubelet", }} failParseSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: failParseConfigMap.UID, Namespace: failParseConfigMap.Namespace, Name: failParseConfigMap.Name, KubeletConfigKey: "kubelet", }} failValidateSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: failValidateConfigMap.UID, Namespace: failValidateConfigMap.Namespace, Name: failValidateConfigMap.Name, KubeletConfigKey: "kubelet", }} - // Note: since we start with the nil source (resets lkg), and we don't wait longer than the 10-minute internal - // qualification period before 
changing it again, we can assume lkg source will be nil in the status - // for this entire test, which is why we never set SkipLkg=true here. - - states := []configState{ + cases := []nodeConfigTestCase{ { desc: "Node.Spec.ConfigSource is nil", configSource: nil, - expectConfigStatus: &configStateStatus{}, + expectConfigStatus: expectNodeConfigStatus{}, expectConfig: nil, event: true, }, @@ -178,7 +189,6 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube { desc: "Node.Spec.ConfigSource.ConfigMap is missing namespace", configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: "foo", Name: "bar", KubeletConfigKey: "kubelet", }}, // missing Namespace @@ -187,8 +197,7 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube { desc: "Node.Spec.ConfigSource.ConfigMap is missing name", configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: "foo", - Namespace: "bar", + Namespace: "foo", KubeletConfigKey: "kubelet", }}, // missing Name apierr: "spec.configSource.configMap.name: Required value: name must be set", @@ -196,24 +205,24 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube { desc: "Node.Spec.ConfigSource.ConfigMap is missing kubeletConfigKey", configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: "foo", - Namespace: "bar", - Name: "baz", + Namespace: "foo", + Name: "bar", }}, // missing KubeletConfigKey apierr: "spec.configSource.configMap.kubeletConfigKey: Required value: kubeletConfigKey must be set", }, { - desc: "Node.Spec.ConfigSource.ConfigMap is missing uid", - configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - Namespace: "bar", - Name: "baz", - KubeletConfigKey: "kubelet", - }}, // missing uid - apierr: "spec.configSource.configMap.uid: Required value: uid must be set in spec", - }, - {desc: "Node.Spec.ConfigSource.ConfigMap.ResourceVersion is illegally specified", + desc: "Node.Spec.ConfigSource.ConfigMap.UID is illegally specified", configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ UID: "foo", + Name: "bar", + Namespace: "baz", + KubeletConfigKey: "kubelet", + }}, + apierr: "spec.configSource.configMap.uid: Forbidden: uid must not be set in spec", + }, + { + desc: "Node.Spec.ConfigSource.ConfigMap.ResourceVersion is illegally specified", + configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ Name: "bar", Namespace: "baz", ResourceVersion: "1", @@ -221,106 +230,77 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube }}, apierr: "spec.configSource.configMap.resourceVersion: Forbidden: resourceVersion must not be set in spec", }, - {desc: "Node.Spec.ConfigSource.ConfigMap has invalid namespace", + { + desc: "Node.Spec.ConfigSource.ConfigMap has invalid namespace", configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: "foo", Name: "bar", Namespace: "../baz", KubeletConfigKey: "kubelet", }}, apierr: "spec.configSource.configMap.namespace: Invalid value", }, - {desc: "Node.Spec.ConfigSource.ConfigMap has invalid name", + { + desc: "Node.Spec.ConfigSource.ConfigMap has invalid name", configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: "foo", Name: "../bar", Namespace: "baz", KubeletConfigKey: "kubelet", }}, apierr: "spec.configSource.configMap.name: Invalid value", }, - {desc: 
"Node.Spec.ConfigSource.ConfigMap has invalid kubeletConfigKey", + { + desc: "Node.Spec.ConfigSource.ConfigMap has invalid kubeletConfigKey", configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: "foo", Name: "bar", Namespace: "baz", KubeletConfigKey: "../qux", }}, apierr: "spec.configSource.configMap.kubeletConfigKey: Invalid value", }, - { - // TODO(mtaufen): remove in #63221 - desc: "Node.Spec.ConfigSource.ConfigMap.UID does not align with Namespace/Name", - configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: "foo", - Namespace: correctConfigMap.Namespace, - Name: correctConfigMap.Name, - KubeletConfigKey: "kubelet", - }}, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Error: fmt.Sprintf(status.SyncErrorFmt, fmt.Sprintf(status.UIDMismatchErrorFmt, "foo", configMapAPIPath(correctConfigMap), correctConfigMap.UID)), - }, - // skip assigned and active, because we don't know what the prior source will be - SkipAssigned: true, - SkipActive: true, - }, - expectConfig: nil, - event: false, - }, { desc: "correct", configSource: correctSource, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Active: correctSource, - Assigned: correctSource, - }, - }, + configMap: correctConfigMap, expectConfig: correctKC, event: true, }, { desc: "fail-parse", configSource: failParseSource, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Assigned: failParseSource, - Error: status.LoadError, - }, - SkipActive: true, + configMap: failParseConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + err: status.LoadError, + lkgActive: true, }, - expectConfig: nil, + expectConfig: localKC, event: true, }, { desc: "fail-validate", configSource: failValidateSource, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Assigned: failValidateSource, - Error: status.ValidateError, - }, - SkipActive: true, + configMap: failValidateConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + err: status.ValidateError, + lkgActive: true, }, - expectConfig: nil, + expectConfig: localKC, event: true, }, } - L := len(states) - for i := 1; i <= L; i++ { // need one less iteration than the number of states - testBothDirections(f, &states[i-1 : i][0], states[i:L], 0) + L := len(cases) + for i := 1; i <= L; i++ { // need one less iteration than the number of cases + testBothDirections(f, setConfigSourceFunc, &cases[i-1 : i][0], cases[i:L], 0) } }) }) - Context("When a remote config becomes the new last-known-good, and then the Kubelet is updated to use a new, bad config", func() { - It("the Kubelet should report a status and configz indicating that it rolled back to the new last-known-good", func() { + Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap", func() { + It(itDescription, func() { var err error - // we base the "lkg" configmap off of the current configuration - lkgKC := originalKC.DeepCopy() + // we base the "lkg" configmap off of the configuration from before the test + lkgKC := beforeKC.DeepCopy() lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-intended-lkg", lkgKC) lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap) framework.ExpectNoError(err) @@ -336,43 +316,39 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube framework.ExpectNoError(err) lkgSource := &apiv1.NodeConfigSource{ConfigMap: 
&apiv1.ConfigMapNodeConfigSource{ - UID: lkgConfigMap.UID, Namespace: lkgConfigMap.Namespace, Name: lkgConfigMap.Name, KubeletConfigKey: "kubelet", }} + lkgStatus := lkgSource.DeepCopy() + lkgStatus.ConfigMap.UID = lkgConfigMap.UID + lkgStatus.ConfigMap.ResourceVersion = lkgConfigMap.ResourceVersion + badSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: badConfigMap.UID, Namespace: badConfigMap.Namespace, Name: badConfigMap.Name, KubeletConfigKey: "kubelet", }} - states := []configState{ - // intended lkg - {desc: "intended last-known-good", + cases := []nodeConfigTestCase{ + { + desc: "intended last-known-good", configSource: lkgSource, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Active: lkgSource, - Assigned: lkgSource, - }, - SkipLkg: true, + configMap: lkgConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + lastKnownGood: lkgStatus, }, expectConfig: lkgKC, event: true, }, - - // bad config - {desc: "bad config", + { + desc: "bad config", configSource: badSource, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Active: lkgSource, - Assigned: badSource, - LastKnownGood: lkgSource, - Error: status.LoadError, - }, + configMap: badConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + lastKnownGood: lkgStatus, + err: status.LoadError, + lkgActive: true, }, expectConfig: lkgKC, event: true, @@ -380,59 +356,53 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube } // wait 12 minutes after setting the first config to ensure it has time to pass the trial duration - testBothDirections(f, &states[0], states[1:], 12*time.Minute) + testBothDirections(f, setConfigSourceFunc, &cases[0], cases[1:], 12*time.Minute) }) }) - Context("When a remote config becomes the new last-known-good, and then Node.ConfigSource.ConfigMap.KubeletConfigKey is updated to use a new, bad config", func() { - It("the Kubelet should report a status and configz indicating that it rolled back to the new last-known-good", func() { + Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap.KubeletConfigKey", func() { + It(itDescription, func() { const badConfigKey = "bad" var err error - // we base the "lkg" configmap off of the current configuration - lkgKC := originalKC.DeepCopy() + // we base the "lkg" configmap off of the configuration from before the test + lkgKC := beforeKC.DeepCopy() combinedConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-combined", lkgKC) combinedConfigMap.Data[badConfigKey] = "{0xdeadbeef}" combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(combinedConfigMap) framework.ExpectNoError(err) lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: combinedConfigMap.UID, Namespace: combinedConfigMap.Namespace, Name: combinedConfigMap.Name, KubeletConfigKey: "kubelet", }} - badSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: combinedConfigMap.UID, - Namespace: combinedConfigMap.Namespace, - Name: combinedConfigMap.Name, - KubeletConfigKey: badConfigKey, - }} - states := []configState{ - // intended lkg - {desc: "intended last-known-good", + lkgStatus := lkgSource.DeepCopy() + lkgStatus.ConfigMap.UID = combinedConfigMap.UID + lkgStatus.ConfigMap.ResourceVersion = combinedConfigMap.ResourceVersion + + badSource := lkgSource.DeepCopy() + badSource.ConfigMap.KubeletConfigKey = badConfigKey + + cases := []nodeConfigTestCase{ + { + 
desc: "intended last-known-good", configSource: lkgSource, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Active: lkgSource, - Assigned: lkgSource, - }, - SkipLkg: true, + configMap: combinedConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + lastKnownGood: lkgStatus, }, expectConfig: lkgKC, event: true, }, - - // bad config - {desc: "bad config", + { + desc: "bad config", configSource: badSource, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Active: lkgSource, - Assigned: badSource, - LastKnownGood: lkgSource, - Error: status.LoadError, - }, + configMap: combinedConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + lastKnownGood: lkgStatus, + err: status.LoadError, + lkgActive: true, }, expectConfig: lkgKC, event: true, @@ -440,18 +410,17 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube } // wait 12 minutes after setting the first config to ensure it has time to pass the trial duration - testBothDirections(f, &states[0], states[1:], 12*time.Minute) + testBothDirections(f, setConfigSourceFunc, &cases[0], cases[1:], 12*time.Minute) }) }) - // This stress test will help turn up resource leaks across kubelet restarts that can, over time, - // break our ability to dynamically update kubelet config - Context("When changing the configuration 100 times", func() { - It("the Kubelet should report the appropriate status and configz", func() { + // exposes resource leaks across config changes + Context("update Node.Spec.ConfigSource: 100 update stress test", func() { + It(itDescription, func() { var err error // we just create two configmaps with the same config but different names and toggle between them - kc1 := originalKC.DeepCopy() + kc1 := beforeKC.DeepCopy() cm1 := newKubeletConfigMap("dynamic-kubelet-config-test-cm1", kc1) cm1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm1) framework.ExpectNoError(err) @@ -464,113 +433,451 @@ var _ = framework.KubeDescribe("DynamicKubeletConfiguration [Feature:DynamicKube framework.ExpectNoError(err) cm1Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: cm1.UID, Namespace: cm1.Namespace, Name: cm1.Name, KubeletConfigKey: "kubelet", }} + cm2Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{ - UID: cm2.UID, Namespace: cm2.Namespace, Name: cm2.Name, KubeletConfigKey: "kubelet", }} - states := []configState{ - {desc: "cm1", + cases := []nodeConfigTestCase{ + { + desc: "cm1", configSource: cm1Source, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Active: cm1Source, - Assigned: cm1Source, - }, - SkipLkg: true, - }, + configMap: cm1, expectConfig: kc1, event: true, }, - - {desc: "cm2", + { + desc: "cm2", configSource: cm2Source, - expectConfigStatus: &configStateStatus{ - NodeConfigStatus: apiv1.NodeConfigStatus{ - Active: cm2Source, - Assigned: cm2Source, - }, - SkipLkg: true, - }, + configMap: cm2, expectConfig: kc2, event: true, }, } for i := 0; i < 50; i++ { // change the config 101 times (changes 3 times in the first iteration, 2 times in each subsequent iteration) - testBothDirections(f, &states[0], states[1:], 0) + testBothDirections(f, setConfigSourceFunc, &cases[0], cases[1:], 0) } }) }) + + // Please note: This behavior is tested to ensure implementation correctness. We do not, however, recommend ConfigMap mutations + // as a usage pattern for dynamic Kubelet config in large clusters. 
It is much safer to create a new ConfigMap, and incrementally + // roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion + // followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations + // change the ResourceVersion of the ConfigMap. + Context("update ConfigMap in-place: state transitions", func() { + It(itDescription, func() { + var err error + // we base the "correct" configmap off of the configuration from before the test + correctKC := beforeKC.DeepCopy() + correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-in-place", correctKC) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap) + framework.ExpectNoError(err) + + // we reuse the same name, namespace + failParseConfigMap := correctConfigMap.DeepCopy() + failParseConfigMap.Data = map[string]string{ + "kubelet": "{0xdeadbeef}", + } + + // fail to validate, we make a copy and set an invalid KubeAPIQPS on kc before serializing + invalidKC := correctKC.DeepCopy() + invalidKC.KubeAPIQPS = -1 + failValidateConfigMap := correctConfigMap.DeepCopy() + failValidateConfigMap.Data = newKubeletConfigMap("", invalidKC).Data + + // ensure node config source is set to the config map we will mutate in-place, + // since updateConfigMapFunc doesn't mutate Node.Spec.ConfigSource + source := &apiv1.NodeConfigSource{ + ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Namespace: correctConfigMap.Namespace, + Name: correctConfigMap.Name, + KubeletConfigKey: "kubelet", + }, + } + (&nodeConfigTestCase{ + desc: "initial state (correct)", + configSource: source, + configMap: correctConfigMap, + expectConfig: correctKC, + }).run(f, setConfigSourceFunc, false, 0) + + cases := []nodeConfigTestCase{ + { + desc: "correct", + configSource: source, + configMap: correctConfigMap, + expectConfig: correctKC, + event: true, + }, + { + desc: "fail-parse", + configSource: source, + configMap: failParseConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + err: status.LoadError, + lkgActive: true, + }, + expectConfig: localKC, + event: true, + }, + { + desc: "fail-validate", + configSource: source, + configMap: failValidateConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + err: status.ValidateError, + lkgActive: true, + }, + expectConfig: localKC, + event: true, + }, + } + L := len(cases) + for i := 1; i <= L; i++ { // need one less iteration than the number of cases + testBothDirections(f, updateConfigMapFunc, &cases[i-1 : i][0], cases[i:L], 0) + } + }) + }) + + // Please note: This behavior is tested to ensure implementation correctness. We do not, however, recommend ConfigMap mutations + // as a usage pattern for dynamic Kubelet config in large clusters. It is much safer to create a new ConfigMap, and incrementally + // roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion + // followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations + // change the ResourceVersion of the ConfigMap. 
+ Context("update ConfigMap in-place: recover to last-known-good version", func() { + It(itDescription, func() { + var err error + // we base the "lkg" configmap off of the configuration from before the test + lkgKC := beforeKC.DeepCopy() + lkgConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-in-place-lkg", lkgKC) + lkgConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap) + framework.ExpectNoError(err) + + // bad config map, we insert some bogus stuff into the configMap + badConfigMap := lkgConfigMap.DeepCopy() + badConfigMap.Data = map[string]string{ + "kubelet": "{0xdeadbeef}", + } + // ensure node config source is set to the config map we will mutate in-place + source := &apiv1.NodeConfigSource{ + ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Namespace: lkgConfigMap.Namespace, + Name: lkgConfigMap.Name, + KubeletConfigKey: "kubelet", + }, + } + + // Even though the first test case will PUT the lkgConfigMap again, no-op writes don't increment + // ResourceVersion, so the expected status we record here will still be correct. + lkgStatus := source.DeepCopy() + lkgStatus.ConfigMap.UID = lkgConfigMap.UID + lkgStatus.ConfigMap.ResourceVersion = lkgConfigMap.ResourceVersion + + (&nodeConfigTestCase{ + desc: "initial state (correct)", + configSource: source, + configMap: lkgConfigMap, + expectConfig: lkgKC, + }).run(f, setConfigSourceFunc, false, 0) // wait 0 here, and we should not expect LastKnownGood to have changed yet (hence nil) + + cases := []nodeConfigTestCase{ + { + desc: "intended last-known-good", + configSource: source, + configMap: lkgConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + lastKnownGood: lkgStatus, + }, + expectConfig: lkgKC, + event: true, + }, + { + // NOTE(mtaufen): If you see a strange "expected assigned x but got assigned y" error on this case, + // it is possible that the Kubelet didn't start the informer that watches the currently assigned + // ConfigMap, or didn't get updates from that informer. Other tests don't always catch this because + // they quickly change config. The sync loop will always happen once, a bit after the Kubelet starts + // up, because other informers' initial "add" events can queue a sync. If you wait long enough before + // changing config (waiting for the config to become last-known-good, for example), the syncs queued by + // add events will have already been processed, and the lack of a running ConfigMap informer will result + // in a missed update, no config change, and the above error when we check the status. + desc: "bad config", + configSource: source, + configMap: badConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + lastKnownGood: lkgStatus, + err: status.LoadError, + lkgActive: true, + }, + expectConfig: lkgKC, + event: true, + }, + } + + // wait 12 minutes after setting the first config to ensure it has time to pass the trial duration + testBothDirections(f, updateConfigMapFunc, &cases[0], cases[1:], 12*time.Minute) + }) + }) + + // Please note: This behavior is tested to ensure implementation correctness. We do not, however, recommend ConfigMap mutations + // as a usage pattern for dynamic Kubelet config in large clusters. It is much safer to create a new ConfigMap, and incrementally + // roll out a new Node.Spec.ConfigSource that references the new ConfigMap. 
In-place ConfigMap updates, including deletion + // followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations + // change the ResourceVersion of the ConfigMap. + Context("delete and recreate ConfigMap: state transitions", func() { + It(itDescription, func() { + var err error + // we base the "correct" configmap off of the configuration from before the test + correctKC := beforeKC.DeepCopy() + correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-delete-createe", correctKC) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap) + framework.ExpectNoError(err) + + // we reuse the same name, namespace + failParseConfigMap := correctConfigMap.DeepCopy() + failParseConfigMap.Data = map[string]string{ + "kubelet": "{0xdeadbeef}", + } + + // fail to validate, we make a copy and set an invalid KubeAPIQPS on kc before serializing + invalidKC := correctKC.DeepCopy() + invalidKC.KubeAPIQPS = -1 + failValidateConfigMap := correctConfigMap.DeepCopy() + failValidateConfigMap.Data = newKubeletConfigMap("", invalidKC).Data + + // ensure node config source is set to the config map we will mutate in-place, + // since recreateConfigMapFunc doesn't mutate Node.Spec.ConfigSource + source := &apiv1.NodeConfigSource{ + ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Namespace: correctConfigMap.Namespace, + Name: correctConfigMap.Name, + KubeletConfigKey: "kubelet", + }, + } + (&nodeConfigTestCase{ + desc: "initial state (correct)", + configSource: source, + configMap: correctConfigMap, + expectConfig: correctKC, + }).run(f, setConfigSourceFunc, false, 0) + + cases := []nodeConfigTestCase{ + { + desc: "correct", + configSource: source, + configMap: correctConfigMap, + expectConfig: correctKC, + event: true, + }, + { + desc: "fail-parse", + configSource: source, + configMap: failParseConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + err: status.LoadError, + lkgActive: true, + }, + expectConfig: localKC, + event: true, + }, + { + desc: "fail-validate", + configSource: source, + configMap: failValidateConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + err: status.ValidateError, + lkgActive: true, + }, + expectConfig: localKC, + event: true, + }, + } + L := len(cases) + for i := 1; i <= L; i++ { // need one less iteration than the number of cases + testBothDirections(f, recreateConfigMapFunc, &cases[i-1 : i][0], cases[i:L], 0) + } + }) + }) + + // Please note: This behavior is tested to ensure implementation correctness. We do not, however, recommend ConfigMap mutations + // as a usage pattern for dynamic Kubelet config in large clusters. It is much safer to create a new ConfigMap, and incrementally + // roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion + // followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations + // change the ResourceVersion of the ConfigMap. 
+ Context("delete and recreate ConfigMap: error while ConfigMap is absent", func() { + It(itDescription, func() { + var err error + // we base the "correct" configmap off of the configuration from before the test + correctKC := beforeKC.DeepCopy() + correctConfigMap := newKubeletConfigMap("dynamic-kubelet-config-test-delete-createe", correctKC) + correctConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(correctConfigMap) + framework.ExpectNoError(err) + + // ensure node config source is set to the config map we will mutate in-place, + // since our mutation functions don't mutate Node.Spec.ConfigSource + source := &apiv1.NodeConfigSource{ + ConfigMap: &apiv1.ConfigMapNodeConfigSource{ + Namespace: correctConfigMap.Namespace, + Name: correctConfigMap.Name, + KubeletConfigKey: "kubelet", + }, + } + (&nodeConfigTestCase{ + desc: "correct", + configSource: source, + configMap: correctConfigMap, + expectConfig: correctKC, + }).run(f, setConfigSourceFunc, false, 0) + + // delete the ConfigMap, and ensure an error is reported by the Kubelet while the ConfigMap is absent + (&nodeConfigTestCase{ + desc: "correct", + configSource: source, + configMap: correctConfigMap, + expectConfigStatus: expectNodeConfigStatus{ + err: fmt.Sprintf(status.SyncErrorFmt, status.DownloadError), + }, + expectConfig: correctKC, + }).run(f, deleteConfigMapFunc, false, 0) + + // re-create the ConfigMap, and ensure the error disappears + (&nodeConfigTestCase{ + desc: "correct", + configSource: source, + configMap: correctConfigMap, + expectConfig: correctKC, + }).run(f, createConfigMapFunc, false, 0) + }) + }) }) }) -// testBothDirections tests the state change represented by each edge, where each state is a vertex, -// and there are edges in each direction between first and each of the states. -func testBothDirections(f *framework.Framework, first *configState, states []configState, waitAfterFirst time.Duration) { +// testBothDirections tests the state change represented by each edge, where each case is a vertex, +// and there are edges in each direction between first and each of the cases. 
+func testBothDirections(f *framework.Framework, fn func(f *framework.Framework, tc *nodeConfigTestCase) error, + first *nodeConfigTestCase, cases []nodeConfigTestCase, waitAfterFirst time.Duration) { // set to first and check that everything got set up properly - By(fmt.Sprintf("setting configSource to state %q", first.desc)) + By(fmt.Sprintf("setting initial state %q", first.desc)) // we don't always expect an event here, because setting "first" might not represent // a change from the current configuration - setAndTestKubeletConfigState(f, first, false) + first.run(f, fn, false, waitAfterFirst) - time.Sleep(waitAfterFirst) + // for each case, set up, check expectations, then reset to first and check again + for i := range cases { + tc := &cases[i] + By(fmt.Sprintf("from %q to %q", first.desc, tc.desc)) + // from first -> tc, tc.event fully describes whether we should get a config change event + tc.run(f, fn, tc.event, 0) - // for each state, set to that state, check expectations, then reset to first and check again - for i := range states { - By(fmt.Sprintf("from %q to %q", first.desc, states[i].desc)) - // from first -> states[i], states[i].event fully describes whether we should get a config change event - setAndTestKubeletConfigState(f, &states[i], states[i].event) - - By(fmt.Sprintf("back to %q from %q", first.desc, states[i].desc)) - // whether first -> states[i] should have produced a config change event partially determines whether states[i] -> first should produce an event - setAndTestKubeletConfigState(f, first, first.event && states[i].event) + By(fmt.Sprintf("back to %q from %q", first.desc, tc.desc)) + // whether first -> tc should have produced a config change event partially determines whether tc -> first should produce an event + first.run(f, fn, first.event && tc.event, 0) } } -// setAndTestKubeletConfigState tests that after setting the config source, the node spec, status, configz, and latest event match +// run tests that, after performing fn, the node spec, status, configz, and latest event match // the expectations described by state. 
-func setAndTestKubeletConfigState(f *framework.Framework, state *configState, expectEvent bool) { +func (tc *nodeConfigTestCase) run(f *framework.Framework, fn func(f *framework.Framework, tc *nodeConfigTestCase) error, + expectEvent bool, wait time.Duration) { // set the desired state, retry a few times in case we are competing with other editors Eventually(func() error { - if err := setNodeConfigSource(f, state.configSource); err != nil { - if len(state.apierr) == 0 { - return fmt.Errorf("case %s: expect nil error but got %q", state.desc, err.Error()) - } else if !strings.Contains(err.Error(), state.apierr) { - return fmt.Errorf("case %s: expect error to contain %q but got %q", state.desc, state.apierr, err.Error()) + if err := fn(f, tc); err != nil { + if len(tc.apierr) == 0 { + return fmt.Errorf("case %s: expect nil error but got %q", tc.desc, err.Error()) + } else if !strings.Contains(err.Error(), tc.apierr) { + return fmt.Errorf("case %s: expect error to contain %q but got %q", tc.desc, tc.apierr, err.Error()) } - } else if len(state.apierr) > 0 { - return fmt.Errorf("case %s: expect error to contain %q but got nil error", state.desc, state.apierr) + } else if len(tc.apierr) > 0 { + return fmt.Errorf("case %s: expect error to contain %q but got nil error", tc.desc, tc.apierr) } return nil }, time.Minute, time.Second).Should(BeNil()) // skip further checks if we expected an API error - if len(state.apierr) > 0 { + if len(tc.apierr) > 0 { return } - // check that config source actually got set to what we expect - checkNodeConfigSource(f, state.desc, state.configSource) + // wait for the designated duration before checking the reconciliation + time.Sleep(wait) + // check config source + tc.checkNodeConfigSource(f) // check status - checkConfigStatus(f, state.desc, state.expectConfigStatus) + tc.checkConfigStatus(f) // check expectConfig - if state.expectConfig != nil { - checkConfig(f, state.desc, state.expectConfig) + if tc.expectConfig != nil { + tc.checkConfig(f) } // check that an event was sent for the config change if expectEvent { - checkEvent(f, state.desc, state.configSource) + tc.checkEvent(f) } } +// setConfigSourceFunc sets Node.Spec.ConfigSource to tc.configSource +func setConfigSourceFunc(f *framework.Framework, tc *nodeConfigTestCase) error { + return setNodeConfigSource(f, tc.configSource) +} + +// updateConfigMapFunc updates the ConfigMap described by tc.configMap to contain matching data. +// It also updates the resourceVersion in any non-nil NodeConfigSource.ConfigMap in the expected +// status to match the resourceVersion of the updated ConfigMap. +func updateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { + // Clear ResourceVersion from the ConfigMap objects we use to initiate mutations + // so that we don't get 409 (conflict) responses. ConfigMaps always allow updates + // (with respect to concurrency control) when you omit ResourceVersion. + // We know that we won't perform concurrent updates during this test. + tc.configMap.ResourceVersion = "" + cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Update(tc.configMap) + if err != nil { + return err + } + // update tc.configMap's ResourceVersion to match the updated ConfigMap, this makes + // sure our derived status checks have up-to-date information + tc.configMap.ResourceVersion = cm.ResourceVersion + return nil +} + +// recreateConfigMapFunc deletes and recreates the ConfigMap described by tc.configMap. +// The new ConfigMap will match tc.configMap. 
+func recreateConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { + // need to ignore NotFound error, since there could be cases where delete + // fails during a retry because the delete in a previous attempt succeeded, + // before some other error occurred. + err := deleteConfigMapFunc(f, tc) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + return createConfigMapFunc(f, tc) +} + +// deleteConfigMapFunc simply deletes tc.configMap +func deleteConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { + return f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Delete(tc.configMap.Name, &metav1.DeleteOptions{}) +} + +// createConfigMapFunc creates tc.configMap and updates the UID and ResourceVersion on tc.configMap +// to match the created configMap +func createConfigMapFunc(f *framework.Framework, tc *nodeConfigTestCase) error { + tc.configMap.ResourceVersion = "" + cm, err := f.ClientSet.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(tc.configMap) + if err != nil { + return err + } + // update tc.configMap's UID and ResourceVersion to match the new ConfigMap, this makes + // sure our derived status checks have up-to-date information + tc.configMap.UID = cm.UID + tc.configMap.ResourceVersion = cm.ResourceVersion + return nil +} + // make sure the node's config source matches what we expect, after setting it -func checkNodeConfigSource(f *framework.Framework, desc string, expect *apiv1.NodeConfigSource) { +func (tc *nodeConfigTestCase) checkNodeConfigSource(f *framework.Framework) { const ( timeout = time.Minute interval = time.Second @@ -578,62 +885,74 @@ func checkNodeConfigSource(f *framework.Framework, desc string, expect *apiv1.No Eventually(func() error { node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("checkNodeConfigSource: case %s: %v", desc, err) + return fmt.Errorf("checkNodeConfigSource: case %s: %v", tc.desc, err) } actual := node.Spec.ConfigSource - if !reflect.DeepEqual(expect, actual) { - return fmt.Errorf(spew.Sprintf("checkNodeConfigSource: case %s: expected %#v but got %#v", desc, expect, actual)) + if !apiequality.Semantic.DeepEqual(tc.configSource, actual) { + return fmt.Errorf(spew.Sprintf("checkNodeConfigSource: case %s: expected %#v but got %#v", tc.desc, tc.configSource, actual)) } return nil }, timeout, interval).Should(BeNil()) } // make sure the node status eventually matches what we expect -func checkConfigStatus(f *framework.Framework, desc string, expect *configStateStatus) { +func (tc *nodeConfigTestCase) checkConfigStatus(f *framework.Framework) { const ( timeout = time.Minute interval = time.Second ) + errFmt := fmt.Sprintf("checkConfigStatus: case %s:", tc.desc) + " %v" Eventually(func() error { node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("checkConfigStatus: case %s: %v", desc, err) + return fmt.Errorf(errFmt, err) } - if err := expectConfigStatus(expect, node.Status.Config); err != nil { - return fmt.Errorf("checkConfigStatus: case %s: %v", desc, err) + if err := expectConfigStatus(tc, node.Status.Config); err != nil { + return fmt.Errorf(errFmt, err) } return nil }, timeout, interval).Should(BeNil()) } -func expectConfigStatus(expect *configStateStatus, actual *apiv1.NodeConfigStatus) error { - if expect == nil { - return fmt.Errorf("expectConfigStatus requires expect to be non-nil (possible malformed test case)") - } +func 
expectConfigStatus(tc *nodeConfigTestCase, actual *apiv1.NodeConfigStatus) error { + var errs []string if actual == nil { return fmt.Errorf("expectConfigStatus requires actual to be non-nil (possible Kubelet failed to update status)") } - var errs []string - if !expect.SkipActive && !apiequality.Semantic.DeepEqual(expect.Active, actual.Active) { - errs = append(errs, fmt.Sprintf("expected Active %#v but got %#v", expect.Active, actual.Active)) + // check Assigned matches tc.configSource, with UID and ResourceVersion from tc.configMap + expectAssigned := tc.configSource.DeepCopy() + if expectAssigned != nil && expectAssigned.ConfigMap != nil { + expectAssigned.ConfigMap.UID = tc.configMap.UID + expectAssigned.ConfigMap.ResourceVersion = tc.configMap.ResourceVersion } - if !expect.SkipAssigned && !apiequality.Semantic.DeepEqual(expect.Assigned, actual.Assigned) { - errs = append(errs, fmt.Sprintf("expected Assigned %#v but got %#v", expect.Assigned, actual.Assigned)) + if !apiequality.Semantic.DeepEqual(expectAssigned, actual.Assigned) { + errs = append(errs, spew.Sprintf("expected Assigned %#v but got %#v", expectAssigned, actual.Assigned)) } - if !expect.SkipLkg && !apiequality.Semantic.DeepEqual(expect.LastKnownGood, actual.LastKnownGood) { - errs = append(errs, fmt.Sprintf("expected LastKnownGood %#v but got %#v", expect.LastKnownGood, actual.LastKnownGood)) + // check LastKnownGood matches tc.expectConfigStatus.lastKnownGood + if !tc.expectConfigStatus.skipLkg && !apiequality.Semantic.DeepEqual(tc.expectConfigStatus.lastKnownGood, actual.LastKnownGood) { + errs = append(errs, spew.Sprintf("expected LastKnownGood %#v but got %#v", tc.expectConfigStatus.lastKnownGood, actual.LastKnownGood)) } - if expect.Error != actual.Error { - errs = append(errs, fmt.Sprintf("expected Error %q but got %q", expect.Error, actual.Error)) + // check Active matches Assigned or LastKnownGood, depending on tc.expectConfigStatus.lkgActive + expectActive := expectAssigned + if tc.expectConfigStatus.lkgActive { + expectActive = tc.expectConfigStatus.lastKnownGood } + if !apiequality.Semantic.DeepEqual(expectActive, actual.Active) { + errs = append(errs, spew.Sprintf("expected Active %#v but got %#v", expectActive, actual.Active)) + } + // check Error + if tc.expectConfigStatus.err != actual.Error { + errs = append(errs, fmt.Sprintf("expected Error %q but got %q", tc.expectConfigStatus.err, actual.Error)) + } + // format error list if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, ",")) + return fmt.Errorf("%s", strings.Join(errs, ", ")) } return nil } // make sure config exposed on configz matches what we expect -func checkConfig(f *framework.Framework, desc string, expect *kubeletconfig.KubeletConfiguration) { +func (tc *nodeConfigTestCase) checkConfig(f *framework.Framework) { const ( timeout = time.Minute interval = time.Second @@ -641,10 +960,10 @@ func checkConfig(f *framework.Framework, desc string, expect *kubeletconfig.Kube Eventually(func() error { actual, err := getCurrentKubeletConfig() if err != nil { - return fmt.Errorf("checkConfig: case %s: %v", desc, err) + return fmt.Errorf("checkConfig: case %s: %v", tc.desc, err) } - if !reflect.DeepEqual(expect, actual) { - return fmt.Errorf(spew.Sprintf("checkConfig: case %s: expected %#v but got %#v", desc, expect, actual)) + if !apiequality.Semantic.DeepEqual(tc.expectConfig, actual) { + return fmt.Errorf(spew.Sprintf("checkConfig: case %s: expected %#v but got %#v", tc.desc, tc.expectConfig, actual)) } return nil }, timeout, 
interval).Should(BeNil()) @@ -652,7 +971,7 @@ func checkConfig(f *framework.Framework, desc string, expect *kubeletconfig.Kube // checkEvent makes sure an event was sent marking the Kubelet's restart to use new config, // and that it mentions the config we expect. -func checkEvent(f *framework.Framework, desc string, expect *apiv1.NodeConfigSource) { +func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) { const ( timeout = time.Minute interval = time.Second @@ -660,7 +979,7 @@ func checkEvent(f *framework.Framework, desc string, expect *apiv1.NodeConfigSou Eventually(func() error { events, err := f.ClientSet.CoreV1().Events("").List(metav1.ListOptions{}) if err != nil { - return fmt.Errorf("checkEvent: case %s: %v", desc, err) + return fmt.Errorf("checkEvent: case %s: %v", tc.desc, err) } // find config changed event with most recent timestamp var recent *apiv1.Event @@ -676,23 +995,23 @@ func checkEvent(f *framework.Framework, desc string, expect *apiv1.NodeConfigSou } } } - // we expect at least one config change event if recent == nil { - return fmt.Errorf("checkEvent: case %s: no events found with reason %s", desc, controller.KubeletConfigChangedEventReason) + return fmt.Errorf("checkEvent: case %s: no events found with reason %s", tc.desc, controller.KubeletConfigChangedEventReason) } - - // ensure the message is what we expect (including the resource path) - expectMessage := fmt.Sprintf(controller.EventMessageFmt, controller.LocalConfigMessage) - if expect != nil { - if expect.ConfigMap != nil { - expectMessage = fmt.Sprintf(controller.EventMessageFmt, fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", expect.ConfigMap.Namespace, expect.ConfigMap.Name)) + // construct expected message, based on the test case + expectMessage := controller.LocalEventMessage + if tc.configSource != nil { + if tc.configSource.ConfigMap != nil { + expectMessage = fmt.Sprintf(controller.RemoteEventMessageFmt, + fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", tc.configSource.ConfigMap.Namespace, tc.configSource.ConfigMap.Name), + tc.configMap.UID, tc.configMap.ResourceVersion, tc.configSource.ConfigMap.KubeletConfigKey) } } + // compare messages if expectMessage != recent.Message { - return fmt.Errorf("checkEvent: case %s: expected event message %q but got %q", desc, expectMessage, recent.Message) + return fmt.Errorf("checkEvent: case %s: expected event message %q but got %q", tc.desc, expectMessage, recent.Message) } - return nil }, timeout, interval).Should(BeNil()) } diff --git a/test/integration/auth/node_test.go b/test/integration/auth/node_test.go index a8d58adef19..eb6526136ab 100644 --- a/test/integration/auth/node_test.go +++ b/test/integration/auth/node_test.go @@ -287,11 +287,8 @@ func TestNodeAuthorizer(t *testing.T) { } node2.Spec.ConfigSource = &api.NodeConfigSource{ ConfigMap: &api.ConfigMapNodeConfigSource{ - Namespace: "ns", - Name: "myconfigmapconfigsource", - // validation just requires UID to be non-empty and it isn't necessary for GET, - // so we just use a bogus one for the test - UID: "uid", + Namespace: "ns", + Name: "myconfigmapconfigsource", KubeletConfigKey: "kubelet", }, } From 1b3dee951cf62da27bb9f53a5d5c5386b773d45a Mon Sep 17 00:00:00 2001 From: Alexander Staubo Date: Mon, 21 May 2018 12:10:38 -0400 Subject: [PATCH 060/307] When creating ext3/ext4 volume, pass -m0 to mkfs in order to disable the super-user-reserved blocks, which otherwise defaults to 5% of the entire disk. 
Rationale: Reserving a percentage of the volume is generally neither a useful
nor a desirable feature for volumes that aren't used as root file systems for
Linux distributions, since the reserved portion becomes unavailable to
non-root users. For containers, the general case is to use the entire volume
for data, without running as root. The case where one might want reserved
blocks enabled is much rarer.
---
 pkg/util/mount/mount_linux.go                | 6 +++++-
 pkg/util/mount/safe_format_and_mount_test.go | 8 ++++----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go
index 05bf54dae8e..b5fb1acafae 100644
--- a/pkg/util/mount/mount_linux.go
+++ b/pkg/util/mount/mount_linux.go
@@ -511,7 +511,11 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
 		}
 
 		if fstype == "ext4" || fstype == "ext3" {
-			args = []string{"-F", source}
+			args = []string{
+				"-F",  // Force flag
+				"-m0", // Zero blocks reserved for super-user
+				source,
+			}
 		}
 		glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args)
 		_, err := mounter.Exec.Run("mkfs."+fstype, args...)
diff --git a/pkg/util/mount/safe_format_and_mount_test.go b/pkg/util/mount/safe_format_and_mount_test.go
index 20bfeb9f624..506126b300b 100644
--- a/pkg/util/mount/safe_format_and_mount_test.go
+++ b/pkg/util/mount/safe_format_and_mount_test.go
@@ -116,7 +116,7 @@ func TestSafeFormatAndMount(t *testing.T) {
 			execScripts: []ExecArgs{
 				{"fsck", []string{"-a", "/dev/foo"}, "", nil},
 				{"blkid", []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", "/dev/foo"}, "", &fakeexec.FakeExitError{Status: 2}},
-				{"mkfs.ext4", []string{"-F", "/dev/foo"}, "", fmt.Errorf("formatting failed")},
+				{"mkfs.ext4", []string{"-F", "-m0", "/dev/foo"}, "", fmt.Errorf("formatting failed")},
 			},
 			expectedError: fmt.Errorf("formatting failed"),
 		},
@@ -127,7 +127,7 @@ func TestSafeFormatAndMount(t *testing.T) {
 			execScripts: []ExecArgs{
 				{"fsck", []string{"-a", "/dev/foo"}, "", nil},
 				{"blkid", []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", "/dev/foo"}, "", &fakeexec.FakeExitError{Status: 2}},
-				{"mkfs.ext4", []string{"-F", "/dev/foo"}, "", nil},
+				{"mkfs.ext4", []string{"-F", "-m0", "/dev/foo"}, "", nil},
 			},
 			expectedError: fmt.Errorf("Still cannot mount"),
 		},
@@ -138,7 +138,7 @@ func TestSafeFormatAndMount(t *testing.T) {
 			execScripts: []ExecArgs{
 				{"fsck", []string{"-a", "/dev/foo"}, "", nil},
 				{"blkid", []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", "/dev/foo"}, "", &fakeexec.FakeExitError{Status: 2}},
-				{"mkfs.ext4", []string{"-F", "/dev/foo"}, "", nil},
+				{"mkfs.ext4", []string{"-F", "-m0", "/dev/foo"}, "", nil},
 			},
 			expectedError: nil,
 		},
@@ -149,7 +149,7 @@ func TestSafeFormatAndMount(t *testing.T) {
 			execScripts: []ExecArgs{
 				{"fsck", []string{"-a", "/dev/foo"}, "", nil},
 				{"blkid", []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", "/dev/foo"}, "", &fakeexec.FakeExitError{Status: 2}},
-				{"mkfs.ext3", []string{"-F", "/dev/foo"}, "", nil},
+				{"mkfs.ext3", []string{"-F", "-m0", "/dev/foo"}, "", nil},
 			},
 			expectedError: nil,
 		},

From 3693ed095f52dcfeb677bb38165f321ecf1e2e41 Mon Sep 17 00:00:00 2001
From: David Eads
Date: Mon, 21 May 2018 13:46:53 -0400
Subject: [PATCH 061/307] remove API dependency on printers

---
 pkg/api/testing/compat/BUILD                   |  1 -
 pkg/api/testing/compat/compatibility_tester.go | 18 ++++++++----------
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/pkg/api/testing/compat/BUILD
b/pkg/api/testing/compat/BUILD index b08e25c8df7..a5890df5358 100644 --- a/pkg/api/testing/compat/BUILD +++ b/pkg/api/testing/compat/BUILD @@ -11,7 +11,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/api/testing/compat", deps = [ "//pkg/api/legacyscheme:go_default_library", - "//pkg/printers:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/pkg/api/testing/compat/compatibility_tester.go b/pkg/api/testing/compat/compatibility_tester.go index 5fbdc01e301..68b4bc9a852 100644 --- a/pkg/api/testing/compat/compatibility_tester.go +++ b/pkg/api/testing/compat/compatibility_tester.go @@ -19,7 +19,6 @@ package compat import ( "encoding/json" "fmt" - "os" "reflect" "regexp" "strconv" @@ -30,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/printers" ) // Based on: https://github.com/openshift/origin/blob/master/pkg/api/compatibility_test.go @@ -72,14 +70,11 @@ func TestCompatibility( t.Fatalf("Unexpected error: %v", err) } - hasError := false for k, expectedValue := range expectedKeys { keys := strings.Split(k, ".") if actualValue, ok, err := getJSONValue(generic, keys...); err != nil || !ok { t.Errorf("Unexpected error for %s: %v", k, err) - hasError = true } else if !reflect.DeepEqual(expectedValue, fmt.Sprintf("%v", actualValue)) { - hasError = true t.Errorf("Unexpected value for %v: expected %v, got %v", k, expectedValue, actualValue) } } @@ -89,14 +84,17 @@ func TestCompatibility( actualValue, ok, err := getJSONValue(generic, keys...) if err == nil || ok { t.Errorf("Unexpected value found for key %s: %v", absentKey, actualValue) - hasError = true } } - if hasError { - printer := &printers.JSONPrinter{} - printer.PrintObj(obj, os.Stdout) - t.Logf("2: Encoded value: %#v", string(output)) + if t.Failed() { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + t.Log(err) + } else { + t.Log(string(data)) + } + t.Logf("2: Encoded value: %v", string(output)) } } From d1603c9560389a66312d4a743d22cb5159c63036 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Wed, 2 May 2018 15:15:47 -0400 Subject: [PATCH 062/307] move PrintOptions to genericclioptions --- hack/.golint_failures | 2 + hack/import-restrictions.yaml | 1 + pkg/kubectl/cmd/BUILD | 2 + pkg/kubectl/cmd/annotate.go | 6 +- pkg/kubectl/cmd/apply.go | 18 +++--- pkg/kubectl/cmd/apply_set_last_applied.go | 6 +- pkg/kubectl/cmd/auth/BUILD | 2 +- pkg/kubectl/cmd/auth/reconcile.go | 6 +- pkg/kubectl/cmd/autoscale.go | 17 ++---- pkg/kubectl/cmd/certificates.go | 8 +-- pkg/kubectl/cmd/clusterinfo_dump.go | 6 +- pkg/kubectl/cmd/cmd_printing_test.go | 12 ++-- pkg/kubectl/cmd/config/BUILD | 1 + pkg/kubectl/cmd/config/flags.go | 22 +++---- pkg/kubectl/cmd/convert.go | 6 +- pkg/kubectl/cmd/create/BUILD | 1 + pkg/kubectl/cmd/create/flags.go | 22 +++---- pkg/kubectl/cmd/drain.go | 35 +++++------ pkg/kubectl/cmd/drain_test.go | 4 +- pkg/kubectl/cmd/expose.go | 6 +- pkg/kubectl/cmd/get/BUILD | 1 - pkg/kubectl/cmd/get/get.go | 2 +- pkg/kubectl/cmd/get/get_flags.go | 25 ++++---- pkg/kubectl/cmd/get/humanreadable_flags.go | 3 +- .../cmd/get/humanreadable_flags_test.go | 6 +- pkg/kubectl/cmd/label.go | 16 ++--- pkg/kubectl/cmd/patch.go | 16 ++--- pkg/kubectl/cmd/replace.go | 7 +-- pkg/kubectl/cmd/rollingupdate.go | 17 ++---- 
pkg/kubectl/cmd/rollout/BUILD | 2 +- pkg/kubectl/cmd/rollout/rollout_pause.go | 17 ++---- pkg/kubectl/cmd/rollout/rollout_resume.go | 17 ++---- pkg/kubectl/cmd/rollout/rollout_undo.go | 18 +++--- pkg/kubectl/cmd/run.go | 9 ++- pkg/kubectl/cmd/run_test.go | 5 +- pkg/kubectl/cmd/scale.go | 8 +-- pkg/kubectl/cmd/set/BUILD | 3 +- pkg/kubectl/cmd/set/set_env.go | 6 +- pkg/kubectl/cmd/set/set_env_test.go | 7 +-- pkg/kubectl/cmd/set/set_image.go | 6 +- pkg/kubectl/cmd/set/set_image_test.go | 9 ++- pkg/kubectl/cmd/set/set_resources.go | 7 +-- pkg/kubectl/cmd/set/set_resources_test.go | 8 +-- pkg/kubectl/cmd/set/set_selector.go | 7 +-- pkg/kubectl/cmd/set/set_serviceaccount.go | 7 +-- .../cmd/set/set_serviceaccount_test.go | 10 ++-- pkg/kubectl/cmd/set/set_subject.go | 6 +- pkg/kubectl/cmd/taint.go | 17 ++---- pkg/kubectl/cmd/util/editor/BUILD | 2 +- pkg/kubectl/cmd/util/editor/editoptions.go | 16 ++--- pkg/kubectl/genericclioptions/BUILD | 20 ++++++- .../genericclioptions}/json_yaml_flags.go | 12 ++-- .../json_yaml_flags_test.go | 9 ++- .../genericclioptions}/name_flags.go | 8 ++- .../genericclioptions}/name_flags_test.go | 9 ++- .../genericclioptions/print_flags.go} | 49 ++------------- pkg/kubectl/genericclioptions/printers/BUILD | 41 +++++++++++++ .../genericclioptions/printers/interface.go | 32 ++++++++++ .../genericclioptions}/printers/json.go | 8 +-- .../genericclioptions}/printers/name.go | 4 +- .../printers/sourcechecker.go | 60 +++++++++++++++++++ .../printers/sourcechecker_test.go} | 0 .../genericclioptions}/printers/typesetter.go | 0 pkg/printers/BUILD | 36 +++-------- pkg/printers/customcolumn.go | 5 +- pkg/printers/customcolumn_flags.go | 8 +-- pkg/printers/customcolumn_flags_test.go | 10 ++-- pkg/printers/customcolumn_test.go | 37 ++++++------ pkg/printers/internalversion/BUILD | 2 + pkg/printers/internalversion/printers_test.go | 13 ++-- pkg/printers/jsonpath.go | 5 +- pkg/printers/jsonpath_flags.go | 6 +- pkg/printers/jsonpath_flags_test.go | 14 ++--- pkg/printers/kube_template_flags.go | 8 ++- pkg/printers/template.go | 5 +- pkg/printers/template_flags.go | 6 +- pkg/printers/template_flags_test.go | 14 ++--- 77 files changed, 475 insertions(+), 409 deletions(-) rename pkg/{printers => kubectl/genericclioptions}/json_yaml_flags.go (85%) rename pkg/{printers => kubectl/genericclioptions}/json_yaml_flags_test.go (91%) rename pkg/{printers => kubectl/genericclioptions}/name_flags.go (90%) rename pkg/{printers => kubectl/genericclioptions}/name_flags_test.go (93%) rename pkg/{printers/flags.go => kubectl/genericclioptions/print_flags.go} (72%) create mode 100644 pkg/kubectl/genericclioptions/printers/BUILD create mode 100644 pkg/kubectl/genericclioptions/printers/interface.go rename pkg/{ => kubectl/genericclioptions}/printers/json.go (93%) rename pkg/{ => kubectl/genericclioptions}/printers/name.go (97%) create mode 100644 pkg/kubectl/genericclioptions/printers/sourcechecker.go rename pkg/{printers/flags_test.go => kubectl/genericclioptions/printers/sourcechecker_test.go} (100%) rename pkg/{ => kubectl/genericclioptions}/printers/typesetter.go (100%) diff --git a/hack/.golint_failures b/hack/.golint_failures index dfb01b7005c..4b17f4efbfa 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -152,6 +152,7 @@ pkg/kubectl/cmd/util/editor pkg/kubectl/cmd/util/jsonmerge pkg/kubectl/cmd/util/sanity pkg/kubectl/genericclioptions +pkg/kubectl/genericclioptions/printers pkg/kubectl/genericclioptions/resource pkg/kubectl/metricsutil pkg/kubectl/util @@ -222,6 +223,7 @@ pkg/kubemark 
pkg/master pkg/master/controller/crdregistration pkg/master/tunneler +pkg/printers pkg/printers/internalversion pkg/printers/storage pkg/probe diff --git a/hack/import-restrictions.yaml b/hack/import-restrictions.yaml index 35645ea7ebc..c053eb45251 100644 --- a/hack/import-restrictions.yaml +++ b/hack/import-restrictions.yaml @@ -23,6 +23,7 @@ - k8s.io/client-go # TODO this one should be tightened. We depend on it for testing, but we should instead create our own scheme - k8s.io/api/core/v1 + - k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers - baseImportPath: "./vendor/k8s.io/apimachinery/" allowedImports: diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index fb3a23883c0..44c1e63bc5f 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -79,6 +79,7 @@ go_library( "//pkg/kubectl/cmd/util/openapi:go_default_library", "//pkg/kubectl/explain:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/metricsutil:go_default_library", "//pkg/kubectl/plugins:go_default_library", @@ -201,6 +202,7 @@ go_test( "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/cmd/util/openapi:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/plugins:go_default_library", "//pkg/kubectl/polymorphichelpers:go_default_library", diff --git a/pkg/kubectl/cmd/annotate.go b/pkg/kubectl/cmd/annotate.go index 755a4d79563..608398d20fa 100644 --- a/pkg/kubectl/cmd/annotate.go +++ b/pkg/kubectl/cmd/annotate.go @@ -35,15 +35,15 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) // AnnotateOptions have the data required to perform the annotate operation type AnnotateOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags PrintObj printers.ResourcePrinterFunc // Filename options @@ -111,7 +111,7 @@ var ( func NewAnnotateOptions(ioStreams genericclioptions.IOStreams) *AnnotateOptions { return &AnnotateOptions{ - PrintFlags: printers.NewPrintFlags("annotated").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("annotated").WithTypeSetter(scheme.Scheme), RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index e93caf53728..bf112f3db3a 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -49,19 +49,19 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/kubectl/validation" - "k8s.io/kubernetes/pkg/printers" ) type ApplyOptions struct { RecordFlags *genericclioptions.RecordFlags Recorder genericclioptions.Recorder - PrintFlags 
*printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) DeleteFlags *DeleteFlags DeleteOptions *DeleteOptions @@ -131,7 +131,7 @@ func NewApplyOptions(ioStreams genericclioptions.IOStreams) *ApplyOptions { return &ApplyOptions{ RecordFlags: genericclioptions.NewRecordFlags(), DeleteFlags: NewDeleteFlags("that contains the configuration to apply"), - PrintFlags: printers.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), Overwrite: true, OpenApiPatch: true, @@ -191,17 +191,13 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { o.DryRun = cmdutil.GetDryRunFlag(cmd) // allow for a success message operation to be specified at print time - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation if o.DryRun { o.PrintFlags.Complete("%s (dry run)") } - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } var err error @@ -592,7 +588,7 @@ type pruner struct { scaler scaleclient.ScalesGetter - toPrinter func(string) (printers.ResourcePrinterFunc, error) + toPrinter func(string) (printers.ResourcePrinter, error) out io.Writer } diff --git a/pkg/kubectl/cmd/apply_set_last_applied.go b/pkg/kubectl/cmd/apply_set_last_applied.go index c6643af2da8..8b39bfd2b6f 100644 --- a/pkg/kubectl/cmd/apply_set_last_applied.go +++ b/pkg/kubectl/cmd/apply_set_last_applied.go @@ -31,16 +31,16 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) type SetLastAppliedOptions struct { CreateAnnotation bool - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags PrintObj printers.ResourcePrinterFunc FilenameOptions resource.FilenameOptions @@ -83,7 +83,7 @@ var ( func NewSetLastAppliedOptions(ioStreams genericclioptions.IOStreams) *SetLastAppliedOptions { return &SetLastAppliedOptions{ - PrintFlags: printers.NewPrintFlags("configured").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("configured").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } } diff --git a/pkg/kubectl/cmd/auth/BUILD b/pkg/kubectl/cmd/auth/BUILD index a56b7c5c592..7ab08d3d166 100644 --- a/pkg/kubectl/cmd/auth/BUILD +++ b/pkg/kubectl/cmd/auth/BUILD @@ -25,9 +25,9 @@ go_library( "//pkg/kubectl/cmd/templates:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", - "//pkg/printers:go_default_library", "//pkg/registry/rbac/reconciliation:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/pkg/kubectl/cmd/auth/reconcile.go b/pkg/kubectl/cmd/auth/reconcile.go index 
fd82e2ccdcd..2b189637f92 100644 --- a/pkg/kubectl/cmd/auth/reconcile.go +++ b/pkg/kubectl/cmd/auth/reconcile.go @@ -29,16 +29,16 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/printers" "k8s.io/kubernetes/pkg/registry/rbac/reconciliation" ) // ReconcileOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of // referencing the cmd.Flags() type ReconcileOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags FilenameOptions *resource.FilenameOptions Visitor resource.Visitor @@ -64,7 +64,7 @@ var ( func NewReconcileOptions(ioStreams genericclioptions.IOStreams) *ReconcileOptions { return &ReconcileOptions{ FilenameOptions: &resource.FilenameOptions{}, - PrintFlags: printers.NewPrintFlags("reconciled").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("reconciled").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } } diff --git a/pkg/kubectl/cmd/autoscale.go b/pkg/kubectl/cmd/autoscale.go index e9c0708ca5c..eb0054dfbf2 100644 --- a/pkg/kubectl/cmd/autoscale.go +++ b/pkg/kubectl/cmd/autoscale.go @@ -31,10 +31,10 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) var ( @@ -58,8 +58,8 @@ type AutoscaleOptions struct { RecordFlags *genericclioptions.RecordFlags Recorder genericclioptions.Recorder - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) Name string Generator string @@ -83,7 +83,7 @@ type AutoscaleOptions struct { func NewAutoscaleOptions(ioStreams genericclioptions.IOStreams) *AutoscaleOptions { return &AutoscaleOptions{ - PrintFlags: printers.NewPrintFlags("autoscaled").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("autoscaled").WithTypeSetter(scheme.Scheme), FilenameOptions: &resource.FilenameOptions{}, RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, @@ -170,18 +170,13 @@ func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args return err } - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation if o.dryRun { o.PrintFlags.Complete("%s (dry run)") } - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } return nil diff --git a/pkg/kubectl/cmd/certificates.go b/pkg/kubectl/cmd/certificates.go index 08648ee8505..36c0ff69356 100644 --- a/pkg/kubectl/cmd/certificates.go +++ b/pkg/kubectl/cmd/certificates.go @@ -29,10 +29,10 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" 
"k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" "github.com/spf13/cobra" ) @@ -57,7 +57,7 @@ func NewCmdCertificate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) type CertificateOptions struct { resource.FilenameOptions - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags PrintObj printers.ResourcePrinterFunc csrNames []string @@ -100,7 +100,7 @@ func (o *CertificateOptions) Validate() error { func NewCmdCertificateApprove(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { options := CertificateOptions{ - PrintFlags: printers.NewPrintFlags("approved").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("approved").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } cmd := &cobra.Command{ @@ -157,7 +157,7 @@ func (o *CertificateOptions) RunCertificateApprove(force bool) error { func NewCmdCertificateDeny(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { options := CertificateOptions{ - PrintFlags: printers.NewPrintFlags("denied").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("denied").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } cmd := &cobra.Command{ diff --git a/pkg/kubectl/cmd/clusterinfo_dump.go b/pkg/kubectl/cmd/clusterinfo_dump.go index 11efb49fd91..0c3af8d4974 100644 --- a/pkg/kubectl/cmd/clusterinfo_dump.go +++ b/pkg/kubectl/cmd/clusterinfo_dump.go @@ -31,14 +31,14 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) type ClusterInfoDumpOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags PrintObj printers.ResourcePrinterFunc OutputDir string @@ -57,7 +57,7 @@ type ClusterInfoDumpOptions struct { // NewCmdCreateSecret groups subcommands to create various types of secrets func NewCmdClusterInfoDump(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { o := &ClusterInfoDumpOptions{ - PrintFlags: printers.NewPrintFlags("").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } diff --git a/pkg/kubectl/cmd/cmd_printing_test.go b/pkg/kubectl/cmd/cmd_printing_test.go index 4f0948b0d8e..7c0b4d37688 100644 --- a/pkg/kubectl/cmd/cmd_printing_test.go +++ b/pkg/kubectl/cmd/cmd_printing_test.go @@ -24,6 +24,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + genericprinters "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/printers" ) @@ -81,7 +83,7 @@ func TestIllegalPackageSourceCheckerThroughPrintFlags(t *testing.T) { } for _, tc := range testCases { - printFlags := printers.NewPrintFlags("succeeded").WithTypeSetter(scheme.Scheme) + printFlags := genericclioptions.NewPrintFlags("succeeded").WithTypeSetter(scheme.Scheme) 
printFlags.OutputFormat = &tc.output printer, err := printFlags.ToPrinter() @@ -97,7 +99,7 @@ func TestIllegalPackageSourceCheckerThroughPrintFlags(t *testing.T) { t.Fatalf("unexpected error %v", err) } - if !printers.IsInternalObjectError(err) { + if !genericprinters.IsInternalObjectError(err) { t.Fatalf("unexpected error - expecting internal object printer error, got %q", err) } continue @@ -143,13 +145,13 @@ func TestIllegalPackageSourceCheckerDirectlyThroughPrinters(t *testing.T) { { name: "json printer: object containing package path beginning with forbidden prefix is rejected", expectInternalObjErr: true, - printer: &printers.JSONPrinter{}, + printer: &genericprinters.JSONPrinter{}, obj: internalPod(), }, { name: "yaml printer: object containing package path beginning with forbidden prefix is rejected", expectInternalObjErr: true, - printer: &printers.YAMLPrinter{}, + printer: &genericprinters.YAMLPrinter{}, obj: internalPod(), }, { @@ -187,7 +189,7 @@ func TestIllegalPackageSourceCheckerDirectlyThroughPrinters(t *testing.T) { t.Fatalf("unexpected error %v", err) } - if !printers.IsInternalObjectError(err) { + if !genericprinters.IsInternalObjectError(err) { t.Fatalf("unexpected error - expecting internal object printer error, got %q", err) } continue diff --git a/pkg/kubectl/cmd/config/BUILD b/pkg/kubectl/cmd/config/BUILD index 494c871890f..84f82ab1b43 100644 --- a/pkg/kubectl/cmd/config/BUILD +++ b/pkg/kubectl/cmd/config/BUILD @@ -33,6 +33,7 @@ go_library( "//pkg/kubectl/cmd/templates:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/util/i18n:go_default_library", "//pkg/printers:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/pkg/kubectl/cmd/config/flags.go b/pkg/kubectl/cmd/config/flags.go index d9ed0b8bd2e..cf8b00cd6a6 100644 --- a/pkg/kubectl/cmd/config/flags.go +++ b/pkg/kubectl/cmd/config/flags.go @@ -20,6 +20,8 @@ import ( "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + genericprinters "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/printers" ) @@ -27,11 +29,11 @@ import ( // used across all config commands, and provides a method // of retrieving a known printer based on flag values provided. 
type kubectlConfigPrintFlags struct { - JSONYamlPrintFlags *printers.JSONYamlPrintFlags - NamePrintFlags *printers.NamePrintFlags + JSONYamlPrintFlags *genericclioptions.JSONYamlPrintFlags + NamePrintFlags *genericclioptions.NamePrintFlags TemplateFlags *printers.KubeTemplatePrintFlags - TypeSetter *printers.TypeSetterPrinter + TypeSetter *genericprinters.TypeSetterPrinter OutputFormat *string } @@ -46,19 +48,19 @@ func (f *kubectlConfigPrintFlags) ToPrinter() (printers.ResourcePrinter, error) outputFormat = *f.OutputFormat } - if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return f.TypeSetter.WrapToPrinter(p, err) } - if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return f.TypeSetter.WrapToPrinter(p, err) } - if p, err := f.TemplateFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.TemplateFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return f.TypeSetter.WrapToPrinter(p, err) } - return nil, printers.NoCompatiblePrinterError{Options: f} + return nil, genericclioptions.NoCompatiblePrinterError{Options: f} } func (f *kubectlConfigPrintFlags) AddFlags(cmd *cobra.Command) { @@ -83,10 +85,10 @@ func newKubeConfigPrintFlags(scheme runtime.ObjectTyper) *kubectlConfigPrintFlag return &kubectlConfigPrintFlags{ OutputFormat: &outputFormat, - JSONYamlPrintFlags: printers.NewJSONYamlPrintFlags(), - NamePrintFlags: printers.NewNamePrintFlags(""), + JSONYamlPrintFlags: genericclioptions.NewJSONYamlPrintFlags(), + NamePrintFlags: genericclioptions.NewNamePrintFlags(""), TemplateFlags: printers.NewKubeTemplatePrintFlags(), - TypeSetter: printers.NewTypeSetter(scheme), + TypeSetter: genericprinters.NewTypeSetter(scheme), } } diff --git a/pkg/kubectl/cmd/convert.go b/pkg/kubectl/cmd/convert.go index c6148c33941..ec29cdca15a 100644 --- a/pkg/kubectl/cmd/convert.go +++ b/pkg/kubectl/cmd/convert.go @@ -27,9 +27,9 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" "github.com/golang/glog" "github.com/spf13/cobra" @@ -89,7 +89,7 @@ func NewCmdConvert(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *co // ConvertOptions have the data required to perform the convert operation type ConvertOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags PrintObj printers.ResourcePrinterFunc resource.FilenameOptions @@ -103,7 +103,7 @@ type ConvertOptions struct { func NewConvertOptions(ioStreams genericclioptions.IOStreams) *ConvertOptions { return &ConvertOptions{ - PrintFlags: printers.NewPrintFlags("converted").WithTypeSetter(scheme.Scheme).WithDefaultOutput("yaml"), + PrintFlags: genericclioptions.NewPrintFlags("converted").WithTypeSetter(scheme.Scheme).WithDefaultOutput("yaml"), local: true, IOStreams: ioStreams, } diff --git a/pkg/kubectl/cmd/create/BUILD b/pkg/kubectl/cmd/create/BUILD index 131dc94981d..964ae189102 100644 --- a/pkg/kubectl/cmd/create/BUILD +++ 
b/pkg/kubectl/cmd/create/BUILD @@ -29,6 +29,7 @@ go_library( "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/cmd/util/editor:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util/i18n:go_default_library", diff --git a/pkg/kubectl/cmd/create/flags.go b/pkg/kubectl/cmd/create/flags.go index f0ee0c608f0..7c7540c9bdc 100644 --- a/pkg/kubectl/cmd/create/flags.go +++ b/pkg/kubectl/cmd/create/flags.go @@ -18,8 +18,10 @@ package create import ( "github.com/spf13/cobra" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/apimachinery/pkg/runtime" + genericprinters "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/printers" ) @@ -27,11 +29,11 @@ import ( // used across all create commands, and provides a method // of retrieving a known printer based on flag values provided. type PrintFlags struct { - JSONYamlPrintFlags *printers.JSONYamlPrintFlags - NamePrintFlags *printers.NamePrintFlags + JSONYamlPrintFlags *genericclioptions.JSONYamlPrintFlags + NamePrintFlags *genericclioptions.NamePrintFlags TemplateFlags *printers.KubeTemplatePrintFlags - TypeSetter *printers.TypeSetterPrinter + TypeSetter *genericprinters.TypeSetterPrinter OutputFormat *string } @@ -46,19 +48,19 @@ func (f *PrintFlags) ToPrinter() (printers.ResourcePrinter, error) { outputFormat = *f.OutputFormat } - if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return f.TypeSetter.WrapToPrinter(p, err) } - if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return f.TypeSetter.WrapToPrinter(p, err) } - if p, err := f.TemplateFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.TemplateFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return f.TypeSetter.WrapToPrinter(p, err) } - return nil, printers.NoCompatiblePrinterError{Options: f} + return nil, genericclioptions.NoCompatiblePrinterError{Options: f} } func (f *PrintFlags) AddFlags(cmd *cobra.Command) { @@ -77,10 +79,10 @@ func NewPrintFlags(operation string, scheme runtime.ObjectTyper) *PrintFlags { return &PrintFlags{ OutputFormat: &outputFormat, - JSONYamlPrintFlags: printers.NewJSONYamlPrintFlags(), - NamePrintFlags: printers.NewNamePrintFlags(operation), + JSONYamlPrintFlags: genericclioptions.NewJSONYamlPrintFlags(), + NamePrintFlags: genericclioptions.NewNamePrintFlags(operation), TemplateFlags: printers.NewKubeTemplatePrintFlags(), - TypeSetter: printers.NewTypeSetter(scheme), + TypeSetter: genericprinters.NewTypeSetter(scheme), } } diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index e895bf4dfef..60c3442a486 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -40,21 +40,21 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/kubectl/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl" 
"k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) type DrainOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags ToPrinter func(string) (printers.ResourcePrinterFunc, error) Namespace string @@ -108,7 +108,7 @@ var ( func NewCmdCordon(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { options := &DrainOptions{ - PrintFlags: printers.NewPrintFlags("cordoned").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("cordoned").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } @@ -140,7 +140,7 @@ var ( func NewCmdUncordon(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { options := &DrainOptions{ - PrintFlags: printers.NewPrintFlags("uncordoned").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("uncordoned").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } @@ -196,7 +196,7 @@ var ( func NewDrainOptions(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *DrainOptions { return &DrainOptions{ - PrintFlags: printers.NewPrintFlags("drained").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("drained").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, backOff: clockwork.NewRealClock(), @@ -279,6 +279,7 @@ func (o *DrainOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st if err != nil { return nil, err } + return printer.PrintObj, nil } @@ -319,7 +320,7 @@ func (o *DrainOptions) RunDrain() error { return err } - printer, err := o.ToPrinter("drained") + printObj, err := o.ToPrinter("drained") if err != nil { return err } @@ -334,7 +335,7 @@ func (o *DrainOptions) RunDrain() error { } if err == nil || o.DryRun { drainedNodes.Insert(info.Name) - printer.PrintObj(info.Object, o.Out) + printObj(info.Object, o.Out) } else { fmt.Fprintf(o.ErrOut, "error: unable to drain node %q, aborting command...\n\n", info.Name) remainingNodes := []string{} @@ -650,7 +651,7 @@ func (o *DrainOptions) waitForDelete(pods []corev1.Pod, interval, timeout time.D } else { verbStr = "deleted" } - printer, err := o.ToPrinter(verbStr) + printObj, err := o.ToPrinter(verbStr) if err != nil { return pods, err } @@ -660,7 +661,7 @@ func (o *DrainOptions) waitForDelete(pods []corev1.Pod, interval, timeout time.D for i, pod := range pods { p, err := getPodFn(pod.Namespace, pod.Name) if apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) { - printer.PrintObj(&pod, o.Out) + printObj(&pod, o.Out) continue } else if err != nil { return false, err @@ -736,12 +737,12 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error { } unsched := node.Spec.Unschedulable if unsched == desired { - printer, err := o.ToPrinter(already(desired)) + printObj, err := o.ToPrinter(already(desired)) if err != nil { fmt.Printf("error: %v", err) continue } - printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) + printObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) } else { if !o.DryRun { helper := resource.NewHelper(o.restClient, nodeInfo.Mapping) @@ -762,20 +763,20 @@ func (o *DrainOptions) 
RunCordonOrUncordon(desired bool) error { continue } } - printer, err := o.ToPrinter(changed(desired)) + printObj, err := o.ToPrinter(changed(desired)) if err != nil { fmt.Fprintf(o.ErrOut, "%v", err) continue } - printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) + printObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) } } else { - printer, err := o.ToPrinter("skipped") + printObj, err := o.ToPrinter("skipped") if err != nil { fmt.Fprintf(o.ErrOut, "%v", err) continue } - printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) + printObj(cmdutil.AsDefaultVersionedOrOriginal(nodeInfo.Object, nodeInfo.Mapping), o.Out) } } diff --git a/pkg/kubectl/cmd/drain_test.go b/pkg/kubectl/cmd/drain_test.go index 80bf4cb10fe..6b602ed87ec 100644 --- a/pkg/kubectl/cmd/drain_test.go +++ b/pkg/kubectl/cmd/drain_test.go @@ -50,8 +50,8 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/printers" ) const ( @@ -834,7 +834,7 @@ func TestDeletePods(t *testing.T) { defer tf.Cleanup() o := DrainOptions{ - PrintFlags: printers.NewPrintFlags("drained").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("drained").WithTypeSetter(scheme.Scheme), } o.Out = os.Stdout diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index b35ceba4284..cae2a6e2082 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -35,10 +35,10 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) var ( @@ -84,7 +84,7 @@ var ( type ExposeServiceOptions struct { FilenameOptions resource.FilenameOptions RecordFlags *genericclioptions.RecordFlags - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags PrintObj printers.ResourcePrinterFunc DryRun bool @@ -111,7 +111,7 @@ type ExposeServiceOptions struct { func NewExposeServiceOptions(ioStreams genericclioptions.IOStreams) *ExposeServiceOptions { return &ExposeServiceOptions{ RecordFlags: genericclioptions.NewRecordFlags(), - PrintFlags: printers.NewPrintFlags("exposed").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("exposed").WithTypeSetter(scheme.Scheme), Recorder: genericclioptions.NoopRecorder{}, IOStreams: ioStreams, diff --git a/pkg/kubectl/cmd/get/BUILD b/pkg/kubectl/cmd/get/BUILD index db904110bdc..37aded20c5b 100644 --- a/pkg/kubectl/cmd/get/BUILD +++ b/pkg/kubectl/cmd/get/BUILD @@ -77,7 +77,6 @@ go_test( "//pkg/kubectl/genericclioptions:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", - "//pkg/printers:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/kubectl/cmd/get/get.go b/pkg/kubectl/cmd/get/get.go index 2e18dd16e6a..40bebe837f5 100644 --- 
a/pkg/kubectl/cmd/get/get.go +++ b/pkg/kubectl/cmd/get/get.go @@ -133,7 +133,7 @@ const ( // NewGetOptions returns a GetOptions with default chunk size 500. func NewGetOptions(parent string, streams genericclioptions.IOStreams) *GetOptions { return &GetOptions{ - PrintFlags: NewGetPrintFlags(legacyscheme.Scheme), + PrintFlags: NewGetPrintFlags(), CmdParent: parent, IOStreams: streams, diff --git a/pkg/kubectl/cmd/get/get_flags.go b/pkg/kubectl/cmd/get/get_flags.go index 3a7f828b80f..4540100b121 100644 --- a/pkg/kubectl/cmd/get/get_flags.go +++ b/pkg/kubectl/cmd/get/get_flags.go @@ -22,17 +22,17 @@ import ( "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/printers" ) // PrintFlags composes common printer flag structs // used in the Get command. type PrintFlags struct { - JSONYamlPrintFlags *printers.JSONYamlPrintFlags - NamePrintFlags *printers.NamePrintFlags + JSONYamlPrintFlags *genericclioptions.JSONYamlPrintFlags + NamePrintFlags *genericclioptions.NamePrintFlags TemplateFlags *printers.KubeTemplatePrintFlags CustomColumnsFlags *printers.CustomColumnsPrintFlags HumanReadableFlags *HumanPrintFlags @@ -107,27 +107,27 @@ func (f *PrintFlags) ToPrinter() (printers.ResourcePrinter, error) { f.CustomColumnsFlags.TemplateArgument = *f.TemplateFlags.TemplateArgument } - if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return p, err } - if p, err := f.HumanReadableFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.HumanReadableFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return p, err } - if p, err := f.TemplateFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.TemplateFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return p, err } - if p, err := f.CustomColumnsFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.CustomColumnsFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return p, err } - if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !printers.IsNoCompatiblePrinterError(err) { + if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return p, err } - return nil, printers.NoCompatiblePrinterError{Options: f} + return nil, genericclioptions.NoCompatiblePrinterError{Options: f} } // AddFlags receives a *cobra.Command reference and binds @@ -153,7 +153,7 @@ func (f *PrintFlags) AddFlags(cmd *cobra.Command) { // NewGetPrintFlags returns flags associated with humanreadable, // template, and "name" printing, with default values set. 
-func NewGetPrintFlags(scheme runtime.ObjectConvertor) *PrintFlags { +func NewGetPrintFlags() *PrintFlags { outputFormat := "" noHeaders := false @@ -161,9 +161,10 @@ func NewGetPrintFlags(scheme runtime.ObjectConvertor) *PrintFlags { OutputFormat: &outputFormat, NoHeaders: &noHeaders, - JSONYamlPrintFlags: printers.NewJSONYamlPrintFlags(), - NamePrintFlags: printers.NewNamePrintFlags(""), + JSONYamlPrintFlags: genericclioptions.NewJSONYamlPrintFlags(), + NamePrintFlags: genericclioptions.NewNamePrintFlags(""), TemplateFlags: printers.NewKubeTemplatePrintFlags(), + HumanReadableFlags: NewHumanPrintFlags(), CustomColumnsFlags: printers.NewCustomColumnsPrintFlags(), } diff --git a/pkg/kubectl/cmd/get/humanreadable_flags.go b/pkg/kubectl/cmd/get/humanreadable_flags.go index d04bf826d9f..5a185b84524 100644 --- a/pkg/kubectl/cmd/get/humanreadable_flags.go +++ b/pkg/kubectl/cmd/get/humanreadable_flags.go @@ -18,6 +18,7 @@ package get import ( "github.com/spf13/cobra" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/kubectl/scheme" @@ -63,7 +64,7 @@ func (f *HumanPrintFlags) EnsureWithNamespace() error { // handling human-readable output. func (f *HumanPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { if len(outputFormat) > 0 && outputFormat != "wide" { - return nil, printers.NoCompatiblePrinterError{Options: f} + return nil, genericclioptions.NoCompatiblePrinterError{Options: f} } decoder := scheme.Codecs.UniversalDecoder() diff --git a/pkg/kubectl/cmd/get/humanreadable_flags_test.go b/pkg/kubectl/cmd/get/humanreadable_flags_test.go index a9d03d7e3a2..e6282229542 100644 --- a/pkg/kubectl/cmd/get/humanreadable_flags_test.go +++ b/pkg/kubectl/cmd/get/humanreadable_flags_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" ) func TestHumanReadablePrinterSupportsExpectedOptions(t *testing.T) { @@ -119,12 +119,12 @@ func TestHumanReadablePrinterSupportsExpectedOptions(t *testing.T) { p, err := printFlags.ToPrinter(tc.outputFormat) if tc.expectNoMatch { - if !printers.IsNoCompatiblePrinterError(err) { + if !genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected no printer matches for output format %q", tc.outputFormat) } return } - if printers.IsNoCompatiblePrinterError(err) { + if genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected to match template printer for output format %q", tc.outputFormat) } diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index 383c1696f48..58de43af8ca 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -37,10 +37,10 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) // LabelOptions have the data required to perform the label operation @@ -49,8 +49,8 @@ type LabelOptions struct { resource.FilenameOptions RecordFlags *genericclioptions.RecordFlags - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags 
+ ToPrinter func(string) (printers.ResourcePrinter, error) // Common user flags overwrite bool @@ -115,7 +115,7 @@ func NewLabelOptions(ioStreams genericclioptions.IOStreams) *LabelOptions { RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, - PrintFlags: printers.NewPrintFlags("labeled").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("labeled").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } @@ -168,17 +168,13 @@ func (o *LabelOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st o.outputFormat = cmdutil.GetFlagString(cmd, "output") o.dryrun = cmdutil.GetDryRunFlag(cmd) - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation if o.dryrun { o.PrintFlags.Complete("%s (dry run)") } - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } resources, labelArgs, err := cmdutil.GetResourcesAndPairs(args, "label") diff --git a/pkg/kubectl/cmd/patch.go b/pkg/kubectl/cmd/patch.go index c75e8897491..e5b3fc3d9b4 100644 --- a/pkg/kubectl/cmd/patch.go +++ b/pkg/kubectl/cmd/patch.go @@ -36,10 +36,10 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) var patchTypes = map[string]types.PatchType{"json": types.JSONPatchType, "merge": types.MergePatchType, "strategic": types.StrategicMergePatchType} @@ -50,8 +50,8 @@ type PatchOptions struct { resource.FilenameOptions RecordFlags *genericclioptions.RecordFlags - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) Recorder genericclioptions.Recorder Local bool @@ -98,7 +98,7 @@ func NewPatchOptions(ioStreams genericclioptions.IOStreams) *PatchOptions { return &PatchOptions{ RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, - PrintFlags: printers.NewPrintFlags("patched").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("patched").WithTypeSetter(scheme.Scheme), IOStreams: ioStreams, } } @@ -143,17 +143,13 @@ func (o *PatchOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st o.outputFormat = cmdutil.GetFlagString(cmd, "output") o.dryRun = cmdutil.GetFlagBool(cmd, "dry-run") - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation if o.dryRun { o.PrintFlags.Complete("%s (dry run)") } - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } o.namespace, o.enforceNamespace, err = f.DefaultNamespace() diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index 88c689c3be5..70131091f57 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -36,7 +36,6 @@ import ( 
"k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/kubectl/validation" - "k8s.io/kubernetes/pkg/printers" ) var ( @@ -65,7 +64,7 @@ var ( ) type ReplaceOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags DeleteFlags *DeleteFlags RecordFlags *genericclioptions.RecordFlags @@ -94,9 +93,9 @@ func NewReplaceOptions(streams genericclioptions.IOStreams) *ReplaceOptions { return &ReplaceOptions{ // TODO(juanvallejo): figure out why we only support the "name" outputFormat in this command // we only support "-o name" for this command, so only register the name printer - PrintFlags: &printers.PrintFlags{ + PrintFlags: &genericclioptions.PrintFlags{ OutputFormat: &outputFormat, - NamePrintFlags: printers.NewNamePrintFlags("replaced"), + NamePrintFlags: genericclioptions.NewNamePrintFlags("replaced"), }, DeleteFlags: NewDeleteFlags("to use to replace the resource."), diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index 395a912df26..24cde12b467 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -37,12 +37,12 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/kubectl/validation" - "k8s.io/kubernetes/pkg/printers" ) var ( @@ -107,15 +107,15 @@ type RollingUpdateOptions struct { FindNewName func(*api.ReplicationController) string - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) genericclioptions.IOStreams } func NewRollingUpdateOptions(streams genericclioptions.IOStreams) *RollingUpdateOptions { return &RollingUpdateOptions{ - PrintFlags: printers.NewPrintFlags("rolling updated").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("rolling updated").WithTypeSetter(scheme.Scheme), FilenameOptions: &resource.FilenameOptions{}, DeploymentKey: "deployment", Timeout: timeout, @@ -226,18 +226,13 @@ func (o *RollingUpdateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, a o.Builder = f.NewBuilder() - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation if o.DryRun { o.PrintFlags.Complete("%s (dry run)") } - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } return nil } diff --git a/pkg/kubectl/cmd/rollout/BUILD b/pkg/kubectl/cmd/rollout/BUILD index f87d5c9a1e6..652bc084f72 100644 --- a/pkg/kubectl/cmd/rollout/BUILD +++ b/pkg/kubectl/cmd/rollout/BUILD @@ -25,11 +25,11 @@ go_library( "//pkg/kubectl/cmd/templates:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/polymorphichelpers:go_default_library", 
"//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util/i18n:go_default_library", - "//pkg/printers:go_default_library", "//pkg/util/interrupt:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index 907439bd93c..65888248cd9 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -28,18 +28,18 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) // PauseConfig is the start of the data required to perform the operation. As new fields are added, add them here instead of // referencing the cmd.Flags() type PauseConfig struct { resource.FilenameOptions - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) Pauser func(info *resource.Info) ([]byte, error) Infos []*resource.Info @@ -64,7 +64,7 @@ var ( func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := &PauseConfig{ - PrintFlags: printers.NewPrintFlags("paused").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("paused").WithTypeSetter(scheme.Scheme), IOStreams: streams, } @@ -122,14 +122,9 @@ func (o *PauseConfig) CompletePause(f cmdutil.Factory, cmd *cobra.Command, args return err } - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } o.Infos, err = r.Infos() diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index 030262fa182..16dde958d83 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -28,18 +28,18 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) // ResumeConfig is the start of the data required to perform the operation. 
As new fields are added, add them here instead of // referencing the cmd.Flags() type ResumeConfig struct { resource.FilenameOptions - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) Resumer func(object *resource.Info) ([]byte, error) Infos []*resource.Info @@ -62,7 +62,7 @@ var ( func NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := &ResumeConfig{ - PrintFlags: printers.NewPrintFlags("resumed").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("resumed").WithTypeSetter(scheme.Scheme), IOStreams: streams, } @@ -106,14 +106,9 @@ func (o *ResumeConfig) CompleteResume(f cmdutil.Factory, cmd *cobra.Command, arg return err } - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } r := f.NewBuilder(). diff --git a/pkg/kubectl/cmd/rollout/rollout_undo.go b/pkg/kubectl/cmd/rollout/rollout_undo.go index e78252d96f3..6d5f4e28b09 100644 --- a/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -26,10 +26,11 @@ import ( "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) // UndoOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -37,8 +38,8 @@ import ( type UndoOptions struct { resource.FilenameOptions - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) Rollbackers []kubectl.Rollbacker Infos []*resource.Info @@ -65,7 +66,7 @@ var ( func NewCmdRolloutUndo(f cmdutil.Factory, out io.Writer) *cobra.Command { o := &UndoOptions{ - PrintFlags: printers.NewPrintFlags("").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme), ToRevision: int64(0), } @@ -112,17 +113,12 @@ func (o *UndoOptions) CompleteUndo(f cmdutil.Factory, cmd *cobra.Command, out io return err } - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation if o.DryRun { o.PrintFlags.Complete("%s (dry run)") } - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } r := f.NewBuilder(). 
diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index 75b46b9f1c9..e19fea9f546 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -20,12 +20,11 @@ import ( "fmt" "io" - "k8s.io/client-go/dynamic" - "k8s.io/kubernetes/pkg/printers" - "github.com/docker/distribution/reference" "github.com/spf13/cobra" + "k8s.io/client-go/dynamic" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -98,7 +97,7 @@ type RunObject struct { } type RunOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags DeleteFlags *DeleteFlags DeleteOptions *DeleteOptions RecordFlags *genericclioptions.RecordFlags @@ -127,7 +126,7 @@ type RunOptions struct { func NewRunOptions(streams genericclioptions.IOStreams) *RunOptions { return &RunOptions{ - PrintFlags: printers.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), DeleteFlags: NewDeleteFlags("to use to replace the resource."), RecordFlags: genericclioptions.NewRecordFlags(), diff --git a/pkg/kubectl/cmd/run_test.go b/pkg/kubectl/cmd/run_test.go index c41a8c92344..a628412afb0 100644 --- a/pkg/kubectl/cmd/run_test.go +++ b/pkg/kubectl/cmd/run_test.go @@ -42,7 +42,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) // This init should be removed after switching this command and its tests to user external types. @@ -198,7 +197,7 @@ func TestRunArgsFollowDashRules(t *testing.T) { cmd.Flags().Set("image", "nginx") cmd.Flags().Set("generator", "run/v1") - printFlags := printers.NewPrintFlags("created").WithTypeSetter(scheme.Scheme) + printFlags := genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme) printer, err := printFlags.ToPrinter() if err != nil { t.Errorf("unexpected error: %v", err) @@ -366,7 +365,7 @@ func TestGenerateService(t *testing.T) { }), } - printFlags := printers.NewPrintFlags("created").WithTypeSetter(scheme.Scheme) + printFlags := genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme) printer, err := printFlags.ToPrinter() if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index 7f4e321794a..c7c908a6537 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -33,9 +33,9 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) var ( @@ -68,7 +68,7 @@ var ( type ScaleOptions struct { FilenameOptions resource.FilenameOptions RecordFlags *genericclioptions.RecordFlags - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags PrintObj printers.ResourcePrinterFunc Selector string @@ -98,9 +98,9 @@ func NewScaleOptions(ioStreams genericclioptions.IOStreams) *ScaleOptions { return &ScaleOptions{ // TODO(juanvallejo): figure out why we only support the "name" outputFormat in this command // we only support "-o name" for this command, so only register the name printer - PrintFlags: &printers.PrintFlags{ + PrintFlags: &genericclioptions.PrintFlags{ OutputFormat: &outputFormat, - NamePrintFlags: 
printers.NewNamePrintFlags("scaled"), + NamePrintFlags: genericclioptions.NewNamePrintFlags("scaled"), }, RecordFlags: genericclioptions.NewRecordFlags(), CurrentReplicas: -1, diff --git a/pkg/kubectl/cmd/set/BUILD b/pkg/kubectl/cmd/set/BUILD index 161f7ac79a9..ea718f95416 100644 --- a/pkg/kubectl/cmd/set/BUILD +++ b/pkg/kubectl/cmd/set/BUILD @@ -25,10 +25,10 @@ go_library( "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/cmd/util/env:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util/i18n:go_default_library", - "//pkg/printers:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -69,7 +69,6 @@ go_test( "//pkg/kubectl/genericclioptions:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", - "//pkg/printers:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", diff --git a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index 90f2f22f92b..7153a2fa13e 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -34,9 +34,9 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" envutil "k8s.io/kubernetes/pkg/kubectl/cmd/util/env" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/printers" ) var ( @@ -93,7 +93,7 @@ var ( ) type EnvOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags resource.FilenameOptions EnvParams []string @@ -126,7 +126,7 @@ type EnvOptions struct { // pod templates are selected by default and allowing environment to be overwritten func NewEnvOptions(streams genericclioptions.IOStreams) *EnvOptions { return &EnvOptions{ - PrintFlags: printers.NewPrintFlags("env updated").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("env updated").WithTypeSetter(scheme.Scheme), ContainerSelector: "*", Overwrite: true, diff --git a/pkg/kubectl/cmd/set/set_env_test.go b/pkg/kubectl/cmd/set/set_env_test.go index 67eb342db4e..c4d453b83f5 100644 --- a/pkg/kubectl/cmd/set/set_env_test.go +++ b/pkg/kubectl/cmd/set/set_env_test.go @@ -42,7 +42,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/printers" ) func TestSetEnvLocal(t *testing.T) { @@ -63,7 +62,7 @@ func TestSetEnvLocal(t *testing.T) { streams, _, buf, bufErr := genericclioptions.NewTestIOStreams() opts := NewEnvOptions(streams) - opts.PrintFlags = printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme) + opts.PrintFlags = genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme) opts.FilenameOptions = resource.FilenameOptions{ Filenames: []string{"../../../../test/e2e/testing-manifests/statefulset/cassandra/controller.yaml"}, } @@ -101,7 +100,7 @@ func 
TestSetMultiResourcesEnvLocal(t *testing.T) { outputFormat := "name" streams, _, buf, bufErr := genericclioptions.NewTestIOStreams() opts := NewEnvOptions(streams) - opts.PrintFlags = printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme) + opts.PrintFlags = genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme) opts.FilenameOptions = resource.FilenameOptions{ Filenames: []string{"../../../../test/fixtures/pkg/kubectl/cmd/set/multi-resource-yaml.yaml"}, } @@ -483,7 +482,7 @@ func TestSetEnvRemote(t *testing.T) { outputFormat := "yaml" streams := genericclioptions.NewTestIOStreamsDiscard() opts := NewEnvOptions(streams) - opts.PrintFlags = printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme) + opts.PrintFlags = genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme) opts.Local = false opts.IOStreams = streams err := opts.Complete(tf, NewCmdEnv(tf, streams), input.args) diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 76dd52646b7..7ff65c6c235 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -29,10 +29,10 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) // ImageOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of @@ -40,7 +40,7 @@ import ( type SetImageOptions struct { resource.FilenameOptions - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags Infos []*resource.Info @@ -87,7 +87,7 @@ var ( func NewImageOptions(streams genericclioptions.IOStreams) *SetImageOptions { return &SetImageOptions{ - PrintFlags: printers.NewPrintFlags("image updated").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("image updated").WithTypeSetter(scheme.Scheme), RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, diff --git a/pkg/kubectl/cmd/set/set_image_test.go b/pkg/kubectl/cmd/set/set_image_test.go index 50cf1e3b1d0..fb383be767d 100644 --- a/pkg/kubectl/cmd/set/set_image_test.go +++ b/pkg/kubectl/cmd/set/set_image_test.go @@ -43,7 +43,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/printers" ) func TestImageLocal(t *testing.T) { @@ -70,7 +69,7 @@ func TestImageLocal(t *testing.T) { cmd.Flags().Set("local", "true") opts := SetImageOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), FilenameOptions: resource.FilenameOptions{ Filenames: []string{"../../../../test/e2e/testing-manifests/statefulset/cassandra/controller.yaml"}}, Local: true, @@ -92,7 +91,7 @@ func TestImageLocal(t *testing.T) { } func TestSetImageValidation(t *testing.T) { - printFlags := printers.NewPrintFlags("").WithTypeSetter(scheme.Scheme) + printFlags := 
genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme) testCases := []struct { name string @@ -183,7 +182,7 @@ func TestSetMultiResourcesImageLocal(t *testing.T) { cmd.Flags().Set("local", "true") opts := SetImageOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), FilenameOptions: resource.FilenameOptions{ Filenames: []string{"../../../../test/fixtures/pkg/kubectl/cmd/set/multi-resource-yaml.yaml"}}, Local: true, @@ -570,7 +569,7 @@ func TestSetImageRemote(t *testing.T) { cmd := NewCmdImage(tf, streams) cmd.Flags().Set("output", outputFormat) opts := SetImageOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), Local: false, IOStreams: streams, diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 0fd6bc5b310..5fbc9eccf03 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -20,8 +20,6 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/printers" - "github.com/spf13/cobra" "k8s.io/api/core/v1" @@ -33,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" @@ -65,7 +64,7 @@ var ( type SetResourcesOptions struct { resource.FilenameOptions - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags Infos []*resource.Info @@ -94,7 +93,7 @@ type SetResourcesOptions struct { // pod templates are selected by default. 
func NewResourcesOptions(streams genericclioptions.IOStreams) *SetResourcesOptions { return &SetResourcesOptions{ - PrintFlags: printers.NewPrintFlags("resource requirements updated").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("resource requirements updated").WithTypeSetter(scheme.Scheme), RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, diff --git a/pkg/kubectl/cmd/set/set_resources_test.go b/pkg/kubectl/cmd/set/set_resources_test.go index cea402abd71..fe630782649 100644 --- a/pkg/kubectl/cmd/set/set_resources_test.go +++ b/pkg/kubectl/cmd/set/set_resources_test.go @@ -24,8 +24,6 @@ import ( "strings" "testing" - "k8s.io/kubernetes/pkg/printers" - "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -70,7 +68,7 @@ func TestResourcesLocal(t *testing.T) { cmd.Flags().Set("local", "true") opts := SetResourcesOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), FilenameOptions: resource.FilenameOptions{ Filenames: []string{"../../../../test/e2e/testing-manifests/statefulset/cassandra/controller.yaml"}}, Local: true, @@ -119,7 +117,7 @@ func TestSetMultiResourcesLimitsLocal(t *testing.T) { cmd.Flags().Set("local", "true") opts := SetResourcesOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), FilenameOptions: resource.FilenameOptions{ Filenames: []string{"../../../../test/fixtures/pkg/kubectl/cmd/set/multi-resource-yaml.yaml"}}, Local: true, @@ -494,7 +492,7 @@ func TestSetResourcesRemote(t *testing.T) { cmd := NewCmdResources(tf, streams) cmd.Flags().Set("output", outputFormat) opts := SetResourcesOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), Limits: "cpu=200m,memory=512Mi", ContainerSelector: "*", diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index 5ed87e22ac3..ce8cca2a16f 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -19,8 +19,6 @@ package set import ( "fmt" - "k8s.io/kubernetes/pkg/printers" - "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/api/core/v1" @@ -32,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" @@ -42,7 +41,7 @@ import ( type SetSelectorOptions struct { fileOptions resource.FilenameOptions - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags local bool @@ -79,7 +78,7 @@ var ( func NewSelectorOptions(streams genericclioptions.IOStreams) *SetSelectorOptions { return &SetSelectorOptions{ - PrintFlags: printers.NewPrintFlags("selector updated").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("selector 
updated").WithTypeSetter(scheme.Scheme), RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index d06184ddee1..5a85b43b693 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -20,8 +20,6 @@ import ( "errors" "fmt" - "k8s.io/kubernetes/pkg/printers" - "github.com/spf13/cobra" "github.com/golang/glog" @@ -32,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" @@ -58,7 +57,7 @@ var ( // serviceAccountConfig encapsulates the data required to perform the operation. type SetServiceAccountOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags fileNameOptions resource.FilenameOptions @@ -79,7 +78,7 @@ type SetServiceAccountOptions struct { func NewSetServiceAccountOptions(streams genericclioptions.IOStreams) *SetServiceAccountOptions { return &SetServiceAccountOptions{ - PrintFlags: printers.NewPrintFlags("serviceaccount updated").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("serviceaccount updated").WithTypeSetter(scheme.Scheme), RecordFlags: genericclioptions.NewRecordFlags(), Recorder: genericclioptions.NoopRecorder{}, diff --git a/pkg/kubectl/cmd/set/set_serviceaccount_test.go b/pkg/kubectl/cmd/set/set_serviceaccount_test.go index a4ca8658660..f2e06bd7bb6 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount_test.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount_test.go @@ -25,8 +25,6 @@ import ( "path" "testing" - "k8s.io/kubernetes/pkg/printers" - "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -90,7 +88,7 @@ func TestSetServiceAccountLocal(t *testing.T) { cmd.Flags().Set("local", "true") testapi.Default = testapi.Groups[input.apiGroup] saConfig := SetServiceAccountOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), fileNameOptions: resource.FilenameOptions{ Filenames: []string{input.yaml}}, local: true, @@ -128,7 +126,7 @@ func TestSetServiceAccountMultiLocal(t *testing.T) { cmd.Flags().Set("output", outputFormat) cmd.Flags().Set("local", "true") opts := SetServiceAccountOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), fileNameOptions: resource.FilenameOptions{ Filenames: []string{"../../../../test/fixtures/pkg/kubectl/cmd/set/multi-resource-yaml.yaml"}}, local: true, @@ -365,7 +363,7 @@ func TestSetServiceAccountRemote(t *testing.T) { cmd := NewCmdServiceAccount(tf, streams) cmd.Flags().Set("output", outputFormat) saConfig := SetServiceAccountOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), local: false, 
IOStreams: streams, @@ -407,7 +405,7 @@ func TestServiceAccountValidation(t *testing.T) { cmd := NewCmdServiceAccount(tf, streams) saConfig := &SetServiceAccountOptions{ - PrintFlags: printers.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme), IOStreams: streams, } err := saConfig.Complete(tf, cmd, input.args) diff --git a/pkg/kubectl/cmd/set/set_subject.go b/pkg/kubectl/cmd/set/set_subject.go index ce55ea76309..10fd4031e10 100644 --- a/pkg/kubectl/cmd/set/set_subject.go +++ b/pkg/kubectl/cmd/set/set_subject.go @@ -31,10 +31,10 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" ) var ( @@ -57,7 +57,7 @@ type updateSubjects func(existings []rbacv1.Subject, targets []rbacv1.Subject) ( // SubjectOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of // referencing the cmd.Flags type SubjectOptions struct { - PrintFlags *printers.PrintFlags + PrintFlags *genericclioptions.PrintFlags resource.FilenameOptions @@ -82,7 +82,7 @@ type SubjectOptions struct { func NewSubjectOptions(streams genericclioptions.IOStreams) *SubjectOptions { return &SubjectOptions{ - PrintFlags: printers.NewPrintFlags("subjects updated").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("subjects updated").WithTypeSetter(scheme.Scheme), IOStreams: streams, } diff --git a/pkg/kubectl/cmd/taint.go b/pkg/kubectl/cmd/taint.go index af626ad1b02..e4c68dd4340 100644 --- a/pkg/kubectl/cmd/taint.go +++ b/pkg/kubectl/cmd/taint.go @@ -34,17 +34,17 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" - "k8s.io/kubernetes/pkg/printers" taintutils "k8s.io/kubernetes/pkg/util/taints" ) // TaintOptions have the data required to perform the taint operation type TaintOptions struct { - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) resources []string taintsToAdd []v1.Taint @@ -87,7 +87,7 @@ var ( func NewCmdTaint(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { options := &TaintOptions{ - PrintFlags: printers.NewPrintFlags("tainted").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("tainted").WithTypeSetter(scheme.Scheme), IOStreams: streams, } @@ -148,14 +148,9 @@ func (o *TaintOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st } } - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - - return printer.PrintObj, nil + return 
o.PrintFlags.ToPrinter() } if len(o.resources) < 1 { diff --git a/pkg/kubectl/cmd/util/editor/BUILD b/pkg/kubectl/cmd/util/editor/BUILD index 2a0e78cf351..06cf3472f53 100644 --- a/pkg/kubectl/cmd/util/editor/BUILD +++ b/pkg/kubectl/cmd/util/editor/BUILD @@ -19,11 +19,11 @@ go_library( "//pkg/kubectl:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util/crlf:go_default_library", "//pkg/kubectl/util/term:go_default_library", - "//pkg/printers:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/pkg/kubectl/cmd/util/editor/editoptions.go b/pkg/kubectl/cmd/util/editor/editoptions.go index a9c9251c2f4..990ae53e58a 100644 --- a/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/pkg/kubectl/cmd/util/editor/editoptions.go @@ -46,10 +46,10 @@ import ( "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/crlf" - "k8s.io/kubernetes/pkg/printers" ) // EditOptions contains all the options for running edit cli command. @@ -57,8 +57,8 @@ type EditOptions struct { resource.FilenameOptions RecordFlags *genericclioptions.RecordFlags - PrintFlags *printers.PrintFlags - ToPrinter func(string) (printers.ResourcePrinterFunc, error) + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) Output string OutputPatch bool @@ -88,7 +88,7 @@ func NewEditOptions(editMode EditMode, ioStreams genericclioptions.IOStreams) *E EditMode: editMode, - PrintFlags: printers.NewPrintFlags("edited").WithTypeSetter(scheme.Scheme), + PrintFlags: genericclioptions.NewPrintFlags("edited").WithTypeSetter(scheme.Scheme), WindowsLineEndings: goruntime.GOOS == "windows", @@ -163,13 +163,9 @@ func (o *EditOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Comm Do() } - o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - printer, err := o.PrintFlags.ToPrinter() - if err != nil { - return nil, err - } - return printer.PrintObj, nil + return o.PrintFlags.ToPrinter() } o.CmdNamespace = cmdNamespace diff --git a/pkg/kubectl/genericclioptions/BUILD b/pkg/kubectl/genericclioptions/BUILD index 652f846874d..66867c87ec5 100644 --- a/pkg/kubectl/genericclioptions/BUILD +++ b/pkg/kubectl/genericclioptions/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -7,11 +7,15 @@ go_library( "config_flags_fake.go", "doc.go", "io_options.go", + "json_yaml_flags.go", + "name_flags.go", + "print_flags.go", "record_flags.go", ], importpath = "k8s.io/kubernetes/pkg/kubectl/genericclioptions", visibility = ["//visibility:public"], deps = [ + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", 
"//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", @@ -37,8 +41,22 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/kubectl/genericclioptions/printers:all-srcs", "//pkg/kubectl/genericclioptions/resource:all-srcs", ], tags = ["automanaged"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_test", + srcs = [ + "json_yaml_flags_test.go", + "name_flags_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/pkg/printers/json_yaml_flags.go b/pkg/kubectl/genericclioptions/json_yaml_flags.go similarity index 85% rename from pkg/printers/json_yaml_flags.go rename to pkg/kubectl/genericclioptions/json_yaml_flags.go index 3176e4aae21..91a86e43e9c 100644 --- a/pkg/printers/json_yaml_flags.go +++ b/pkg/kubectl/genericclioptions/json_yaml_flags.go @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package printers +package genericclioptions import ( "strings" "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" ) // JSONYamlPrintFlags provides default flags necessary for json/yaml printing. @@ -32,15 +34,15 @@ type JSONYamlPrintFlags struct { // handling --output=(yaml|json) printing. // Returns false if the specified outputFormat does not match a supported format. // Supported Format types can be found in pkg/printers/printers.go -func (f *JSONYamlPrintFlags) ToPrinter(outputFormat string) (ResourcePrinter, error) { - var printer ResourcePrinter +func (f *JSONYamlPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { + var printer printers.ResourcePrinter outputFormat = strings.ToLower(outputFormat) switch outputFormat { case "json": - printer = &JSONPrinter{} + printer = &printers.JSONPrinter{} case "yaml": - printer = &YAMLPrinter{} + printer = &printers.YAMLPrinter{} default: return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &outputFormat} } diff --git a/pkg/printers/json_yaml_flags_test.go b/pkg/kubectl/genericclioptions/json_yaml_flags_test.go similarity index 91% rename from pkg/printers/json_yaml_flags_test.go rename to pkg/kubectl/genericclioptions/json_yaml_flags_test.go index 2582b8c0894..7ab5b7f1dcb 100644 --- a/pkg/printers/json_yaml_flags_test.go +++ b/pkg/kubectl/genericclioptions/json_yaml_flags_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package printers_test +package genericclioptions import ( "bytes" @@ -23,7 +23,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/printers" ) func TestPrinterSupportsExpectedJSONYamlFormats(t *testing.T) { @@ -62,16 +61,16 @@ func TestPrinterSupportsExpectedJSONYamlFormats(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - printFlags := printers.JSONYamlPrintFlags{} + printFlags := JSONYamlPrintFlags{} p, err := printFlags.ToPrinter(tc.outputFormat) if tc.expectNoMatch { - if !printers.IsNoCompatiblePrinterError(err) { + if !IsNoCompatiblePrinterError(err) { t.Fatalf("expected no printer matches for output format %q", tc.outputFormat) } return } - if printers.IsNoCompatiblePrinterError(err) { + if IsNoCompatiblePrinterError(err) { t.Fatalf("expected to match template printer for output format %q", tc.outputFormat) } if err != nil { diff --git a/pkg/printers/name_flags.go b/pkg/kubectl/genericclioptions/name_flags.go similarity index 90% rename from pkg/printers/name_flags.go rename to pkg/kubectl/genericclioptions/name_flags.go index 1f8aad188c6..5b103e73dd7 100644 --- a/pkg/printers/name_flags.go +++ b/pkg/kubectl/genericclioptions/name_flags.go @@ -14,13 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package printers +package genericclioptions import ( "fmt" "strings" "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" ) // NamePrintFlags provides default flags necessary for printing @@ -42,8 +44,8 @@ func (f *NamePrintFlags) Complete(successTemplate string) error { // handling --output=name printing. // Returns false if the specified outputFormat does not match a supported format. // Supported format types can be found in pkg/printers/printers.go -func (f *NamePrintFlags) ToPrinter(outputFormat string) (ResourcePrinter, error) { - namePrinter := &NamePrinter{ +func (f *NamePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { + namePrinter := &printers.NamePrinter{ Operation: f.Operation, } diff --git a/pkg/printers/name_flags_test.go b/pkg/kubectl/genericclioptions/name_flags_test.go similarity index 93% rename from pkg/printers/name_flags_test.go rename to pkg/kubectl/genericclioptions/name_flags_test.go index 5b2267c505a..6f64fad9498 100644 --- a/pkg/printers/name_flags_test.go +++ b/pkg/kubectl/genericclioptions/name_flags_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package printers_test +package genericclioptions import ( "bytes" @@ -23,7 +23,6 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/printers" ) func TestNamePrinterSupportsExpectedFormats(t *testing.T) { @@ -78,18 +77,18 @@ func TestNamePrinterSupportsExpectedFormats(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - printFlags := printers.NamePrintFlags{ + printFlags := NamePrintFlags{ Operation: tc.operation, } p, err := printFlags.ToPrinter(tc.outputFormat) if tc.expectNoMatch { - if !printers.IsNoCompatiblePrinterError(err) { + if !IsNoCompatiblePrinterError(err) { t.Fatalf("expected no printer matches for output format %q", tc.outputFormat) } return } - if printers.IsNoCompatiblePrinterError(err) { + if IsNoCompatiblePrinterError(err) { t.Fatalf("expected to match name printer for output format %q", tc.outputFormat) } diff --git a/pkg/printers/flags.go b/pkg/kubectl/genericclioptions/print_flags.go similarity index 72% rename from pkg/printers/flags.go rename to pkg/kubectl/genericclioptions/print_flags.go index 83bbc228a69..981f3697f07 100644 --- a/pkg/printers/flags.go +++ b/pkg/kubectl/genericclioptions/print_flags.go @@ -14,28 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package printers +package genericclioptions import ( "fmt" - "strings" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" ) -var ( - internalObjectPrinterErr = "a versioned object must be passed to a printer" - - // disallowedPackagePrefixes contains regular expression templates - // for object package paths that are not allowed by printers. - disallowedPackagePrefixes = []string{ - "k8s.io/kubernetes/pkg/apis/", - } -) - -var internalObjectPreventer = &illegalPackageSourceChecker{disallowedPackagePrefixes} - type NoCompatiblePrinterError struct { OutputFormat *string Options interface{} @@ -59,14 +47,6 @@ func IsNoCompatiblePrinterError(err error) bool { return ok } -func IsInternalObjectError(err error) bool { - if err == nil { - return false - } - - return err.Error() == internalObjectPrinterErr -} - // PrintFlags composes common printer flag structs // used across all commands, and provides a method // of retrieving a known printer based on flag values provided. 
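For context, the PrintFlags struct documented in the comment above is the piece every command in this series now builds through genericclioptions. A minimal sketch of how it is wired up, assuming the post-move import paths and using only the helpers visible in these hunks (NewPrintFlags, WithTypeSetter, WithDefaultOutput, ToPrinter); the code is illustrative and not part of the patch:

package main

import (
	"os"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
	"k8s.io/kubernetes/pkg/kubectl/scheme"
)

func main() {
	// Compose the flags the way the commands above do: an operation string for
	// the name printer, type-setting against the kubectl scheme, and a fixed
	// output format instead of binding cobra flags.
	flags := genericclioptions.NewPrintFlags("configured").
		WithTypeSetter(scheme.Scheme).
		WithDefaultOutput("yaml")

	printer, err := flags.ToPrinter()
	if err != nil {
		panic(err)
	}

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	if err := printer.PrintObj(pod, os.Stdout); err != nil {
		panic(err)
	}
}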
@@ -74,7 +54,7 @@ type PrintFlags struct { JSONYamlPrintFlags *JSONYamlPrintFlags NamePrintFlags *NamePrintFlags - TypeSetterPrinter *TypeSetterPrinter + TypeSetterPrinter *printers.TypeSetterPrinter OutputFormat *string } @@ -83,7 +63,7 @@ func (f *PrintFlags) Complete(successTemplate string) error { return f.NamePrintFlags.Complete(successTemplate) } -func (f *PrintFlags) ToPrinter() (ResourcePrinter, error) { +func (f *PrintFlags) ToPrinter() (printers.ResourcePrinter, error) { outputFormat := "" if f.OutputFormat != nil { outputFormat = *f.OutputFormat @@ -121,7 +101,7 @@ func (f *PrintFlags) WithDefaultOutput(output string) *PrintFlags { // WithTypeSetter sets a wrapper than will surround the returned printer with a printer to type resources func (f *PrintFlags) WithTypeSetter(scheme *runtime.Scheme) *PrintFlags { - f.TypeSetterPrinter = NewTypeSetter(scheme) + f.TypeSetterPrinter = printers.NewTypeSetter(scheme) return f } @@ -135,22 +115,3 @@ func NewPrintFlags(operation string) *PrintFlags { NamePrintFlags: NewNamePrintFlags(operation), } } - -// illegalPackageSourceChecker compares a given -// object's package path, and determines if the -// object originates from a disallowed source. -type illegalPackageSourceChecker struct { - // disallowedPrefixes is a slice of disallowed package path - // prefixes for a given runtime.Object that we are printing. - disallowedPrefixes []string -} - -func (c *illegalPackageSourceChecker) IsForbidden(pkgPath string) bool { - for _, forbiddenPrefix := range c.disallowedPrefixes { - if strings.HasPrefix(pkgPath, forbiddenPrefix) { - return true - } - } - - return false -} diff --git a/pkg/kubectl/genericclioptions/printers/BUILD b/pkg/kubectl/genericclioptions/printers/BUILD new file mode 100644 index 00000000000..f62c55053ea --- /dev/null +++ b/pkg/kubectl/genericclioptions/printers/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "interface.go", + "json.go", + "name.go", + "sourcechecker.go", + "typesetter.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["sourcechecker_test.go"], + embed = [":go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/kubectl/genericclioptions/printers/interface.go b/pkg/kubectl/genericclioptions/printers/interface.go new file mode 100644 index 00000000000..a42a136a98d --- /dev/null +++ b/pkg/kubectl/genericclioptions/printers/interface.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" +) + +// ResourcePrinterFunc is a function that can print objects +type ResourcePrinterFunc func(runtime.Object, io.Writer) error + +// ResourcePrinter is an interface that knows how to print runtime objects. +type ResourcePrinter interface { + // Print receives a runtime object, formats it and prints it to a writer. + PrintObj(runtime.Object, io.Writer) error +} diff --git a/pkg/printers/json.go b/pkg/kubectl/genericclioptions/printers/json.go similarity index 93% rename from pkg/printers/json.go rename to pkg/kubectl/genericclioptions/printers/json.go index ec52bd6d2af..1df9a864665 100644 --- a/pkg/printers/json.go +++ b/pkg/kubectl/genericclioptions/printers/json.go @@ -36,8 +36,8 @@ func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // we use reflect.Indirect here in order to obtain the actual value from a pointer. // we need an actual value in order to retrieve the package path for an object. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. - if internalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(internalObjectPrinterErr) + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) } switch obj := obj.(type) { @@ -78,8 +78,8 @@ func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // we use reflect.Indirect here in order to obtain the actual value from a pointer. // we need an actual value in order to retrieve the package path for an object. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. - if internalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(internalObjectPrinterErr) + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) } switch obj := obj.(type) { diff --git a/pkg/printers/name.go b/pkg/kubectl/genericclioptions/printers/name.go similarity index 97% rename from pkg/printers/name.go rename to pkg/kubectl/genericclioptions/printers/name.go index 5aa694bfa9c..d04c5c6bbc7 100644 --- a/pkg/printers/name.go +++ b/pkg/kubectl/genericclioptions/printers/name.go @@ -45,8 +45,8 @@ func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { // we use reflect.Indirect here in order to obtain the actual value from a pointer. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. // we need an actual value in order to retrieve the package path for an object. 
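The interface.go file added above is intentionally small: PrintObj(runtime.Object, io.Writer) error is the whole contract, and the JSON, YAML, name and type-setter printers that move into this package all just implement it. A small illustrative sketch of a caller-defined printer against that interface, assuming the post-move import path; it is not part of the patch:

package main

import (
	"fmt"
	"io"
	"os"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers"
)

// typeOnly is a toy printer: implementing PrintObj is all it takes to be
// usable anywhere the commands above expect a printers.ResourcePrinter.
type typeOnly struct{}

func (typeOnly) PrintObj(obj runtime.Object, w io.Writer) error {
	_, err := fmt.Fprintf(w, "%T\n", obj)
	return err
}

func main() {
	var p printers.ResourcePrinter = typeOnly{}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	if err := p.PrintObj(pod, os.Stdout); err != nil {
		panic(err)
	}
}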
- if internalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(internalObjectPrinterErr) + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) } if meta.IsListType(obj) { diff --git a/pkg/kubectl/genericclioptions/printers/sourcechecker.go b/pkg/kubectl/genericclioptions/printers/sourcechecker.go new file mode 100644 index 00000000000..11d672cc910 --- /dev/null +++ b/pkg/kubectl/genericclioptions/printers/sourcechecker.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "strings" +) + +var ( + InternalObjectPrinterErr = "a versioned object must be passed to a printer" + + // disallowedPackagePrefixes contains regular expression templates + // for object package paths that are not allowed by printers. + disallowedPackagePrefixes = []string{ + "k8s.io/kubernetes/pkg/apis/", + } +) + +var InternalObjectPreventer = &illegalPackageSourceChecker{disallowedPackagePrefixes} + +func IsInternalObjectError(err error) bool { + if err == nil { + return false + } + + return err.Error() == InternalObjectPrinterErr +} + +// illegalPackageSourceChecker compares a given +// object's package path, and determines if the +// object originates from a disallowed source. +type illegalPackageSourceChecker struct { + // disallowedPrefixes is a slice of disallowed package path + // prefixes for a given runtime.Object that we are printing. 
+ disallowedPrefixes []string +} + +func (c *illegalPackageSourceChecker) IsForbidden(pkgPath string) bool { + for _, forbiddenPrefix := range c.disallowedPrefixes { + if strings.HasPrefix(pkgPath, forbiddenPrefix) { + return true + } + } + + return false +} diff --git a/pkg/printers/flags_test.go b/pkg/kubectl/genericclioptions/printers/sourcechecker_test.go similarity index 100% rename from pkg/printers/flags_test.go rename to pkg/kubectl/genericclioptions/printers/sourcechecker_test.go diff --git a/pkg/printers/typesetter.go b/pkg/kubectl/genericclioptions/printers/typesetter.go similarity index 100% rename from pkg/printers/typesetter.go rename to pkg/kubectl/genericclioptions/printers/typesetter.go diff --git a/pkg/printers/BUILD b/pkg/printers/BUILD index cf60c45f983..1b2d8c111b9 100644 --- a/pkg/printers/BUILD +++ b/pkg/printers/BUILD @@ -11,25 +11,20 @@ go_library( srcs = [ "customcolumn.go", "customcolumn_flags.go", - "flags.go", "humanreadable.go", "interface.go", - "json.go", - "json_yaml_flags.go", "jsonpath.go", "jsonpath_flags.go", "kube_template_flags.go", - "name.go", - "name_flags.go", "tabwriter.go", "template.go", "template_flags.go", - "typesetter.go", ], importpath = "k8s.io/kubernetes/pkg/printers", deps = [ + "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/scheme:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -43,26 +38,6 @@ go_library( ], ) -go_test( - name = "go_default_xtest", - srcs = [ - "customcolumn_flags_test.go", - "customcolumn_test.go", - "json_yaml_flags_test.go", - "jsonpath_flags_test.go", - "name_flags_test.go", - "template_flags_test.go", - ], - deps = [ - ":go_default_library", - "//pkg/api/legacyscheme:go_default_library", - "//pkg/apis/core:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -83,13 +58,18 @@ filegroup( go_test( name = "go_default_test", srcs = [ - "flags_test.go", + "customcolumn_flags_test.go", + "customcolumn_test.go", "humanreadable_test.go", + "jsonpath_flags_test.go", + "template_flags_test.go", "template_test.go", ], embed = [":go_default_library"], deps = [ + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", + "//pkg/kubectl/genericclioptions:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", diff --git a/pkg/printers/customcolumn.go b/pkg/printers/customcolumn.go index 9aee658a2c9..45141e0cbae 100644 --- a/pkg/printers/customcolumn.go +++ b/pkg/printers/customcolumn.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/jsonpath" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" ) var jsonRegexp = regexp.MustCompile("^\\{\\.?([^{}]+)\\}$|^\\.?([^{}]+)$") @@ -154,8 +155,8 @@ func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error // we use reflect.Indirect here in order to obtain the actual value from a pointer. 
// we need an actual value in order to retrieve the package path for an object. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. - if internalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(internalObjectPrinterErr) + if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(printers.InternalObjectPrinterErr) } if w, found := out.(*tabwriter.Writer); !found { diff --git a/pkg/printers/customcolumn_flags.go b/pkg/printers/customcolumn_flags.go index 920835f64cd..b982942ae9b 100644 --- a/pkg/printers/customcolumn_flags.go +++ b/pkg/printers/customcolumn_flags.go @@ -23,6 +23,7 @@ import ( "github.com/spf13/cobra" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/scheme" ) @@ -39,7 +40,7 @@ type CustomColumnsPrintFlags struct { // Supported format types can be found in pkg/printers/printers.go func (f *CustomColumnsPrintFlags) ToPrinter(templateFormat string) (ResourcePrinter, error) { if len(templateFormat) == 0 { - return nil, NoCompatiblePrinterError{} + return nil, genericclioptions.NoCompatiblePrinterError{} } templateValue := "" @@ -63,7 +64,7 @@ func (f *CustomColumnsPrintFlags) ToPrinter(templateFormat string) (ResourcePrin } if _, supportedFormat := supportedFormats[templateFormat]; !supportedFormat { - return nil, NoCompatiblePrinterError{} + return nil, genericclioptions.NoCompatiblePrinterError{} } if len(templateValue) == 0 { @@ -82,8 +83,7 @@ func (f *CustomColumnsPrintFlags) ToPrinter(templateFormat string) (ResourcePrin return p, err } - p, err := NewCustomColumnsPrinterFromSpec(templateValue, decoder, f.NoHeaders) - return p, err + return NewCustomColumnsPrinterFromSpec(templateValue, decoder, f.NoHeaders) } // AddFlags receives a *cobra.Command reference and binds diff --git a/pkg/printers/customcolumn_flags_test.go b/pkg/printers/customcolumn_flags_test.go index 550d261fe19..1aed8744922 100644 --- a/pkg/printers/customcolumn_flags_test.go +++ b/pkg/printers/customcolumn_flags_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package printers_test +package printers import ( "bytes" @@ -26,7 +26,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" ) func TestPrinterSupportsExpectedCustomColumnFormats(t *testing.T) { @@ -94,18 +94,18 @@ func TestPrinterSupportsExpectedCustomColumnFormats(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - printFlags := printers.CustomColumnsPrintFlags{ + printFlags := CustomColumnsPrintFlags{ TemplateArgument: tc.templateArg, } p, err := printFlags.ToPrinter(tc.outputFormat) if tc.expectNoMatch { - if !printers.IsNoCompatiblePrinterError(err) { + if !genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected no printer matches for output format %q", tc.outputFormat) } return } - if printers.IsNoCompatiblePrinterError(err) { + if genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected to match template printer for output format %q", tc.outputFormat) } diff --git a/pkg/printers/customcolumn_test.go b/pkg/printers/customcolumn_test.go index fd4a28c00b0..d01c9c1235f 100644 --- a/pkg/printers/customcolumn_test.go +++ b/pkg/printers/customcolumn_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package printers_test +package printers import ( "bytes" @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/printers" ) func TestMassageJSONPath(t *testing.T) { @@ -48,7 +47,7 @@ func TestMassageJSONPath(t *testing.T) { } for _, test := range tests { t.Run(test.input, func(t *testing.T) { - output, err := printers.RelaxedJSONPathExpression(test.input) + output, err := RelaxedJSONPathExpression(test.input) if err != nil && !test.expectErr { t.Errorf("unexpected error: %v", err) return @@ -69,7 +68,7 @@ func TestMassageJSONPath(t *testing.T) { func TestNewColumnPrinterFromSpec(t *testing.T) { tests := []struct { spec string - expectedColumns []printers.Column + expectedColumns []Column expectErr bool name string noHeaders bool @@ -97,7 +96,7 @@ func TestNewColumnPrinterFromSpec(t *testing.T) { { spec: "NAME:metadata.name,API_VERSION:apiVersion", name: "ok", - expectedColumns: []printers.Column{ + expectedColumns: []Column{ { Header: "NAME", FieldSpec: "{.metadata.name}", @@ -116,7 +115,7 @@ func TestNewColumnPrinterFromSpec(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - printer, err := printers.NewCustomColumnsPrinterFromSpec(test.spec, legacyscheme.Codecs.UniversalDecoder(), test.noHeaders) + printer, err := NewCustomColumnsPrinterFromSpec(test.spec, legacyscheme.Codecs.UniversalDecoder(), test.noHeaders) if test.expectErr { if err == nil { t.Errorf("[%s] unexpected non-error", test.name) @@ -164,7 +163,7 @@ const exampleTemplateTwo = `NAME API_VERSION func TestNewColumnPrinterFromTemplate(t *testing.T) { tests := []struct { spec string - expectedColumns []printers.Column + expectedColumns []Column expectErr bool name string }{ @@ -191,7 +190,7 @@ func TestNewColumnPrinterFromTemplate(t *testing.T) { { spec: exampleTemplateOne, name: "ok", - expectedColumns: []printers.Column{ + expectedColumns: []Column{ { Header: "NAME", FieldSpec: "{.metadata.name}", @@ -205,7 +204,7 @@ func TestNewColumnPrinterFromTemplate(t *testing.T) { { spec: exampleTemplateTwo, name: "ok-2", - expectedColumns: 
[]printers.Column{ + expectedColumns: []Column{ { Header: "NAME", FieldSpec: "{.metadata.name}", @@ -220,7 +219,7 @@ func TestNewColumnPrinterFromTemplate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { reader := bytes.NewBufferString(test.spec) - printer, err := printers.NewCustomColumnsPrinterFromTemplate(reader, legacyscheme.Codecs.UniversalDecoder()) + printer, err := NewCustomColumnsPrinterFromTemplate(reader, legacyscheme.Codecs.UniversalDecoder()) if test.expectErr { if err == nil { t.Errorf("[%s] unexpected non-error", test.name) @@ -241,12 +240,12 @@ func TestNewColumnPrinterFromTemplate(t *testing.T) { func TestColumnPrint(t *testing.T) { tests := []struct { - columns []printers.Column + columns []Column obj runtime.Object expectedOutput string }{ { - columns: []printers.Column{ + columns: []Column{ { Header: "NAME", FieldSpec: "{.metadata.name}", @@ -258,7 +257,7 @@ foo `, }, { - columns: []printers.Column{ + columns: []Column{ { Header: "NAME", FieldSpec: "{.metadata.name}", @@ -276,7 +275,7 @@ bar `, }, { - columns: []printers.Column{ + columns: []Column{ { Header: "NAME", FieldSpec: "{.metadata.name}", @@ -292,7 +291,7 @@ foo baz `, }, { - columns: []printers.Column{ + columns: []Column{ { Header: "NAME", FieldSpec: "{.metadata.name}", @@ -315,7 +314,7 @@ foo baz for _, test := range tests { t.Run(test.expectedOutput, func(t *testing.T) { - printer := &printers.CustomColumnsPrinter{ + printer := &CustomColumnsPrinter{ Columns: test.columns, Decoder: legacyscheme.Codecs.UniversalDecoder(), } @@ -332,7 +331,7 @@ foo baz // this mimics how resource/get.go calls the customcolumn printer func TestIndividualPrintObjOnExistingTabWriter(t *testing.T) { - columns := []printers.Column{ + columns := []Column{ { Header: "NAME", FieldSpec: "{.metadata.name}", @@ -356,8 +355,8 @@ bar bar bar ` buffer := &bytes.Buffer{} - tabWriter := printers.GetNewTabWriter(buffer) - printer := &printers.CustomColumnsPrinter{ + tabWriter := GetNewTabWriter(buffer) + printer := &CustomColumnsPrinter{ Columns: columns, Decoder: legacyscheme.Codecs.UniversalDecoder(), } diff --git a/pkg/printers/internalversion/BUILD b/pkg/printers/internalversion/BUILD index c28c99c0949..8ad635a01ef 100644 --- a/pkg/printers/internalversion/BUILD +++ b/pkg/printers/internalversion/BUILD @@ -27,6 +27,8 @@ go_test( "//pkg/apis/storage:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", + "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/printers:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 2c6a1c3c577..a3739cf3ea1 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -50,6 +50,8 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/storage" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + genericprinters "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/printers" ) @@ -255,11 +257,11 @@ func testPrinter(t *testing.T, printer printers.ResourcePrinter, unmarshalFunc f } func TestYAMLPrinter(t *testing.T) { - testPrinter(t, 
printers.NewTypeSetter(legacyscheme.Scheme).ToPrinter(&printers.YAMLPrinter{}), yaml.Unmarshal) + testPrinter(t, genericprinters.NewTypeSetter(legacyscheme.Scheme).ToPrinter(&genericprinters.YAMLPrinter{}), yaml.Unmarshal) } func TestJSONPrinter(t *testing.T) { - testPrinter(t, printers.NewTypeSetter(legacyscheme.Scheme).ToPrinter(&printers.JSONPrinter{}), json.Unmarshal) + testPrinter(t, genericprinters.NewTypeSetter(legacyscheme.Scheme).ToPrinter(&genericprinters.JSONPrinter{}), json.Unmarshal) } func TestFormatResourceName(t *testing.T) { @@ -385,7 +387,7 @@ func TestNamePrinter(t *testing.T) { "pod/bar\n"}, } - printFlags := printers.NewPrintFlags("").WithTypeSetter(legacyscheme.Scheme).WithDefaultOutput("name") + printFlags := genericclioptions.NewPrintFlags("").WithTypeSetter(legacyscheme.Scheme).WithDefaultOutput("name") printer, err := printFlags.ToPrinter() if err != nil { t.Fatalf("unexpected err: %v", err) @@ -548,8 +550,9 @@ func TestPrinters(t *testing.T) { } genericPrinters := map[string]printers.ResourcePrinter{ - "json": printers.NewTypeSetter(legacyscheme.Scheme).ToPrinter(&printers.JSONPrinter{}), - "yaml": printers.NewTypeSetter(legacyscheme.Scheme).ToPrinter(&printers.YAMLPrinter{}), + // TODO(juanvallejo): move "generic printer" tests to pkg/kubectl/genericclioptions/printers + "json": genericprinters.NewTypeSetter(legacyscheme.Scheme).ToPrinter(&genericprinters.JSONPrinter{}), + "yaml": genericprinters.NewTypeSetter(legacyscheme.Scheme).ToPrinter(&genericprinters.YAMLPrinter{}), "template": templatePrinter, "template2": templatePrinter2, "jsonpath": jsonpathPrinter, diff --git a/pkg/printers/jsonpath.go b/pkg/printers/jsonpath.go index cc48f538a7a..b8ea553f513 100644 --- a/pkg/printers/jsonpath.go +++ b/pkg/printers/jsonpath.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/jsonpath" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" ) // exists returns true if it would be possible to call the index function @@ -118,8 +119,8 @@ func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // we use reflect.Indirect here in order to obtain the actual value from a pointer. // we need an actual value in order to retrieve the package path for an object. // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. - if internalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(internalObjectPrinterErr) + if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(printers.InternalObjectPrinterErr) } var queryObj interface{} = obj diff --git a/pkg/printers/jsonpath_flags.go b/pkg/printers/jsonpath_flags.go index 636d6cd0158..49a559d6248 100644 --- a/pkg/printers/jsonpath_flags.go +++ b/pkg/printers/jsonpath_flags.go @@ -22,6 +22,8 @@ import ( "strings" "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" ) // JSONPathPrintFlags provides default flags necessary for template printing. @@ -39,7 +41,7 @@ type JSONPathPrintFlags struct { // Returns false if the specified templateFormat does not match a template format. 
func (f *JSONPathPrintFlags) ToPrinter(templateFormat string) (ResourcePrinter, error) { if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 { - return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} + return nil, genericclioptions.NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} } templateValue := "" @@ -66,7 +68,7 @@ func (f *JSONPathPrintFlags) ToPrinter(templateFormat string) (ResourcePrinter, } if _, supportedFormat := templateFormats[templateFormat]; !supportedFormat { - return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} + return nil, genericclioptions.NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} } if len(templateValue) == 0 { diff --git a/pkg/printers/jsonpath_flags_test.go b/pkg/printers/jsonpath_flags_test.go index a84241b14bd..2f5b6c4c7c2 100644 --- a/pkg/printers/jsonpath_flags_test.go +++ b/pkg/printers/jsonpath_flags_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package printers_test +package printers import ( "bytes" @@ -26,7 +26,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" ) func TestPrinterSupportsExpectedJSONPathFormats(t *testing.T) { @@ -99,18 +99,18 @@ func TestPrinterSupportsExpectedJSONPathFormats(t *testing.T) { templateArg = nil } - printFlags := printers.JSONPathPrintFlags{ + printFlags := JSONPathPrintFlags{ TemplateArgument: templateArg, } p, err := printFlags.ToPrinter(tc.outputFormat) if tc.expectNoMatch { - if !printers.IsNoCompatiblePrinterError(err) { + if !genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected no printer matches for output format %q", tc.outputFormat) } return } - if printers.IsNoCompatiblePrinterError(err) { + if genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected to match template printer for output format %q", tc.outputFormat) } @@ -177,14 +177,14 @@ func TestJSONPathPrinterDefaultsAllowMissingKeysToTrue(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - printFlags := printers.JSONPathPrintFlags{ + printFlags := JSONPathPrintFlags{ TemplateArgument: &tc.templateArg, AllowMissingKeys: tc.allowMissingKeys, } outputFormat := "jsonpath" p, err := printFlags.ToPrinter(outputFormat) - if printers.IsNoCompatiblePrinterError(err) { + if genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected to match template printer for output format %q", outputFormat) } if err != nil { diff --git a/pkg/printers/kube_template_flags.go b/pkg/printers/kube_template_flags.go index 691c467a704..a5b0c71326c 100644 --- a/pkg/printers/kube_template_flags.go +++ b/pkg/printers/kube_template_flags.go @@ -16,7 +16,11 @@ limitations under the License. package printers -import "github.com/spf13/cobra" +import ( + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" +) // KubeTemplatePrintFlags composes print flags that provide both a JSONPath and a go-template printer. 
// This is necessary if dealing with cases that require support both both printers, since both sets of flags @@ -30,7 +34,7 @@ type KubeTemplatePrintFlags struct { } func (f *KubeTemplatePrintFlags) ToPrinter(outputFormat string) (ResourcePrinter, error) { - if p, err := f.JSONPathPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { + if p, err := f.JSONPathPrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { return p, err } return f.GoTemplatePrintFlags.ToPrinter(outputFormat) diff --git a/pkg/printers/template.go b/pkg/printers/template.go index 6b2a7e2bddf..678b46e3ba4 100644 --- a/pkg/printers/template.go +++ b/pkg/printers/template.go @@ -25,6 +25,7 @@ import ( "text/template" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" ) // GoTemplatePrinter is an implementation of ResourcePrinter which formats data with a Go Template. @@ -60,8 +61,8 @@ func (p *GoTemplatePrinter) AllowMissingKeys(allow bool) { // PrintObj formats the obj with the Go Template. func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error { - if internalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { - return fmt.Errorf(internalObjectPrinterErr) + if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(printers.InternalObjectPrinterErr) } var data []byte diff --git a/pkg/printers/template_flags.go b/pkg/printers/template_flags.go index 6d713e415fa..0e6d825895d 100644 --- a/pkg/printers/template_flags.go +++ b/pkg/printers/template_flags.go @@ -22,6 +22,8 @@ import ( "strings" "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" ) // GoTemplatePrintFlags provides default flags necessary for template printing. @@ -39,7 +41,7 @@ type GoTemplatePrintFlags struct { // Returns false if the specified templateFormat does not match a template format. func (f *GoTemplatePrintFlags) ToPrinter(templateFormat string) (ResourcePrinter, error) { if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 { - return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} + return nil, genericclioptions.NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} } templateValue := "" @@ -68,7 +70,7 @@ func (f *GoTemplatePrintFlags) ToPrinter(templateFormat string) (ResourcePrinter } if _, supportedFormat := supportedFormats[templateFormat]; !supportedFormat { - return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} + return nil, genericclioptions.NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} } if len(templateValue) == 0 { diff --git a/pkg/printers/template_flags_test.go b/pkg/printers/template_flags_test.go index f75e51f033b..3a2400ca879 100644 --- a/pkg/printers/template_flags_test.go +++ b/pkg/printers/template_flags_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
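The fallback in KubeTemplatePrintFlags.ToPrinter above works because NoCompatiblePrinterError is a sentinel: each flag group returns it when the requested format is not its own, and only that error lets the caller try the next group. A hedged sketch of the same chaining pattern as a standalone helper (the helper is hypothetical and not part of this patch):

package example

import (
	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
	"k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers"
)

// toPrinterFunc matches the ToPrinter(outputFormat) shape shared by the
// individual flag groups in this refactor.
type toPrinterFunc func(outputFormat string) (printers.ResourcePrinter, error)

// firstCompatible asks each candidate in turn; only the sentinel
// NoCompatiblePrinterError keeps the search going, any other result wins.
func firstCompatible(outputFormat string, candidates ...toPrinterFunc) (printers.ResourcePrinter, error) {
	for _, toPrinter := range candidates {
		p, err := toPrinter(outputFormat)
		if !genericclioptions.IsNoCompatiblePrinterError(err) {
			return p, err // a usable printer, or a genuine failure
		}
	}
	return nil, genericclioptions.NoCompatiblePrinterError{OutputFormat: &outputFormat}
}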
*/ -package printers_test +package printers import ( "bytes" @@ -26,7 +26,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/printers" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" ) func TestPrinterSupportsExpectedTemplateFormats(t *testing.T) { @@ -99,18 +99,18 @@ func TestPrinterSupportsExpectedTemplateFormats(t *testing.T) { templateArg = nil } - printFlags := printers.GoTemplatePrintFlags{ + printFlags := GoTemplatePrintFlags{ TemplateArgument: templateArg, } p, err := printFlags.ToPrinter(tc.outputFormat) if tc.expectNoMatch { - if !printers.IsNoCompatiblePrinterError(err) { + if !genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected no printer matches for output format %q", tc.outputFormat) } return } - if printers.IsNoCompatiblePrinterError(err) { + if genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected to match template printer for output format %q", tc.outputFormat) } @@ -171,14 +171,14 @@ func TestTemplatePrinterDefaultsAllowMissingKeysToTrue(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - printFlags := printers.GoTemplatePrintFlags{ + printFlags := GoTemplatePrintFlags{ TemplateArgument: &tc.templateArg, AllowMissingKeys: tc.allowMissingKeys, } outputFormat := "template" p, err := printFlags.ToPrinter(outputFormat) - if printers.IsNoCompatiblePrinterError(err) { + if genericclioptions.IsNoCompatiblePrinterError(err) { t.Fatalf("expected to match template printer for output format %q", outputFormat) } if err != nil { From e911418242006d52db5df762b365c245573c243f Mon Sep 17 00:00:00 2001 From: Benjamin Elder Date: Mon, 21 May 2018 12:03:52 -0700 Subject: [PATCH 063/307] correct test logging package stackdrvier -> stackdriver --- test/e2e/instrumentation/logging/BUILD | 4 ++-- test/e2e/instrumentation/logging/imports.go | 2 +- .../logging/{stackdrvier => stackdriver}/BUILD | 2 +- .../logging/{stackdrvier => stackdriver}/basic.go | 0 .../logging/{stackdrvier => stackdriver}/soak.go | 0 .../logging/{stackdrvier => stackdriver}/utils.go | 0 6 files changed, 4 insertions(+), 4 deletions(-) rename test/e2e/instrumentation/logging/{stackdrvier => stackdriver}/BUILD (98%) rename test/e2e/instrumentation/logging/{stackdrvier => stackdriver}/basic.go (100%) rename test/e2e/instrumentation/logging/{stackdrvier => stackdriver}/soak.go (100%) rename test/e2e/instrumentation/logging/{stackdrvier => stackdriver}/utils.go (100%) diff --git a/test/e2e/instrumentation/logging/BUILD b/test/e2e/instrumentation/logging/BUILD index 6d1698b4138..d68ba2aab31 100644 --- a/test/e2e/instrumentation/logging/BUILD +++ b/test/e2e/instrumentation/logging/BUILD @@ -16,7 +16,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/instrumentation/logging/elasticsearch:go_default_library", - "//test/e2e/instrumentation/logging/stackdrvier:go_default_library", + "//test/e2e/instrumentation/logging/stackdriver:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", @@ -35,7 +35,7 @@ filegroup( srcs = [ ":package-srcs", "//test/e2e/instrumentation/logging/elasticsearch:all-srcs", - "//test/e2e/instrumentation/logging/stackdrvier:all-srcs", + "//test/e2e/instrumentation/logging/stackdriver:all-srcs", "//test/e2e/instrumentation/logging/utils:all-srcs", ], tags = ["automanaged"], diff --git 
a/test/e2e/instrumentation/logging/imports.go b/test/e2e/instrumentation/logging/imports.go index a3621e7bdc9..aaf73d90dfa 100644 --- a/test/e2e/instrumentation/logging/imports.go +++ b/test/e2e/instrumentation/logging/imports.go @@ -18,5 +18,5 @@ package logging import ( _ "k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch" - _ "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdrvier" + _ "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdriver" ) diff --git a/test/e2e/instrumentation/logging/stackdrvier/BUILD b/test/e2e/instrumentation/logging/stackdriver/BUILD similarity index 98% rename from test/e2e/instrumentation/logging/stackdrvier/BUILD rename to test/e2e/instrumentation/logging/stackdriver/BUILD index b9782f2623a..3c9b7951e09 100644 --- a/test/e2e/instrumentation/logging/stackdrvier/BUILD +++ b/test/e2e/instrumentation/logging/stackdriver/BUILD @@ -12,7 +12,7 @@ go_library( "soak.go", "utils.go", ], - importpath = "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdrvier", + importpath = "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdriver", deps = [ "//test/e2e/framework:go_default_library", "//test/e2e/instrumentation/common:go_default_library", diff --git a/test/e2e/instrumentation/logging/stackdrvier/basic.go b/test/e2e/instrumentation/logging/stackdriver/basic.go similarity index 100% rename from test/e2e/instrumentation/logging/stackdrvier/basic.go rename to test/e2e/instrumentation/logging/stackdriver/basic.go diff --git a/test/e2e/instrumentation/logging/stackdrvier/soak.go b/test/e2e/instrumentation/logging/stackdriver/soak.go similarity index 100% rename from test/e2e/instrumentation/logging/stackdrvier/soak.go rename to test/e2e/instrumentation/logging/stackdriver/soak.go diff --git a/test/e2e/instrumentation/logging/stackdrvier/utils.go b/test/e2e/instrumentation/logging/stackdriver/utils.go similarity index 100% rename from test/e2e/instrumentation/logging/stackdrvier/utils.go rename to test/e2e/instrumentation/logging/stackdriver/utils.go From af69af2d3335803ceda21e33aac78b62d036062f Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Mon, 21 May 2018 19:58:50 +0000 Subject: [PATCH 064/307] Update CHANGELOG-1.9.md for v1.9.8. 
--- CHANGELOG-1.9.md | 165 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 120 insertions(+), 45 deletions(-) diff --git a/CHANGELOG-1.9.md b/CHANGELOG-1.9.md index 83c51c305d4..5180bda0f32 100644 --- a/CHANGELOG-1.9.md +++ b/CHANGELOG-1.9.md @@ -1,60 +1,67 @@ -- [v1.9.7](#v197) - - [Downloads for v1.9.7](#downloads-for-v197) +- [v1.9.8](#v198) + - [Downloads for v1.9.8](#downloads-for-v198) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.9.6](#changelog-since-v196) - - [Action Required](#action-required) + - [Changelog since v1.9.7](#changelog-since-v197) - [Other notable changes](#other-notable-changes) -- [v1.9.6](#v196) - - [Downloads for v1.9.6](#downloads-for-v196) +- [v1.9.7](#v197) + - [Downloads for v1.9.7](#downloads-for-v197) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.9.5](#changelog-since-v195) + - [Changelog since v1.9.6](#changelog-since-v196) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-1) -- [v1.9.5](#v195) - - [Downloads for v1.9.5](#downloads-for-v195) +- [v1.9.6](#v196) + - [Downloads for v1.9.6](#downloads-for-v196) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.9.4](#changelog-since-v194) + - [Changelog since v1.9.5](#changelog-since-v195) - [Other notable changes](#other-notable-changes-2) -- [v1.9.4](#v194) - - [Downloads for v1.9.4](#downloads-for-v194) +- [v1.9.5](#v195) + - [Downloads for v1.9.5](#downloads-for-v195) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.9.3](#changelog-since-v193) + - [Changelog since v1.9.4](#changelog-since-v194) - [Other notable changes](#other-notable-changes-3) -- [v1.9.3](#v193) - - [Downloads for v1.9.3](#downloads-for-v193) +- [v1.9.4](#v194) + - [Downloads for v1.9.4](#downloads-for-v194) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.9.2](#changelog-since-v192) - - [Action Required](#action-required-1) + - [Changelog since v1.9.3](#changelog-since-v193) - [Other notable changes](#other-notable-changes-4) -- [v1.9.2](#v192) - - [Downloads for v1.9.2](#downloads-for-v192) +- [v1.9.3](#v193) + - [Downloads for v1.9.3](#downloads-for-v193) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.9.1](#changelog-since-v191) + - [Changelog since v1.9.2](#changelog-since-v192) + - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-5) -- [v1.9.1](#v191) - - [Downloads for v1.9.1](#downloads-for-v191) +- [v1.9.2](#v192) + - [Downloads for v1.9.2](#downloads-for-v192) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - - [Changelog since v1.9.0](#changelog-since-v190) + - [Changelog since v1.9.1](#changelog-since-v191) - [Other notable changes](#other-notable-changes-6) -- [v1.9.0](#v190) - - [Downloads for v1.9.0](#downloads-for-v190) +- [v1.9.1](#v191) + - [Downloads for v1.9.1](#downloads-for-v191) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) + - [Changelog since 
v1.9.0](#changelog-since-v190) + - [Other notable changes](#other-notable-changes-7) +- [v1.9.0](#v190) + - [Downloads for v1.9.0](#downloads-for-v190) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) - [1.9 Release Notes](#19-release-notes) - [WARNING: etcd backup strongly recommended](#warning-etcd-backup-strongly-recommended) - [Introduction to 1.9.0](#introduction-to-190) @@ -142,48 +149,116 @@ - [External Dependencies](#external-dependencies) - [v1.9.0-beta.2](#v190-beta2) - [Downloads for v1.9.0-beta.2](#downloads-for-v190-beta2) - - [Client Binaries](#client-binaries-8) - - [Server Binaries](#server-binaries-8) - - [Node Binaries](#node-binaries-8) - - [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1) - - [Other notable changes](#other-notable-changes-7) -- [v1.9.0-beta.1](#v190-beta1) - - [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3) - - [Action Required](#action-required-2) + - [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1) - [Other notable changes](#other-notable-changes-8) -- [v1.9.0-alpha.3](#v190-alpha3) - - [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3) +- [v1.9.0-beta.1](#v190-beta1) + - [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - - [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2) - - [Action Required](#action-required-3) + - [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-9) -- [v1.9.0-alpha.2](#v190-alpha2) - - [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2) +- [v1.9.0-alpha.3](#v190-alpha3) + - [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) - - [Changelog since v1.8.0](#changelog-since-v180) - - [Action Required](#action-required-4) + - [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-10) -- [v1.9.0-alpha.1](#v190-alpha1) - - [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1) +- [v1.9.0-alpha.2](#v190-alpha2) + - [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2) - [Client Binaries](#client-binaries-12) - [Server Binaries](#server-binaries-12) - [Node Binaries](#node-binaries-12) + - [Changelog since v1.8.0](#changelog-since-v180) + - [Action Required](#action-required-4) + - [Other notable changes](#other-notable-changes-11) +- [v1.9.0-alpha.1](#v190-alpha1) + - [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1) + - [Client Binaries](#client-binaries-13) + - [Server Binaries](#server-binaries-13) + - [Node Binaries](#node-binaries-13) - [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3) - [Action Required](#action-required-5) - - [Other notable changes](#other-notable-changes-11) + - [Other notable changes](#other-notable-changes-12) +# v1.9.8 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) + +## Downloads for v1.9.8 + + +filename | sha256 hash +-------- | ----------- 
+[kubernetes.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes.tar.gz) | `de31bcccfe99b88f54ff24147be0b6c4fbc4fe46b10f81d0f05294317070e221` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-src.tar.gz) | `d349086847a22ee89dc1fba335741c670650c0b61c1648612f4a40bc0b90255e` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-darwin-386.tar.gz) | `de772fcc08bf14d21c96f55aad8812bac36b7ef61957f7dab5ba3ec0321b691f` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-darwin-amd64.tar.gz) | `6aff6127b21e14009e3aa181fc1a5932868060a32ba291170cd10a54acc2e6f4` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-linux-386.tar.gz) | `1d5429470fd7c8c451075fa1cdbcbf430510a80131ad52b45f51e5e941ffed05` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-linux-amd64.tar.gz) | `70b66cc9cd17a184a38d4a8daa5e3410f10390eb7c8745418c004cda457e609c` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-linux-arm.tar.gz) | `648cfde6590100621c1eaa2e98d072c03ba19239fbc9809d8f6b859f81d61fc1` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-linux-arm64.tar.gz) | `0966e4070d2cad96610d2a533501d1ca116016fe6a76b865d052ea72dec60a19` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-linux-ppc64le.tar.gz) | `12d4a5e572d7c8dcccd569c93b0d7eaffd8bbed0f44d78e08c062d90d3dd1eec` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-linux-s390x.tar.gz) | `53f619ad8c74cbcc0a3000d007ac98f544f7724a566c0560bd2b3d1ab123b223` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-windows-386.tar.gz) | `04bcaaecd9e72b5dcb0019314baeaa1b3915401ae82a73e616b8579fcfb8c5e7` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-client-windows-amd64.tar.gz) | `ec5d0b08daaaa101597a8bc8ff51727556ae16e26c1d71fe96dd07ba4ad63cbf` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-server-linux-amd64.tar.gz) | `13a410ad1de823a807474c29475640cb3160f78c865f74c339f9690107d6f819` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-server-linux-arm.tar.gz) | `42f625c0c6bf370a6213e498c044d8ba62d757e365cd1ec292e4f7532c2360de` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-server-linux-arm64.tar.gz) | `464fe315c21b29417b74f51c5d923787bc046d652089a6e11f3a1fd72ae66f74` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-server-linux-ppc64le.tar.gz) | `e62a7f5e2b5f9502da07b03d2c4b6670d31d320b7811e9dbf0cf7470cc2377c1` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-server-linux-s390x.tar.gz) | `11323b78190600000c92167cbd9ac000f9561545876425a9cf3ba7eab69ab132` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-node-linux-amd64.tar.gz) | `34c699bab128eb5094a3c7ca21e728d9ab7b08fe98ee9156671c7a3228344329` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-node-linux-arm.tar.gz) | `f8bb865a85d2cf2bfa0188cf250471ff6997b2b1e8bd750db39b635355568445` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-node-linux-arm64.tar.gz) | 
`7be2b47820b82153a7cf39abc146a59595e7156209f29a86fa6a4878ea4840c1` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-node-linux-ppc64le.tar.gz) | `1ee6402840c60b4519b8c785057e35880f78c2449e17695a82174f12144dd4e0` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-node-linux-s390x.tar.gz) | `a023e6357f05ed1314a107c6848b48b3b2cb5b562dd557b64720481288b588d8` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.9.8/kubernetes-node-windows-amd64.tar.gz) | `a75684e1da14453517ed7deab78453579e148341440a6b4afa5a17c59a72a459` + +## Changelog since v1.9.7 + +### Other notable changes + +* Fix a bug in scheduler cache by using Pod UID as the cache key instead of namespace/name ([#61069](https://github.com/kubernetes/kubernetes/pull/61069), [@anfernee](https://github.com/anfernee)) +* Fix in vSphere Cloud Provider to handle upgrades from kubernetes version less than v1.9.4 to v1.9.4 and above. ([#62919](https://github.com/kubernetes/kubernetes/pull/62919), [@abrarshivani](https://github.com/abrarshivani)) +* Fixes issue where subpath readOnly mounts failed ([#63045](https://github.com/kubernetes/kubernetes/pull/63045), [@msau42](https://github.com/msau42)) +* Fix in vSphere Cloud Provider to report disk is detach when VM is not found. ([#62220](https://github.com/kubernetes/kubernetes/pull/62220), [@abrarshivani](https://github.com/abrarshivani)) +* corrects a race condition in bootstrapping aggregated cluster roles in new HA clusters ([#63761](https://github.com/kubernetes/kubernetes/pull/63761), [@liggitt](https://github.com/liggitt)) +* Add MAX_PODS_PER_NODE env so that GCE/GKE user can use it to specify the default max pods per node for the cluster. IP_ALIAS_SIZE will be changed accordingly. Must have ip alias enabled. ([#63451](https://github.com/kubernetes/kubernetes/pull/63451), [@grayluck](https://github.com/grayluck)) +* Fix user visible files creation for windows ([#62375](https://github.com/kubernetes/kubernetes/pull/62375), [@feiskyer](https://github.com/feiskyer)) +* Add ipset and udevadm to the hyperkube base image. ([#61357](https://github.com/kubernetes/kubernetes/pull/61357), [@rphillips](https://github.com/rphillips)) +* Fixes bugs that make apiserver panic when aggregating valid but not well formed OpenAPI spec ([#63626](https://github.com/kubernetes/kubernetes/pull/63626), [@roycaihw](https://github.com/roycaihw)) +* Kubernetes version command line parameter in kubeadm has been updated to drop an unnecessary redirection from ci/latest.txt to ci-cross/latest.txt. Users should know exactly where the builds are stored on Google Cloud storage buckets from now on. For example for 1.9 and 1.10, users can specify ci/latest-1.9 and ci/latest-1.10 as the CI build jobs what build images correctly updates those. The CI jobs for master update the ci-cross/latest location, so if you are looking for latest master builds, then the correct parameter to use would be ci-cross/latest. ([#63504](https://github.com/kubernetes/kubernetes/pull/63504), [@dims](https://github.com/dims)) +* Fix issue where on re-registration of device plugin, `allocatable` was not getting updated. This issue makes devices invisible to the Kubelet if device plugin restarts. Only work-around, if this fix is not there, is to restart the kubelet and then start device plugin. 
([#63118](https://github.com/kubernetes/kubernetes/pull/63118), [@vikaschoudhary16](https://github.com/vikaschoudhary16)) +* GCE: Fix for internal load balancer management resulting in backend services with outdated instance group links. ([#62887](https://github.com/kubernetes/kubernetes/pull/62887), [@nicksardo](https://github.com/nicksardo)) +* Bugfix allowing use of IP-aliases with custom-mode network in GCE setup scripts. ([#62172](https://github.com/kubernetes/kubernetes/pull/62172), [@shyamjvs](https://github.com/shyamjvs)) + + + # v1.9.7 [Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) From 8fc0bfd28792bee3d74b3e6409627a038e3ee23c Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Mon, 21 May 2018 15:07:08 -0400 Subject: [PATCH 065/307] remove LabelsForObject and ResolveImage from factory --- pkg/kubectl/cmd/expose.go | 4 +- pkg/kubectl/cmd/set/set_image.go | 14 ++++- pkg/kubectl/cmd/util/factory.go | 7 --- pkg/kubectl/cmd/util/factory_client_access.go | 8 --- pkg/kubectl/cmd/util/factory_test.go | 52 ------------------- 5 files changed, 13 insertions(+), 72 deletions(-) diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index b35ceba4284..6224236a6f4 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -96,7 +96,6 @@ type ExposeServiceOptions struct { MapBasedSelectorForObject func(runtime.Object) (string, error) PortsForObject func(runtime.Object) ([]string, error) ProtocolsForObject func(runtime.Object) (map[string]string, error) - LabelsForObject func(runtime.Object) (map[string]string, error) Namespace string Mapper meta.RESTMapper @@ -200,7 +199,6 @@ func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) e if err != nil { return err } - o.LabelsForObject = f.LabelsForObject o.Namespace, o.EnforceNamespace, err = f.DefaultNamespace() if err != nil { @@ -294,7 +292,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro } if kubectl.IsZero(params["labels"]) { - labels, err := o.LabelsForObject(info.Object) + labels, err := meta.NewAccessor().Labels(info.Object) if err != nil { return err } diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 76dd52646b7..564dc94fafe 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -49,7 +49,7 @@ type SetImageOptions struct { All bool Output string Local bool - ResolveImage func(in string) (string, error) + ResolveImage ImageResolver PrintObj printers.ResourcePrinterFunc Recorder genericclioptions.Recorder @@ -137,7 +137,7 @@ func (o *SetImageOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ o.UpdatePodSpecForObject = f.UpdatePodSpecForObject o.DryRun = cmdutil.GetDryRunFlag(cmd) o.Output = cmdutil.GetFlagString(cmd, "output") - o.ResolveImage = f.ResolveImage + o.ResolveImage = resolveImageFunc if o.DryRun { o.PrintFlags.Complete("%s (dry run)") @@ -309,3 +309,13 @@ func hasWildcardKey(containerImages map[string]string) bool { _, ok := containerImages["*"] return ok } + +// ImageResolver is a func that receives an image name, and +// resolves it to an appropriate / compatible image name. +// Adds flexibility for future image resolving methods. 
+type ImageResolver func(in string) (string, error) + +// implements ImageResolver +func resolveImageFunc(in string) (string, error) { + return in, nil +} diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 8f867553028..f630b1db224 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -91,8 +91,6 @@ type ClientAccessFactory interface { PortsForObject(object runtime.Object) ([]string, error) // ProtocolsForObject returns the mapping associated with the provided object ProtocolsForObject(object runtime.Object) (map[string]string, error) - // LabelsForObject returns the labels associated with the provided object - LabelsForObject(object runtime.Object) (map[string]string, error) // Command will stringify and return all environment arguments ie. a command run by a client // using the factory. @@ -110,11 +108,6 @@ type ClientAccessFactory interface { // in case the object is already resumed. Resumer(info *resource.Info) ([]byte, error) - // ResolveImage resolves the image names. For kubernetes this function is just - // passthrough but it allows to perform more sophisticated image name resolving for - // third-party vendors. - ResolveImage(imageName string) (string, error) - // Returns the default namespace to use in cases where no // other namespace is specified and whether the namespace was // overridden. diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index 55ffc2bb3cb..3ea7e5c867b 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -260,10 +260,6 @@ func (f *ring0Factory) ProtocolsForObject(object runtime.Object) (map[string]str } } -func (f *ring0Factory) LabelsForObject(object runtime.Object) (map[string]string, error) { - return meta.NewAccessor().Labels(object) -} - // Set showSecrets false to filter out stuff like secrets. 
func (f *ring0Factory) Command(cmd *cobra.Command, showSecrets bool) string { if len(os.Args) == 0 { @@ -318,10 +314,6 @@ func (f *ring0Factory) Pauser(info *resource.Info) ([]byte, error) { } } -func (f *ring0Factory) ResolveImage(name string) (string, error) { - return name, nil -} - func (f *ring0Factory) Resumer(info *resource.Info) ([]byte, error) { switch obj := info.Object.(type) { case *extensions.Deployment: diff --git a/pkg/kubectl/cmd/util/factory_test.go b/pkg/kubectl/cmd/util/factory_test.go index ba43f58a37e..8f9b120dde4 100644 --- a/pkg/kubectl/cmd/util/factory_test.go +++ b/pkg/kubectl/cmd/util/factory_test.go @@ -21,7 +21,6 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" api "k8s.io/kubernetes/pkg/apis/core" @@ -98,57 +97,6 @@ func TestProtocolsForObject(t *testing.T) { } } -func TestLabelsForObject(t *testing.T) { - f := NewFactory(genericclioptions.NewTestConfigFlags()) - - tests := []struct { - name string - object runtime.Object - expected string - err error - }{ - { - name: "successful re-use of labels", - object: &api.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", Labels: map[string]string{"svc": "test"}}, - TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, - }, - expected: "svc=test", - err: nil, - }, - { - name: "empty labels", - object: &api.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test", Labels: map[string]string{}}, - TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, - }, - expected: "", - err: nil, - }, - { - name: "nil labels", - object: &api.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "zen", Namespace: "test", Labels: nil}, - TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, - }, - expected: "", - err: nil, - }, - } - - for _, test := range tests { - gotLabels, err := f.LabelsForObject(test.object) - if err != test.err { - t.Fatalf("%s: Error mismatch: Expected %v, got %v", test.name, test.err, err) - } - got := kubectl.MakeLabels(gotLabels) - if test.expected != got { - t.Fatalf("%s: Labels mismatch! Expected %s, got %s", test.name, test.expected, got) - } - - } -} - func TestCanBeExposed(t *testing.T) { factory := NewFactory(genericclioptions.NewTestConfigFlags()) tests := []struct { From 5adee740008a65907bfd8d6e3c3275e24f23f27d Mon Sep 17 00:00:00 2001 From: "Lubomir I. Ivanov" Date: Thu, 17 May 2018 20:39:21 +0300 Subject: [PATCH 066/307] kubeadm-upgrade: small improvements to diff 1) Store the io.Writer and pass it to sub-commands in upgrade.go 2) Check if the manifest path is an empty string in diff.go:runDiff() 3) Use the io.Writer that upgrade.go defines instead of writing to os.Stdout directly. --- cmd/kubeadm/app/cmd/upgrade/diff.go | 7 +++++-- cmd/kubeadm/app/cmd/upgrade/upgrade.go | 2 ++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/kubeadm/app/cmd/upgrade/diff.go b/cmd/kubeadm/app/cmd/upgrade/diff.go index 957b05f72ee..cf87756c678 100644 --- a/cmd/kubeadm/app/cmd/upgrade/diff.go +++ b/cmd/kubeadm/app/cmd/upgrade/diff.go @@ -17,8 +17,8 @@ limitations under the License. 
package upgrade import ( + "fmt" "io/ioutil" - "os" "github.com/golang/glog" "github.com/pmezard/go-difflib/difflib" @@ -119,6 +119,9 @@ func runDiff(flags *diffFlags, args []string) error { if err != nil { return err } + if path == "" { + return fmt.Errorf("empty manifest path") + } existingManifest, err := ioutil.ReadFile(path) if err != nil { return err @@ -133,7 +136,7 @@ func runDiff(flags *diffFlags, args []string) error { Context: flags.contextLines, } - difflib.WriteUnifiedDiff(os.Stdout, diff) + difflib.WriteUnifiedDiff(flags.parent.out, diff) } return nil } diff --git a/cmd/kubeadm/app/cmd/upgrade/upgrade.go b/cmd/kubeadm/app/cmd/upgrade/upgrade.go index 1993e67a22e..a7928aa01bc 100644 --- a/cmd/kubeadm/app/cmd/upgrade/upgrade.go +++ b/cmd/kubeadm/app/cmd/upgrade/upgrade.go @@ -37,6 +37,7 @@ type cmdUpgradeFlags struct { skipPreFlight bool ignorePreflightErrors []string ignorePreflightErrorsSet sets.String + out io.Writer } // NewCmdUpgrade returns the cobra command for `kubeadm upgrade` @@ -50,6 +51,7 @@ func NewCmdUpgrade(out io.Writer) *cobra.Command { printConfig: false, skipPreFlight: false, ignorePreflightErrorsSet: sets.NewString(), + out: out, } cmd := &cobra.Command{ From 6469c8e333c01b2cccd7d8f918453db8abfd35b8 Mon Sep 17 00:00:00 2001 From: Ryan Phillips Date: Tue, 8 May 2018 14:12:20 -0500 Subject: [PATCH 067/307] kubelet: fix checkpoint manager logic bug on restore --- pkg/kubelet/config/BUILD | 2 ++ pkg/kubelet/config/config.go | 26 +++++++++++++-------- pkg/kubelet/config/config_test.go | 39 +++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 10 deletions(-) diff --git a/pkg/kubelet/config/BUILD b/pkg/kubelet/config/BUILD index 37e72d75e2a..6985adfdd15 100644 --- a/pkg/kubelet/config/BUILD +++ b/pkg/kubelet/config/BUILD @@ -108,6 +108,8 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", "//pkg/apis/core/validation:go_default_library", + "//pkg/kubelet/checkpoint:go_default_library", + "//pkg/kubelet/checkpointmanager:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index 6ffb448254b..51c2e29c546 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -113,17 +113,20 @@ func (c *PodConfig) Sync() { // Restore restores pods from the checkpoint path, *once* func (c *PodConfig) Restore(path string, updates chan<- interface{}) error { - var err error - if c.checkpointManager == nil { - c.checkpointManager, err = checkpointmanager.NewCheckpointManager(path) - if err != nil { - pods, err := checkpoint.LoadPods(c.checkpointManager) - if err == nil { - updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.RESTORE, Source: kubetypes.ApiserverSource} - } - } + if c.checkpointManager != nil { + return nil } - return err + var err error + c.checkpointManager, err = checkpointmanager.NewCheckpointManager(path) + if err != nil { + return err + } + pods, err := checkpoint.LoadPods(c.checkpointManager) + if err != nil { + return err + } + updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.RESTORE, Source: kubetypes.ApiserverSource} + return nil } // podStorage manages the current pod state at any point in time and ensures updates @@ -311,6 +314,9 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de } case kubetypes.RESTORE: glog.V(4).Infof("Restoring pods for source %s", 
source) + for _, value := range update.Pods { + restorePods = append(restorePods, value) + } default: glog.Warningf("Received invalid update type: %v", update) diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go index deb1f4dcaf3..f41542a9a6c 100644 --- a/pkg/kubelet/config/config_test.go +++ b/pkg/kubelet/config/config_test.go @@ -17,7 +17,9 @@ limitations under the License. package config import ( + "io/ioutil" "math/rand" + "os" "reflect" "sort" "strconv" @@ -30,6 +32,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/kubelet/checkpoint" + "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/securitycontext" ) @@ -85,6 +90,14 @@ func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*v1.Pod) return kubetypes.PodUpdate{Pods: pods, Op: op, Source: source} } +func createPodConfigTesterByChannel(mode PodConfigNotificationMode, channelName string) (chan<- interface{}, <-chan kubetypes.PodUpdate, *PodConfig) { + eventBroadcaster := record.NewBroadcaster() + config := NewPodConfig(mode, eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kubelet"})) + channel := config.Channel(channelName) + ch := config.Updates() + return channel, ch, config +} + func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubetypes.PodUpdate, *PodConfig) { eventBroadcaster := record.NewBroadcaster() config := NewPodConfig(mode, eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kubelet"})) @@ -413,3 +426,29 @@ func TestPodUpdateLabels(t *testing.T) { expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod)) } + +func TestPodRestore(t *testing.T) { + tmpDir, _ := ioutil.TempDir("", "") + defer os.RemoveAll(tmpDir) + + pod := CreateValidPod("api-server", "kube-default") + pod.Annotations = make(map[string]string, 0) + pod.Annotations["kubernetes.io/config.source"] = kubetypes.ApiserverSource + pod.Annotations[core.BootstrapCheckpointAnnotationKey] = "true" + + // Create Checkpointer + checkpointManager, err := checkpointmanager.NewCheckpointManager(tmpDir) + if err != nil { + t.Fatalf("failed to initialize checkpoint manager: %v", err) + } + if err := checkpoint.WritePod(checkpointManager, pod); err != nil { + t.Fatalf("Error writing checkpoint for pod: %v", pod.GetName()) + } + + // Restore checkpoint + channel, ch, config := createPodConfigTesterByChannel(PodConfigNotificationIncremental, kubetypes.ApiserverSource) + if err := config.Restore(tmpDir, channel); err != nil { + t.Fatalf("Restore returned error: %v", err) + } + expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.RESTORE, kubetypes.ApiserverSource, pod)) +} From f93d064e931bd85b5a39a5c7d9b526adca32f56f Mon Sep 17 00:00:00 2001 From: "Lubomir I. Ivanov" Date: Thu, 17 May 2018 20:45:17 +0300 Subject: [PATCH 068/307] kubeadm-upgrade: add unit tests for the diff command Add the file diff_test.go, which has a single test: TestRunDiff The test covers most error cases for the runDiff() function, and also performs a valid diff. 
A couple of test files are added in: cmd/kubeadm/app/cmd/upgrade/testdata/ --- cmd/kubeadm/app/cmd/upgrade/BUILD | 2 + cmd/kubeadm/app/cmd/upgrade/diff_test.go | 93 +++++++++++++++++++ .../upgrade/testdata/diff_dummy_manifest.yaml | 1 + .../upgrade/testdata/diff_master_config.yaml | 3 + 4 files changed, 99 insertions(+) create mode 100644 cmd/kubeadm/app/cmd/upgrade/diff_test.go create mode 100644 cmd/kubeadm/app/cmd/upgrade/testdata/diff_dummy_manifest.yaml create mode 100644 cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index f22be4b4435..201c49acfaf 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -44,8 +44,10 @@ go_test( srcs = [ "apply_test.go", "common_test.go", + "diff_test.go", "plan_test.go", ], + data = glob(["testdata/**"]), embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", diff --git a/cmd/kubeadm/app/cmd/upgrade/diff_test.go b/cmd/kubeadm/app/cmd/upgrade/diff_test.go new file mode 100644 index 00000000000..2c7e2834399 --- /dev/null +++ b/cmd/kubeadm/app/cmd/upgrade/diff_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package upgrade + +import ( + "io/ioutil" + "testing" +) + +const ( + testUpgradeDiffConfig = `testdata/diff_master_config.yaml` + testUpgradeDiffManifest = `testdata/diff_dummy_manifest.yaml` +) + +func TestRunDiff(t *testing.T) { + parentFlags := &cmdUpgradeFlags{ + cfgPath: "", + out: ioutil.Discard, + } + flags := &diffFlags{ + parent: parentFlags, + } + + testCases := []struct { + name string + args []string + setManifestPath bool + manifestPath string + cfgPath string + expectedError bool + }{ + { + name: "valid: run diff on valid manifest path", + cfgPath: testUpgradeDiffConfig, + setManifestPath: true, + manifestPath: testUpgradeDiffManifest, + expectedError: false, + }, + { + name: "invalid: missing config file", + cfgPath: "missing-path-to-a-config", + expectedError: true, + }, + { + name: "invalid: valid config but empty manifest path", + cfgPath: testUpgradeDiffConfig, + setManifestPath: true, + manifestPath: "", + expectedError: true, + }, + { + name: "invalid: valid config but bad manifest path", + cfgPath: testUpgradeDiffConfig, + setManifestPath: true, + manifestPath: "bad-path", + expectedError: true, + }, + { + name: "invalid: badly formatted version as argument", + cfgPath: testUpgradeDiffConfig, + args: []string{"bad-version"}, + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + parentFlags.cfgPath = tc.cfgPath + if tc.setManifestPath { + flags.apiServerManifestPath = tc.manifestPath + flags.controllerManagerManifestPath = tc.manifestPath + flags.schedulerManifestPath = tc.manifestPath + } + if err := runDiff(flags, tc.args); (err != nil) != tc.expectedError { + t.Fatalf("expected error: %v, saw: %v, error: %v", tc.expectedError, (err != nil), err) + } + }) + } +} diff --git a/cmd/kubeadm/app/cmd/upgrade/testdata/diff_dummy_manifest.yaml b/cmd/kubeadm/app/cmd/upgrade/testdata/diff_dummy_manifest.yaml new file mode 100644 index 00000000000..b65b6743bf6 --- /dev/null +++ b/cmd/kubeadm/app/cmd/upgrade/testdata/diff_dummy_manifest.yaml @@ -0,0 +1 @@ +some-empty-file-to-diff diff --git a/cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml b/cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml new file mode 100644 index 00000000000..7bfbed6015a --- /dev/null +++ b/cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml @@ -0,0 +1,3 @@ +apiVersion: kubeadm.k8s.io/v1alpha1 +kind: MasterConfiguration +kubernetesVersion: 1.11.0 From 087140aee9c001fd6c56eb3f4f9a7b808d6384d0 Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 21 May 2018 16:06:58 -0400 Subject: [PATCH 069/307] move updatepodspecforobject out of factory --- pkg/kubectl/cmd/set/BUILD | 1 + pkg/kubectl/cmd/set/set_env.go | 5 +- pkg/kubectl/cmd/set/set_image.go | 5 +- pkg/kubectl/cmd/set/set_resources.go | 5 +- pkg/kubectl/cmd/set/set_serviceaccount.go | 5 +- pkg/kubectl/cmd/util/BUILD | 2 - pkg/kubectl/cmd/util/factory.go | 5 - pkg/kubectl/cmd/util/factory_client_access.go | 64 ------------- pkg/kubectl/polymorphichelpers/BUILD | 3 + pkg/kubectl/polymorphichelpers/interface.go | 8 ++ .../polymorphichelpers/updatepodspec.go | 91 +++++++++++++++++++ 11 files changed, 115 insertions(+), 79 deletions(-) create mode 100644 pkg/kubectl/polymorphichelpers/updatepodspec.go diff --git a/pkg/kubectl/cmd/set/BUILD b/pkg/kubectl/cmd/set/BUILD index 161f7ac79a9..c96b85e7cef 100644 --- a/pkg/kubectl/cmd/set/BUILD +++ b/pkg/kubectl/cmd/set/BUILD @@ -26,6 +26,7 @@ go_library( "//pkg/kubectl/cmd/util/env:go_default_library", 
"//pkg/kubectl/genericclioptions:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", + "//pkg/kubectl/polymorphichelpers:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util/i18n:go_default_library", "//pkg/printers:go_default_library", diff --git a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index 90f2f22f92b..eba08dc03fc 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -35,6 +35,7 @@ import ( envutil "k8s.io/kubernetes/pkg/kubectl/cmd/util/env" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/printers" ) @@ -114,7 +115,7 @@ type EnvOptions struct { output string dryRun bool builder func() *resource.Builder - updatePodSpecForObject func(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) + updatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc namespace string enforceNamespace bool clientset *kubernetes.Clientset @@ -192,7 +193,7 @@ func (o *EnvOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri return fmt.Errorf("all resources must be specified before environment changes: %s", strings.Join(args, " ")) } - o.updatePodSpecForObject = f.UpdatePodSpecForObject + o.updatePodSpecForObject = polymorphichelpers.UpdatePodSpecForObjectFn o.output = cmdutil.GetFlagString(cmd, "output") o.dryRun = cmdutil.GetDryRunFlag(cmd) diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 76dd52646b7..57ae09a5b3a 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -30,6 +30,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/printers" @@ -54,7 +55,7 @@ type SetImageOptions struct { PrintObj printers.ResourcePrinterFunc Recorder genericclioptions.Recorder - UpdatePodSpecForObject func(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) + UpdatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc Resources []string ContainerImages map[string]string @@ -134,7 +135,7 @@ func (o *SetImageOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ return err } - o.UpdatePodSpecForObject = f.UpdatePodSpecForObject + o.UpdatePodSpecForObject = polymorphichelpers.UpdatePodSpecForObjectFn o.DryRun = cmdutil.GetDryRunFlag(cmd) o.Output = cmdutil.GetFlagString(cmd, "output") o.ResolveImage = f.ResolveImage diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 0fd6bc5b310..6f83109a176 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -34,6 +34,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" ) @@ -84,7 +85,7 @@ type SetResourcesOptions struct { Requests string ResourceRequirements v1.ResourceRequirements - UpdatePodSpecForObject func(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) + 
UpdatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc Resources []string genericclioptions.IOStreams @@ -153,7 +154,7 @@ func (o *SetResourcesOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, ar return err } - o.UpdatePodSpecForObject = f.UpdatePodSpecForObject + o.UpdatePodSpecForObject = polymorphichelpers.UpdatePodSpecForObjectFn o.Output = cmdutil.GetFlagString(cmd, "output") o.DryRun = cmdutil.GetDryRunFlag(cmd) diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index d06184ddee1..e3706ab43f6 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -33,6 +33,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" ) @@ -67,7 +68,7 @@ type SetServiceAccountOptions struct { all bool output string local bool - updatePodSpecForObject func(runtime.Object, func(*v1.PodSpec) error) (bool, error) + updatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc infos []*resource.Info serviceAccountName string @@ -130,7 +131,7 @@ func (o *SetServiceAccountOptions) Complete(f cmdutil.Factory, cmd *cobra.Comman o.shortOutput = cmdutil.GetFlagString(cmd, "output") == "name" o.dryRun = cmdutil.GetDryRunFlag(cmd) o.output = cmdutil.GetFlagString(cmd, "output") - o.updatePodSpecForObject = f.UpdatePodSpecForObject + o.updatePodSpecForObject = polymorphichelpers.UpdatePodSpecForObjectFn if o.dryRun { o.PrintFlags.Complete("%s (dry run)") diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index eca56bc1cbb..41cba673fa6 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -36,11 +36,9 @@ go_library( "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 8f867553028..f42f80560e8 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -23,7 +23,6 @@ import ( "github.com/spf13/cobra" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -79,10 +78,6 @@ type ClientAccessFactory interface { // and which implements the common patterns for CLI interactions with generic resources. NewBuilder() *resource.Builder - // UpdatePodSpecForObject will call the provided function on the pod spec this object supports, - // return false if no pod spec is supported, or return an error. - UpdatePodSpecForObject(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) - // MapBasedSelectorForObject returns the map-based selector associated with the provided object. 
If a // new set-based selector is provided, an error is returned if the selector cannot be converted to a // map-based selector diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index 55ffc2bb3cb..519021d919b 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -26,14 +26,11 @@ import ( "path/filepath" "strings" - "k8s.io/api/core/v1" - "github.com/spf13/cobra" "github.com/spf13/pflag" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" - appsv1beta2 "k8s.io/api/apps/v1beta2" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" batchv2alpha1 "k8s.io/api/batch/v2alpha1" @@ -129,67 +126,6 @@ func (f *ring0Factory) RESTClient() (*restclient.RESTClient, error) { return restclient.RESTClientFor(clientConfig) } -func (f *ring0Factory) UpdatePodSpecForObject(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) { - // TODO: replace with a swagger schema based approach (identify pod template via schema introspection) - switch t := obj.(type) { - case *v1.Pod: - return true, fn(&t.Spec) - // ReplicationController - case *v1.ReplicationController: - if t.Spec.Template == nil { - t.Spec.Template = &v1.PodTemplateSpec{} - } - return true, fn(&t.Spec.Template.Spec) - - // Deployment - case *extensionsv1beta1.Deployment: - return true, fn(&t.Spec.Template.Spec) - case *appsv1beta1.Deployment: - return true, fn(&t.Spec.Template.Spec) - case *appsv1beta2.Deployment: - return true, fn(&t.Spec.Template.Spec) - case *appsv1.Deployment: - return true, fn(&t.Spec.Template.Spec) - - // DaemonSet - case *extensionsv1beta1.DaemonSet: - return true, fn(&t.Spec.Template.Spec) - case *appsv1beta2.DaemonSet: - return true, fn(&t.Spec.Template.Spec) - case *appsv1.DaemonSet: - return true, fn(&t.Spec.Template.Spec) - - // ReplicaSet - case *extensionsv1beta1.ReplicaSet: - return true, fn(&t.Spec.Template.Spec) - case *appsv1beta2.ReplicaSet: - return true, fn(&t.Spec.Template.Spec) - case *appsv1.ReplicaSet: - return true, fn(&t.Spec.Template.Spec) - - // StatefulSet - case *appsv1beta1.StatefulSet: - return true, fn(&t.Spec.Template.Spec) - case *appsv1beta2.StatefulSet: - return true, fn(&t.Spec.Template.Spec) - case *appsv1.StatefulSet: - return true, fn(&t.Spec.Template.Spec) - - // Job - case *batchv1.Job: - return true, fn(&t.Spec.Template.Spec) - - // CronJob - case *batchv1beta1.CronJob: - return true, fn(&t.Spec.JobTemplate.Spec.Template.Spec) - case *batchv2alpha1.CronJob: - return true, fn(&t.Spec.JobTemplate.Spec.Template.Spec) - - default: - return false, fmt.Errorf("the object is not a pod or does not have a pod template: %T", t) - } -} - func (f *ring0Factory) MapBasedSelectorForObject(object runtime.Object) (string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { diff --git a/pkg/kubectl/polymorphichelpers/BUILD b/pkg/kubectl/polymorphichelpers/BUILD index cf624a54558..1feaaf523b1 100644 --- a/pkg/kubectl/polymorphichelpers/BUILD +++ b/pkg/kubectl/polymorphichelpers/BUILD @@ -9,6 +9,7 @@ go_library( "interface.go", "logsforobject.go", "statusviewer.go", + "updatepodspec.go", ], importpath = "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers", visibility = ["//visibility:public"], @@ -27,6 +28,8 @@ go_library( "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/apps/v1beta2:go_default_library", 
"//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1beta1:go_default_library", + "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/pkg/kubectl/polymorphichelpers/interface.go b/pkg/kubectl/polymorphichelpers/interface.go index 63e1006318a..3029730742e 100644 --- a/pkg/kubectl/polymorphichelpers/interface.go +++ b/pkg/kubectl/polymorphichelpers/interface.go @@ -19,6 +19,7 @@ package polymorphichelpers import ( "time" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" @@ -50,3 +51,10 @@ type StatusViewerFunc func(restClientGetter genericclioptions.RESTClientGetter, // StatusViewerFn gives a way to easily override the function for unit testing if needed var StatusViewerFn StatusViewerFunc = statusViewer + +// UpdatePodSpecForObjectFunc will call the provided function on the pod spec this object supports, +// return false if no pod spec is supported, or return an error. +type UpdatePodSpecForObjectFunc func(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) + +// UpdatePodSpecForObjectFn gives a way to easily override the function for unit testing if needed +var UpdatePodSpecForObjectFn UpdatePodSpecForObjectFunc = updatePodSpecForObject diff --git a/pkg/kubectl/polymorphichelpers/updatepodspec.go b/pkg/kubectl/polymorphichelpers/updatepodspec.go new file mode 100644 index 00000000000..40448638246 --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/updatepodspec.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "fmt" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +func updatePodSpecForObject(obj runtime.Object, fn func(*v1.PodSpec) error) (bool, error) { + switch t := obj.(type) { + case *v1.Pod: + return true, fn(&t.Spec) + // ReplicationController + case *v1.ReplicationController: + if t.Spec.Template == nil { + t.Spec.Template = &v1.PodTemplateSpec{} + } + return true, fn(&t.Spec.Template.Spec) + + // Deployment + case *extensionsv1beta1.Deployment: + return true, fn(&t.Spec.Template.Spec) + case *appsv1beta1.Deployment: + return true, fn(&t.Spec.Template.Spec) + case *appsv1beta2.Deployment: + return true, fn(&t.Spec.Template.Spec) + case *appsv1.Deployment: + return true, fn(&t.Spec.Template.Spec) + + // DaemonSet + case *extensionsv1beta1.DaemonSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1beta2.DaemonSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1.DaemonSet: + return true, fn(&t.Spec.Template.Spec) + + // ReplicaSet + case *extensionsv1beta1.ReplicaSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1beta2.ReplicaSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1.ReplicaSet: + return true, fn(&t.Spec.Template.Spec) + + // StatefulSet + case *appsv1beta1.StatefulSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1beta2.StatefulSet: + return true, fn(&t.Spec.Template.Spec) + case *appsv1.StatefulSet: + return true, fn(&t.Spec.Template.Spec) + + // Job + case *batchv1.Job: + return true, fn(&t.Spec.Template.Spec) + + // CronJob + case *batchv1beta1.CronJob: + return true, fn(&t.Spec.JobTemplate.Spec.Template.Spec) + case *batchv2alpha1.CronJob: + return true, fn(&t.Spec.JobTemplate.Spec.Template.Spec) + + default: + return false, fmt.Errorf("the object is not a pod or does not have a pod template: %T", t) + } +} From 2acbe87156d3fcd7c18a853ee694732b8ba31cda Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Thu, 29 Mar 2018 12:57:20 -0700 Subject: [PATCH 070/307] mark ServerAddressByClientCIDRs as optional --- staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 917efb37f75..e93df18461e 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -799,7 +799,8 @@ type APIGroup struct { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. 
- ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` + // +optional + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` } // ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. From 4ad9aedb04df53a7cd733c0322a3c947a73c88dd Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 21 May 2018 16:25:45 -0700 Subject: [PATCH 071/307] test/e2e_node: Add [NodeConformance] to tests tagged [Conformance] This has no effect yet until test configurations are updated. --- test/e2e_node/gke_environment_test.go | 2 +- test/e2e_node/kubelet_test.go | 4 ++-- test/e2e_node/lifecycle_hook_test.go | 8 ++++---- test/e2e_node/mirror_pod_test.go | 6 +++--- test/e2e_node/runtime_conformance_test.go | 14 +++++++------- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/test/e2e_node/gke_environment_test.go b/test/e2e_node/gke_environment_test.go index dd88976b39c..98512e1768c 100644 --- a/test/e2e_node/gke_environment_test.go +++ b/test/e2e_node/gke_environment_test.go @@ -310,7 +310,7 @@ func checkDockerStorageDriver() error { return fmt.Errorf("failed to find storage driver") } -var _ = framework.KubeDescribe("GKE system requirements [Conformance] [Feature:GKEEnv]", func() { +var _ = framework.KubeDescribe("GKE system requirements [Conformance][NodeConformance][Feature:GKEEnv]", func() { BeforeEach(func() { framework.RunIfSystemSpecNameIs("gke") }) diff --git a/test/e2e_node/kubelet_test.go b/test/e2e_node/kubelet_test.go index 9347d2d183d..c712d7cc330 100644 --- a/test/e2e_node/kubelet_test.go +++ b/test/e2e_node/kubelet_test.go @@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { }) Context("when scheduling a busybox command in a pod", func() { podName := "busybox-scheduling-" + string(uuid.NewUUID()) - framework.ConformanceIt("it should print the output to logs", func() { + framework.ConformanceIt("it should print the output to logs [NodeConformance]", func() { podClient.CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { }) Context("when scheduling a read only busybox container", func() { podName := "busybox-readonly-fs" + string(uuid.NewUUID()) - framework.ConformanceIt("it should not write to root filesystem", func() { + framework.ConformanceIt("it should not write to root filesystem [NodeConformance]", func() { isReadOnly := true podClient.CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e_node/lifecycle_hook_test.go b/test/e2e_node/lifecycle_hook_test.go index 3aa7e0065fd..5082d9df655 100644 --- a/test/e2e_node/lifecycle_hook_test.go +++ b/test/e2e_node/lifecycle_hook_test.go @@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() { }, preStopWaitTimeout, podCheckInterval).Should(BeNil()) } } - framework.ConformanceIt("should execute poststart exec hook properly", func() { + framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() { lifecycle := &v1.Lifecycle{ PostStart: &v1.Handler{ Exec: &v1.ExecAction{ @@ -95,7 +95,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() { podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle) 
testPodWithHook(podWithHook) }) - framework.ConformanceIt("should execute prestop exec hook properly", func() { + framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() { lifecycle := &v1.Lifecycle{ PreStop: &v1.Handler{ Exec: &v1.ExecAction{ @@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() { podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle) testPodWithHook(podWithHook) }) - framework.ConformanceIt("should execute poststart http hook properly", func() { + framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() { lifecycle := &v1.Lifecycle{ PostStart: &v1.Handler{ HTTPGet: &v1.HTTPGetAction{ @@ -119,7 +119,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() { podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle) testPodWithHook(podWithHook) }) - framework.ConformanceIt("should execute prestop http hook properly", func() { + framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() { lifecycle := &v1.Lifecycle{ PreStop: &v1.Handler{ HTTPGet: &v1.HTTPGetAction{ diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index 55db5f47890..9e3118622cd 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() { return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) }, 2*time.Minute, time.Second*4).Should(BeNil()) }) - framework.ConformanceIt("should be updated when static pod updated", func() { + framework.ConformanceIt("should be updated when static pod updated [NodeConformance]", func() { By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) @@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() { Expect(len(pod.Spec.Containers)).Should(Equal(1)) Expect(pod.Spec.Containers[0].Image).Should(Equal(image)) }) - framework.ConformanceIt("should be recreated when mirror pod gracefully deleted", func() { + framework.ConformanceIt("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() { By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) @@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() { return checkMirrorPodRecreatedAndRunnig(f.ClientSet, mirrorPodName, ns, uid) }, 2*time.Minute, time.Second*4).Should(BeNil()) }) - framework.ConformanceIt("should be recreated when mirror pod forcibly deleted", func() { + framework.ConformanceIt("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() { By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 0a8b7ac57d6..3846d7c9326 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -46,7 +46,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() { Describe("container runtime conformance blackbox test", func() { Context("when starting a container that exits", func() { - framework.ConformanceIt("it should run with the expected 
status", func() { + framework.ConformanceIt("it should run with the expected status [NodeConformance]", func() { restartCountVolumeName := "restart-count" restartCountVolumePath := "/restart-count" testContainer := v1.Container{ @@ -127,7 +127,7 @@ while true; do sleep 1; done By("it should get the expected 'State'") Expect(GetContainerState(status.State)).To(Equal(testCase.State)) - By("it should be possible to delete [Conformance]") + By("it should be possible to delete [Conformance][NodeConformance]") Expect(terminateContainer.Delete()).To(Succeed()) Eventually(terminateContainer.Present, retryTimeout, pollInterval).Should(BeFalse()) } @@ -142,7 +142,7 @@ while true; do sleep 1; done message gomegatypes.GomegaMatcher }{ { - name: "if TerminationMessagePath is set [Conformance]", + name: "if TerminationMessagePath is set [Conformance][NodeConformance]", container: v1.Container{ Image: busyboxImage, Command: []string{"/bin/sh", "-c"}, @@ -157,7 +157,7 @@ while true; do sleep 1; done }, { - name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance]", + name: "if TerminationMessagePath is set as non-root user and at a non-default path [Conformance][NodeConformance]", container: v1.Container{ Image: busyboxImage, Command: []string{"/bin/sh", "-c"}, @@ -172,7 +172,7 @@ while true; do sleep 1; done }, { - name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance]", + name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [Conformance][NodeConformance]", container: v1.Container{ Image: busyboxImage, Command: []string{"/bin/sh", "-c"}, @@ -198,7 +198,7 @@ while true; do sleep 1; done }, { - name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance]", + name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [Conformance][NodeConformance]", container: v1.Container{ Image: busyboxImage, Command: []string{"/bin/sh", "-c"}, @@ -313,7 +313,7 @@ while true; do sleep 1; done }, } { testCase := testCase - It(testCase.description+" [Conformance]", func() { + It(testCase.description+" [Conformance][NodeConformance]", func() { name := "image-pull-test" command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"} container := ConformanceContainer{ From 7cbd897e3edc28e5ea839d56953a48560d37bd5c Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 21 May 2018 16:46:54 -0700 Subject: [PATCH 072/307] test/e2e_node: Add Node-exclusive feature tags to existing tests --- test/e2e_node/apparmor_test.go | 2 +- test/e2e_node/cpu_manager_test.go | 2 +- test/e2e_node/device_plugin.go | 2 +- test/e2e_node/docker_test.go | 2 +- test/e2e_node/dockershim_checkpoint_test.go | 2 +- test/e2e_node/dynamic_kubelet_config_test.go | 2 +- test/e2e_node/eviction_test.go | 2 +- test/e2e_node/gke_environment_test.go | 2 +- test/e2e_node/gpu_device_plugin.go | 2 +- test/e2e_node/hugepages_test.go | 2 +- test/e2e_node/security_context_test.go | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 85e35148544..6c9c713f774 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -40,7 +40,7 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() { +var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() { if isAppArmorEnabled() { BeforeEach(func() { By("Loading AppArmor profiles for testing") diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 132919c3c03..36bf6f5bf8e 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -438,7 +438,7 @@ func runCPUManagerTests(f *framework.Framework) { } // Serial because the test updates kubelet configuration. -var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager]", func() { +var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager][NodeAlphaFeature:CPUManager]", func() { f := framework.NewDefaultFramework("cpu-manager-test") Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() { diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go index 4e0e07aa091..3f03a851a44 100644 --- a/test/e2e_node/device_plugin.go +++ b/test/e2e_node/device_plugin.go @@ -44,7 +44,7 @@ const ( ) // Serial because the test restarts Kubelet -var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin] [Serial]", func() { +var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePlugin][NodeFeature:DevicePlugin][Serial]", func() { f := framework.NewDefaultFramework("device-plugin-errors") Context("DevicePlugin", func() { diff --git a/test/e2e_node/docker_test.go b/test/e2e_node/docker_test.go index 97dae3e99b6..8a361c65d09 100644 --- a/test/e2e_node/docker_test.go +++ b/test/e2e_node/docker_test.go @@ -30,7 +30,7 @@ import ( . "github.com/onsi/gomega" ) -var _ = framework.KubeDescribe("Docker features [Feature:Docker]", func() { +var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() { f := framework.NewDefaultFramework("docker-feature-test") BeforeEach(func() { diff --git a/test/e2e_node/dockershim_checkpoint_test.go b/test/e2e_node/dockershim_checkpoint_test.go index e00a75f5797..e06ad64c7f8 100644 --- a/test/e2e_node/dockershim_checkpoint_test.go +++ b/test/e2e_node/dockershim_checkpoint_test.go @@ -43,7 +43,7 @@ const ( testCheckpointContent = `{"version":"v1","name":"fluentd-gcp-v2.0-vmnqx","namespace":"kube-system","data":{},"checksum":1799154314}` ) -var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker]", func() { +var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Docker]", func() { f := framework.NewDefaultFramework("dockerhism-checkpoint-test") BeforeEach(func() { diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index 3d709b99d10..c1ea9ca6724 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -65,7 +65,7 @@ type nodeConfigTestCase struct { } // This test is marked [Disruptive] because the Kubelet restarts several times during this test. 
-var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeAlphaFeature:DynamicKubeletConfig][Serial] [Disruptive]", func() { f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test") var beforeNode *apiv1.Node var beforeConfigMap *apiv1.ConfigMap diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 21962c62f31..08e5752fca9 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup }) // LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions -var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation]", func() { +var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:LocalStorageCapacityIsolation]", func() { f := framework.NewDefaultFramework("localstorage-eviction-test") evictionTestTimeout := 10 * time.Minute Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() { diff --git a/test/e2e_node/gke_environment_test.go b/test/e2e_node/gke_environment_test.go index 98512e1768c..f0badefd440 100644 --- a/test/e2e_node/gke_environment_test.go +++ b/test/e2e_node/gke_environment_test.go @@ -310,7 +310,7 @@ func checkDockerStorageDriver() error { return fmt.Errorf("failed to find storage driver") } -var _ = framework.KubeDescribe("GKE system requirements [Conformance][NodeConformance][Feature:GKEEnv]", func() { +var _ = framework.KubeDescribe("GKE system requirements [Conformance][NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() { BeforeEach(func() { framework.RunIfSystemSpecNameIs("gke") }) diff --git a/test/e2e_node/gpu_device_plugin.go b/test/e2e_node/gpu_device_plugin.go index eca834822d3..d9ee9f52728 100644 --- a/test/e2e_node/gpu_device_plugin.go +++ b/test/e2e_node/gpu_device_plugin.go @@ -37,7 +37,7 @@ const ( ) // Serial because the test restarts Kubelet -var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeature:GPUDevicePlugin][Serial] [Disruptive]", func() { f := framework.NewDefaultFramework("device-plugin-gpus-errors") Context("DevicePlugin", func() { diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go index cd2ab1e1715..98659b201c3 100644 --- a/test/e2e_node/hugepages_test.go +++ b/test/e2e_node/hugepages_test.go @@ -191,7 +191,7 @@ func runHugePagesTests(f *framework.Framework) { } // Serial because the test updates kubelet configuration. 
-var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages]", func() { +var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeFeature:HugePages]", func() { f := framework.NewDefaultFramework("hugepages-test") Context("With config updated with hugepages feature enabled", func() { diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go index b7dac718e38..4efdb92335d 100644 --- a/test/e2e_node/security_context_test.go +++ b/test/e2e_node/security_context_test.go @@ -41,7 +41,7 @@ var _ = framework.KubeDescribe("Security Context", func() { podClient = f.PodClient() }) - Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace]", func() { + Context("when pod PID namespace is configurable [Feature:ShareProcessNamespace][NodeAlphaFeature:ShareProcessNamespace]", func() { It("containers in pods using isolated PID namespaces should all receive PID 1", func() { By("Create a pod with isolated PID namespaces.") f.PodClient().CreateSync(&v1.Pod{ From 5802f1828310673ddf8100606a7f2a371de5bcbc Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 21 May 2018 17:10:08 -0700 Subject: [PATCH 073/307] test/e2e_node: mark more tests with [NodeConformance] --- test/e2e_node/kubelet_test.go | 6 +++--- test/e2e_node/log_path_test.go | 2 +- test/e2e_node/pods_container_manager_test.go | 2 +- test/e2e_node/runtime_conformance_test.go | 2 +- test/e2e_node/security_context_test.go | 16 ++++++++-------- test/e2e_node/summary_test.go | 2 +- test/e2e_node/volume_manager_test.go | 2 +- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/test/e2e_node/kubelet_test.go b/test/e2e_node/kubelet_test.go index c712d7cc330..3419eb2500d 100644 --- a/test/e2e_node/kubelet_test.go +++ b/test/e2e_node/kubelet_test.go @@ -92,7 +92,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { }) }) - It("should have an error terminated reason", func() { + It("should have an error terminated reason [NodeConformance]", func() { Eventually(func() error { podData, err := podClient.Get(podName, metav1.GetOptions{}) if err != nil { @@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { }, time.Minute, time.Second*4).Should(BeNil()) }) - It("should be possible to delete", func() { + It("should be possible to delete [NodeConformance]", func() { err := podClient.Delete(podName, &metav1.DeleteOptions{}) Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err)) }) @@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { Context("when scheduling a busybox Pod with hostAliases", func() { podName := "busybox-host-aliases" + string(uuid.NewUUID()) - It("it should write entries to /etc/hosts", func() { + It("it should write entries to /etc/hosts [NodeConformance]", func() { podClient.CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, diff --git a/test/e2e_node/log_path_test.go b/test/e2e_node/log_path_test.go index 2c01d8ac820..c993f121708 100644 --- a/test/e2e_node/log_path_test.go +++ b/test/e2e_node/log_path_test.go @@ -35,7 +35,7 @@ const ( checkContName = "checker-container" ) -var _ = framework.KubeDescribe("ContainerLogPath", func() { +var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() { f := framework.NewDefaultFramework("kubelet-container-log-path") Describe("Pod with a container", func() { Context("printed log to stdout", func() { diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index c835548beb9..1768e7d9fa7 100644 --- 
a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { }) }) - Describe("Pod containers", func() { + Describe("Pod containers [NodeConformance]", func() { Context("On scheduling a Guaranteed Pod", func() { It("Pod containers should have been created under the cgroup-root", func() { if !framework.TestContext.KubeletConfig.CgroupsPerQOS { diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 3846d7c9326..3ba96a87ad7 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -185,7 +185,7 @@ while true; do sleep 1; done }, { - name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set", + name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]", container: v1.Container{ Image: busyboxImage, Command: []string{"/bin/sh", "-c"}, diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go index 4efdb92335d..a6e2f619bc8 100644 --- a/test/e2e_node/security_context_test.go +++ b/test/e2e_node/security_context_test.go @@ -382,11 +382,11 @@ var _ = framework.KubeDescribe("Security Context", func() { podClient.WaitForSuccess(podName, framework.PodStartTimeout) } - It("should run the container with uid 65534", func() { + It("should run the container with uid 65534 [NodeConformance]", func() { createAndWaitUserPod(65534) }) - It("should run the container with uid 0", func() { + It("should run the container with uid 0 [NodeConformance]", func() { createAndWaitUserPod(0) }) }) @@ -429,11 +429,11 @@ var _ = framework.KubeDescribe("Security Context", func() { return podName } - It("should run the container with readonly rootfs when readOnlyRootFilesystem=true", func() { + It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]", func() { createAndWaitUserPod(true) }) - It("should run the container with writable rootfs when readOnlyRootFilesystem=false", func() { + It("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() { createAndWaitUserPod(false) }) }) @@ -497,14 +497,14 @@ var _ = framework.KubeDescribe("Security Context", func() { return nil } - It("should allow privilege escalation when not explicitly set and uid != 0", func() { + It("should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]", func() { podName := "alpine-nnp-nil-" + string(uuid.NewUUID()) if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil { framework.Failf("Match output for pod %q failed: %v", podName, err) } }) - It("should not allow privilege escalation when false", func() { + It("should not allow privilege escalation when false [NodeConformance]", func() { podName := "alpine-nnp-false-" + string(uuid.NewUUID()) apeFalse := false if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil { @@ -512,7 +512,7 @@ var _ = framework.KubeDescribe("Security Context", func() { } }) - It("should allow privilege escalation when true", func() { + It("should allow privilege escalation when true [NodeConformance]", func() { podName := "alpine-nnp-true-" + string(uuid.NewUUID()) apeTrue := true if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil { @@ -568,7 +568,7 @@ var _ = 
framework.KubeDescribe("Security Context", func() { } }) - It("should run the container as unprivileged when false", func() { + It("should run the container as unprivileged when false [NodeConformance]", func() { podName := createAndWaitUserPod(false) logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go index b75c35a25b6..dd950eed261 100644 --- a/test/e2e_node/summary_test.go +++ b/test/e2e_node/summary_test.go @@ -36,7 +36,7 @@ import ( "github.com/onsi/gomega/types" ) -var _ = framework.KubeDescribe("Summary API", func() { +var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() { f := framework.NewDefaultFramework("summary-test") Context("when querying /stats/summary", func() { AfterEach(func() { diff --git a/test/e2e_node/volume_manager_test.go b/test/e2e_node/volume_manager_test.go index 5f62e3c04f3..39f176e5003 100644 --- a/test/e2e_node/volume_manager_test.go +++ b/test/e2e_node/volume_manager_test.go @@ -34,7 +34,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() { f := framework.NewDefaultFramework("kubelet-volume-manager") Describe("Volume Manager", func() { Context("On terminatation of pod with memory backed volume", func() { - It("should remove the volume from the node", func() { + It("should remove the volume from the node [NodeConformance]", func() { var ( memoryBackedPod *v1.Pod volumeName string From ff62f037b81e6b004ef9c3c4ba442a431e1ee7a1 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 21 May 2018 17:16:10 -0700 Subject: [PATCH 074/307] Re-tag benchmark tests --- test/e2e_node/density_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index 94fea94c8c6..ea4a7299807 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -154,7 +154,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() { for _, testArg := range dTests { itArg := testArg - desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]", itArg.podsNr, itArg.interval) + desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval) It(desc, func() { itArg.createMethod = "batch" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) @@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() { for _, testArg := range dTests { itArg := testArg Context("", func() { - desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit) + desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit) // The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency. // It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated. // Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance. 
@@ -273,7 +273,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() { for _, testArg := range dTests { itArg := testArg - desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark]", itArg.podsNr, itArg.bgPodsNr) + desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark][NodeSpeicalFeature:Benchmark]", itArg.podsNr, itArg.bgPodsNr) It(desc, func() { itArg.createMethod = "sequence" testInfo := getTestNodeInfo(f, itArg.getTestName(), desc) From 90750c77c3d047b9e018e31ac24987db224aa9f2 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 21 May 2018 17:24:29 -0700 Subject: [PATCH 075/307] test/e2e_node: Add NodeFeature tags to non-conformance tests Serial tests are not considered for conformance tests. --- test/e2e_node/container_manager_test.go | 2 +- test/e2e_node/critical_pod_test.go | 2 +- test/e2e_node/eviction_test.go | 14 +++++++------- test/e2e_node/garbage_collector_test.go | 2 +- test/e2e_node/image_id_test.go | 2 +- test/e2e_node/node_container_manager_test.go | 2 +- test/e2e_node/node_problem_detector_linux.go | 2 +- test/e2e_node/restart_test.go | 2 +- test/e2e_node/security_context_test.go | 14 +++++++------- 9 files changed, 21 insertions(+), 21 deletions(-) diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index 71a011045e5..789a954fef8 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -75,7 +75,7 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() { f := framework.NewDefaultFramework("kubelet-container-manager") - Describe("Validate OOM score adjustments", func() { + Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() { Context("once the node is setup", func() { It("container runtime's oom-score-adj should be -999", func() { runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index 6f22dc2aae3..3ed2924d555 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -40,7 +40,7 @@ const ( bestEffortPodName = "best-effort" ) -var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() { f := framework.NewDefaultFramework("critical-pod-test") Context("when we need to admit a critical pod", func() { diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 08e5752fca9..3f038216692 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", fun // ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images // Disk pressure is induced by pulling large images -var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { f := framework.NewDefaultFramework("image-gc-eviction-test") pressureTimeout := 10 * time.Minute expectedNodeCondition := v1.NodeDiskPressure @@ -123,7 +123,7 @@ var _ = 
framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", // MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods. // Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved. -var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { f := framework.NewDefaultFramework("memory-allocatable-eviction-test") expectedNodeCondition := v1.NodeMemoryPressure pressureTimeout := 10 * time.Minute @@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // Disk pressure is induced by running pods which consume disk space. -var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { f := framework.NewDefaultFramework("localstorage-eviction-test") pressureTimeout := 10 * time.Minute expectedNodeCondition := v1.NodeDiskPressure @@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold. // Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run. -var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { f := framework.NewDefaultFramework("localstorage-eviction-test") pressureTimeout := 10 * time.Minute expectedNodeCondition := v1.NodeDiskPressure @@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup }) // LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions -var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:LocalStorageCapacityIsolation]", func() { +var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() { f := framework.NewDefaultFramework("localstorage-eviction-test") evictionTestTimeout := 10 * time.Minute Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() { @@ -271,7 +271,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se // PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods. // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // the higher priority pod. 
-var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test") expectedNodeCondition := v1.NodeMemoryPressure pressureTimeout := 10 * time.Minute @@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ // PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods. // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // the higher priority pod. -var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test") expectedNodeCondition := v1.NodeDiskPressure pressureTimeout := 10 * time.Minute diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index d6a9244ad43..2ee6ce772db 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -71,7 +71,7 @@ type testRun struct { // GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here: // http://kubernetes.io/docs/admin/garbage-collection/ -var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() { +var _ = framework.KubeDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func() { f := framework.NewDefaultFramework("garbage-collect-test") containerNamePrefix := "gc-test-container-" podNamePrefix := "gc-test-pod-" diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index 090eafa2863..198d204c44a 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/gomega" ) -var _ = framework.KubeDescribe("ImageID", func() { +var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() { busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff" diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index 09e6dfad41d..58223e08434 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -56,7 +56,7 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() { f := framework.NewDefaultFramework("node-container-manager") - Describe("Validate Node Allocatable", func() { + Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() { It("set's up the node and runs the test", func() { framework.ExpectNoError(runTest(f)) }) diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 3baf3287337..64aa7ac7207 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -40,7 +40,7 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = framework.KubeDescribe("NodeProblemDetector", func() { +var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector]", func() { const ( pollInterval = 1 * time.Second pollConsistent = 5 * time.Second diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go index 7b68ea74011..4b50c3479ea 100644 --- a/test/e2e_node/restart_test.go +++ b/test/e2e_node/restart_test.go @@ -59,7 +59,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) ( return runningPods } -var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() { +var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeature:ContainerRuntimeRestart]", func() { const ( // Saturate the node. It's not necessary that all these pods enter // Running/Ready, because we don't know the number of cores in the diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go index a6e2f619bc8..3d0aa231cc1 100644 --- a/test/e2e_node/security_context_test.go +++ b/test/e2e_node/security_context_test.go @@ -156,7 +156,7 @@ var _ = framework.KubeDescribe("Security Context", func() { nginxPid = strings.TrimSpace(output) }) - It("should show its pid in the host PID namespace", func() { + It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() { busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID()) createAndWaitHostPidPod(busyboxPodName, true) logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) @@ -176,7 +176,7 @@ var _ = framework.KubeDescribe("Security Context", func() { } }) - It("should not show its pid in the non-hostpid containers", func() { + It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() { busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID()) createAndWaitHostPidPod(busyboxPodName, false) logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) @@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Security Context", func() { framework.Logf("Got host shared memory ID %q", hostSharedMemoryID) }) - It("should show the shared memory ID in the host IPC containers", func() { + It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() { ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID()) createAndWaitHostIPCPod(ipcutilsPodName, true) logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) @@ -247,7 +247,7 @@ var _ = framework.KubeDescribe("Security Context", func() { } }) - It("should not show the shared memory ID in the non-hostIPC containers", func() { + It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() { ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID()) createAndWaitHostIPCPod(ipcutilsPodName, false) logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) @@ -315,7 +315,7 @@ var _ = framework.KubeDescribe("Security Context", func() { framework.Logf("Opened a new tcp port %q", listeningPort) }) - It("should listen on same port in the host network containers", func() { + It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() { busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID()) createAndWaitHostNetworkPod(busyboxPodName, true) logs, err := 
framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) @@ -329,7 +329,7 @@ var _ = framework.KubeDescribe("Security Context", func() { } }) - It("shouldn't show the same port in the non-hostnetwork containers", func() { + It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() { busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID()) createAndWaitHostNetworkPod(busyboxPodName, false) logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) @@ -555,7 +555,7 @@ var _ = framework.KubeDescribe("Security Context", func() { return podName } - It("should run the container as privileged when true", func() { + It("should run the container as privileged when true [NodeFeature:HostAccess]", func() { podName := createAndWaitUserPod(true) logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { From a14f423ebbd122f81dcb447a3a622ce5200e12c5 Mon Sep 17 00:00:00 2001 From: Haowei Cai Date: Mon, 21 May 2018 18:04:54 -0700 Subject: [PATCH 076/307] generated --- api/openapi-spec/swagger.json | 3 +-- api/swagger-spec/admissionregistration.k8s.io.json | 3 +-- api/swagger-spec/apis.json | 3 +-- api/swagger-spec/apps.json | 3 +-- api/swagger-spec/authentication.k8s.io.json | 3 +-- api/swagger-spec/authorization.k8s.io.json | 3 +-- api/swagger-spec/autoscaling.json | 3 +-- api/swagger-spec/batch.json | 3 +-- api/swagger-spec/certificates.k8s.io.json | 3 +-- api/swagger-spec/events.k8s.io.json | 3 +-- api/swagger-spec/extensions.json | 3 +-- api/swagger-spec/networking.k8s.io.json | 3 +-- api/swagger-spec/policy.json | 3 +-- api/swagger-spec/rbac.authorization.k8s.io.json | 3 +-- api/swagger-spec/scheduling.k8s.io.json | 3 +-- api/swagger-spec/settings.k8s.io.json | 3 +-- api/swagger-spec/storage.k8s.io.json | 3 +-- .../src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto | 1 + 18 files changed, 18 insertions(+), 34 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 66c8520117f..afb9d5709d2 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -84336,8 +84336,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "apiVersion": { diff --git a/api/swagger-spec/admissionregistration.k8s.io.json b/api/swagger-spec/admissionregistration.k8s.io.json index 8a93d2b976f..4b5bba0b60a 100644 --- a/api/swagger-spec/admissionregistration.k8s.io.json +++ b/api/swagger-spec/admissionregistration.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/apis.json b/api/swagger-spec/apis.json index 18d960ca599..9070fc9096b 100644 --- a/api/swagger-spec/apis.json +++ b/api/swagger-spec/apis.json @@ -62,8 +62,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/apps.json b/api/swagger-spec/apps.json index ecfaa626a6e..603237ab4b1 100644 --- a/api/swagger-spec/apps.json +++ b/api/swagger-spec/apps.json @@ -38,8 +38,7 @@ 
"description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/authentication.k8s.io.json b/api/swagger-spec/authentication.k8s.io.json index d0ef25599e9..175735dba82 100644 --- a/api/swagger-spec/authentication.k8s.io.json +++ b/api/swagger-spec/authentication.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/authorization.k8s.io.json b/api/swagger-spec/authorization.k8s.io.json index 9bd028308d7..98200f75838 100644 --- a/api/swagger-spec/authorization.k8s.io.json +++ b/api/swagger-spec/authorization.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/autoscaling.json b/api/swagger-spec/autoscaling.json index fbb9329e3a7..39bdb8664fa 100644 --- a/api/swagger-spec/autoscaling.json +++ b/api/swagger-spec/autoscaling.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/batch.json b/api/swagger-spec/batch.json index c8604aed950..ad5fcc2c274 100644 --- a/api/swagger-spec/batch.json +++ b/api/swagger-spec/batch.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/certificates.k8s.io.json b/api/swagger-spec/certificates.k8s.io.json index bdd12bab60a..64799705329 100644 --- a/api/swagger-spec/certificates.k8s.io.json +++ b/api/swagger-spec/certificates.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/events.k8s.io.json b/api/swagger-spec/events.k8s.io.json index 423933664fc..b6513828374 100644 --- a/api/swagger-spec/events.k8s.io.json +++ b/api/swagger-spec/events.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/extensions.json b/api/swagger-spec/extensions.json index 2e2fc1afd97..bc161e4ef44 100644 --- a/api/swagger-spec/extensions.json +++ b/api/swagger-spec/extensions.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/networking.k8s.io.json b/api/swagger-spec/networking.k8s.io.json index debc58cd79a..9a67396ddd7 100644 --- a/api/swagger-spec/networking.k8s.io.json +++ 
b/api/swagger-spec/networking.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/policy.json b/api/swagger-spec/policy.json index 51c62c22174..12ee477ccdc 100644 --- a/api/swagger-spec/policy.json +++ b/api/swagger-spec/policy.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/rbac.authorization.k8s.io.json b/api/swagger-spec/rbac.authorization.k8s.io.json index d162e47b9fd..7752ae8a683 100644 --- a/api/swagger-spec/rbac.authorization.k8s.io.json +++ b/api/swagger-spec/rbac.authorization.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/scheduling.k8s.io.json b/api/swagger-spec/scheduling.k8s.io.json index ca8030cbd41..639e581b077 100644 --- a/api/swagger-spec/scheduling.k8s.io.json +++ b/api/swagger-spec/scheduling.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/settings.k8s.io.json b/api/swagger-spec/settings.k8s.io.json index b82334cab59..f1462290704 100644 --- a/api/swagger-spec/settings.k8s.io.json +++ b/api/swagger-spec/settings.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/api/swagger-spec/storage.k8s.io.json b/api/swagger-spec/storage.k8s.io.json index 733f4ecef65..956159c8c18 100644 --- a/api/swagger-spec/storage.k8s.io.json +++ b/api/swagger-spec/storage.k8s.io.json @@ -38,8 +38,7 @@ "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", "required": [ "name", - "versions", - "serverAddressByClientCIDRs" + "versions" ], "properties": { "kind": { diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index e78380ab6d8..4baf44f3de9 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -49,6 +49,7 @@ message APIGroup { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. 
+ // +optional repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; } From 3815dfe478695d58c82e84c4e8097ca29ee58df7 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 21 May 2018 18:09:33 -0700 Subject: [PATCH 077/307] test/e2e/common: add NodeConformance tag to all Conformance tests --- test/e2e/common/configmap.go | 4 +- test/e2e/common/configmap_volume.go | 18 ++++----- test/e2e/common/container_probe.go | 14 +++---- test/e2e/common/docker_containers.go | 8 ++-- test/e2e/common/downward_api.go | 10 ++--- test/e2e/common/downwardapi_volume.go | 22 +++++------ test/e2e/common/empty_dir.go | 28 +++++++------- test/e2e/common/expansion.go | 6 +-- test/e2e/common/host_path.go | 2 +- test/e2e/common/kubelet_etc_hosts.go | 2 +- test/e2e/common/networking.go | 8 ++-- test/e2e/common/pods.go | 10 ++--- test/e2e/common/projected.go | 56 +++++++++++++-------------- test/e2e/common/secrets.go | 4 +- test/e2e/common/secrets_volume.go | 14 +++---- 15 files changed, 103 insertions(+), 103 deletions(-) diff --git a/test/e2e/common/configmap.go b/test/e2e/common/configmap.go index 54bae8aaadd..90038618f5d 100644 --- a/test/e2e/common/configmap.go +++ b/test/e2e/common/configmap.go @@ -34,7 +34,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() { Description: Make sure config map value can be used as an environment variable in the container (on container.env field) */ - framework.ConformanceIt("should be consumable via environment variable ", func() { + framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) @@ -82,7 +82,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() { Description: Make sure config map value can be used as an source for environment variables in the container (on container.envFrom field) */ - framework.ConformanceIt("should be consumable via the environment ", func() { + framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newEnvFromConfigMap(f, name) By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index 66b78749266..d0c09996df6 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -37,7 +37,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Description: Make sure config map without mappings works by mounting it to a volume with a custom path (mapping) on the pod with no other settings. 
*/ - framework.ConformanceIt("should be consumable from pods in volume ", func() { + framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() { doConfigMapE2EWithoutMappings(f, 0, 0, nil) }) @@ -46,7 +46,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Description: Make sure config map without mappings works by mounting it to a volume with a custom path (mapping) on the pod with defaultMode set */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set ", func() { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() { defaultMode := int32(0400) doConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode) }) @@ -61,7 +61,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Description: Make sure config map without mappings works by mounting it to a volume with a custom path (mapping) on the pod as non-root. */ - framework.ConformanceIt("should be consumable from pods in volume as non-root ", func() { + framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() { doConfigMapE2EWithoutMappings(f, 1000, 0, nil) }) @@ -75,7 +75,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { a custom path (mapping) on the pod with no other settings and make sure the pod actually consumes it. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings ", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() { doConfigMapE2EWithMappings(f, 0, 0, nil) }) @@ -84,7 +84,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Description: Make sure config map works with an item mode (e.g. 0400) for the config map item. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() { mode := int32(0400) doConfigMapE2EWithMappings(f, 0, 0, &mode) }) @@ -93,7 +93,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Testname: configmap-simple-user-mapped Description: Make sure config map works when it is mounted as non-root. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root ", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() { doConfigMapE2EWithMappings(f, 1000, 0, nil) }) @@ -106,7 +106,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Description: Make sure update operation is working on config map and the result is observed on volumes mounted in containers. */ - framework.ConformanceIt("updates should be reflected in volume ", func() { + framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() { podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) @@ -280,7 +280,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Description: Make sure Create, Update, Delete operations are all working on config map and the result is observed on volumes mounted in containers. 
*/ - framework.ConformanceIt("optional updates should be reflected in volume ", func() { + framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() { podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -463,7 +463,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Description: Make sure config map works when it mounted as two different volumes on the same node. */ - framework.ConformanceIt("should be consumable in multiple volumes in the same pod ", func() { + framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() { var ( name = "configmap-test-volume-" + string(uuid.NewUUID()) volumeName = "configmap-volume" diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index 1cd53c07bd8..565741887c2 100644 --- a/test/e2e/common/container_probe.go +++ b/test/e2e/common/container_probe.go @@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Make sure that pod with readiness probe should not be ready before initial delay and never restart. */ - framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart ", func() { + framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() { p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil)) f.WaitForPodReady(p.Name) @@ -86,7 +86,7 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Make sure that pod with readiness probe that fails should never be ready and never restart. */ - framework.ConformanceIt("with readiness probe that fails should never be ready and never restart ", func() { + framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() { p := podClient.Create(makePodSpec(probe.withFailing().build(), nil)) Consistently(func() (bool, error) { p, err := podClient.Get(p.Name, metav1.GetOptions{}) @@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Make sure the pod is restarted with a cat /tmp/health liveness probe. */ - framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe", func() { + framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() { runLivenessTest(f, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "liveness-exec", @@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Make sure the pod is not restarted with a cat /tmp/health liveness probe. */ - framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe", func() { + framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() { runLivenessTest(f, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "liveness-exec", @@ -175,7 +175,7 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Make sure when http liveness probe fails, the pod should be restarted. 
*/ - framework.ConformanceIt("should be restarted with a /healthz http liveness probe ", func() { + framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() { runLivenessTest(f, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "liveness-http", @@ -209,7 +209,7 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Make sure when a pod gets restarted, its start count should increase. */ - framework.ConformanceIt("should have monotonically increasing restart count [Slow]", func() { + framework.ConformanceIt("should have monotonically increasing restart count [Slow][NodeConformance]", func() { runLivenessTest(f, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "liveness-http", @@ -242,7 +242,7 @@ var _ = framework.KubeDescribe("Probing container", func() { Description: Make sure when http liveness probe succeeds, the pod should not be restarted. */ - framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe ", func() { + framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() { runLivenessTest(f, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "liveness-http", diff --git a/test/e2e/common/docker_containers.go b/test/e2e/common/docker_containers.go index dc5c9bf2eee..cef5a2de2f2 100644 --- a/test/e2e/common/docker_containers.go +++ b/test/e2e/common/docker_containers.go @@ -33,7 +33,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() { provided for a Container, ensure that the docker image's default command and args are used. */ - framework.ConformanceIt("should use the image defaults if command and args are blank ", func() { + framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() { f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{ "[/ep default arguments]", }) @@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() { Container, ensure that they take precedent to the docker image's default arguments, but that the default command is used. */ - framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) ", func() { + framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) [NodeConformance]", func() { pod := entrypointTestPod() pod.Spec.Containers[0].Args = []string{"override", "arguments"} @@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() { Container, ensure that it takes precedent to the docker image's default command. */ - framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) ", func() { + framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) [NodeConformance]", func() { pod := entrypointTestPod() pod.Spec.Containers[0].Command = []string{"/ep-2"} @@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() { provided for a Container, ensure that they take precedent to the docker image's default command and arguments. 
*/ - framework.ConformanceIt("should be able to override the image's default command and arguments ", func() { + framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() { pod := entrypointTestPod() pod.Spec.Containers[0].Command = []string{"/ep-2"} pod.Spec.Containers[0].Args = []string{"override", "arguments"} diff --git a/test/e2e/common/downward_api.go b/test/e2e/common/downward_api.go index d4a6e74bb1f..8c3d0618b32 100644 --- a/test/e2e/common/downward_api.go +++ b/test/e2e/common/downward_api.go @@ -42,7 +42,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() { Description: Ensure that downward API can provide pod's name, namespace and IP address as environment variables. */ - framework.ConformanceIt("should provide pod name, namespace and IP address as env vars ", func() { + framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func() { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -88,7 +88,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() { Description: Ensure that downward API can provide an IP address for host node as an environment variable. */ - framework.ConformanceIt("should provide host IP as an env var ", func() { + framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() { framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery()) podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ @@ -115,7 +115,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() { Description: Ensure that downward API can provide CPU/memory limit and CPU/memory request as environment variables. */ - framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars ", func() { + framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func() { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -167,7 +167,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() { allocatable values for CPU and memory as environment variables if CPU and memory limits are not specified for a container. */ - framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable ", func() { + framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func() { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -217,7 +217,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() { Description: Ensure that downward API can provide pod UID as an environment variable. */ - framework.ConformanceIt("should provide pod UID as env vars ", func() { + framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() { framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery()) podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ diff --git a/test/e2e/common/downwardapi_volume.go b/test/e2e/common/downwardapi_volume.go index 595436d72b3..a10eb941e74 100644 --- a/test/e2e/common/downwardapi_volume.go +++ b/test/e2e/common/downwardapi_volume.go @@ -44,7 +44,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API can provide pod's name through DownwardAPIVolumeFiles. 
*/ - framework.ConformanceIt("should provide podname only ", func() { + framework.ConformanceIt("should provide podname only [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") @@ -58,7 +58,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API can set default file permission mode for DownwardAPIVolumeFiles if no mode is specified. */ - framework.ConformanceIt("should set DefaultMode on files ", func() { + framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) defaultMode := int32(0400) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) @@ -73,7 +73,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API can set file permission mode for DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should set mode on item file ", func() { + framework.ConformanceIt("should set mode on item file [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) mode := int32(0400) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) @@ -117,7 +117,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API updates labels in DownwardAPIVolumeFiles when pod's labels get modified. */ - framework.ConformanceIt("should update labels on modification ", func() { + framework.ConformanceIt("should update labels on modification [NodeConformance]", func() { labels := map[string]string{} labels["key1"] = "value1" labels["key2"] = "value2" @@ -149,7 +149,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API updates annotations in DownwardAPIVolumeFiles when pod's annotations get modified. */ - framework.ConformanceIt("should update annotations on modification ", func() { + framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() { annotations := map[string]string{} annotations["builder"] = "bar" podName := "annotationupdate" + string(uuid.NewUUID()) @@ -183,7 +183,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API can provide container's CPU limit through DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's cpu limit ", func() { + framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -197,7 +197,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API can provide container's memory limit through DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's memory limit ", func() { + framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") @@ -211,7 +211,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API can provide container's CPU request through DownwardAPIVolumeFiles. 
*/ - framework.ConformanceIt("should provide container's cpu request ", func() { + framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") @@ -225,7 +225,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { Description: Ensure that downward API can provide container's memory request through DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's memory request ", func() { + framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") @@ -240,7 +240,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { allocatable value for CPU through DownwardAPIVolumeFiles if CPU limit is not specified for a container. */ - framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set ", func() { + framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -253,7 +253,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { allocatable value for memory through DownwardAPIVolumeFiles if memory limit is not specified for a container. */ - framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set ", func() { + framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") diff --git a/test/e2e/common/empty_dir.go b/test/e2e/common/empty_dir.go index 06e3714ae3f..d428e9bbe0e 100644 --- a/test/e2e/common/empty_dir.go +++ b/test/e2e/common/empty_dir.go @@ -72,7 +72,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { of 'Memory', ensure the volume has 0777 unix file permissions and tmpfs mount type. */ - framework.ConformanceIt("volume on tmpfs should have the correct mode", func() { + framework.ConformanceIt("volume on tmpfs should have the correct mode [NodeConformance]", func() { doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory) }) @@ -82,7 +82,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { of 'Memory', ensure a root owned file with 0644 unix file permissions is created correctly, has tmpfs mount type, and enforces the permissions. */ - framework.ConformanceIt("should support (root,0644,tmpfs)", func() { + framework.ConformanceIt("should support (root,0644,tmpfs) [NodeConformance]", func() { doTest0644(f, testImageRootUid, v1.StorageMediumMemory) }) @@ -92,7 +92,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { of 'Memory', ensure a root owned file with 0666 unix file permissions is created correctly, has tmpfs mount type, and enforces the permissions. 
*/ - framework.ConformanceIt("should support (root,0666,tmpfs)", func() { + framework.ConformanceIt("should support (root,0666,tmpfs) [NodeConformance]", func() { doTest0666(f, testImageRootUid, v1.StorageMediumMemory) }) @@ -102,7 +102,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { of 'Memory', ensure a root owned file with 0777 unix file permissions is created correctly, has tmpfs mount type, and enforces the permissions. */ - framework.ConformanceIt("should support (root,0777,tmpfs)", func() { + framework.ConformanceIt("should support (root,0777,tmpfs) [NodeConformance]", func() { doTest0777(f, testImageRootUid, v1.StorageMediumMemory) }) @@ -112,7 +112,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { of 'Memory', ensure a user owned file with 0644 unix file permissions is created correctly, has tmpfs mount type, and enforces the permissions. */ - framework.ConformanceIt("should support (non-root,0644,tmpfs)", func() { + framework.ConformanceIt("should support (non-root,0644,tmpfs) [NodeConformance]", func() { doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory) }) @@ -122,7 +122,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { of 'Memory', ensure a user owned file with 0666 unix file permissions is created correctly, has tmpfs mount type, and enforces the permissions. */ - framework.ConformanceIt("should support (non-root,0666,tmpfs)", func() { + framework.ConformanceIt("should support (non-root,0666,tmpfs) [NodeConformance]", func() { doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory) }) @@ -132,7 +132,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { of 'Memory', ensure a user owned file with 0777 unix file permissions is created correctly, has tmpfs mount type, and enforces the permissions. */ - framework.ConformanceIt("should support (non-root,0777,tmpfs)", func() { + framework.ConformanceIt("should support (non-root,0777,tmpfs) [NodeConformance]", func() { doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory) }) @@ -141,7 +141,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { Description: For a Pod created with an 'emptyDir' Volume, ensure the volume has 0777 unix file permissions. */ - framework.ConformanceIt("volume on default medium should have the correct mode", func() { + framework.ConformanceIt("volume on default medium should have the correct mode [NodeConformance]", func() { doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault) }) @@ -151,7 +151,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { root owned file with 0644 unix file permissions is created and enforced correctly. */ - framework.ConformanceIt("should support (root,0644,default)", func() { + framework.ConformanceIt("should support (root,0644,default) [NodeConformance]", func() { doTest0644(f, testImageRootUid, v1.StorageMediumDefault) }) @@ -161,7 +161,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { root owned file with 0666 unix file permissions is created and enforced correctly. */ - framework.ConformanceIt("should support (root,0666,default)", func() { + framework.ConformanceIt("should support (root,0666,default) [NodeConformance]", func() { doTest0666(f, testImageRootUid, v1.StorageMediumDefault) }) @@ -171,7 +171,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { root owned file with 0777 unix file permissions is created and enforced correctly. 
*/ - framework.ConformanceIt("should support (root,0777,default)", func() { + framework.ConformanceIt("should support (root,0777,default) [NodeConformance]", func() { doTest0777(f, testImageRootUid, v1.StorageMediumDefault) }) @@ -181,7 +181,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { user owned file with 0644 unix file permissions is created and enforced correctly. */ - framework.ConformanceIt("should support (non-root,0644,default)", func() { + framework.ConformanceIt("should support (non-root,0644,default) [NodeConformance]", func() { doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault) }) @@ -191,7 +191,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { user owned file with 0666 unix file permissions is created and enforced correctly. */ - framework.ConformanceIt("should support (non-root,0666,default)", func() { + framework.ConformanceIt("should support (non-root,0666,default) [NodeConformance]", func() { doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault) }) @@ -201,7 +201,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() { user owned file with 0777 unix file permissions is created and enforced correctly. */ - framework.ConformanceIt("should support (non-root,0777,default)", func() { + framework.ConformanceIt("should support (non-root,0777,default) [NodeConformance]", func() { doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault) }) }) diff --git a/test/e2e/common/expansion.go b/test/e2e/common/expansion.go index 303df66f952..eadb324fecf 100644 --- a/test/e2e/common/expansion.go +++ b/test/e2e/common/expansion.go @@ -34,7 +34,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { Description: Make sure environment variables can be set using an expansion of previously defined environment variables */ - framework.ConformanceIt("should allow composing env vars into new env vars ", func() { + framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func() { podName := "var-expansion-" + string(uuid.NewUUID()) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { Description: Make sure a container's commands can be set using an expansion of environment variables. */ - framework.ConformanceIt("should allow substituting values in a container's command ", func() { + framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func() { podName := "var-expansion-" + string(uuid.NewUUID()) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -114,7 +114,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { Description: Make sure a container's args can be set using an expansion of environment variables. */ - framework.ConformanceIt("should allow substituting values in a container's args ", func() { + framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func() { podName := "var-expansion-" + string(uuid.NewUUID()) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e/common/host_path.go b/test/e2e/common/host_path.go index 0f08030fbf5..478b28c387a 100644 --- a/test/e2e/common/host_path.go +++ b/test/e2e/common/host_path.go @@ -45,7 +45,7 @@ var _ = Describe("[sig-storage] HostPath", func() { volume is a directory with 0777 unix file permissions and that is has the sticky bit (mode flag t) set. 
*/ - framework.ConformanceIt("should give a volume the correct mode", func() { + framework.ConformanceIt("should give a volume the correct mode [NodeConformance]", func() { source := &v1.HostPathVolumeSource{ Path: "/tmp", } diff --git a/test/e2e/common/kubelet_etc_hosts.go b/test/e2e/common/kubelet_etc_hosts.go index d9d4bdbb3a1..21d2b2a660a 100644 --- a/test/e2e/common/kubelet_etc_hosts.go +++ b/test/e2e/common/kubelet_etc_hosts.go @@ -55,7 +55,7 @@ var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() { Description: Make sure Kubelet correctly manages /etc/hosts and mounts it into the container. */ - framework.ConformanceIt("should test kubelet managed /etc/hosts file ", func() { + framework.ConformanceIt("should test kubelet managed /etc/hosts file [NodeConformance]", func() { By("Setting up the test") config.setup() diff --git a/test/e2e/common/networking.go b/test/e2e/common/networking.go index 1cb46071999..71b7dfacdfe 100644 --- a/test/e2e/common/networking.go +++ b/test/e2e/common/networking.go @@ -35,7 +35,7 @@ var _ = Describe("[sig-network] Networking", func() { Description: Try to hit test endpoints from a test container and make sure each of them can report a unique hostname. */ - framework.ConformanceIt("should function for intra-pod communication: http ", func() { + framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func() { config := framework.NewCoreNetworkingTestConfig(f) for _, endpointPod := range config.EndpointPods { config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -47,7 +47,7 @@ var _ = Describe("[sig-network] Networking", func() { Description: Try to hit test endpoints from a test container using udp and make sure each of them can report a unique hostname. */ - framework.ConformanceIt("should function for intra-pod communication: udp ", func() { + framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func() { config := framework.NewCoreNetworkingTestConfig(f) for _, endpointPod := range config.EndpointPods { config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -59,7 +59,7 @@ var _ = Describe("[sig-network] Networking", func() { Description: Try to hit test endpoints from the pod and make sure each of them can report a unique hostname. */ - framework.ConformanceIt("should function for node-pod communication: http ", func() { + framework.ConformanceIt("should function for node-pod communication: http [NodeConformance]", func() { config := framework.NewCoreNetworkingTestConfig(f) for _, endpointPod := range config.EndpointPods { config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -71,7 +71,7 @@ var _ = Describe("[sig-network] Networking", func() { Description: Try to hit test endpoints from the pod using udp and make sure each of them can report a unique hostname. 
*/ - framework.ConformanceIt("should function for node-pod communication: udp ", func() { + framework.ConformanceIt("should function for node-pod communication: udp [NodeConformance]", func() { config := framework.NewCoreNetworkingTestConfig(f) for _, endpointPod := range config.EndpointPods { config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index 5647033c896..72e24744820 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("Pods", func() { Description: Make sure when a pod is created that it is assigned a host IP Address. */ - framework.ConformanceIt("should get a host IP ", func() { + framework.ConformanceIt("should get a host IP [NodeConformance]", func() { name := "pod-hostip-" + string(uuid.NewUUID()) testHostIP(podClient, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("Pods", func() { Description: Makes sure a pod is created, a watch can be setup for the pod, pod creation was observed, pod is deleted, and pod deletion is observed. */ - framework.ConformanceIt("should be submitted and removed ", func() { + framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() { By("creating the pod") name := "pod-submit-remove-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -280,7 +280,7 @@ var _ = framework.KubeDescribe("Pods", func() { Testname: pods-updated-successfully Description: Make sure it is possible to successfully update a pod's labels. */ - framework.ConformanceIt("should be updated ", func() { + framework.ConformanceIt("should be updated [NodeConformance]", func() { By("creating the pod") name := "pod-update-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("Pods", func() { activeDeadlineSecondsValue, and then waits for the deadline to pass and verifies the pod is terminated. */ - framework.ConformanceIt("should allow activeDeadlineSeconds to be updated ", func() { + framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() { By("creating the pod") name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -381,7 +381,7 @@ var _ = framework.KubeDescribe("Pods", func() { Description: Make sure that when a pod is created it contains environment variables for each active service. */ - framework.ConformanceIt("should contain environment variables for services ", func() { + framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func() { // Make a pod that will be a service. // This pod serves its hostname via HTTP. serverName := "server-envvars-" + string(uuid.NewUUID()) diff --git a/test/e2e/common/projected.go b/test/e2e/common/projected.go index 9ad9c39ded4..641132a15bf 100644 --- a/test/e2e/common/projected.go +++ b/test/e2e/common/projected.go @@ -39,7 +39,7 @@ var _ = Describe("[sig-storage] Projected", func() { Testname: projected-secret-no-defaultMode Description: Simple projected Secret test with no defaultMode set. 
*/ - framework.ConformanceIt("should be consumable from pods in volume", func() { + framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() { doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -47,7 +47,7 @@ var _ = Describe("[sig-storage] Projected", func() { Testname: projected-secret-with-defaultMode Description: Simple projected Secret test with defaultMode set. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set", func() { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() { defaultMode := int32(0400) doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -57,7 +57,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Simple projected Secret test as non-root with defaultMode and fsGroup set. */ - framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set", func() { + framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ fsGroup := int64(1001) uid := int64(1000) @@ -70,7 +70,7 @@ var _ = Describe("[sig-storage] Projected", func() { mounting it to a volume with a custom path (mapping) on the pod with no other settings and make sure the pod actually consumes it. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() { doProjectedSecretE2EWithMapping(f, nil) }) @@ -79,7 +79,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Repeat the projected-secret-simple-mapped but this time with an item mode (e.g. 0400) for the secret map item. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() { mode := int32(0400) doProjectedSecretE2EWithMapping(f, &mode) }) @@ -110,7 +110,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Make sure secrets works when mounted as two different volumes on the same node. */ - framework.ConformanceIt("should be consumable in multiple volumes in a pod", func() { + framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() { // This test ensures that the same secret can be mounted in multiple // volumes in the same pod. This test case exists to prevent // regressions that break this use-case. @@ -203,7 +203,7 @@ var _ = Describe("[sig-storage] Projected", func() { Testname: projected-secret-simple-optional Description: Make sure secrets works when optional updates included. 
*/ - framework.ConformanceIt("optional updates should be reflected in volume", func() { + framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() { podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -405,7 +405,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Make sure that a projected volume with a configMap with no mappings succeeds properly. */ - framework.ConformanceIt("should be consumable from pods in volume", func() { + framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() { doProjectedConfigMapE2EWithoutMappings(f, 0, 0, nil) }) @@ -414,7 +414,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Make sure that a projected volume configMap is consumable with defaultMode set. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set", func() { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() { defaultMode := int32(0400) doProjectedConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode) }) @@ -429,7 +429,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Make sure that a projected volume configMap is consumable by a non-root userID. */ - framework.ConformanceIt("should be consumable from pods in volume as non-root", func() { + framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() { doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil) }) @@ -443,7 +443,7 @@ var _ = Describe("[sig-storage] Projected", func() { map and mounting it to a volume with a custom path (mapping) on the pod with no other settings and make sure the pod actually consumes it. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() { doProjectedConfigMapE2EWithMappings(f, 0, 0, nil) }) @@ -452,7 +452,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Repeat the projected-secret-simple-mapped but this time with an item mode (e.g. 0400) for the secret map item */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() { mode := int32(0400) doProjectedConfigMapE2EWithMappings(f, 0, 0, &mode) }) @@ -462,7 +462,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Repeat the projected-config-map-simple-mapped but this time with a user other than root. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() { doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil) }) @@ -476,7 +476,7 @@ var _ = Describe("[sig-storage] Projected", func() { that the values in these configMaps can be updated, deleted, and created. 
*/ - framework.ConformanceIt("updates should be reflected in volume", func() { + framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() { podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) @@ -565,7 +565,7 @@ var _ = Describe("[sig-storage] Projected", func() { configMaps, that the values in these configMaps can be updated, deleted, and created. */ - framework.ConformanceIt("optional updates should be reflected in volume", func() { + framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() { podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -766,7 +766,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Make sure config map works when it mounted as two different volumes on the same node. */ - framework.ConformanceIt("should be consumable in multiple volumes in the same pod", func() { + framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() { var ( name = "projected-configmap-test-volume-" + string(uuid.NewUUID()) volumeName = "projected-configmap-volume" @@ -864,7 +864,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Ensure that downward API can provide pod's name through DownwardAPIVolumeFiles in a projected volume. */ - framework.ConformanceIt("should provide podname only", func() { + framework.ConformanceIt("should provide podname only [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") @@ -879,7 +879,7 @@ var _ = Describe("[sig-storage] Projected", func() { mode for DownwardAPIVolumeFiles if no mode is specified in a projected volume. */ - framework.ConformanceIt("should set DefaultMode on files", func() { + framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) defaultMode := int32(0400) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) @@ -894,7 +894,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Ensure that downward API can set file permission mode for DownwardAPIVolumeFiles in a projected volume. */ - framework.ConformanceIt("should set mode on item file", func() { + framework.ConformanceIt("should set mode on item file [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) mode := int32(0400) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) @@ -939,7 +939,7 @@ var _ = Describe("[sig-storage] Projected", func() { DownwardAPIVolumeFiles when pod's labels get modified in a projected volume. */ - framework.ConformanceIt("should update labels on modification", func() { + framework.ConformanceIt("should update labels on modification [NodeConformance]", func() { labels := map[string]string{} labels["key1"] = "value1" labels["key2"] = "value2" @@ -972,7 +972,7 @@ var _ = Describe("[sig-storage] Projected", func() { DownwardAPIVolumeFiles when pod's annotations get modified in a projected volume. 
*/ - framework.ConformanceIt("should update annotations on modification", func() { + framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() { annotations := map[string]string{} annotations["builder"] = "bar" podName := "annotationupdate" + string(uuid.NewUUID()) @@ -1006,7 +1006,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Ensure that downward API can provide container's CPU limit through DownwardAPIVolumeFiles in a projected volume. */ - framework.ConformanceIt("should provide container's cpu limit", func() { + framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -1020,7 +1020,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Ensure that downward API can provide container's memory limit through DownwardAPIVolumeFiles in a projected volume. */ - framework.ConformanceIt("should provide container's memory limit", func() { + framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") @@ -1034,7 +1034,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Ensure that downward API can provide container's CPU request through DownwardAPIVolumeFiles in a projected volume. */ - framework.ConformanceIt("should provide container's cpu request", func() { + framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") @@ -1048,7 +1048,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: Ensure that downward API can provide container's memory request through DownwardAPIVolumeFiles in a projected volume. */ - framework.ConformanceIt("should provide container's memory request", func() { + framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") @@ -1063,7 +1063,7 @@ var _ = Describe("[sig-storage] Projected", func() { allocatable value for CPU through DownwardAPIVolumeFiles if CPU limit is not specified for a container in a projected volume. */ - framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set", func() { + framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -1076,7 +1076,7 @@ var _ = Describe("[sig-storage] Projected", func() { allocatable value for memory through DownwardAPIVolumeFiles if memory limit is not specified for a container in a projected volume. 
*/ - framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set", func() { + framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") @@ -1089,7 +1089,7 @@ var _ = Describe("[sig-storage] Projected", func() { Description: This test projects a secret and configmap into the same directory to ensure projection is working as intended. */ - framework.ConformanceIt("should project all components that make up the projection API [Projection]", func() { + framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() { var err error podName := "projected-volume-" + string(uuid.NewUUID()) secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID()) diff --git a/test/e2e/common/secrets.go b/test/e2e/common/secrets.go index 895e0bcdfae..0bb329ddd6f 100644 --- a/test/e2e/common/secrets.go +++ b/test/e2e/common/secrets.go @@ -35,7 +35,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() { Description: Ensure that secret can be consumed via environment variables. */ - framework.ConformanceIt("should be consumable from pods in env vars ", func() { + framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func() { name := "secret-test-" + string(uuid.NewUUID()) secret := secretForTest(f.Namespace.Name, name) @@ -84,7 +84,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() { Description: Ensure that secret can be consumed via source of a set of ConfigMaps. */ - framework.ConformanceIt("should be consumable via the environment ", func() { + framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() { name := "secret-test-" + string(uuid.NewUUID()) secret := newEnvFromSecret(f.Namespace.Name, name) By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index 1a37048702a..e9f7cfe59fd 100644 --- a/test/e2e/common/secrets_volume.go +++ b/test/e2e/common/secrets_volume.go @@ -38,7 +38,7 @@ var _ = Describe("[sig-storage] Secrets", func() { Description: Ensure that secret can be mounted without mapping to a pod volume. */ - framework.ConformanceIt("should be consumable from pods in volume ", func() { + framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() { doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -47,7 +47,7 @@ var _ = Describe("[sig-storage] Secrets", func() { Description: Ensure that secret can be mounted without mapping to a pod volume in default mode. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set ", func() { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() { defaultMode := int32(0400) doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -57,7 +57,7 @@ var _ = Describe("[sig-storage] Secrets", func() { Description: Ensure that secret can be mounted without mapping to a pod volume as non-root in default mode with fsGroup set. 
*/ - framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set ", func() { + framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ fsGroup := int64(1001) uid := int64(1000) @@ -69,7 +69,7 @@ var _ = Describe("[sig-storage] Secrets", func() { Description: Ensure that secret can be mounted with mapping to a pod volume. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings ", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() { doSecretE2EWithMapping(f, nil) }) @@ -78,7 +78,7 @@ var _ = Describe("[sig-storage] Secrets", func() { Description: Ensure that secret can be mounted with mapping to a pod volume in item mode. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set ", func() { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() { mode := int32(0400) doSecretE2EWithMapping(f, &mode) }) @@ -108,7 +108,7 @@ var _ = Describe("[sig-storage] Secrets", func() { Testname: secret-multiple-volume-mounts Description: Ensure that secret can be mounted to multiple pod volumes. */ - framework.ConformanceIt("should be consumable in multiple volumes in a pod ", func() { + framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() { // This test ensures that the same secret can be mounted in multiple // volumes in the same pod. This test case exists to prevent // regressions that break this use-case. @@ -186,7 +186,7 @@ var _ = Describe("[sig-storage] Secrets", func() { Description: Ensure that optional update change to secret can be reflected on a mounted volume. 
*/ - framework.ConformanceIt("optional updates should be reflected in volume ", func() { + framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() { podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true From a1bba62202cbd8714e404f169f9cacff1cf95c9d Mon Sep 17 00:00:00 2001 From: Cao Shufeng Date: Sun, 20 May 2018 13:26:53 +0800 Subject: [PATCH 078/307] fix kubectl get --show-kind --- pkg/kubectl/cmd/get/get.go | 7 +-- pkg/kubectl/cmd/get/get_flags.go | 9 +++- pkg/kubectl/cmd/get/get_test.go | 54 ++++++++++++++++++++++ pkg/kubectl/cmd/get/humanreadable_flags.go | 13 +++--- 4 files changed, 72 insertions(+), 11 deletions(-) diff --git a/pkg/kubectl/cmd/get/get.go b/pkg/kubectl/cmd/get/get.go index 40bebe837f5..ad6554c0ef8 100644 --- a/pkg/kubectl/cmd/get/get.go +++ b/pkg/kubectl/cmd/get/get.go @@ -219,6 +219,9 @@ func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri o.IncludeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, false) + if resource.MultipleTypesRequested(args) { + o.PrintFlags.EnsureWithKind() + } o.ToPrinter = func(mapping *meta.RESTMapping, withNamespace bool) (printers.ResourcePrinterFunc, error) { // make a new copy of current flags / opts before mutating printFlags := o.PrintFlags.Copy() @@ -229,9 +232,7 @@ func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri printFlags.UseOpenAPIColumns(apiSchema, mapping) } } - if resource.MultipleTypesRequested(args) { - printFlags.EnsureWithKind(mapping.GroupVersionKind.GroupKind()) - } + printFlags.SetKind(mapping.GroupVersionKind.GroupKind()) } if withNamespace { printFlags.EnsureWithNamespace() diff --git a/pkg/kubectl/cmd/get/get_flags.go b/pkg/kubectl/cmd/get/get_flags.go index 4540100b121..e36321c8c13 100644 --- a/pkg/kubectl/cmd/get/get_flags.go +++ b/pkg/kubectl/cmd/get/get_flags.go @@ -41,6 +41,11 @@ type PrintFlags struct { OutputFormat *string } +// SetKind sets the Kind option of humanreadable flags +func (f *PrintFlags) SetKind(kind schema.GroupKind) { + f.HumanReadableFlags.SetKind(kind) +} + // EnsureWithNamespace ensures that humanreadable flags return // a printer capable of printing with a "namespace" column. func (f *PrintFlags) EnsureWithNamespace() error { @@ -49,8 +54,8 @@ func (f *PrintFlags) EnsureWithNamespace() error { // EnsureWithKind ensures that humanreadable flags return // a printer capable of including resource kinds. -func (f *PrintFlags) EnsureWithKind(kind schema.GroupKind) error { - return f.HumanReadableFlags.EnsureWithKind(kind) +func (f *PrintFlags) EnsureWithKind() error { + return f.HumanReadableFlags.EnsureWithKind() } // Copy returns a copy of PrintFlags for mutation diff --git a/pkg/kubectl/cmd/get/get_test.go b/pkg/kubectl/cmd/get/get_test.go index e094463cdd9..c27b61d23a9 100644 --- a/pkg/kubectl/cmd/get/get_test.go +++ b/pkg/kubectl/cmd/get/get_test.go @@ -329,6 +329,60 @@ foo 0/0 0 } } +func TestGetObjectsShowKind(t *testing.T) { + pods, _, _ := testData() + + tf := cmdtesting.NewTestFactory() + defer tf.Cleanup() + codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
+ + tf.UnstructuredClient = &fake.RESTClient{ + NegotiatedSerializer: unstructuredSerializer, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, + } + tf.Namespace = "test" + + streams, _, buf, _ := genericclioptions.NewTestIOStreams() + cmd := NewCmdGet("kubectl", tf, streams) + cmd.SetOutput(buf) + cmd.Flags().Set("show-kind", "true") + cmd.Run(cmd, []string{"pods", "foo"}) + + expected := `NAME READY STATUS RESTARTS AGE +pod/foo 0/0 0 +` + if e, a := expected, buf.String(); e != a { + t.Errorf("expected %v, got %v", e, a) + } +} + +func TestGetObjectsShowLabels(t *testing.T) { + pods, _, _ := testData() + + tf := cmdtesting.NewTestFactory() + defer tf.Cleanup() + codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) + + tf.UnstructuredClient = &fake.RESTClient{ + NegotiatedSerializer: unstructuredSerializer, + Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, + } + tf.Namespace = "test" + + streams, _, buf, _ := genericclioptions.NewTestIOStreams() + cmd := NewCmdGet("kubectl", tf, streams) + cmd.SetOutput(buf) + cmd.Flags().Set("show-labels", "true") + cmd.Run(cmd, []string{"pods", "foo"}) + + expected := `NAME READY STATUS RESTARTS AGE LABELS +foo 0/0 0 +` + if e, a := expected, buf.String(); e != a { + t.Errorf("expected %v, got %v", e, a) + } +} + func TestGetObjectIgnoreNotFound(t *testing.T) { initTestErrorHandler(t) diff --git a/pkg/kubectl/cmd/get/humanreadable_flags.go b/pkg/kubectl/cmd/get/humanreadable_flags.go index 5a185b84524..43c303a5f92 100644 --- a/pkg/kubectl/cmd/get/humanreadable_flags.go +++ b/pkg/kubectl/cmd/get/humanreadable_flags.go @@ -43,13 +43,14 @@ type HumanPrintFlags struct { WithNamespace bool } -// EnsureWithKind sets the provided GroupKind humanreadable value. -// If the kind received is non-empty, the "showKind" humanreadable -// printer option is set to true. -func (f *HumanPrintFlags) EnsureWithKind(kind schema.GroupKind) error { - showKind := !kind.Empty() - +// SetKind sets the Kind option +func (f *HumanPrintFlags) SetKind(kind schema.GroupKind) { f.Kind = kind +} + +// EnsureWithKind sets the "Showkind" humanreadable option to true. 
+func (f *HumanPrintFlags) EnsureWithKind() error { + showKind := true f.ShowKind = &showKind return nil } From 7026f23cd2024372b055ba95996a8d2a273b6d0d Mon Sep 17 00:00:00 2001 From: WanLinghao Date: Tue, 22 May 2018 10:49:42 +0800 Subject: [PATCH 079/307] convert Duration into seconds by go library function --- pkg/kubectl/cmd/logs.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/kubectl/cmd/logs.go b/pkg/kubectl/cmd/logs.go index e81db051c3e..71eb390c6a9 100644 --- a/pkg/kubectl/cmd/logs.go +++ b/pkg/kubectl/cmd/logs.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" "io" - "math" "os" "time" @@ -184,7 +183,7 @@ func (o *LogsOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []str } if sinceSeconds := cmdutil.GetFlagDuration(cmd, "since"); sinceSeconds != 0 { // round up to the nearest second - sec := int64(math.Ceil(float64(sinceSeconds) / float64(time.Second))) + sec := int64(sinceSeconds.Round(time.Second).Seconds()) logOptions.SinceSeconds = &sec } o.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd) From 2f7941dd08077dc5be46e2249acaf3842c0b6067 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Mon, 21 May 2018 09:42:10 +0800 Subject: [PATCH 080/307] load kernel modules required by IPVS in kubeadm --- cmd/kubeadm/.import-restrictions | 1 + cmd/kubeadm/app/preflight/checks.go | 9 ++ pkg/util/ipvs/ipvs.go | 11 ++ pkg/util/ipvs/kernelcheck_linux.go | 94 ++++++++++++++++ pkg/util/ipvs/kernelcheck_linux_test.go | 130 +++++++++++++++++++++++ pkg/util/ipvs/kernelcheck_unsupported.go | 39 +++++++ 6 files changed, 284 insertions(+) create mode 100644 pkg/util/ipvs/kernelcheck_linux.go create mode 100644 pkg/util/ipvs/kernelcheck_linux_test.go create mode 100644 pkg/util/ipvs/kernelcheck_unsupported.go diff --git a/cmd/kubeadm/.import-restrictions b/cmd/kubeadm/.import-restrictions index b62e156460b..3b77e68e8e2 100644 --- a/cmd/kubeadm/.import-restrictions +++ b/cmd/kubeadm/.import-restrictions @@ -140,6 +140,7 @@ "k8s.io/kubernetes/pkg/util/slice", "k8s.io/kubernetes/pkg/util/taints", "k8s.io/kubernetes/pkg/util/version", + "k8s.io/kubernetes/pkg/util/ipvs", "k8s.io/kubernetes/pkg/version", "k8s.io/kubernetes/pkg/volume", "k8s.io/kubernetes/pkg/volume/util" diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index a7e7312c363..2519b041676 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -50,6 +50,7 @@ import ( authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/util/initsystem" + ipvsutil "k8s.io/kubernetes/pkg/util/ipvs" "k8s.io/kubernetes/pkg/util/procfs" versionutil "k8s.io/kubernetes/pkg/util/version" kubeadmversion "k8s.io/kubernetes/pkg/version" @@ -867,6 +868,13 @@ func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfi } checks = addCommonChecks(execer, cfg, checks) + // Check ipvs required kernel module once we use ipvs kube-proxy mode + if cfg.KubeProxy.Config.Mode == ipvsutil.IPVSProxyMode { + checks = append(checks, + ipvsutil.RequiredIPVSKernelModulesAvailableCheck{Executor: execer}, + ) + } + if len(cfg.Etcd.Endpoints) == 0 { // Only do etcd related checks when no external endpoints were specified checks = append(checks, @@ -922,6 +930,7 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfigura FileAvailableCheck{Path: cfg.CACertPath}, FileAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, 
kubeadmconstants.KubeletKubeConfigFileName)}, FileAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletBootstrapKubeConfigFileName)}, + ipvsutil.RequiredIPVSKernelModulesAvailableCheck{Executor: execer}, } checks = addCommonChecks(execer, cfg, checks) diff --git a/pkg/util/ipvs/ipvs.go b/pkg/util/ipvs/ipvs.go index 9d15beb0c63..58e76a56c90 100644 --- a/pkg/util/ipvs/ipvs.go +++ b/pkg/util/ipvs/ipvs.go @@ -61,8 +61,19 @@ const ( FlagPersistent = 0x1 // FlagHashed specify IPVS service hash flag FlagHashed = 0x2 + // IPVSProxyMode is match set up cluster with ipvs proxy model + IPVSProxyMode = "ipvs" ) +// Sets of IPVS required kernel modules. +var ipvsModules = []string{ + "ip_vs", + "ip_vs_rr", + "ip_vs_wrr", + "ip_vs_sh", + "nf_conntrack_ipv4", +} + // Equal check the equality of virtual server. // We don't use struct == since it doesn't work because of slice. func (svc *VirtualServer) Equal(other *VirtualServer) bool { diff --git a/pkg/util/ipvs/kernelcheck_linux.go b/pkg/util/ipvs/kernelcheck_linux.go new file mode 100644 index 00000000000..83c3e6b0fe9 --- /dev/null +++ b/pkg/util/ipvs/kernelcheck_linux.go @@ -0,0 +1,94 @@ +// +build linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipvs + +import ( + "fmt" + "regexp" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + utilsexec "k8s.io/utils/exec" + + "github.com/golang/glog" +) + +// RequiredIPVSKernelModulesAvailableCheck tests IPVS required kernel modules. +type RequiredIPVSKernelModulesAvailableCheck struct { + Executor utilsexec.Interface +} + +// Name returns label for RequiredIPVSKernelModulesAvailableCheck +func (r RequiredIPVSKernelModulesAvailableCheck) Name() string { + return "RequiredIPVSKernelModulesAvailable" +} + +// Check try to validates IPVS required kernel modules exists or not. +// The name of function can not be changed. +func (r RequiredIPVSKernelModulesAvailableCheck) Check() (warnings, errors []error) { + glog.V(1).Infoln("validating the kernel module IPVS required exists in machine or not") + + // Find out loaded kernel modules + out, err := r.Executor.Command("cut", "-f1", "-d", " ", "/proc/modules").CombinedOutput() + if err != nil { + errors = append(errors, fmt.Errorf("error getting installed ipvs required kernel modules: %v(%s)", err, out)) + return nil, errors + } + mods := strings.Split(string(out), "\n") + + wantModules := sets.NewString() + loadModules := sets.NewString() + wantModules.Insert(ipvsModules...) + loadModules.Insert(mods...) 
+ modules := wantModules.Difference(loadModules).UnsortedList() + + // Check builtin modules exist or not + if len(modules) != 0 { + kernelVersionFile := "/proc/sys/kernel/osrelease" + b, err := r.Executor.Command("cut", "-f1", "-d", " ", kernelVersionFile).CombinedOutput() + if err != nil { + errors = append(errors, fmt.Errorf("error getting os release kernel version: %v(%s)", err, out)) + return nil, errors + } + + kernelVersion := strings.TrimSpace(string(b)) + builtinModsFilePath := fmt.Sprintf("/lib/modules/%s/modules.builtin", kernelVersion) + out, err := r.Executor.Command("cut", "-f1", "-d", " ", builtinModsFilePath).CombinedOutput() + if err != nil { + errors = append(errors, fmt.Errorf("error getting required builtin kernel modules: %v(%s)", err, out)) + return nil, errors + } + + builtInModules := sets.NewString() + for _, builtInMode := range ipvsModules { + match, _ := regexp.Match(builtInMode+".ko", out) + if !match { + builtInModules.Insert(string(builtInMode)) + } + } + if len(builtInModules) != 0 { + warnings = append(warnings, fmt.Errorf( + "the IPVS proxier will not be used, because the following required kernel modules are not loaded: %v or no builtin kernel ipvs support: %v\n"+ + "you can solve this problem with following methods:\n 1. Run 'modprobe -- ' to load missing kernel modules;\n"+ + "2. Provide the missing builtin kernel ipvs support\n", modules, builtInModules)) + } + } + + return warnings, errors +} diff --git a/pkg/util/ipvs/kernelcheck_linux_test.go b/pkg/util/ipvs/kernelcheck_linux_test.go new file mode 100644 index 00000000000..1f87a923901 --- /dev/null +++ b/pkg/util/ipvs/kernelcheck_linux_test.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ipvs + +import ( + "testing" + + utilsexec "k8s.io/utils/exec" + fakeexec "k8s.io/utils/exec/testing" +) + +func TestRequiredIPVSKernelModulesAvailableCheck(t *testing.T) { + cases := []struct { + caseName string + + loadedKernel string + kernelVersion string + builtinKernel string + + expectErrors bool + expectWarnings bool + }{ + { + caseName: "no installed kernel modules and no builtin kernel modules", + loadedKernel: "", + kernelVersion: "3.13.0-24-generic", + builtinKernel: "", + expectErrors: false, + expectWarnings: true, + }, + { + caseName: "no installed kernel modules and missing builtin kernel modules", + loadedKernel: "", + kernelVersion: "3.13.0-24-generic", + builtinKernel: "kernel/net/netfilter/ipvs/ip_vs.ko\n" + + "kernel/net/ipv4/netfilter/nf_conntrack_ipv4.ko", + expectErrors: false, + expectWarnings: true, + }, + { + caseName: "no installed kernel modules and own all builtin kernel modules", + loadedKernel: "", + kernelVersion: "3.13.0-24-generic", + builtinKernel: "kernel/net/netfilter/ipvs/ip_vs.ko\n" + + "kernel/net/netfilter/ipvs/ip_vs_rr.ko\n" + + "kernel/net/netfilter/ipvs/ip_vs_wrr.ko\n" + + "kernel/net/netfilter/ipvs/ip_vs_sh.ko\n" + + "kernel/net/ipv4/netfilter/nf_conntrack_ipv4.ko", + expectErrors: false, + expectWarnings: false, + }, + { + caseName: "missing installed kernel modules and no builtin kernel modules", + loadedKernel: "ip_vs", + kernelVersion: "3.13.0-24-generic", + builtinKernel: "", + expectErrors: false, + expectWarnings: true, + }, + { + caseName: "own all installed kernel modules and no builtin kernel modules", + loadedKernel: "ip_vs\n" + "ip_vs_wrr\n" + "nf_conntrack_ipv4\n" + + "ip_vs_rr\n" + "ip_vs_sh", + kernelVersion: "3.13.0-24-generic", + builtinKernel: "", + expectErrors: false, + expectWarnings: false, + }, + { + caseName: "own all installed kernel modules and all builtin kernel modules", + loadedKernel: "ip_vs\n" + "ip_vs_wrr\n" + "nf_conntrack_ipv4\n" + "ip_vs_rr\n" + "ip_vs_sh", + kernelVersion: "3.13.0-24-generic", + builtinKernel: "kernel/net/netfilter/ipvs/ip_vs.ko\n" + + "kernel/net/netfilter/ipvs/ip_vs_rr.ko\n" + + "kernel/net/netfilter/ipvs/ip_vs_wrr.ko\n" + + "kernel/net/netfilter/ipvs/ip_vs_sh.ko\n" + + "kernel/net/ipv4/netfilter/nf_conntrack_ipv4.ko", + expectErrors: false, + expectWarnings: false, + }, + } + + for i, tc := range cases { + fcmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{ + func() ([]byte, error) { return []byte(cases[i].loadedKernel), nil }, + func() ([]byte, error) { return []byte(cases[i].kernelVersion), nil }, + func() ([]byte, error) { return []byte(cases[i].builtinKernel), nil }, + }, + } + + fexec := fakeexec.FakeExec{ + CommandScript: []fakeexec.FakeCommandAction{ + func(cmd string, args ...string) utilsexec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) utilsexec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) }, + func(cmd string, args ...string) utilsexec.Cmd { return fakeexec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + + check := RequiredIPVSKernelModulesAvailableCheck{ + Executor: &fexec, + } + warnings, errors := check.Check() + + switch { + case warnings != nil && !tc.expectWarnings: + t.Errorf("RequiredIPVSKernelModulesAvailableCheck: unexpected warnings for installed kernel modules %v and builtin kernel modules %v. 
Warnings: %v", tc.loadedKernel, tc.builtinKernel, warnings) + case warnings == nil && tc.expectWarnings: + t.Errorf("RequiredIPVSKernelModulesAvailableCheck: expected warnings for installed kernel modules %v and builtin kernel modules %v but got nothing", tc.loadedKernel, tc.builtinKernel) + case errors != nil && !tc.expectErrors: + t.Errorf("RequiredIPVSKernelModulesAvailableCheck: unexpected errors for installed kernel modules %v and builtin kernel modules %v. errors: %v", tc.loadedKernel, tc.builtinKernel, errors) + case errors == nil && tc.expectErrors: + t.Errorf("RequiredIPVSKernelModulesAvailableCheck: expected errors for installed kernel modules %v and builtin kernel modules %v but got nothing", tc.loadedKernel, tc.builtinKernel) + } + } +} diff --git a/pkg/util/ipvs/kernelcheck_unsupported.go b/pkg/util/ipvs/kernelcheck_unsupported.go new file mode 100644 index 00000000000..6247bff8513 --- /dev/null +++ b/pkg/util/ipvs/kernelcheck_unsupported.go @@ -0,0 +1,39 @@ +// +build !linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipvs + +import ( + utilsexec "k8s.io/utils/exec" +) + +// RequiredIPVSKernelModulesAvailableCheck tests IPVS required kernel modules. +type RequiredIPVSKernelModulesAvailableCheck struct { + Executor utilsexec.Interface +} + +// Name returns label for RequiredIPVSKernelModulesAvailableCheck +func (r RequiredIPVSKernelModulesAvailableCheck) Name() string { + return "RequiredIPVSKernelModulesAvailable" +} + +// Check try to validates IPVS required kernel modules exists or not. 
+func (r RequiredIPVSKernelModulesAvailableCheck) Check() (warnings, errors []error) { + + return nil, nil +} From 5f4025c0291b39409ab39e3a69d9936c635610eb Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Mon, 21 May 2018 09:46:34 +0800 Subject: [PATCH 081/307] auto generated file --- cmd/kubeadm/app/preflight/BUILD | 1 + pkg/util/ipvs/BUILD | 15 +++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index 35717029fd7..2f8982bde4b 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -56,6 +56,7 @@ go_library( "//pkg/kubeapiserver/authorizer/modes:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/util/initsystem:go_default_library", + "//pkg/util/ipvs:go_default_library", "//pkg/util/procfs:go_default_library", "//pkg/util/version:go_default_library", "//pkg/version:go_default_library", diff --git a/pkg/util/ipvs/BUILD b/pkg/util/ipvs/BUILD index 90ddf07b0d1..5a85d7448e1 100644 --- a/pkg/util/ipvs/BUILD +++ b/pkg/util/ipvs/BUILD @@ -13,6 +13,7 @@ go_test( ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "ipvs_linux_test.go", + "kernelcheck_linux_test.go", ], "//conditions:default": [], }), @@ -20,6 +21,8 @@ go_test( deps = select({ "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//vendor/k8s.io/utils/exec:go_default_library", + "//vendor/k8s.io/utils/exec/testing:go_default_library", ], "//conditions:default": [], }), @@ -32,36 +35,47 @@ go_library( ] + select({ "@io_bazel_rules_go//go/platform:android": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:darwin": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:dragonfly": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:freebsd": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:linux": [ "ipvs_linux.go", + "kernelcheck_linux.go", ], "@io_bazel_rules_go//go/platform:nacl": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:netbsd": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:openbsd": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:plan9": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:solaris": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "@io_bazel_rules_go//go/platform:windows": [ "ipvs_unsupported.go", + "kernelcheck_unsupported.go", ], "//conditions:default": [], }), @@ -82,6 +96,7 @@ go_library( "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ From 46d8cf23ef8142c2f1240e18d675a33479fc88d5 Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Tue, 22 May 2018 11:34:50 +0800 Subject: [PATCH 082/307] kubectl use its own logs --- cmd/kubectl/BUILD | 2 +- cmd/kubectl/kubectl.go | 2 +- pkg/kubectl/util/logs/logs.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/kubectl/BUILD b/cmd/kubectl/BUILD index 7b4fdf632e1..01be15a7637 100644 --- a/cmd/kubectl/BUILD +++ 
b/cmd/kubectl/BUILD @@ -20,9 +20,9 @@ go_library( visibility = ["//visibility:private"], deps = [ "//pkg/kubectl/cmd:go_default_library", + "//pkg/kubectl/util/logs:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", "//vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library", ], ) diff --git a/cmd/kubectl/kubectl.go b/cmd/kubectl/kubectl.go index 5ca950ed4b9..b568c7fa630 100644 --- a/cmd/kubectl/kubectl.go +++ b/cmd/kubectl/kubectl.go @@ -26,8 +26,8 @@ import ( "github.com/spf13/pflag" utilflag "k8s.io/apiserver/pkg/util/flag" - "k8s.io/apiserver/pkg/util/logs" "k8s.io/kubernetes/pkg/kubectl/cmd" + "k8s.io/kubernetes/pkg/kubectl/util/logs" // Import to initialize client auth plugins. _ "k8s.io/client-go/plugin/pkg/client/auth" diff --git a/pkg/kubectl/util/logs/logs.go b/pkg/kubectl/util/logs/logs.go index 392bbc0fbad..eae42f795cd 100644 --- a/pkg/kubectl/util/logs/logs.go +++ b/pkg/kubectl/util/logs/logs.go @@ -46,7 +46,7 @@ func (writer GlogWriter) Write(data []byte) (n int, err error) { func InitLogs() { log.SetOutput(GlogWriter{}) log.SetFlags(0) - // The default glog flush interval is 30 seconds, which is frighteningly long. + // The default glog flush interval is 5 seconds. go wait.Until(glog.Flush, *logFlushFreq, wait.NeverStop) } From 39bb841827ff8c5b74113bfabc912990cab11cc3 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 21 May 2018 21:28:42 -0700 Subject: [PATCH 083/307] test/e2e/common: Add NodeFeature or NodeConformance tags The following tests are not tagged because they are not really run in node e2e suites: * A subset of host path tests that required SSH access - should evaluate whether the tests should be moved to test/e2e_node * GCP volume tests - should evaluate whether these tests should be moved out of the common directly since they are not shared with the node e2e suite. 
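For context, these tags are not separate test metadata: they are plain substrings of the Ginkgo
test description, so a suite opts in to or out of them with focus/skip regular expressions. A
minimal sketch of the pattern is below; it reuses a test name that appears in this patch, and the
flag values at the end are illustrative, not a specific job configuration.

    package e2edemo

    import (
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // The tag travels inside the test name string; nothing else about the test changes.
    var _ = framework.KubeDescribe("Pods", func() {
        framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
            // ... test body ...
        })
    })

A node suite can then restrict itself to tagged tests with something like
--ginkgo.focus='\[NodeConformance\]' (and exclude feature-gated ones with
--ginkgo.skip='\[NodeFeature:.+\]'); the exact flags depend on how the harness invokes the
test binary.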
--- test/e2e/common/configmap_volume.go | 8 ++++---- test/e2e/common/downward_api.go | 2 +- test/e2e/common/downwardapi_volume.go | 4 ++-- test/e2e/common/empty_dir.go | 2 +- test/e2e/common/host_path.go | 4 ++-- test/e2e/common/init_container.go | 2 +- test/e2e/common/pods.go | 8 ++++---- test/e2e/common/privileged.go | 2 +- test/e2e/common/projected.go | 12 ++++++------ test/e2e/common/secrets_volume.go | 2 +- test/e2e/common/sysctl.go | 2 +- 11 files changed, 24 insertions(+), 24 deletions(-) diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index d0c09996df6..4a6e170c866 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -51,7 +51,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { doConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode) }) - It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set", func() { + It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ doConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode) }) @@ -65,7 +65,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { doConfigMapE2EWithoutMappings(f, 1000, 0, nil) }) - It("should be consumable from pods in volume as non-root with FSGroup", func() { + It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() { doConfigMapE2EWithoutMappings(f, 1000, 1001, nil) }) @@ -97,7 +97,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { doConfigMapE2EWithMappings(f, 1000, 0, nil) }) - It("should be consumable from pods in volume with mappings as non-root with FSGroup", func() { + It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() { doConfigMapE2EWithMappings(f, 1000, 1001, nil) }) @@ -184,7 +184,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2")) }) - It("binary data should be reflected in volume ", func() { + It("binary data should be reflected in volume [NodeConformance]", func() { podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) diff --git a/test/e2e/common/downward_api.go b/test/e2e/common/downward_api.go index 8c3d0618b32..0541dc394ce 100644 --- a/test/e2e/common/downward_api.go +++ b/test/e2e/common/downward_api.go @@ -240,7 +240,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() { }) }) -var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive] [NodeFeature:EphemeralStorage]", func() { f := framework.NewDefaultFramework("downward-api") Context("Downward API tests for local ephemeral storage", func() { diff --git a/test/e2e/common/downwardapi_volume.go b/test/e2e/common/downwardapi_volume.go index a10eb941e74..7914a1970ba 100644 --- a/test/e2e/common/downwardapi_volume.go +++ b/test/e2e/common/downwardapi_volume.go @@ -83,7 +83,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { }) }) - It("should provide podname as non-root with fsgroup", func() { + It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() { podName := "metadata-volume-" + string(uuid.NewUUID()) uid := int64(1001) gid := int64(1234) @@ -97,7 +97,7 @@ 
var _ = Describe("[sig-storage] Downward API volume", func() { }) }) - It("should provide podname as non-root with fsgroup and defaultMode", func() { + It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() { podName := "metadata-volume-" + string(uuid.NewUUID()) uid := int64(1001) gid := int64(1234) diff --git a/test/e2e/common/empty_dir.go b/test/e2e/common/empty_dir.go index d428e9bbe0e..a587f35f43d 100644 --- a/test/e2e/common/empty_dir.go +++ b/test/e2e/common/empty_dir.go @@ -40,7 +40,7 @@ var ( var _ = Describe("[sig-storage] EmptyDir volumes", func() { f := framework.NewDefaultFramework("emptydir") - Context("when FSGroup is specified", func() { + Context("when FSGroup is specified [NodeFeature:FSGroup]", func() { It("new files should be created with FSGroup ownership when container is root", func() { doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory) }) diff --git a/test/e2e/common/host_path.go b/test/e2e/common/host_path.go index 478b28c387a..dd21c953855 100644 --- a/test/e2e/common/host_path.go +++ b/test/e2e/common/host_path.go @@ -61,7 +61,7 @@ var _ = Describe("[sig-storage] HostPath", func() { }) // This test requires mounting a folder into a container with write privileges. - It("should support r/w", func() { + It("should support r/w [NodeConformance]", func() { filePath := path.Join(volumePath, "test-file") retryDuration := 180 source := &v1.HostPathVolumeSource{ @@ -85,7 +85,7 @@ var _ = Describe("[sig-storage] HostPath", func() { }) }) - It("should support subPath", func() { + It("should support subPath [NodeConformance]", func() { subPath := "sub-path" fileName := "test-file" retryDuration := 180 diff --git a/test/e2e/common/init_container.go b/test/e2e/common/init_container.go index 0e82dfea385..ad32cc8414a 100644 --- a/test/e2e/common/init_container.go +++ b/test/e2e/common/init_container.go @@ -35,7 +35,7 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = framework.KubeDescribe("InitContainer", func() { +var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() { f := framework.NewDefaultFramework("init-container") var podClient *framework.PodClient BeforeEach(func() { diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index 72e24744820..2fa6fbc4204 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -467,7 +467,7 @@ var _ = framework.KubeDescribe("Pods", func() { }, maxRetries, "Container should have service environment variables set") }) - It("should support remote command execution over websockets", func() { + It("should support remote command execution over websockets [NodeConformance]", func() { config, err := framework.LoadConfig() Expect(err).NotTo(HaveOccurred(), "unable to get base config") @@ -543,7 +543,7 @@ var _ = framework.KubeDescribe("Pods", func() { }, time.Minute, 10*time.Second).Should(BeNil()) }) - It("should support retrieving logs from the container over websockets", func() { + It("should support retrieving logs from the container over websockets [NodeConformance]", func() { config, err := framework.LoadConfig() Expect(err).NotTo(HaveOccurred(), "unable to get base config") @@ -600,7 +600,7 @@ var _ = framework.KubeDescribe("Pods", func() { } }) - It("should have their auto-restart back-off timer reset on image update [Slow]", func() { + It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() { podName := "pod-back-off-image" containerName := "back-off" pod := &v1.Pod{ @@ -641,7 +641,7 @@ var _ = framework.KubeDescribe("Pods", func() { }) // Slow issue #19027 (20 mins) - It("should cap back-off at MaxContainerBackOff [Slow]", func() { + It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() { podName := "back-off-cap" containerName := "back-off-cap" pod := &v1.Pod{ diff --git a/test/e2e/common/privileged.go b/test/e2e/common/privileged.go index a56b0627350..76757a90a97 100644 --- a/test/e2e/common/privileged.go +++ b/test/e2e/common/privileged.go @@ -36,7 +36,7 @@ type PrivilegedPodTestConfig struct { pod *v1.Pod } -var _ = framework.KubeDescribe("PrivilegedPod", func() { +var _ = framework.KubeDescribe("PrivilegedPod [NodeConformance]", func() { config := &PrivilegedPodTestConfig{ f: framework.NewDefaultFramework("e2e-privileged-pod"), privilegedPod: "privileged-pod", diff --git a/test/e2e/common/projected.go b/test/e2e/common/projected.go index 641132a15bf..6a3e735b5e8 100644 --- a/test/e2e/common/projected.go +++ b/test/e2e/common/projected.go @@ -84,7 +84,7 @@ var _ = Describe("[sig-storage] Projected", func() { doProjectedSecretE2EWithMapping(f, &mode) }) - It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() { + It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() { var ( namespace2 *v1.Namespace err error @@ -419,7 +419,7 @@ var _ = Describe("[sig-storage] Projected", func() { doProjectedConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode) }) - It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set", func() { + It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, 
&defaultMode) }) @@ -433,7 +433,7 @@ var _ = Describe("[sig-storage] Projected", func() { doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil) }) - It("should be consumable from pods in volume as non-root with FSGroup", func() { + It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() { doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, nil) }) @@ -466,7 +466,7 @@ var _ = Describe("[sig-storage] Projected", func() { doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil) }) - It("should be consumable from pods in volume with mappings as non-root with FSGroup", func() { + It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() { doProjectedConfigMapE2EWithMappings(f, 1000, 1001, nil) }) @@ -904,7 +904,7 @@ var _ = Describe("[sig-storage] Projected", func() { }) }) - It("should provide podname as non-root with fsgroup", func() { + It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() { podName := "metadata-volume-" + string(uuid.NewUUID()) uid := int64(1001) gid := int64(1234) @@ -918,7 +918,7 @@ var _ = Describe("[sig-storage] Projected", func() { }) }) - It("should provide podname as non-root with fsgroup and defaultMode", func() { + It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() { podName := "metadata-volume-" + string(uuid.NewUUID()) uid := int64(1001) gid := int64(1234) diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index e9f7cfe59fd..a65c6e9dbc9 100644 --- a/test/e2e/common/secrets_volume.go +++ b/test/e2e/common/secrets_volume.go @@ -83,7 +83,7 @@ var _ = Describe("[sig-storage] Secrets", func() { doSecretE2EWithMapping(f, &mode) }) - It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() { + It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() { var ( namespace2 *v1.Namespace err error diff --git a/test/e2e/common/sysctl.go b/test/e2e/common/sysctl.go index aa05d109ad8..5f07371d02e 100644 --- a/test/e2e/common/sysctl.go +++ b/test/e2e/common/sysctl.go @@ -28,7 +28,7 @@ import ( . "github.com/onsi/gomega" ) -var _ = framework.KubeDescribe("Sysctls", func() { +var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() { f := framework.NewDefaultFramework("sysctl") var podClient *framework.PodClient From 0aa0f3208a769a3c1c14568a7ca889532efcaeef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 22 May 2018 09:12:25 +0300 Subject: [PATCH 084/307] kubeadm: Write kubelet config file to disk and persist in-cluster. 
Also write runtime environment file and fixup the kubelet phases command --- cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go | 11 +- .../app/apis/kubeadm/v1alpha1/defaults.go | 36 ++- .../app/apis/kubeadm/v1alpha2/defaults.go | 36 ++- cmd/kubeadm/app/cmd/init.go | 62 +++-- cmd/kubeadm/app/cmd/join.go | 24 +- cmd/kubeadm/app/cmd/phases/kubelet.go | 210 ++++++++-------- cmd/kubeadm/app/cmd/phases/kubelet_test.go | 28 ++- cmd/kubeadm/app/cmd/upgrade/common.go | 18 +- cmd/kubeadm/app/constants/constants.go | 26 +- cmd/kubeadm/app/phases/kubelet/config.go | 187 ++++++++++++++ cmd/kubeadm/app/phases/kubelet/config_test.go | 78 ++++++ cmd/kubeadm/app/phases/kubelet/dynamic.go | 117 +++++++++ .../app/phases/kubelet/dynamic_test.go | 63 +++++ cmd/kubeadm/app/phases/kubelet/flags.go | 78 ++++++ cmd/kubeadm/app/phases/kubelet/kubelet.go | 235 ------------------ .../app/phases/kubelet/kubelet_test.go | 134 ---------- .../app/phases/upgrade/configuration.go | 77 ------ cmd/kubeadm/app/phases/upgrade/postupgrade.go | 6 + cmd/kubeadm/app/preflight/checks.go | 7 +- cmd/kubeadm/app/util/config/cluster.go | 65 +++++ pkg/util/initsystem/initsystem.go | 21 ++ 21 files changed, 885 insertions(+), 634 deletions(-) create mode 100644 cmd/kubeadm/app/phases/kubelet/config.go create mode 100644 cmd/kubeadm/app/phases/kubelet/config_test.go create mode 100644 cmd/kubeadm/app/phases/kubelet/dynamic.go create mode 100644 cmd/kubeadm/app/phases/kubelet/dynamic_test.go create mode 100644 cmd/kubeadm/app/phases/kubelet/flags.go delete mode 100644 cmd/kubeadm/app/phases/kubelet/kubelet.go delete mode 100644 cmd/kubeadm/app/phases/kubelet/kubelet_test.go delete mode 100644 cmd/kubeadm/app/phases/upgrade/configuration.go create mode 100644 cmd/kubeadm/app/util/config/cluster.go diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index a8e8c8ce447..3bd46500aa1 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -68,9 +68,16 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { StaticPodPath: "foo", ClusterDNS: []string{"foo"}, ClusterDomain: "foo", - Authorization: kubeletconfigv1beta1.KubeletAuthorization{Mode: "foo"}, + Authorization: kubeletconfigv1beta1.KubeletAuthorization{ + Mode: "Webhook", + }, Authentication: kubeletconfigv1beta1.KubeletAuthentication{ - X509: kubeletconfigv1beta1.KubeletX509Authentication{ClientCAFile: "foo"}, + X509: kubeletconfigv1beta1.KubeletX509Authentication{ + ClientCAFile: "/etc/kubernetes/pki/ca.crt", + }, + Anonymous: kubeletconfigv1beta1.KubeletAnonymousAuthentication{ + Enabled: utilpointer.BoolPtr(false), + }, }, }, } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index 46d05355064..9b2b499f6cc 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -24,11 +24,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/cmd/kubeadm/app/features" kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" kubeproxyscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme" kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" + utilpointer "k8s.io/kubernetes/pkg/util/pointer" ) const ( @@ -143,9 +143,7 
@@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { } SetDefaultsEtcdSelfHosted(obj) - if features.Enabled(obj.FeatureGates, features.DynamicKubeletConfig) { - SetDefaults_KubeletConfiguration(obj) - } + SetDefaults_KubeletConfiguration(obj) SetDefaults_ProxyConfiguration(obj) SetDefaults_AuditPolicyConfiguration(obj) } @@ -235,15 +233,31 @@ func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) { } } if obj.KubeletConfiguration.BaseConfig.ClusterDomain == "" { - obj.KubeletConfiguration.BaseConfig.ClusterDomain = DefaultServiceDNSDomain - } - if obj.KubeletConfiguration.BaseConfig.Authorization.Mode == "" { - obj.KubeletConfiguration.BaseConfig.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook - } - if obj.KubeletConfiguration.BaseConfig.Authentication.X509.ClientCAFile == "" { - obj.KubeletConfiguration.BaseConfig.Authentication.X509.ClientCAFile = DefaultCACertPath + obj.KubeletConfiguration.BaseConfig.ClusterDomain = obj.Networking.DNSDomain } + // Enforce security-related kubelet options + + // Require all clients to the kubelet API to have client certs signed by the cluster CA + obj.KubeletConfiguration.BaseConfig.Authentication.X509.ClientCAFile = DefaultCACertPath + obj.KubeletConfiguration.BaseConfig.Authentication.Anonymous.Enabled = utilpointer.BoolPtr(false) + + // On every client request to the kubelet API, execute a webhook (SubjectAccessReview request) to the API server + // and ask it whether the client is authorized to access the kubelet API + obj.KubeletConfiguration.BaseConfig.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook + + // Let clients using other authentication methods like ServiceAccount tokens also access the kubelet API + // TODO: Enable in a future PR + // obj.KubeletConfiguration.BaseConfig.Authentication.Webhook.Enabled = utilpointer.BoolPtr(true) + + // Disable the readonly port of the kubelet, in order to not expose unnecessary information + // TODO: Enable in a future PR + // obj.KubeletConfiguration.BaseConfig.ReadOnlyPort = 0 + + // Serve a /healthz webserver on localhost:10248 that kubeadm can talk to + obj.KubeletConfiguration.BaseConfig.HealthzBindAddress = "127.0.0.1" + obj.KubeletConfiguration.BaseConfig.HealthzPort = utilpointer.Int32Ptr(10248) + scheme, _, _ := kubeletscheme.NewSchemeAndCodecs() if scheme != nil { scheme.Default(obj.KubeletConfiguration.BaseConfig) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go index 266f0033a9b..946d3f2e6e8 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go @@ -23,11 +23,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/cmd/kubeadm/app/features" kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" kubeproxyscheme "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme" kubeproxyconfigv1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1" + utilpointer "k8s.io/kubernetes/pkg/util/pointer" ) const ( @@ -127,9 +127,7 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { obj.ClusterName = DefaultClusterName } - if features.Enabled(obj.FeatureGates, features.DynamicKubeletConfig) { - SetDefaults_KubeletConfiguration(obj) - } + SetDefaults_KubeletConfiguration(obj) 
SetDefaults_ProxyConfiguration(obj) SetDefaults_AuditPolicyConfiguration(obj) } @@ -198,15 +196,31 @@ func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) { } } if obj.KubeletConfiguration.BaseConfig.ClusterDomain == "" { - obj.KubeletConfiguration.BaseConfig.ClusterDomain = DefaultServiceDNSDomain - } - if obj.KubeletConfiguration.BaseConfig.Authorization.Mode == "" { - obj.KubeletConfiguration.BaseConfig.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook - } - if obj.KubeletConfiguration.BaseConfig.Authentication.X509.ClientCAFile == "" { - obj.KubeletConfiguration.BaseConfig.Authentication.X509.ClientCAFile = DefaultCACertPath + obj.KubeletConfiguration.BaseConfig.ClusterDomain = obj.Networking.DNSDomain } + // Enforce security-related kubelet options + + // Require all clients to the kubelet API to have client certs signed by the cluster CA + obj.KubeletConfiguration.BaseConfig.Authentication.X509.ClientCAFile = DefaultCACertPath + obj.KubeletConfiguration.BaseConfig.Authentication.Anonymous.Enabled = utilpointer.BoolPtr(false) + + // On every client request to the kubelet API, execute a webhook (SubjectAccessReview request) to the API server + // and ask it whether the client is authorized to access the kubelet API + obj.KubeletConfiguration.BaseConfig.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook + + // Let clients using other authentication methods like ServiceAccount tokens also access the kubelet API + // TODO: Enable in a future PR + // obj.KubeletConfiguration.BaseConfig.Authentication.Webhook.Enabled = utilpointer.BoolPtr(true) + + // Disable the readonly port of the kubelet, in order to not expose unnecessary information + // TODO: Enable in a future PR + // obj.KubeletConfiguration.BaseConfig.ReadOnlyPort = 0 + + // Serve a /healthz webserver on localhost:10248 that kubeadm can talk to + obj.KubeletConfiguration.BaseConfig.HealthzBindAddress = "127.0.0.1" + obj.KubeletConfiguration.BaseConfig.HealthzPort = utilpointer.Int32Ptr(10248) + scheme, _, _ := kubeletscheme.NewSchemeAndCodecs() if scheme != nil { scheme.Default(obj.KubeletConfiguration.BaseConfig) diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 9b5ab21bc7e..7c03b5b4c49 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -259,22 +259,29 @@ func NewInit(cfgPath string, externalcfg *kubeadmapiv1alpha2.MasterConfiguration return nil, err } - // Try to start the kubelet service in case it's inactive - glog.V(1).Infof("Starting kubelet") - preflight.TryStartKubelet(ignorePreflightErrors) - - return &Init{cfg: cfg, skipTokenPrint: skipTokenPrint, dryRun: dryRun}, nil + return &Init{cfg: cfg, skipTokenPrint: skipTokenPrint, dryRun: dryRun, ignorePreflightErrors: ignorePreflightErrors}, nil } // Init defines struct used by "kubeadm init" command type Init struct { - cfg *kubeadmapi.MasterConfiguration - skipTokenPrint bool - dryRun bool + cfg *kubeadmapi.MasterConfiguration + skipTokenPrint bool + dryRun bool + ignorePreflightErrors sets.String } // Run executes master node provisioning, including certificates, needed static pod manifests, etc. 
func (i *Init) Run(out io.Writer) error { + + // Write env file with flags for the kubelet to use + if err := kubeletphase.WriteKubeletDynamicEnvFile(i.cfg); err != nil { + return err + } + + // Try to start the kubelet service in case it's inactive + glog.V(1).Infof("Starting kubelet") + preflight.TryStartKubelet(i.ignorePreflightErrors) + // Get directories to write files to; can be faked if we're dry-running glog.V(1).Infof("[init] Getting certificates directory from configuration") realCertsDir := i.cfg.CertificatesDir @@ -346,14 +353,14 @@ func (i *Init) Run(out io.Writer) error { return fmt.Errorf("error printing files on dryrun: %v", err) } - // NOTE: flag "--dynamic-config-dir" should be specified in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - if features.Enabled(i.cfg.FeatureGates, features.DynamicKubeletConfig) { - glog.V(1).Infof("[init] feature --dynamic-config-dir is enabled") - glog.V(1).Infof("[init] writing base kubelet configuration to disk on master") - // Write base kubelet configuration for dynamic kubelet configuration feature. - if err := kubeletphase.WriteInitKubeletConfigToDiskOnMaster(i.cfg); err != nil { - return fmt.Errorf("error writing base kubelet configuration to disk: %v", err) - } + kubeletVersion, err := preflight.GetKubeletVersion(utilsexec.New()) + if err != nil { + return err + } + + // Write the kubelet configuration to disk. + if err := kubeletphase.WriteConfigToDisk(i.cfg.KubeletConfiguration.BaseConfig, kubeletVersion); err != nil { + return fmt.Errorf("error writing kubelet configuration to disk: %v", err) } // Create a kubernetes client and wait for the API server to be healthy (if not dryrunning) @@ -381,15 +388,6 @@ func (i *Init) Run(out io.Writer) error { return fmt.Errorf("couldn't initialize a Kubernetes cluster") } - // NOTE: flag "--dynamic-config-dir" should be specified in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - if features.Enabled(i.cfg.FeatureGates, features.DynamicKubeletConfig) { - // Create base kubelet configuration for dynamic kubelet configuration feature. - glog.V(1).Infof("[init] creating base kubelet configuration") - if err := kubeletphase.CreateBaseKubeletConfiguration(i.cfg, client); err != nil { - return fmt.Errorf("error creating base kubelet configuration: %v", err) - } - } - // Upload currently used configuration to the cluster // Note: This is done right in the beginning of cluster initialization; as we might want to make other phases // depend on centralized information from this source in the future @@ -398,12 +396,26 @@ func (i *Init) Run(out io.Writer) error { return fmt.Errorf("error uploading configuration: %v", err) } + glog.V(1).Infof("[init] creating kubelet configuration configmap") + if err := kubeletphase.CreateConfigMap(i.cfg, client); err != nil { + return fmt.Errorf("error creating kubelet configuration ConfigMap: %v", err) + } + // PHASE 4: Mark the master with the right label/taint glog.V(1).Infof("[init] marking the master with right label") if err := markmasterphase.MarkMaster(client, i.cfg.NodeName, !i.cfg.NoTaintMaster); err != nil { return fmt.Errorf("error marking master: %v", err) } + // NOTE: flag "--dynamic-config-dir" should be specified in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + // This feature is disabled by default, as it is alpha still + if features.Enabled(i.cfg.FeatureGates, features.DynamicKubeletConfig) { + // Enable dynamic kubelet configuration for the node. 
+ if err := kubeletphase.EnableDynamicConfigForNode(client, i.cfg.NodeName, kubeletVersion); err != nil { + return fmt.Errorf("error enabling dynamic kubelet configuration: %v", err) + } + } + // PHASE 5: Set up the node bootstrap tokens if !i.skipTokenPrint { glog.Infof("[bootstraptoken] using token: %s\n", i.cfg.Token) diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index ce6566cd6e8..77fcb6697ca 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -252,10 +252,28 @@ func (j *Join) Run(out io.Writer) error { return fmt.Errorf("couldn't save the CA certificate to disk: %v", err) } - // NOTE: flag "--dynamic-config-dir" should be specified in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf - glog.V(1).Infoln("[join] consuming base kubelet configuration") + kubeletVersion, err := preflight.GetKubeletVersion(utilsexec.New()) + if err != nil { + return err + } + + // Write the configuration for the kubelet down to disk so the kubelet can start + if err := kubeletphase.DownloadConfig(kubeconfigFile, kubeletVersion); err != nil { + return err + } + + // Now the kubelet will perform the TLS Bootstrap, transforming bootstrap-kubeconfig.conf to kubeconfig.conf in /etc/kubernetes + + // NOTE: the "--dynamic-config-dir" flag should be specified in /etc/systemd/system/kubelet.service.d/10-kubeadm.conf for this to work + // This feature is disabled by default, as it is alpha still + glog.V(1).Infoln("[join] enabling dynamic kubelet configuration") if features.Enabled(j.cfg.FeatureGates, features.DynamicKubeletConfig) { - if err := kubeletphase.ConsumeBaseKubeletConfiguration(j.cfg.NodeName); err != nil { + client, err := kubeletphase.GetLocalNodeTLSBootstrappedClient() + if err != nil { + return err + } + + if err := kubeletphase.EnableDynamicConfigForNode(client, j.cfg.NodeName, kubeletVersion); err != nil { return fmt.Errorf("error consuming base kubelet configuration: %v", err) } } diff --git a/cmd/kubeadm/app/cmd/phases/kubelet.go b/cmd/kubeadm/app/cmd/phases/kubelet.go index 3e6f6811054..c07257d9b7f 100644 --- a/cmd/kubeadm/app/cmd/phases/kubelet.go +++ b/cmd/kubeadm/app/cmd/phases/kubelet.go @@ -18,184 +18,172 @@ package phases import ( "fmt" - "io/ioutil" + "os" "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/runtime" - kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" - "k8s.io/kubernetes/cmd/kubeadm/app/features" kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/normalizer" + "k8s.io/kubernetes/pkg/util/version" ) var ( - kubeletWriteInitConfigLongDesc = normalizer.LongDesc(` - Writes init kubelet configuration to disk for dynamic kubelet configuration feature. - Please note that the kubelet configuration can be passed to kubeadm as a value into the master configuration file. + kubeletWriteConfigToDiskLongDesc = normalizer.LongDesc(` + Writes kubelet configuration to disk, either based on the kubelet-config-1.X ConfigMap in the cluster, or from the + configuration passed to the command via "--config". 
` + cmdutil.AlphaDisclaimer) - kubeletWriteInitConfigExample = normalizer.Examples(` - # Writes init kubelet configuration to disk. - kubeadm alpha phase kubelet init + kubeletWriteConfigToDiskExample = normalizer.Examples(` + # Writes kubelet configuration for a node to disk. The information is fetched from the cluster ConfigMap + kubeadm alpha phase kubelet write-config-to-disk --kubelet-version v1.11.0 --kubeconfig /etc/kubernetes/kubelet.conf + + # Writes kubelet configuration down to disk, based on the configuration flag passed to --config + kubeadm alpha phase kubelet write-config-to-disk --kubelet-version v1.11.0 --config kubeadm.yaml `) kubeletUploadDynamicConfigLongDesc = normalizer.LongDesc(` - Uploads dynamic kubelet configuration as ConfigMap and links it to the current node as ConfigMapRef. - Please note that the kubelet configuration can be passed to kubeadm as a value into the master configuration file. + Uploads kubelet configuration extracted from the kubeadm MasterConfiguration object to a ConfigMap + of the form kubelet-config-1.X in the cluster, where X is the minor version of the current Kubernetes version ` + cmdutil.AlphaDisclaimer) kubeletUploadDynamicConfigExample = normalizer.Examples(` - # Uploads dynamic kubelet configuration as ConfigMap. - kubeadm alpha phase kubelet upload + # Uploads the kubelet configuration from the kubeadm Config file to a ConfigMap in the cluster. + kubeadm alpha phase kubelet upload-config --config kubeadm.yaml `) kubeletEnableDynamicConfigLongDesc = normalizer.LongDesc(` - Enables or updates dynamic kubelet configuration on node. This should be run on nodes. - Please note that the kubelet configuration can be passed to kubeadm as a value into the master configuration file. + Enables or updates dynamic kubelet configuration for a Node, against the kubelet-config-1.X ConfigMap in the cluster, + where X is the minor version of the desired kubelet version. + + WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it + may have surprising side-effects at this stage. + ` + cmdutil.AlphaDisclaimer) kubeletEnableDynamicConfigExample = normalizer.Examples(` - # Enables dynamic kubelet configuration on node. - kubeadm alpha phase kubelet enable + # Enables dynamic kubelet configuration for a Node. + kubeadm alpha phase kubelet enable-dynamic-config --node-name node-1 --kubelet-version v1.11.0 + + WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it + may have surprising side-effects at this stage. 
`) ) // NewCmdKubelet returns main command for Kubelet phase func NewCmdKubelet() *cobra.Command { + var kubeConfigFile string cmd := &cobra.Command{ Use: "kubelet", - Short: "Adopts dynamic kubelet configuration.", + Short: "Handles kubelet configuration.", Long: cmdutil.MacroCommandLongDescription, } - cmd.AddCommand(NewCmdKubeletWriteInitConfig()) - cmd.AddCommand(NewCmdKubeletUploadDynamicConfig()) - cmd.AddCommand(NewCmdKubeletEnableDynamicConfig()) + cmd.PersistentFlags().StringVar(&kubeConfigFile, "kubeconfig", "/etc/kubernetes/admin.conf", "The KubeConfig file to use when talking to the cluster") + cmd.AddCommand(NewCmdKubeletWriteConfigToDisk(&kubeConfigFile)) + cmd.AddCommand(NewCmdKubeletUploadConfig(&kubeConfigFile)) + cmd.AddCommand(NewCmdKubeletEnableDynamicConfig(&kubeConfigFile)) return cmd } -// NewCmdKubeletWriteInitConfig calls cobra.Command for writing init kubelet configuration -func NewCmdKubeletWriteInitConfig() *cobra.Command { +// NewCmdKubeletUploadConfig calls cobra.Command for uploading dynamic kubelet configuration +func NewCmdKubeletUploadConfig(kubeConfigFile *string) *cobra.Command { var cfgPath string - cmd := &cobra.Command{ - Use: "init", - Short: "Writes init kubelet configuration to disk", - Long: kubeletWriteInitConfigLongDesc, - Example: kubeletWriteInitConfigExample, - Run: func(cmd *cobra.Command, args []string) { - cfg := &kubeadmapiv1alpha2.MasterConfiguration{ - // KubernetesVersion is not used by kubelet init, but we set this explicitly to avoid - // the lookup of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig - KubernetesVersion: "v1.9.0", - } - kubeadmscheme.Scheme.Default(cfg) - - // This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags - internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, cfg) - kubeadmutil.CheckErr(err) - if features.Enabled(internalcfg.FeatureGates, features.DynamicKubeletConfig) { - err = kubeletphase.WriteInitKubeletConfigToDiskOnMaster(internalcfg) - kubeadmutil.CheckErr(err) - } else { - fmt.Println("[kubelet] feature gate DynamicKubeletConfig is not enabled, do nothing.") - } - }, - } - - cmd.Flags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)") - - return cmd -} - -// NewCmdKubeletUploadDynamicConfig calls cobra.Command for uploading dynamic kubelet configuration -func NewCmdKubeletUploadDynamicConfig() *cobra.Command { - var cfgPath, kubeConfigFile string cmd := &cobra.Command{ - Use: "upload", - Short: "Uploads dynamic kubelet configuration as ConfigMap", + Use: "upload-config", + Short: "Uploads kubelet configuration to a ConfigMap", Long: kubeletUploadDynamicConfigLongDesc, Example: kubeletUploadDynamicConfigExample, Run: func(cmd *cobra.Command, args []string) { - cfg := &kubeadmapiv1alpha2.MasterConfiguration{ - // KubernetesVersion is not used by kubelet upload, but we set this explicitly to avoid - // the lookup of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig - KubernetesVersion: "v1.9.0", + if len(cfgPath) == 0 { + kubeadmutil.CheckErr(fmt.Errorf("The --config argument is required")) } - kubeadmscheme.Scheme.Default(cfg) - // This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags - internalcfg, err := 
configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, cfg) + // This call returns the ready-to-use configuration based on the configuration file + internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, &kubeadmapiv1alpha2.MasterConfiguration{}) + kubeadmutil.CheckErr(err) + + client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile) + kubeadmutil.CheckErr(err) + + err = kubeletphase.CreateConfigMap(internalcfg, client) kubeadmutil.CheckErr(err) - if features.Enabled(internalcfg.FeatureGates, features.DynamicKubeletConfig) { - client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile) - kubeadmutil.CheckErr(err) - err = kubeletphase.CreateBaseKubeletConfiguration(internalcfg, client) - kubeadmutil.CheckErr(err) - } else { - fmt.Println("[kubelet] feature gate DynamicKubeletConfig is not enabled, do nothing.") - } }, } cmd.Flags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)") - cmd.Flags().StringVar(&kubeConfigFile, "kubeconfig", "/etc/kubernetes/admin.conf", "The KubeConfig file to use when talking to the cluster") + return cmd +} +// NewCmdKubeletWriteConfigToDisk calls cobra.Command for writing init kubelet configuration +func NewCmdKubeletWriteConfigToDisk(kubeConfigFile *string) *cobra.Command { + var cfgPath, kubeletVersionStr string + cmd := &cobra.Command{ + Use: "write-config-to-disk", + Short: "Writes kubelet configuration to disk, either based on the --config argument or the kubeadm-config ConfigMap.", + Long: kubeletWriteConfigToDiskLongDesc, + Example: kubeletWriteConfigToDiskExample, + Run: func(cmd *cobra.Command, args []string) { + if len(kubeletVersionStr) == 0 { + kubeadmutil.CheckErr(fmt.Errorf("The --kubelet-version argument is required")) + } + + kubeletVersion, err := version.ParseSemantic(kubeletVersionStr) + kubeadmutil.CheckErr(err) + + client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile) + kubeadmutil.CheckErr(err) + + // This call returns the ready-to-use configuration based on the configuration file + internalcfg, err := configutil.FetchConfigFromFileOrCluster(client, os.Stdout, "kubelet", cfgPath) + kubeadmutil.CheckErr(err) + + err = kubeletphase.WriteConfigToDisk(internalcfg.KubeletConfiguration.BaseConfig, kubeletVersion) + kubeadmutil.CheckErr(err) + }, + } + + cmd.Flags().StringVar(&kubeletVersionStr, "kubelet-version", kubeletVersionStr, "The desired version for the kubelet") + cmd.Flags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)") return cmd } // NewCmdKubeletEnableDynamicConfig calls cobra.Command for enabling dynamic kubelet configuration on node -func NewCmdKubeletEnableDynamicConfig() *cobra.Command { - cfg := &kubeadmapiv1alpha2.NodeConfiguration{} - kubeadmscheme.Scheme.Default(cfg) +// This feature is still in alpha and an experimental state +func NewCmdKubeletEnableDynamicConfig(kubeConfigFile *string) *cobra.Command { + var nodeName, kubeletVersionStr string - var cfgPath string cmd := &cobra.Command{ - Use: "enable", - Aliases: []string{"update"}, - Short: "Enables or updates dynamic kubelet configuration on node", + Use: "enable-dynamic-config", + Short: "EXPERIMENTAL: Enables or updates dynamic kubelet configuration for a Node", Long: kubeletEnableDynamicConfigLongDesc, Example: kubeletEnableDynamicConfigExample, Run: func(cmd *cobra.Command, args []string) { - nodeName, err := getNodeName(cfgPath, cfg) - kubeadmutil.CheckErr(err) - 
if features.Enabled(cfg.FeatureGates, features.DynamicKubeletConfig) { - err = kubeletphase.ConsumeBaseKubeletConfiguration(nodeName) - kubeadmutil.CheckErr(err) - } else { - fmt.Println("[kubelet] feature gate DynamicKubeletConfig is not enabled, do nothing.") + if len(nodeName) == 0 { + kubeadmutil.CheckErr(fmt.Errorf("The --node-name argument is required")) } + if len(kubeletVersionStr) == 0 { + kubeadmutil.CheckErr(fmt.Errorf("The --kubelet-version argument is required")) + } + + kubeletVersion, err := version.ParseSemantic(kubeletVersionStr) + kubeadmutil.CheckErr(err) + + client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile) + kubeadmutil.CheckErr(err) + + err = kubeletphase.EnableDynamicConfigForNode(client, nodeName, kubeletVersion) + kubeadmutil.CheckErr(err) }, } - cmd.Flags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)") - cmd.Flags().StringVar(&cfg.NodeName, "node-name", cfg.NodeName, "Name of the node that should enable the dynamic kubelet configuration") - + cmd.Flags().StringVar(&nodeName, "node-name", nodeName, "Name of the node that should enable the dynamic kubelet configuration") + cmd.Flags().StringVar(&kubeletVersionStr, "kubelet-version", kubeletVersionStr, "The desired version for the kubelet") return cmd } - -func getNodeName(cfgPath string, cfg *kubeadmapiv1alpha2.NodeConfiguration) (string, error) { - if cfgPath != "" { - b, err := ioutil.ReadFile(cfgPath) - if err != nil { - return "", fmt.Errorf("unable to read config from %q [%v]", cfgPath, err) - } - if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), b, cfg); err != nil { - return "", fmt.Errorf("unable to decode config from %q [%v]", cfgPath, err) - } - } - - if cfg.NodeName == "" { - cfg.NodeName = nodeutil.GetHostname("") - } - - return cfg.NodeName, nil -} diff --git a/cmd/kubeadm/app/cmd/phases/kubelet_test.go b/cmd/kubeadm/app/cmd/phases/kubelet_test.go index 25ba6f8a445..89159d26105 100644 --- a/cmd/kubeadm/app/cmd/phases/kubelet_test.go +++ b/cmd/kubeadm/app/cmd/phases/kubelet_test.go @@ -25,33 +25,37 @@ import ( ) func TestKubeletSubCommandsHasFlags(t *testing.T) { + kubeConfigFile := "foo" subCmds := []*cobra.Command{ - NewCmdKubeletWriteInitConfig(), - NewCmdKubeletUploadDynamicConfig(), - NewCmdKubeletEnableDynamicConfig(), + NewCmdKubeletUploadConfig(&kubeConfigFile), + NewCmdKubeletWriteConfigToDisk(&kubeConfigFile), + NewCmdKubeletEnableDynamicConfig(&kubeConfigFile), } - commonFlags := []string{ - "config", - } + commonFlags := []string{} var tests = []struct { command string additionalFlags []string }{ { - command: "init", - }, - { - command: "upload", + command: "upload-config", additionalFlags: []string{ - "kubeconfig", + "config", }, }, { - command: "enable", + command: "write-config-to-disk", + additionalFlags: []string{ + "kubelet-version", + "config", + }, + }, + { + command: "enable-dynamic-config", additionalFlags: []string{ "node-name", + "kubelet-version", }, }, } diff --git a/cmd/kubeadm/app/cmd/upgrade/common.go b/cmd/kubeadm/app/cmd/upgrade/common.go index e6cadbb8fb3..1f5f2336ca8 100644 --- a/cmd/kubeadm/app/cmd/upgrade/common.go +++ b/cmd/kubeadm/app/cmd/upgrade/common.go @@ -24,17 +24,21 @@ import ( "os" "strings" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" fakediscovery "k8s.io/client-go/discovery/fake" clientset "k8s.io/client-go/kubernetes" kubeadmapi 
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/features" "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade" "k8s.io/kubernetes/cmd/kubeadm/app/preflight" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" + configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" ) @@ -61,8 +65,20 @@ func enforceRequirements(flags *cmdUpgradeFlags, dryRun bool, newK8sVersion stri } // Fetch the configuration from a file or ConfigMap and validate it - cfg, err := upgrade.FetchConfiguration(client, os.Stdout, flags.cfgPath) + fmt.Println("[upgrade/config] Making sure the configuration is correct:") + cfg, err := configutil.FetchConfigFromFileOrCluster(client, os.Stdout, "upgrade/config", flags.cfgPath) if err != nil { + if apierrors.IsNotFound(err) { + fmt.Printf("[upgrade/config] In order to upgrade, a ConfigMap called %q in the %s namespace must exist.\n", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem) + fmt.Println("[upgrade/config] Without this information, 'kubeadm upgrade' won't know how to configure your upgraded cluster.") + fmt.Println("") + fmt.Println("[upgrade/config] Next steps:") + fmt.Printf("\t- OPTION 1: Run 'kubeadm config upload from-flags' and specify the same CLI arguments you passed to 'kubeadm init' when you created your master.\n") + fmt.Printf("\t- OPTION 2: Run 'kubeadm config upload from-file' and specify the same config file you passed to 'kubeadm init' when you created your master.\n") + fmt.Printf("\t- OPTION 3: Pass a config file to 'kubeadm upgrade' using the --config flag.\n") + fmt.Println("") + err = fmt.Errorf("the ConfigMap %q in the %s namespace used for getting configuration information was not found", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem) + } return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err) } diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index ea53fade786..afd5ce5ba5c 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -161,9 +161,6 @@ const ( // system:nodes group subject is removed if present. NodesClusterRoleBinding = "system:node" - // KubeletBaseConfigMapRoleName defines the base kubelet configuration ConfigMap. 
- KubeletBaseConfigMapRoleName = "kubeadm:kubelet-base-configmap" - // APICallRetryInterval defines how long kubeadm should wait before retrying a failed API operation APICallRetryInterval = 500 * time.Millisecond // DiscoveryRetryInterval specifies how long kubeadm should wait before retrying to connect to the master when doing discovery @@ -191,17 +188,17 @@ const ( // MasterConfigurationConfigMapKey specifies in what ConfigMap key the master configuration should be stored MasterConfigurationConfigMapKey = "MasterConfiguration" - // KubeletBaseConfigurationConfigMap specifies in what ConfigMap in the kube-system namespace the initial remote configuration of kubelet should be stored - KubeletBaseConfigurationConfigMap = "kubelet-base-config-1.9" + // KubeletBaseConfigurationConfigMapPrefix specifies in what ConfigMap in the kube-system namespace the initial remote configuration of kubelet should be stored + KubeletBaseConfigurationConfigMapPrefix = "kubelet-config-" // KubeletBaseConfigurationConfigMapKey specifies in what ConfigMap key the initial remote configuration of kubelet should be stored KubeletBaseConfigurationConfigMapKey = "kubelet" - // KubeletBaseConfigurationDir specifies the directory on the node where stores the initial remote configuration of kubelet - KubeletBaseConfigurationDir = "/var/lib/kubelet/config/init" + // KubeletBaseConfigMapRolePrefix defines the base kubelet configuration ConfigMap. + KubeletBaseConfigMapRolePrefix = "kubeadm:kubelet-config-" - // KubeletBaseConfigurationFile specifies the file name on the node which stores initial remote configuration of kubelet - KubeletBaseConfigurationFile = "kubelet" + // KubeletConfigurationFile specifies the file name on the node which stores initial remote configuration of kubelet + KubeletConfigurationFile = "/var/lib/kubelet/config.yaml" // MinExternalEtcdVersion indicates minimum external etcd version which kubeadm supports MinExternalEtcdVersion = "3.2.17" @@ -260,6 +257,14 @@ const ( // Copied from pkg/master/reconcilers to avoid pulling extra dependencies // TODO: Import this constant from a consts only package, that does not pull any further dependencies. LeaseEndpointReconcilerType = "lease" + + // KubeletEnvFile is a file "kubeadm init" writes at runtime. Using that interface, kubeadm can customize certain + // kubelet flags conditionally based on the environment at runtime. Also, parameters given to the configuration file + // might be passed through this file. "kubeadm init" writes one variable, with the name ${KubeletEnvFileVariableName}. 
+ KubeletEnvFile = "/var/lib/kubelet/kubeadm-flags.env" + + // KubeletEnvFileVariableName specifies the shell script variable name "kubeadm init" should write a value to in KubeletEnvFile + KubeletEnvFileVariableName = "KUBELET_KUBEADM_ARGS" ) var ( @@ -290,6 +295,9 @@ var ( // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports MinimumKubeletVersion = version.MustParseSemantic("v1.10.0") + // MinimumKubeletConfigVersion specifies the minimum version of Kubernetes where kubeadm supports specifying --config to the kubelet + MinimumKubeletConfigVersion = version.MustParseSemantic("v1.11.0-alpha.1") + // SupportedEtcdVersion lists officially supported etcd versions with corresponding kubernetes releases SupportedEtcdVersion = map[uint8]string{ 10: "3.1.12", diff --git a/cmd/kubeadm/app/phases/kubelet/config.go b/cmd/kubeadm/app/phases/kubelet/config.go new file mode 100644 index 00000000000..5503a16a7fc --- /dev/null +++ b/cmd/kubeadm/app/phases/kubelet/config.go @@ -0,0 +1,187 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "fmt" + "io/ioutil" + + "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" + "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" + kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" + rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1" + kubeletconfigscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" + kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" + "k8s.io/kubernetes/pkg/util/version" +) + +// WriteConfigToDisk writes the kubelet config object down to a file +// Used at "kubeadm init" and "kubeadm upgrade" time +func WriteConfigToDisk(kubeletConfig *kubeletconfigv1beta1.KubeletConfiguration, kubeletVersion *version.Version) error { + + // If the kubelet version is v1.10.x, exit + if kubeletVersion.LessThan(kubeadmconstants.MinimumKubeletConfigVersion) { + return nil + } + + kubeletBytes, err := getConfigBytes(kubeletConfig) + if err != nil { + return err + } + return writeConfigBytesToDisk(kubeletBytes) +} + +// CreateConfigMap creates a ConfigMap with the generic kubelet configuration. 
+// Used at "kubeadm init" and "kubeadm upgrade" time +func CreateConfigMap(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error { + + k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion) + if err != nil { + return err + } + + // If Kubernetes version is v1.10.x, exit + if k8sVersion.LessThan(kubeadmconstants.MinimumKubeletConfigVersion) { + return nil + } + + configMapName := configMapName(k8sVersion) + fmt.Printf("[kubelet] Creating a ConfigMap %q in namespace %s with the configuration for the kubelets in the cluster\n", configMapName, metav1.NamespaceSystem) + + kubeletBytes, err := getConfigBytes(cfg.KubeletConfiguration.BaseConfig) + if err != nil { + return err + } + + if err := apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(kubeletBytes), + }, + }); err != nil { + return err + } + + if err := createConfigMapRBACRules(client, k8sVersion); err != nil { + return fmt.Errorf("error creating kubelet configuration configmap RBAC rules: %v", err) + } + return nil +} + +// createConfigMapRBACRules creates the RBAC rules for exposing the base kubelet ConfigMap in the kube-system namespace to unauthenticated users +func createConfigMapRBACRules(client clientset.Interface, k8sVersion *version.Version) error { + if err := apiclient.CreateOrUpdateRole(client, &rbac.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapRBACName(k8sVersion), + Namespace: metav1.NamespaceSystem, + }, + Rules: []rbac.PolicyRule{ + rbachelper.NewRule("get").Groups("").Resources("configmaps").Names(configMapName(k8sVersion)).RuleOrDie(), + }, + }); err != nil { + return err + } + + return apiclient.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapRBACName(k8sVersion), + Namespace: metav1.NamespaceSystem, + }, + RoleRef: rbac.RoleRef{ + APIGroup: rbac.GroupName, + Kind: "Role", + Name: configMapRBACName(k8sVersion), + }, + Subjects: []rbac.Subject{ + { + Kind: rbac.GroupKind, + Name: kubeadmconstants.NodesGroup, + }, + { + Kind: rbac.GroupKind, + Name: kubeadmconstants.NodeBootstrapTokenAuthGroup, + }, + }, + }) +} + +// DownloadConfig downloads the kubelet configuration from a ConfigMap and writes it to disk. 
+// Used at "kubeadm join" time +func DownloadConfig(kubeletKubeConfig string, kubeletVersion *version.Version) error { + + // If the kubelet version is v1.10.x, exit + if kubeletVersion.LessThan(kubeadmconstants.MinimumKubeletConfigVersion) { + return nil + } + + // Download the ConfigMap from the cluster based on what version the kubelet is + configMapName := configMapName(kubeletVersion) + + fmt.Printf("[kubelet] Downloading configuration for the kubelet from the %q ConfigMap in the %s namespace\n", + configMapName, metav1.NamespaceSystem) + + client, err := kubeconfigutil.ClientSetFromFile(kubeletKubeConfig) + if err != nil { + return fmt.Errorf("couldn't create client from kubeconfig file %q", kubeletKubeConfig) + } + + kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) + if err != nil { + return err + } + + return writeConfigBytesToDisk([]byte(kubeletCfg.Data[kubeadmconstants.KubeletBaseConfigurationConfigMapKey])) +} + +// configMapName returns the right ConfigMap name for the right branch of k8s +func configMapName(k8sVersion *version.Version) string { + return fmt.Sprintf("%s%d.%d", kubeadmconstants.KubeletBaseConfigurationConfigMapPrefix, k8sVersion.Major(), k8sVersion.Minor()) +} + +// configMapRBACName returns the name for the Role/RoleBinding for the kubelet config configmap for the right branch of k8s +func configMapRBACName(k8sVersion *version.Version) string { + return fmt.Sprintf("%s%d.%d", kubeadmconstants.KubeletBaseConfigMapRolePrefix, k8sVersion.Major(), k8sVersion.Minor()) +} + +// getConfigBytes marshals a kubeletconfiguration object to bytes +func getConfigBytes(kubeletConfig *kubeletconfigv1beta1.KubeletConfiguration) ([]byte, error) { + _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs() + if err != nil { + return []byte{}, err + } + + return kubeadmutil.MarshalToYamlForCodecs(kubeletConfig, kubeletconfigv1beta1.SchemeGroupVersion, *kubeletCodecs) +} + +// writeConfigBytesToDisk writes a byte slice down to disk at the specific location of the kubelet config file +func writeConfigBytesToDisk(b []byte) error { + fmt.Printf("[kubelet] Writing kubelet configuration to file %q\n", kubeadmconstants.KubeletConfigurationFile) + + if err := ioutil.WriteFile(kubeadmconstants.KubeletConfigurationFile, b, 0644); err != nil { + return fmt.Errorf("failed to write kubelet configuration to the file %q: %v", kubeadmconstants.KubeletConfigurationFile, err) + } + return nil +} diff --git a/cmd/kubeadm/app/phases/kubelet/config_test.go b/cmd/kubeadm/app/phases/kubelet/config_test.go new file mode 100644 index 00000000000..0e6d5b68c39 --- /dev/null +++ b/cmd/kubeadm/app/phases/kubelet/config_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +import ( + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" + "k8s.io/kubernetes/pkg/util/version" +) + +func TestCreateConfigMap(t *testing.T) { + nodeName := "fake-node" + client := fake.NewSimpleClientset() + cfg := &kubeadmapi.MasterConfiguration{ + NodeName: nodeName, + KubernetesVersion: "v1.11.0", + KubeletConfiguration: kubeadmapi.KubeletConfiguration{ + BaseConfig: &kubeletconfigv1beta1.KubeletConfiguration{}, + }, + } + + client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { + return true, &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + Spec: v1.NodeSpec{}, + }, nil + }) + client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, nil + }) + client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, nil + }) + client.PrependReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, nil + }) + + if err := CreateConfigMap(cfg, client); err != nil { + t.Errorf("CreateConfigMap: unexpected error %v", err) + } +} + +func TestCreateConfigMapRBACRules(t *testing.T) { + client := fake.NewSimpleClientset() + client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, nil + }) + client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, nil + }) + + if err := createConfigMapRBACRules(client, version.MustParseSemantic("v1.11.0")); err != nil { + t.Errorf("createConfigMapRBACRules: unexpected error %v", err) + } +} diff --git a/cmd/kubeadm/app/phases/kubelet/dynamic.go b/cmd/kubeadm/app/phases/kubelet/dynamic.go new file mode 100644 index 00000000000..b65d7460370 --- /dev/null +++ b/cmd/kubeadm/app/phases/kubelet/dynamic.go @@ -0,0 +1,117 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" + "k8s.io/kubernetes/pkg/util/version" +) + +// EnableDynamicConfigForNode updates the Node's ConfigSource to enable Dynamic Kubelet Configuration, depending on what version the kubelet is +// Used at "kubeadm init", "kubeadm join" and "kubeadm upgrade" time +// This func is ONLY run if the user enables the `DynamicKubeletConfig` feature gate, which is by default off +func EnableDynamicConfigForNode(client clientset.Interface, nodeName string, kubeletVersion *version.Version) error { + + // If the kubelet version is v1.10.x, exit + if kubeletVersion.LessThan(kubeadmconstants.MinimumKubeletConfigVersion) { + return nil + } + + configMapName := configMapName(kubeletVersion) + fmt.Printf("[kubelet] Enabling Dynamic Kubelet Config for Node %q; config sourced from ConfigMap %q in namespace %s\n", + nodeName, configMapName, metav1.NamespaceSystem) + fmt.Println("[kubelet] WARNING: The Dynamic Kubelet Config feature is alpha and off by default. It hasn't been well-tested yet at this stage, use with caution.") + + // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned. + return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.UpdateNodeTimeout, func() (bool, error) { + node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + oldData, err := json.Marshal(node) + if err != nil { + return false, err + } + + kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + node.Spec.ConfigSource = &v1.NodeConfigSource{ + ConfigMap: &v1.ConfigMapNodeConfigSource{ + Name: configMapName, + Namespace: metav1.NamespaceSystem, + UID: kubeletCfg.UID, + KubeletConfigKey: kubeadmconstants.KubeletBaseConfigurationConfigMapKey, + }, + } + + newData, err := json.Marshal(node) + if err != nil { + return false, err + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return false, err + } + + if _, err := client.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if apierrs.IsConflict(err) { + fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)") + return false, nil + } + return false, err + } + + return true, nil + }) +} + +// GetLocalNodeTLSBootstrappedClient waits for the kubelet to perform the TLS bootstrap +// and then creates a client from config file /etc/kubernetes/kubelet.conf +func GetLocalNodeTLSBootstrappedClient() (clientset.Interface, error) { + fmt.Println("[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...") + + kubeletKubeConfig := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName) + + // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned. 
+ err := wait.PollImmediateInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) { + _, err := os.Stat(kubeletKubeConfig) + return (err == nil), nil + }) + if err != nil { + return nil, err + } + + return kubeconfigutil.ClientSetFromFile(kubeletKubeConfig) +} diff --git a/cmd/kubeadm/app/phases/kubelet/dynamic_test.go b/cmd/kubeadm/app/phases/kubelet/dynamic_test.go new file mode 100644 index 00000000000..fe0ba35f41c --- /dev/null +++ b/cmd/kubeadm/app/phases/kubelet/dynamic_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubelet + +import ( + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" + "k8s.io/kubernetes/pkg/util/version" +) + +func TestEnableDynamicConfigForNode(t *testing.T) { + nodeName := "fake-node" + client := fake.NewSimpleClientset() + client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { + return true, &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + Spec: v1.NodeSpec{ + ConfigSource: &v1.NodeConfigSource{ + ConfigMap: &v1.ConfigMapNodeConfigSource{ + UID: "", + }, + }, + }, + }, nil + }) + client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) { + return true, &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubelet-config-1.11", + Namespace: metav1.NamespaceSystem, + UID: "fake-uid", + }, + }, nil + }) + client.PrependReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, nil + }) + + if err := EnableDynamicConfigForNode(client, nodeName, version.MustParseSemantic("v1.11.0")); err != nil { + t.Errorf("UpdateNodeWithConfigMap: unexpected error %v", err) + } +} diff --git a/cmd/kubeadm/app/phases/kubelet/flags.go b/cmd/kubeadm/app/phases/kubelet/flags.go new file mode 100644 index 00000000000..167966793aa --- /dev/null +++ b/cmd/kubeadm/app/phases/kubelet/flags.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubelet + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" + kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" +) + +// WriteKubeletDynamicEnvFile writes a environment file with dynamic flags to the kubelet. +// Used at "kubeadm init" and "kubeadm join" time. +func WriteKubeletDynamicEnvFile(cfg *kubeadmapi.MasterConfiguration) error { + + // TODO: Pass through extra arguments from the config file here in the future + argList := kubeadmutil.BuildArgumentListFromMap(buildKubeletArgMap(cfg), map[string]string{}) + envFileContent := fmt.Sprintf("%s=%s\n", constants.KubeletEnvFileVariableName, strings.Join(argList, " ")) + + return writeKubeletFlagBytesToDisk([]byte(envFileContent)) +} + +// buildKubeletArgMap takes a MasterConfiguration object and builds based on that a string-string map with flags +// that should be given to the local kubelet daemon. +func buildKubeletArgMap(cfg *kubeadmapi.MasterConfiguration) map[string]string { + kubeletFlags := map[string]string{} + + if cfg.CRISocket == kubeadmapiv1alpha2.DefaultCRISocket { + // These flags should only be set when running docker + kubeletFlags["network-plugin"] = "cni" + kubeletFlags["cni-conf-dir"] = "/etc/cni/net.d" + kubeletFlags["cni-bin-dir"] = "/opt/cni/bin" + } else { + kubeletFlags["container-runtime"] = "remote" + kubeletFlags["container-runtime-endpoint"] = cfg.CRISocket + } + // TODO: Add support for registering custom Taints and Labels + // TODO: Add support for overriding flags with ExtraArgs + // TODO: Pass through --hostname-override if a custom name is used? + // TODO: Check if `systemd-resolved` is running, and set `--resolv-conf` based on that + // TODO: Conditionally set `--cgroup-driver` to either `systemd` or `cgroupfs` + + return kubeletFlags +} + +// writeKubeletFlagBytesToDisk writes a byte slice down to disk at the specific location of the kubelet flag overrides file +func writeKubeletFlagBytesToDisk(b []byte) error { + fmt.Printf("[kubelet] Writing kubelet environment file with flags to file %q\n", constants.KubeletEnvFile) + + // creates target folder if not already exists + if err := os.MkdirAll(filepath.Dir(constants.KubeletEnvFile), 0700); err != nil { + return fmt.Errorf("failed to create directory %q: %v", filepath.Dir(constants.KubeletEnvFile), err) + } + if err := ioutil.WriteFile(constants.KubeletEnvFile, b, 0644); err != nil { + return fmt.Errorf("failed to write kubelet configuration to the file %q: %v", constants.KubeletEnvFile, err) + } + return nil +} diff --git a/cmd/kubeadm/app/phases/kubelet/kubelet.go b/cmd/kubeadm/app/phases/kubelet/kubelet.go deleted file mode 100644 index 4f0a51888a7..00000000000 --- a/cmd/kubeadm/app/phases/kubelet/kubelet.go +++ /dev/null @@ -1,235 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubelet - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "k8s.io/api/core/v1" - rbac "k8s.io/api/rbac/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" - "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1" - kubeletconfigscheme "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme" - kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" -) - -// CreateBaseKubeletConfiguration creates base kubelet configuration for dynamic kubelet configuration feature. -func CreateBaseKubeletConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error { - fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster\n", - kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem) - - _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs() - if err != nil { - return err - } - kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1beta1.SchemeGroupVersion, *kubeletCodecs) - if err != nil { - return err - } - - if err = apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmconstants.KubeletBaseConfigurationConfigMap, - Namespace: metav1.NamespaceSystem, - }, - Data: map[string]string{ - kubeadmconstants.KubeletBaseConfigurationConfigMapKey: string(kubeletBytes), - }, - }); err != nil { - return err - } - - if err := createKubeletBaseConfigMapRBACRules(client); err != nil { - return fmt.Errorf("error creating base kubelet configmap RBAC rules: %v", err) - } - - return updateNodeWithConfigMap(client, cfg.NodeName) -} - -// ConsumeBaseKubeletConfiguration consumes base kubelet configuration for dynamic kubelet configuration feature. -func ConsumeBaseKubeletConfiguration(nodeName string) error { - client, err := getLocalNodeTLSBootstrappedClient() - if err != nil { - return err - } - - kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.GetOptions{}) - if err != nil { - return err - } - - if err := writeInitKubeletConfigToDisk([]byte(kubeletCfg.Data[kubeadmconstants.KubeletBaseConfigurationConfigMapKey])); err != nil { - return fmt.Errorf("failed to write initial remote configuration of kubelet to disk for node %s: %v", nodeName, err) - } - - return updateNodeWithConfigMap(client, nodeName) -} - -// updateNodeWithConfigMap updates node ConfigSource with KubeletBaseConfigurationConfigMap -func updateNodeWithConfigMap(client clientset.Interface, nodeName string) error { - fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s\n", - nodeName, kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem) - - // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned. 
- return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.UpdateNodeTimeout, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - if err != nil { - return false, nil - } - - oldData, err := json.Marshal(node) - if err != nil { - return false, err - } - - kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.GetOptions{}) - if err != nil { - return false, nil - } - - node.Spec.ConfigSource = &v1.NodeConfigSource{ - ConfigMap: &v1.ConfigMapNodeConfigSource{ - Name: kubeadmconstants.KubeletBaseConfigurationConfigMap, - Namespace: metav1.NamespaceSystem, - UID: kubeletCfg.UID, - KubeletConfigKey: kubeadmconstants.KubeletBaseConfigurationConfigMapKey, - }, - } - - newData, err := json.Marshal(node) - if err != nil { - return false, err - } - - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) - if err != nil { - return false, err - } - - if _, err := client.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil { - if apierrs.IsConflict(err) { - fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)") - return false, nil - } - return false, err - } - - return true, nil - }) -} - -// createKubeletBaseConfigMapRBACRules creates the RBAC rules for exposing the base kubelet ConfigMap in the kube-system namespace to unauthenticated users -func createKubeletBaseConfigMapRBACRules(client clientset.Interface) error { - if err := apiclient.CreateOrUpdateRole(client, &rbac.Role{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmconstants.KubeletBaseConfigMapRoleName, - Namespace: metav1.NamespaceSystem, - }, - Rules: []rbac.PolicyRule{ - rbachelper.NewRule("get").Groups("").Resources("configmaps").Names(kubeadmconstants.KubeletBaseConfigurationConfigMap).RuleOrDie(), - }, - }); err != nil { - return err - } - - return apiclient.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmconstants.KubeletBaseConfigMapRoleName, - Namespace: metav1.NamespaceSystem, - }, - RoleRef: rbac.RoleRef{ - APIGroup: rbac.GroupName, - Kind: "Role", - Name: kubeadmconstants.KubeletBaseConfigMapRoleName, - }, - Subjects: []rbac.Subject{ - { - Kind: rbac.GroupKind, - Name: kubeadmconstants.NodesGroup, - }, - { - Kind: rbac.GroupKind, - Name: kubeadmconstants.NodeBootstrapTokenAuthGroup, - }, - }, - }) -} - -// getLocalNodeTLSBootstrappedClient waits for the kubelet to perform the TLS bootstrap -// and then creates a client from config file /etc/kubernetes/kubelet.conf -func getLocalNodeTLSBootstrappedClient() (clientset.Interface, error) { - fmt.Println("[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...") - - kubeletKubeConfig := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName) - - // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned. - err := wait.PollImmediateInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) { - _, err := os.Stat(kubeletKubeConfig) - return (err == nil), nil - }) - if err != nil { - return nil, err - } - - return kubeconfigutil.ClientSetFromFile(kubeletKubeConfig) -} - -// WriteInitKubeletConfigToDiskOnMaster writes base kubelet configuration to disk on master. 
-func WriteInitKubeletConfigToDiskOnMaster(cfg *kubeadmapi.MasterConfiguration) error { - fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s\n", cfg.NodeName) - - _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs() - if err != nil { - return err - } - - kubeletBytes, err := kubeadmutil.MarshalToYamlForCodecs(cfg.KubeletConfiguration.BaseConfig, kubeletconfigv1beta1.SchemeGroupVersion, *kubeletCodecs) - if err != nil { - return err - } - - if err := writeInitKubeletConfigToDisk(kubeletBytes); err != nil { - return fmt.Errorf("failed to write base configuration of kubelet to disk on master node %s: %v", cfg.NodeName, err) - } - - return nil -} - -func writeInitKubeletConfigToDisk(kubeletConfig []byte) error { - if err := os.MkdirAll(kubeadmconstants.KubeletBaseConfigurationDir, 0644); err != nil { - return fmt.Errorf("failed to create directory %q: %v", kubeadmconstants.KubeletBaseConfigurationDir, err) - } - baseConfigFile := filepath.Join(kubeadmconstants.KubeletBaseConfigurationDir, kubeadmconstants.KubeletBaseConfigurationFile) - if err := ioutil.WriteFile(baseConfigFile, kubeletConfig, 0644); err != nil { - return fmt.Errorf("failed to write initial remote configuration of kubelet into file %q: %v", baseConfigFile, err) - } - return nil -} diff --git a/cmd/kubeadm/app/phases/kubelet/kubelet_test.go b/cmd/kubeadm/app/phases/kubelet/kubelet_test.go deleted file mode 100644 index 8d211b052a7..00000000000 --- a/cmd/kubeadm/app/phases/kubelet/kubelet_test.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubelet - -import ( - "testing" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/fake" - core "k8s.io/client-go/testing" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" -) - -func TestCreateBaseKubeletConfiguration(t *testing.T) { - nodeName := "fake-node" - client := fake.NewSimpleClientset() - cfg := &kubeadmapi.MasterConfiguration{ - NodeName: nodeName, - KubeletConfiguration: kubeadmapi.KubeletConfiguration{ - BaseConfig: &kubeletconfigv1beta1.KubeletConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeletConfiguration", - }, - }, - }, - } - - client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { - return true, &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - Spec: v1.NodeSpec{ - ConfigSource: &v1.NodeConfigSource{ - ConfigMap: &v1.ConfigMapNodeConfigSource{ - UID: "", - }, - }, - }, - }, nil - }) - client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) { - return true, &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmconstants.KubeletBaseConfigurationConfigMap, - Namespace: metav1.NamespaceSystem, - UID: "fake-uid", - }, - }, nil - }) - client.PrependReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, nil - }) - client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, nil - }) - client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, nil - }) - client.PrependReactor("create", "configmaps", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, nil - }) - - if err := CreateBaseKubeletConfiguration(cfg, client); err != nil { - t.Errorf("CreateBaseKubeletConfiguration: unexepected error %v", err) - } -} - -func TestUpdateNodeWithConfigMap(t *testing.T) { - nodeName := "fake-node" - client := fake.NewSimpleClientset() - client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { - return true, &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, - }, - Spec: v1.NodeSpec{ - ConfigSource: &v1.NodeConfigSource{ - ConfigMap: &v1.ConfigMapNodeConfigSource{ - UID: "", - }, - }, - }, - }, nil - }) - client.PrependReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) { - return true, &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: kubeadmconstants.KubeletBaseConfigurationConfigMap, - Namespace: metav1.NamespaceSystem, - UID: "fake-uid", - }, - }, nil - }) - client.PrependReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, nil - }) - - if err := updateNodeWithConfigMap(client, nodeName); err != nil { - t.Errorf("UpdateNodeWithConfigMap: unexepected error %v", err) - } -} - -func TestCreateKubeletBaseConfigMapRBACRules(t *testing.T) { - client := fake.NewSimpleClientset() - client.PrependReactor("create", "roles", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, nil - }) - client.PrependReactor("create", "rolebindings", func(action core.Action) (bool, runtime.Object, error) { - return true, nil, nil - }) - - if err := createKubeletBaseConfigMapRBACRules(client); 
err != nil { - t.Errorf("createKubeletBaseConfigMapRBACRules: unexepected error %v", err) - } -} diff --git a/cmd/kubeadm/app/phases/upgrade/configuration.go b/cmd/kubeadm/app/phases/upgrade/configuration.go deleted file mode 100644 index de798b2c3d3..00000000000 --- a/cmd/kubeadm/app/phases/upgrade/configuration.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package upgrade - -import ( - "fmt" - "io" - "io/ioutil" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/constants" - configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" -) - -// FetchConfiguration fetches configuration required for upgrading your cluster from a file (which has precedence) or a ConfigMap in the cluster -func FetchConfiguration(client clientset.Interface, w io.Writer, cfgPath string) (*kubeadmapi.MasterConfiguration, error) { - fmt.Println("[upgrade/config] Making sure the configuration is correct:") - - // Load the configuration from a file or the cluster - configBytes, err := loadConfigurationBytes(client, w, cfgPath) - if err != nil { - return nil, err - } - - // Take the versioned configuration populated from the file or configmap, convert it to internal, default and validate - versionedcfg, err := configutil.BytesToInternalConfig(configBytes) - if err != nil { - return nil, fmt.Errorf("could not decode configuration: %v", err) - } - return versionedcfg, nil -} - -// loadConfigurationBytes loads the configuration byte slice from either a file or the cluster ConfigMap -func loadConfigurationBytes(client clientset.Interface, w io.Writer, cfgPath string) ([]byte, error) { - // The config file has the highest priority - if cfgPath != "" { - fmt.Printf("[upgrade/config] Reading configuration options from a file: %s\n", cfgPath) - return ioutil.ReadFile(cfgPath) - } - - fmt.Println("[upgrade/config] Reading configuration from the cluster...") - - configMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.MasterConfigurationConfigMap, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - fmt.Printf("[upgrade/config] In order to upgrade, a ConfigMap called %q in the %s namespace must exist.\n", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem) - fmt.Println("[upgrade/config] Without this information, 'kubeadm upgrade' won't know how to configure your upgraded cluster.") - fmt.Println("") - fmt.Println("[upgrade/config] Next steps:") - fmt.Printf("\t- OPTION 1: Run 'kubeadm config upload from-flags' and specify the same CLI arguments you passed to 'kubeadm init' when you created your master.\n") - fmt.Printf("\t- OPTION 2: Run 'kubeadm config upload from-file' and specify the same config file you passed to 'kubeadm init' when you created your master.\n") - fmt.Printf("\t- OPTION 3: Pass a config file to 'kubeadm upgrade' using 
the --config flag.\n") - fmt.Println("") - return []byte{}, fmt.Errorf("the ConfigMap %q in the %s namespace used for getting configuration information was not found", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem) - } else if err != nil { - return []byte{}, fmt.Errorf("an unexpected error happened when trying to get the ConfigMap %q in the %s namespace: %v", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem, err) - } - - fmt.Printf("[upgrade/config] FYI: You can look at this config file with 'kubectl -n %s get cm %s -oyaml'\n", metav1.NamespaceSystem, constants.MasterConfigurationConfigMap) - return []byte(configMap.Data[constants.MasterConfigurationConfigMapKey]), nil -} diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index 4672f227957..47a20ef70cb 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo" nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node" certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet" "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting" "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" @@ -102,6 +103,11 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC } } + // Create the new, version-branched kubelet ComponentConfig ConfigMap + if err := kubeletphase.CreateConfigMap(cfg, client); err != nil { + errs = append(errs, fmt.Errorf("error creating kubelet configuration ConfigMap: %v", err)) + } + // Upgrade kube-dns/CoreDNS and kube-proxy if err := dns.EnsureDNSAddon(cfg, client); err != nil { errs = append(errs, err) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index a7f9df241dd..52dc8f93346 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -1043,10 +1043,11 @@ func TryStartKubelet(ignorePreflightErrors sets.String) { initSystem, err := initsystem.GetInitSystem() if err != nil { glog.Infoln("[preflight] no supported init system detected, won't ensure kubelet is running.") - } else if initSystem.ServiceExists("kubelet") && !initSystem.ServiceIsActive("kubelet") { + } else if initSystem.ServiceExists("kubelet") { - glog.Infoln("[preflight] starting the kubelet service") - if err := initSystem.ServiceStart("kubelet"); err != nil { + glog.Infoln("[preflight] Activating the kubelet service") + // This runs "systemctl daemon-reload && systemctl restart kubelet" + if err := initSystem.ServiceRestart("kubelet"); err != nil { glog.Warningf("[preflight] unable to start the kubelet service: [%v]\n", err) glog.Warningf("[preflight] please ensure kubelet is running manually.") } diff --git a/cmd/kubeadm/app/util/config/cluster.go b/cmd/kubeadm/app/util/config/cluster.go new file mode 100644 index 00000000000..f2ec1a777c9 --- /dev/null +++ b/cmd/kubeadm/app/util/config/cluster.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "io" + "io/ioutil" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" +) + +// TODO: Add unit tests for this file + +// FetchConfigFromFileOrCluster fetches configuration required for upgrading your cluster from a file (which has precedence) or a ConfigMap in the cluster +func FetchConfigFromFileOrCluster(client clientset.Interface, w io.Writer, logPrefix, cfgPath string) (*kubeadmapi.MasterConfiguration, error) { + // Load the configuration from a file or the cluster + configBytes, err := loadConfigurationBytes(client, w, logPrefix, cfgPath) + if err != nil { + return nil, err + } + + // Take the versioned configuration populated from the file or ConfigMap, convert it to internal, default and validate + return BytesToInternalConfig(configBytes) +} + +// loadConfigurationBytes loads the configuration byte slice from either a file or the cluster ConfigMap +func loadConfigurationBytes(client clientset.Interface, w io.Writer, logPrefix, cfgPath string) ([]byte, error) { + // The config file has the highest priority + if cfgPath != "" { + fmt.Fprintf(w, "[%s] Reading configuration options from a file: %s\n", logPrefix, cfgPath) + return ioutil.ReadFile(cfgPath) + } + + fmt.Fprintf(w, "[%s] Reading configuration from the cluster...\n", logPrefix) + + configMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.MasterConfigurationConfigMap, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // Return the apierror directly so the caller of this function can know what type of error occurred and act based on that + return []byte{}, err + } else if err != nil { + return []byte{}, fmt.Errorf("an unexpected error happened when trying to get the ConfigMap %q in the %s namespace: %v", constants.MasterConfigurationConfigMap, metav1.NamespaceSystem, err) + } + + fmt.Fprintf(w, "[%s] FYI: You can look at this config file with 'kubectl -n %s get cm %s -oyaml'\n", logPrefix, metav1.NamespaceSystem, constants.MasterConfigurationConfigMap) + return []byte(configMap.Data[constants.MasterConfigurationConfigMapKey]), nil +} diff --git a/pkg/util/initsystem/initsystem.go b/pkg/util/initsystem/initsystem.go index e4f8870a364..aca7a41b860 100644 --- a/pkg/util/initsystem/initsystem.go +++ b/pkg/util/initsystem/initsystem.go @@ -29,6 +29,9 @@ type InitSystem interface { // ServiceStop tries to stop a specific service ServiceStop(service string) error + // ServiceRestart tries to reload the environment and restart the specific service + ServiceRestart(service string) error + // ServiceExists ensures the service is defined for this init system. 
ServiceExists(service string) bool @@ -47,6 +50,14 @@ func (sysd SystemdInitSystem) ServiceStart(service string) error { return err } +func (sysd SystemdInitSystem) ServiceRestart(service string) error { + if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { + return fmt.Errorf("failed to reload systemd: %v", err) + } + args := []string{"restart", service} + return exec.Command("systemctl", args...).Run() +} + func (sysd SystemdInitSystem) ServiceStop(service string) error { args := []string{"stop", service} err := exec.Command("systemctl", args...).Run() @@ -95,6 +106,16 @@ func (sysd WindowsInitSystem) ServiceStart(service string) error { return err } +func (sysd WindowsInitSystem) ServiceRestart(service string) error { + if err := sysd.ServiceStop(service); err != nil { + return fmt.Errorf("couldn't stop service: %v", err) + } + if err := sysd.ServiceStart(service); err != nil { + return fmt.Errorf("couldn't start service: %v", err) + } + return nil +} + func (sysd WindowsInitSystem) ServiceStop(service string) error { args := []string{"Stop-Service", service} err := exec.Command("powershell", args...).Run() From 60b0eeb2a4b0f9d63c11e7f7565db20578685ef9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 22 May 2018 09:31:28 +0300 Subject: [PATCH 085/307] autogenerated --- cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD | 2 +- cmd/kubeadm/app/apis/kubeadm/v1alpha2/BUILD | 2 +- cmd/kubeadm/app/cmd/phases/BUILD | 3 +- cmd/kubeadm/app/cmd/upgrade/BUILD | 2 + cmd/kubeadm/app/phases/kubelet/BUILD | 15 +++- cmd/kubeadm/app/phases/upgrade/BUILD | 3 +- cmd/kubeadm/app/util/config/BUILD | 4 ++ .../testdata/defaulting/master/defaulted.yaml | 69 ++++++++++++++++++- docs/.generated_docs | 12 ++-- ...ha_phase_kubelet_enable-dynamic-config.md} | 0 ...eadm_alpha_phase_kubelet_upload-config.md} | 0 ...pha_phase_kubelet_write-config-to-disk.md} | 0 ...pha-phase-kubelet-enable-dynamic-config.1} | 0 ...beadm-alpha-phase-kubelet-upload-config.1} | 0 ...lpha-phase-kubelet-write-config-to-disk.1} | 0 15 files changed, 96 insertions(+), 16 deletions(-) rename docs/admin/{kubeadm_alpha_phase_kubelet_enable.md => kubeadm_alpha_phase_kubelet_enable-dynamic-config.md} (100%) rename docs/admin/{kubeadm_alpha_phase_kubelet_init.md => kubeadm_alpha_phase_kubelet_upload-config.md} (100%) rename docs/admin/{kubeadm_alpha_phase_kubelet_upload.md => kubeadm_alpha_phase_kubelet_write-config-to-disk.md} (100%) rename docs/man/man1/{kubeadm-alpha-phase-kubelet-enable.1 => kubeadm-alpha-phase-kubelet-enable-dynamic-config.1} (100%) rename docs/man/man1/{kubeadm-alpha-phase-kubelet-init.1 => kubeadm-alpha-phase-kubelet-upload-config.1} (100%) rename docs/man/man1/{kubeadm-alpha-phase-kubelet-upload.1 => kubeadm-alpha-phase-kubelet-write-config-to-disk.1} (100%) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD index 6f7bae103df..7b6140cf55c 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD @@ -53,11 +53,11 @@ go_library( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", - "//cmd/kubeadm/app/features:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", + "//pkg/util/pointer:go_default_library", 
"//vendor/github.com/json-iterator/go:go_default_library", "//vendor/github.com/ugorji/go/codec:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/BUILD b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/BUILD index 52fb9b18d37..841c006d451 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/BUILD @@ -51,11 +51,11 @@ go_library( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", - "//cmd/kubeadm/app/features:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/scheme:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", + "//pkg/util/pointer:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", diff --git a/cmd/kubeadm/app/cmd/phases/BUILD b/cmd/kubeadm/app/cmd/phases/BUILD index 28b3fe9e3c1..faa232fb266 100644 --- a/cmd/kubeadm/app/cmd/phases/BUILD +++ b/cmd/kubeadm/app/cmd/phases/BUILD @@ -44,13 +44,12 @@ go_library( "//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/kubeconfig:go_default_library", - "//pkg/util/node:go_default_library", "//pkg/util/normalizer:go_default_library", + "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index f22be4b4435..b9c31b4780e 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -33,6 +33,8 @@ go_library( "//vendor/github.com/pmezard/go-difflib/difflib:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/client-go/discovery/fake:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", diff --git a/cmd/kubeadm/app/phases/kubelet/BUILD b/cmd/kubeadm/app/phases/kubelet/BUILD index e9664603d76..0dc4429f902 100644 --- a/cmd/kubeadm/app/phases/kubelet/BUILD +++ b/cmd/kubeadm/app/phases/kubelet/BUILD @@ -2,11 +2,16 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", - srcs = ["kubelet.go"], + srcs = [ + "config.go", + "dynamic.go", + "flags.go", + ], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet", visibility = ["//visibility:public"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", 
"//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", @@ -14,6 +19,7 @@ go_library( "//pkg/apis/rbac/v1:go_default_library", "//pkg/kubelet/apis/kubeletconfig/scheme:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library", + "//pkg/util/version:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -27,12 +33,15 @@ go_library( go_test( name = "go_default_test", - srcs = ["kubelet_test.go"], + srcs = [ + "config_test.go", + "dynamic_test.go", + ], embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/constants:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library", + "//pkg/util/version:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index aee22935751..7f8d4efac9e 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -4,7 +4,6 @@ go_library( name = "go_default_library", srcs = [ "compute.go", - "configuration.go", "health.go", "policy.go", "postupgrade.go", @@ -28,12 +27,12 @@ go_library( "//cmd/kubeadm/app/phases/certs:go_default_library", "//cmd/kubeadm/app/phases/controlplane:go_default_library", "//cmd/kubeadm/app/phases/etcd:go_default_library", + "//cmd/kubeadm/app/phases/kubelet:go_default_library", "//cmd/kubeadm/app/phases/selfhosting:go_default_library", "//cmd/kubeadm/app/phases/uploadconfig:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", - "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/dryrun:go_default_library", "//cmd/kubeadm/app/util/etcd:go_default_library", "//pkg/util/version:go_default_library", diff --git a/cmd/kubeadm/app/util/config/BUILD b/cmd/kubeadm/app/util/config/BUILD index c86018fc705..02eacd1243f 100644 --- a/cmd/kubeadm/app/util/config/BUILD +++ b/cmd/kubeadm/app/util/config/BUILD @@ -9,6 +9,7 @@ load( go_library( name = "go_default_library", srcs = [ + "cluster.go", "masterconfig.go", "nodeconfig.go", ], @@ -25,8 +26,11 @@ go_library( "//pkg/util/node:go_default_library", "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml index 09506810936..a852a56a357 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml @@ -56,7 +56,74 @@ kubeProxy: portRange: "" resourceContainer: /kube-proxy udpIdleTimeout: 250ms -kubeletConfiguration: {} +kubeletConfiguration: + baseConfig: + address: 0.0.0.0 + authentication: + anonymous: + 
enabled: false + webhook: + cacheTTL: 2m0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 5m0s + cacheUnauthorizedTTL: 30s + cgroupDriver: cgroupfs + cgroupsPerQOS: true + clusterDNS: + - 10.192.0.10 + clusterDomain: cluster.global + containerLogMaxFiles: 5 + containerLogMaxSize: 10Mi + contentType: application/vnd.kubernetes.protobuf + cpuCFSQuota: true + cpuManagerPolicy: none + cpuManagerReconcilePeriod: 10s + enableControllerAttachDetach: true + enableDebuggingHandlers: true + enforceNodeAllocatable: + - pods + eventBurst: 10 + eventRecordQPS: 5 + evictionHard: + imagefs.available: 15% + memory.available: 100Mi + nodefs.available: 10% + nodefs.inodesFree: 5% + evictionPressureTransitionPeriod: 5m0s + failSwapOn: true + fileCheckFrequency: 20s + hairpinMode: promiscuous-bridge + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 20s + imageGCHighThresholdPercent: 85 + imageGCLowThresholdPercent: 80 + imageMinimumGCAge: 2m0s + iptablesDropBit: 15 + iptablesMasqueradeBit: 14 + kubeAPIBurst: 10 + kubeAPIQPS: 5 + makeIPTablesUtilChains: true + maxOpenFiles: 1000000 + maxPods: 110 + nodeStatusUpdateFrequency: 10s + oomScoreAdj: -999 + podPidsLimit: -1 + port: 10250 + registryBurst: 10 + registryPullQPS: 5 + resolvConf: /etc/resolv.conf + runtimeRequestTimeout: 2m0s + serializeImagePulls: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 4h0m0s + syncFrequency: 1m0s + volumeStatsAggPeriod: 1m0s kubernetesVersion: v1.10.2 networking: dnsDomain: cluster.global diff --git a/docs/.generated_docs b/docs/.generated_docs index 7a4aefa192b..98a0042af75 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -46,9 +46,9 @@ docs/admin/kubeadm_alpha_phase_kubeconfig_kubelet.md docs/admin/kubeadm_alpha_phase_kubeconfig_scheduler.md docs/admin/kubeadm_alpha_phase_kubeconfig_user.md docs/admin/kubeadm_alpha_phase_kubelet.md -docs/admin/kubeadm_alpha_phase_kubelet_enable.md -docs/admin/kubeadm_alpha_phase_kubelet_init.md -docs/admin/kubeadm_alpha_phase_kubelet_upload.md +docs/admin/kubeadm_alpha_phase_kubelet_enable-dynamic-config.md +docs/admin/kubeadm_alpha_phase_kubelet_upload-config.md +docs/admin/kubeadm_alpha_phase_kubelet_write-config-to-disk.md docs/admin/kubeadm_alpha_phase_mark-master.md docs/admin/kubeadm_alpha_phase_preflight.md docs/admin/kubeadm_alpha_phase_preflight_master.md @@ -123,9 +123,9 @@ docs/man/man1/kubeadm-alpha-phase-kubeconfig-kubelet.1 docs/man/man1/kubeadm-alpha-phase-kubeconfig-scheduler.1 docs/man/man1/kubeadm-alpha-phase-kubeconfig-user.1 docs/man/man1/kubeadm-alpha-phase-kubeconfig.1 -docs/man/man1/kubeadm-alpha-phase-kubelet-enable.1 -docs/man/man1/kubeadm-alpha-phase-kubelet-init.1 -docs/man/man1/kubeadm-alpha-phase-kubelet-upload.1 +docs/man/man1/kubeadm-alpha-phase-kubelet-enable-dynamic-config.1 +docs/man/man1/kubeadm-alpha-phase-kubelet-upload-config.1 +docs/man/man1/kubeadm-alpha-phase-kubelet-write-config-to-disk.1 docs/man/man1/kubeadm-alpha-phase-kubelet.1 docs/man/man1/kubeadm-alpha-phase-mark-master.1 docs/man/man1/kubeadm-alpha-phase-preflight-master.1 diff --git a/docs/admin/kubeadm_alpha_phase_kubelet_enable.md b/docs/admin/kubeadm_alpha_phase_kubelet_enable-dynamic-config.md similarity index 100% rename from docs/admin/kubeadm_alpha_phase_kubelet_enable.md rename to docs/admin/kubeadm_alpha_phase_kubelet_enable-dynamic-config.md diff --git a/docs/admin/kubeadm_alpha_phase_kubelet_init.md 
b/docs/admin/kubeadm_alpha_phase_kubelet_upload-config.md similarity index 100% rename from docs/admin/kubeadm_alpha_phase_kubelet_init.md rename to docs/admin/kubeadm_alpha_phase_kubelet_upload-config.md diff --git a/docs/admin/kubeadm_alpha_phase_kubelet_upload.md b/docs/admin/kubeadm_alpha_phase_kubelet_write-config-to-disk.md similarity index 100% rename from docs/admin/kubeadm_alpha_phase_kubelet_upload.md rename to docs/admin/kubeadm_alpha_phase_kubelet_write-config-to-disk.md diff --git a/docs/man/man1/kubeadm-alpha-phase-kubelet-enable.1 b/docs/man/man1/kubeadm-alpha-phase-kubelet-enable-dynamic-config.1 similarity index 100% rename from docs/man/man1/kubeadm-alpha-phase-kubelet-enable.1 rename to docs/man/man1/kubeadm-alpha-phase-kubelet-enable-dynamic-config.1 diff --git a/docs/man/man1/kubeadm-alpha-phase-kubelet-init.1 b/docs/man/man1/kubeadm-alpha-phase-kubelet-upload-config.1 similarity index 100% rename from docs/man/man1/kubeadm-alpha-phase-kubelet-init.1 rename to docs/man/man1/kubeadm-alpha-phase-kubelet-upload-config.1 diff --git a/docs/man/man1/kubeadm-alpha-phase-kubelet-upload.1 b/docs/man/man1/kubeadm-alpha-phase-kubelet-write-config-to-disk.1 similarity index 100% rename from docs/man/man1/kubeadm-alpha-phase-kubelet-upload.1 rename to docs/man/man1/kubeadm-alpha-phase-kubelet-write-config-to-disk.1 From a0b9219c4c4fce12d6fdafee1dc38f5097563914 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Pab=C3=B3n?= Date: Tue, 22 May 2018 02:39:34 -0400 Subject: [PATCH 086/307] e2e: Remove flaky from CSI E2E test The tests have been passing consistently and now we can remove the Flaky tag. --- test/e2e/storage/csi_volumes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index 369d480d074..82397eb008c 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -50,7 +50,7 @@ var csiTestDrivers = map[string]func(f *framework.Framework, config framework.Vo "[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD, } -var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() { +var _ = utils.SIGDescribe("CSI Volumes", func() { f := framework.NewDefaultFramework("csi-mock-plugin") var ( From 541edb744811ee0cef82188ba7887e872f8bbc72 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Tue, 22 May 2018 06:26:56 +0000 Subject: [PATCH 087/307] add volumeName in getVolumeSpecFromGlobalMapPath fix test build failure --- pkg/volume/azure_dd/azure_dd_block.go | 8 ++++++-- pkg/volume/azure_dd/azure_dd_block_test.go | 4 ++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/pkg/volume/azure_dd/azure_dd_block.go b/pkg/volume/azure_dd/azure_dd_block.go index 6a02e786235..9ded780ef46 100644 --- a/pkg/volume/azure_dd/azure_dd_block.go +++ b/pkg/volume/azure_dd/azure_dd_block.go @@ -22,6 +22,7 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" @@ -49,10 +50,10 @@ func (plugin *azureDataDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, vo return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) } - return getVolumeSpecFromGlobalMapPath(globalMapPath) + return getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName) } -func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) { +func getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName 
string) (*volume.Spec, error) { // Get volume spec information from globalMapPath // globalMapPath example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID} @@ -64,6 +65,9 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) glog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName) block := v1.PersistentVolumeBlock pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeName, + }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ diff --git a/pkg/volume/azure_dd/azure_dd_block_test.go b/pkg/volume/azure_dd/azure_dd_block_test.go index 127870f707d..1951707470d 100644 --- a/pkg/volume/azure_dd/azure_dd_block_test.go +++ b/pkg/volume/azure_dd/azure_dd_block_test.go @@ -50,13 +50,13 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) { expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath) //Bad Path - badspec, err := getVolumeSpecFromGlobalMapPath("") + badspec, err := getVolumeSpecFromGlobalMapPath("", "") if badspec != nil || err == nil { t.Errorf("Expected not to get spec from GlobalMapPath but did") } // Good Path - spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath) + spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath, "") if spec == nil || err != nil { t.Fatalf("Failed to get spec from GlobalMapPath: %v", err) } From 027d15e58c05059cf1e6fdbf202b95b9f962ae2c Mon Sep 17 00:00:00 2001 From: PhilipGough Date: Sun, 20 May 2018 10:55:47 +0100 Subject: [PATCH 088/307] Allow env from resource with keys & updated tests --- hack/make-rules/test-cmd-util.sh | 14 ++- pkg/kubectl/cmd/set/set_env.go | 63 ++++++++---- pkg/kubectl/cmd/set/set_env_test.go | 147 ++++++++++++++++++++++++++++ 3 files changed, 203 insertions(+), 21 deletions(-) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index bba8d4656cc..6c0860d5b52 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -3174,12 +3174,22 @@ run_deployment_tests() { kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:' kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-config:' kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:' + # Set env of deployments by configmap from keys + kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]}" + # Assert correct value in deployment env + kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2' + # Assert single value in deployment env + kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1' + # Set env of deployments by configmap + kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}" + # Assert all values in deployment env + kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2' # Set env of deployments for all container kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}" # Set env of deployments for specific container kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}" - # Set env of deployments by configmap - kubectl set env deployment 
nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}" + # Set env of deployments by secret from keys + kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]}" # Set env of deployments by secret kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}" # Remove specific env of deployment diff --git a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index 87a83c7a10d..7a8b6f5fb1b 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -61,7 +61,7 @@ var ( ` + envResources) envExample = templates.Examples(` - # Update deployment 'registry' with a new environment variable + # Update deployment 'registry' with a new environment variable kubectl set env deployment/registry STORAGE_DIR=/local # List the environment variables defined on a deployments 'sample-build' @@ -82,6 +82,9 @@ var ( # Import environment from a config map with a prefix kubectl set env --from=configmap/myconfigmap --prefix=MYSQL_ deployment/myapp + # Import specific keys from a config map + kubectl set env --keys=my-example-key --from=configmap/myconfigmap deployment/myapp + # Remove the environment variable ENV from container 'c1' in all deployment configs kubectl set env deployments --all --containers="c1" ENV- @@ -107,6 +110,7 @@ type EnvOptions struct { Selector string From string Prefix string + Keys []string PrintObj printers.ResourcePrinterFunc @@ -157,6 +161,7 @@ func NewCmdEnv(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Co cmd.Flags().StringVarP(&o.From, "from", "", "", "The name of a resource from which to inject environment variables") cmd.Flags().StringVarP(&o.Prefix, "prefix", "", "", "Prefix to append to variable names") cmd.Flags().StringArrayVarP(&o.EnvParams, "env", "e", o.EnvParams, "Specify a key-value pair for an environment variable to set into each container.") + cmd.Flags().StringSliceVarP(&o.Keys, "keys", "", o.Keys, "Comma-separated list of keys to import from specified resource") cmd.Flags().BoolVar(&o.List, "list", o.List, "If true, display the environment and any changes in the standard format. 
this flag will removed when we have kubectl view env.") cmd.Flags().BoolVar(&o.Resolve, "resolve", o.Resolve, "If true, show secret or configmap references when listing variables") cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on") @@ -183,6 +188,19 @@ func keyToEnvName(key string) string { return strings.ToUpper(validEnvNameRegexp.ReplaceAllString(key, "_")) } +func contains(key string, keyList []string) bool { + if len(keyList) == 0 { + return true + } + + for _, k := range keyList { + if k == key { + return true + } + } + return false +} + func (o *EnvOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { if o.All && len(o.Selector) > 0 { return fmt.Errorf("cannot set --all and --selector at the same time") @@ -230,6 +248,9 @@ func (o *EnvOptions) Validate() error { if o.List && len(o.output) > 0 { return fmt.Errorf("--list and --output may not be specified together") } + if len(o.Keys) > 0 && len(o.From) == 0 { + return fmt.Errorf("when specifying --keys, a configmap or secret must be provided with --from") + } return nil } @@ -265,33 +286,37 @@ func (o *EnvOptions) RunEnv() error { switch from := info.Object.(type) { case *v1.Secret: for key := range from.Data { - envVar := v1.EnvVar{ - Name: keyToEnvName(key), - ValueFrom: &v1.EnvVarSource{ - SecretKeyRef: &v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: from.Name, + if contains(key, o.Keys) { + envVar := v1.EnvVar{ + Name: keyToEnvName(key), + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: from.Name, + }, + Key: key, }, - Key: key, }, - }, + } + env = append(env, envVar) } - env = append(env, envVar) } case *v1.ConfigMap: for key := range from.Data { - envVar := v1.EnvVar{ - Name: keyToEnvName(key), - ValueFrom: &v1.EnvVarSource{ - ConfigMapKeyRef: &v1.ConfigMapKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: from.Name, + if contains(key, o.Keys) { + envVar := v1.EnvVar{ + Name: keyToEnvName(key), + ValueFrom: &v1.EnvVarSource{ + ConfigMapKeyRef: &v1.ConfigMapKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: from.Name, + }, + Key: key, }, - Key: key, }, - }, + } + env = append(env, envVar) } - env = append(env, envVar) } default: return fmt.Errorf("unsupported resource specified in --from") diff --git a/pkg/kubectl/cmd/set/set_env_test.go b/pkg/kubectl/cmd/set/set_env_test.go index c4d453b83f5..d5d73a5163a 100644 --- a/pkg/kubectl/cmd/set/set_env_test.go +++ b/pkg/kubectl/cmd/set/set_env_test.go @@ -492,3 +492,150 @@ func TestSetEnvRemote(t *testing.T) { }) } } + +func TestSetEnvFromResource(t *testing.T) { + mockConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "testconfigmap"}, + Data: map[string]string{ + "env": "prod", + "test-key": "testValue", + "test-key-two": "testValueTwo", + }, + } + + mockSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "testsecret"}, + Data: map[string][]byte{ + "env": []byte("prod"), + "test-key": []byte("testValue"), + "test-key-two": []byte("testValueTwo"), + }, + } + + inputs := []struct { + name string + args []string + from string + keys []string + assertIncludes []string + assertExcludes []string + }{ + { + name: "test from configmap", + args: []string{"deployment", "nginx"}, + from: "configmap/testconfigmap", + keys: []string{}, + assertIncludes: []string{ + `{"name":"ENV","valueFrom":{"configMapKeyRef":{"key":"env","name":"testconfigmap"}}}`, + 
`{"name":"TEST_KEY","valueFrom":{"configMapKeyRef":{"key":"test-key","name":"testconfigmap"}}}`, + `{"name":"TEST_KEY_TWO","valueFrom":{"configMapKeyRef":{"key":"test-key-two","name":"testconfigmap"}}}`, + }, + assertExcludes: []string{}, + }, + { + name: "test from secret", + args: []string{"deployment", "nginx"}, + from: "secret/testsecret", + keys: []string{}, + assertIncludes: []string{ + `{"name":"ENV","valueFrom":{"secretKeyRef":{"key":"env","name":"testsecret"}}}`, + `{"name":"TEST_KEY","valueFrom":{"secretKeyRef":{"key":"test-key","name":"testsecret"}}}`, + `{"name":"TEST_KEY_TWO","valueFrom":{"secretKeyRef":{"key":"test-key-two","name":"testsecret"}}}`, + }, + assertExcludes: []string{}, + }, + { + name: "test from configmap with keys", + args: []string{"deployment", "nginx"}, + from: "configmap/testconfigmap", + keys: []string{"env", "test-key-two"}, + assertIncludes: []string{ + `{"name":"ENV","valueFrom":{"configMapKeyRef":{"key":"env","name":"testconfigmap"}}}`, + `{"name":"TEST_KEY_TWO","valueFrom":{"configMapKeyRef":{"key":"test-key-two","name":"testconfigmap"}}}`, + }, + assertExcludes: []string{`{"name":"TEST_KEY","valueFrom":{"configMapKeyRef":{"key":"test-key","name":"testconfigmap"}}}`}, + }, + { + name: "test from secret with keys", + args: []string{"deployment", "nginx"}, + from: "secret/testsecret", + keys: []string{"env", "test-key-two"}, + assertIncludes: []string{ + `{"name":"ENV","valueFrom":{"secretKeyRef":{"key":"env","name":"testsecret"}}}`, + `{"name":"TEST_KEY_TWO","valueFrom":{"secretKeyRef":{"key":"test-key-two","name":"testsecret"}}}`, + }, + assertExcludes: []string{`{"name":"TEST_KEY","valueFrom":{"secretKeyRef":{"key":"test-key","name":"testsecret"}}}`}, + }, + } + + for _, input := range inputs { + mockDeployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "nginx"}, + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + t.Run(input.name, func(t *testing.T) { + tf := cmdtesting.NewTestFactory() + defer tf.Cleanup() + + tf.Namespace = "test" + tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} + tf.Client = &fake.RESTClient{ + GroupVersion: schema.GroupVersion{Group: "", Version: "v1"}, + NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/configmaps/testconfigmap" && m == http.MethodGet: + return &http.Response{StatusCode: http.StatusOK, Header: defaultHeader(), Body: objBody(mockConfigMap)}, nil + case p == "/namespaces/test/secrets/testsecret" && m == http.MethodGet: + return &http.Response{StatusCode: http.StatusOK, Header: defaultHeader(), Body: objBody(mockSecret)}, nil + case p == "/namespaces/test/deployments/nginx" && m == http.MethodGet: + return &http.Response{StatusCode: http.StatusOK, Header: defaultHeader(), Body: objBody(mockDeployment)}, nil + case p == "/namespaces/test/deployments/nginx" && m == http.MethodPatch: + stream, err := req.GetBody() + if err != nil { + return nil, err + } + bytes, err := ioutil.ReadAll(stream) + if err != nil { + return nil, err + } + for _, include := range input.assertIncludes { + assert.Contains(t, string(bytes), include) + } + for _, exclude := range input.assertExcludes { + assert.NotContains(t, 
string(bytes), exclude) + } + return &http.Response{StatusCode: http.StatusOK, Header: defaultHeader(), Body: objBody(mockDeployment)}, nil + default: + t.Errorf("%s: unexpected request: %#v\n%#v", input.name, req.URL, req) + return nil, nil + } + }), + } + + outputFormat := "yaml" + streams := genericclioptions.NewTestIOStreamsDiscard() + opts := NewEnvOptions(streams) + opts.From = input.from + opts.Keys = input.keys + opts.PrintFlags = genericclioptions.NewPrintFlags("").WithDefaultOutput(outputFormat).WithTypeSetter(scheme.Scheme) + opts.Local = false + opts.IOStreams = streams + err := opts.Complete(tf, NewCmdEnv(tf, streams), input.args) + assert.NoError(t, err) + err = opts.RunEnv() + assert.NoError(t, err) + }) + } +} From 275a4bf91fe12181f124baf198f49cb935806ab5 Mon Sep 17 00:00:00 2001 From: "Da K. Ma" Date: Tue, 22 May 2018 16:41:43 +0800 Subject: [PATCH 089/307] Simplify the volume util by v1helper. Signed-off-by: Da K. Ma --- pkg/volume/util/util.go | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index a89260a7285..663d8d62946 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -278,18 +278,11 @@ func checkVolumeNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]stri if pv.Spec.NodeAffinity.Required != nil { terms := pv.Spec.NodeAffinity.Required.NodeSelectorTerms glog.V(10).Infof("Match for Required node selector terms %+v", terms) - for _, term := range terms { - selector, err := v1helper.NodeSelectorRequirementsAsSelector(term.MatchExpressions) - if err != nil { - return fmt.Errorf("Failed to parse MatchExpressions: %v", err) - } - if selector.Matches(labels.Set(nodeLabels)) { - // Terms are ORed, so only one needs to match - return nil - } + if !v1helper.MatchNodeSelectorTerms(terms, labels.Set(nodeLabels), nil) { + return fmt.Errorf("No matching NodeSelectorTerms") } - return fmt.Errorf("No matching NodeSelectorTerms") } + return nil } From a29a1cbb955e993300c9388a218a4f323f3fdf8f Mon Sep 17 00:00:00 2001 From: Gregory Man Date: Thu, 19 Oct 2017 18:31:46 +0300 Subject: [PATCH 090/307] Added unit tests to sample-controller Added unit tests. Changed NewController function to accept informers and not informers factory. This make code more testable and align it with other controllers. 
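Concretely, after this change the caller builds the typed informers itself and hands them to NewController, roughly like the sketch below (variable names and the 30-second resync period are illustrative assumptions, not taken verbatim from this patch):

    import (
        "time"

        kubeinformers "k8s.io/client-go/informers"
        informers "k8s.io/sample-controller/pkg/client/informers/externalversions"
    )

    // The caller owns the shared informer factories...
    kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
    exampleInformerFactory := informers.NewSharedInformerFactory(exampleClient, time.Second*30)

    // ...and passes only the typed informers to the controller, which is what
    // lets unit tests feed it fake informers directly.
    controller := NewController(kubeClient, exampleClient,
        kubeInformerFactory.Apps().V1().Deployments(),
        exampleInformerFactory.Samplecontroller().V1alpha1().Foos())

    kubeInformerFactory.Start(stopCh)
    exampleInformerFactory.Start(stopCh)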
--- staging/src/k8s.io/sample-controller/BUILD | 25 +- .../sample-controller/Godeps/Godeps.json | 120 +++++++ .../k8s.io/sample-controller/controller.go | 13 +- .../sample-controller/controller_test.go | 313 ++++++++++++++++++ staging/src/k8s.io/sample-controller/main.go | 4 +- 5 files changed, 464 insertions(+), 11 deletions(-) create mode 100644 staging/src/k8s.io/sample-controller/controller_test.go diff --git a/staging/src/k8s.io/sample-controller/BUILD b/staging/src/k8s.io/sample-controller/BUILD index 22f5196260f..88eca68da52 100644 --- a/staging/src/k8s.io/sample-controller/BUILD +++ b/staging/src/k8s.io/sample-controller/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test") go_library( name = "go_default_library", @@ -18,6 +18,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", @@ -30,6 +31,7 @@ go_library( "//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned:go_default_library", "//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme:go_default_library", "//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions:go_default_library", + "//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1:go_default_library", "//vendor/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1:go_default_library", "//vendor/k8s.io/sample-controller/pkg/signals:go_default_library", ], @@ -61,3 +63,24 @@ filegroup( tags = ["automanaged"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_test", + srcs = ["controller_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/tools/record:go_default_library", + "//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library", + "//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned/fake:go_default_library", + "//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions:go_default_library", + ], +) diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index a3817172799..520574a0a56 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -626,6 +626,10 @@ "ImportPath": "k8s.io/client-go/kubernetes", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/fake", + "Rev": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/scheme", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -634,118 +638,234 @@ "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake", + "Rev": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/networking/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": 
"k8s.io/client-go/kubernetes/typed/storage/v1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/listers/admissionregistration/v1alpha1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/sample-controller/controller.go b/staging/src/k8s.io/sample-controller/controller.go index c3fa44bc215..3159e7bcb0e 100644 --- a/staging/src/k8s.io/sample-controller/controller.go +++ b/staging/src/k8s.io/sample-controller/controller.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - kubeinformers "k8s.io/client-go/informers" + appsinformers "k8s.io/client-go/informers/apps/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -40,7 +40,7 @@ import ( samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1" clientset "k8s.io/sample-controller/pkg/client/clientset/versioned" samplescheme "k8s.io/sample-controller/pkg/client/clientset/versioned/scheme" - informers "k8s.io/sample-controller/pkg/client/informers/externalversions" + informers "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1" listers "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1" ) @@ -88,13 +88,8 @@ type Controller struct { func NewController( kubeclientset kubernetes.Interface, sampleclientset clientset.Interface, - kubeInformerFactory kubeinformers.SharedInformerFactory, - sampleInformerFactory informers.SharedInformerFactory) *Controller { - - // obtain references to shared index informers for the Deployment and Foo - // types. - deploymentInformer := kubeInformerFactory.Apps().V1().Deployments() - fooInformer := sampleInformerFactory.Samplecontroller().V1alpha1().Foos() + deploymentInformer appsinformers.DeploymentInformer, + fooInformer informers.FooInformer) *Controller { // Create event broadcaster // Add sample-controller types to the default Kubernetes Scheme so Events can be diff --git a/staging/src/k8s.io/sample-controller/controller_test.go b/staging/src/k8s.io/sample-controller/controller_test.go new file mode 100644 index 00000000000..8eea816131e --- /dev/null +++ b/staging/src/k8s.io/sample-controller/controller_test.go @@ -0,0 +1,313 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "reflect" + "testing" + "time" + + apps "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + kubeinformers "k8s.io/client-go/informers" + k8sfake "k8s.io/client-go/kubernetes/fake" + core "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + + samplecontroller "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1" + "k8s.io/sample-controller/pkg/client/clientset/versioned/fake" + informers "k8s.io/sample-controller/pkg/client/informers/externalversions" +) + +var ( + alwaysReady = func() bool { return true } + noResyncPeriodFunc = func() time.Duration { return 0 } +) + +type fixture struct { + t *testing.T + + client *fake.Clientset + kubeclient *k8sfake.Clientset + // Objects to put in the store. + fooLister []*samplecontroller.Foo + deploymentLister []*apps.Deployment + // Actions expected to happen on the client. + kubeactions []core.Action + actions []core.Action + // Objects from here preloaded into NewSimpleFake. + kubeobjects []runtime.Object + objects []runtime.Object +} + +func newFixture(t *testing.T) *fixture { + f := &fixture{} + f.t = t + f.objects = []runtime.Object{} + f.kubeobjects = []runtime.Object{} + return f +} + +func newFoo(name string, replicas *int32) *samplecontroller.Foo { + return &samplecontroller.Foo{ + TypeMeta: metav1.TypeMeta{APIVersion: samplecontroller.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + }, + Spec: samplecontroller.FooSpec{ + DeploymentName: fmt.Sprintf("%s-deployment", name), + Replicas: replicas, + }, + } +} + +func (f *fixture) newController() (*Controller, informers.SharedInformerFactory, kubeinformers.SharedInformerFactory) { + f.client = fake.NewSimpleClientset(f.objects...) + f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...) 
+ + i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc()) + k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc()) + + c := NewController(f.kubeclient, f.client, + k8sI.Apps().V1().Deployments(), i.Samplecontroller().V1alpha1().Foos()) + + c.foosSynced = alwaysReady + c.deploymentsSynced = alwaysReady + c.recorder = &record.FakeRecorder{} + + for _, f := range f.fooLister { + i.Samplecontroller().V1alpha1().Foos().Informer().GetIndexer().Add(f) + } + + for _, d := range f.deploymentLister { + k8sI.Apps().V1().Deployments().Informer().GetIndexer().Add(d) + } + + return c, i, k8sI +} + +func (f *fixture) run(fooName string) { + f.runController(fooName, true, false) +} + +func (f *fixture) runExpectError(fooName string) { + f.runController(fooName, true, true) +} + +func (f *fixture) runController(fooName string, startInformers bool, expectError bool) { + c, i, k8sI := f.newController() + if startInformers { + stopCh := make(chan struct{}) + defer close(stopCh) + i.Start(stopCh) + k8sI.Start(stopCh) + } + + err := c.syncHandler(fooName) + if !expectError && err != nil { + f.t.Errorf("error syncing foo: %v", err) + } else if expectError && err == nil { + f.t.Error("expected error syncing foo, got nil") + } + + actions := filterInformerActions(f.client.Actions()) + for i, action := range actions { + if len(f.actions) < i+1 { + f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:]) + break + } + + expectedAction := f.actions[i] + checkAction(expectedAction, action, f.t) + } + + if len(f.actions) > len(actions) { + f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):]) + } + + k8sActions := filterInformerActions(f.kubeclient.Actions()) + for i, action := range k8sActions { + if len(f.kubeactions) < i+1 { + f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:]) + break + } + + expectedAction := f.kubeactions[i] + checkAction(expectedAction, action, f.t) + } + + if len(f.kubeactions) > len(k8sActions) { + f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):]) + } +} + +// checkAction verifies that expected and actual actions are equal and both have +// same attached resources +func checkAction(expected, actual core.Action, t *testing.T) { + if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) { + t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual) + return + } + + if reflect.TypeOf(actual) != reflect.TypeOf(expected) { + t.Errorf("Action has wrong type. Expected: %t. 
Got: %t", expected, actual) + return + } + + switch a := actual.(type) { + case core.CreateAction: + e, _ := expected.(core.CreateAction) + expObject := e.GetObject() + object := a.GetObject() + + if !reflect.DeepEqual(expObject, object) { + t.Errorf("Action %s %s has wrong object\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintDiff(expObject, object)) + } + case core.UpdateAction: + e, _ := expected.(core.UpdateAction) + expObject := e.GetObject() + object := a.GetObject() + + if !reflect.DeepEqual(expObject, object) { + t.Errorf("Action %s %s has wrong object\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintDiff(expObject, object)) + } + case core.PatchAction: + e, _ := expected.(core.PatchAction) + expPatch := e.GetPatch() + patch := a.GetPatch() + + if !reflect.DeepEqual(expPatch, patch) { + t.Errorf("Action %s %s has wrong patch\nDiff:\n %s", + a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintDiff(expPatch, patch)) + } + } +} + +// filterInformerActions filters list and watch actions for testing resources. +// Since list and watch don't change resource state we can filter it to lower +// noise level in our tests. +func filterInformerActions(actions []core.Action) []core.Action { + ret := []core.Action{} + for _, action := range actions { + if len(action.GetNamespace()) == 0 && + (action.Matches("list", "foos") || + action.Matches("watch", "foos") || + action.Matches("list", "deployments") || + action.Matches("watch", "deployments")) { + continue + } + ret = append(ret, action) + } + + return ret +} + +func (f *fixture) expectCreateDeploymentAction(d *apps.Deployment) { + f.kubeactions = append(f.kubeactions, core.NewCreateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)) +} + +func (f *fixture) expectUpdateDeploymentAction(d *apps.Deployment) { + f.kubeactions = append(f.kubeactions, core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)) +} + +func (f *fixture) expectUpdateFooStatusAction(foo *samplecontroller.Foo) { + action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "foos"}, foo.Namespace, foo) + // TODO: Until #38113 is merged, we can't use Subresource + //action.Subresource = "status" + f.actions = append(f.actions, action) +} + +func getKey(foo *samplecontroller.Foo, t *testing.T) string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(foo) + if err != nil { + t.Errorf("Unexpected error getting key for foo %v: %v", foo.Name, err) + return "" + } + return key +} + +func TestCreatesDeployment(t *testing.T) { + f := newFixture(t) + foo := newFoo("test", int32Ptr(1)) + + f.fooLister = append(f.fooLister, foo) + f.objects = append(f.objects, foo) + + expDeployment := newDeployment(foo) + f.expectCreateDeploymentAction(expDeployment) + f.expectUpdateFooStatusAction(foo) + + f.run(getKey(foo, t)) +} + +func TestDoNothing(t *testing.T) { + f := newFixture(t) + foo := newFoo("test", int32Ptr(1)) + d := newDeployment(foo) + + f.fooLister = append(f.fooLister, foo) + f.objects = append(f.objects, foo) + f.deploymentLister = append(f.deploymentLister, d) + f.kubeobjects = append(f.kubeobjects, d) + + f.expectUpdateFooStatusAction(foo) + f.run(getKey(foo, t)) +} + +func TestUpdateDeployment(t *testing.T) { + f := newFixture(t) + foo := newFoo("test", int32Ptr(1)) + d := newDeployment(foo) + + // Update replicas + foo.Spec.Replicas = int32Ptr(2) + expDeployment := newDeployment(foo) + + f.fooLister = append(f.fooLister, foo) + f.objects =
append(f.objects, foo) + f.deploymentLister = append(f.deploymentLister, d) + f.kubeobjects = append(f.kubeobjects, d) + + f.expectUpdateFooStatusAction(foo) + f.expectUpdateDeploymentAction(expDeployment) + f.run(getKey(foo, t)) +} + +func TestNotControlledByUs(t *testing.T) { + f := newFixture(t) + foo := newFoo("test", int32Ptr(1)) + d := newDeployment(foo) + + d.ObjectMeta.OwnerReferences = []metav1.OwnerReference{} + + f.fooLister = append(f.fooLister, foo) + f.objects = append(f.objects, foo) + f.deploymentLister = append(f.deploymentLister, d) + f.kubeobjects = append(f.kubeobjects, d) + + f.runExpectError(getKey(foo, t)) +} + +func int32Ptr(i int32) *int32 { return &i } diff --git a/staging/src/k8s.io/sample-controller/main.go b/staging/src/k8s.io/sample-controller/main.go index d3514fc3a5d..026083f331a 100644 --- a/staging/src/k8s.io/sample-controller/main.go +++ b/staging/src/k8s.io/sample-controller/main.go @@ -61,7 +61,9 @@ func main() { kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) exampleInformerFactory := informers.NewSharedInformerFactory(exampleClient, time.Second*30) - controller := NewController(kubeClient, exampleClient, kubeInformerFactory, exampleInformerFactory) + controller := NewController(kubeClient, exampleClient, + kubeInformerFactory.Apps().V1().Deployments(), + exampleInformerFactory.Samplecontroller().V1alpha1().Foos()) go kubeInformerFactory.Start(stopCh) go exampleInformerFactory.Start(stopCh) From 62e3285cf8e2deb7d0cc39ea12b1cf2df8ae6c90 Mon Sep 17 00:00:00 2001 From: liangwei Date: Tue, 22 May 2018 14:10:37 +0800 Subject: [PATCH 091/307] ipvs lb local session affinity --- pkg/proxy/ipvs/proxier.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index ff0930c6fee..97698e0c1d1 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -856,7 +856,7 @@ func (proxier *Proxier) syncProxyRules() { } if err := proxier.syncService(svcNameString, serv, true); err == nil { activeIPVSServices[serv.String()] = true - if err := proxier.syncEndpoint(svcName, svcInfo.OnlyNodeLocalEndpoints, serv); err != nil { + if err := proxier.syncEndpoint(svcName, false, serv); err != nil { glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { @@ -955,8 +955,10 @@ func (proxier *Proxier) syncProxyRules() { serv.Timeout = uint32(svcInfo.StickyMaxAgeSeconds) } if err := proxier.syncService(svcNameString, serv, true); err == nil { + // check if service need skip endpoints that not in same host as kube-proxy + onlyLocal := svcInfo.SessionAffinityType == api.ServiceAffinityClientIP && svcInfo.OnlyNodeLocalEndpoints activeIPVSServices[serv.String()] = true - if err := proxier.syncEndpoint(svcName, svcInfo.OnlyNodeLocalEndpoints, serv); err != nil { + if err := proxier.syncEndpoint(svcName, onlyLocal, serv); err != nil { glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { @@ -1084,7 +1086,7 @@ func (proxier *Proxier) syncProxyRules() { // There is no need to bind Node IP to dummy interface, so set parameter `bindAddr` to `false`. 
if err := proxier.syncService(svcNameString, serv, false); err == nil { activeIPVSServices[serv.String()] = true - if err := proxier.syncEndpoint(svcName, svcInfo.OnlyNodeLocalEndpoints, serv); err != nil { + if err := proxier.syncEndpoint(svcName, false, serv); err != nil { glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { @@ -1477,6 +1479,9 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode } for _, epInfo := range proxier.endpointsMap[svcPortName] { + if onlyNodeLocalEndpoints && !epInfo.GetIsLocal() { + continue + } newEndpoints.Insert(epInfo.String()) } From 092714ea0fab9960f6ac452f49f2576d677f1f9e Mon Sep 17 00:00:00 2001 From: David Eads Date: Thu, 19 Apr 2018 07:57:45 -0400 Subject: [PATCH 092/307] switch rbac to external --- pkg/apis/rbac/helpers.go | 61 ---- pkg/apis/rbac/v1/BUILD | 2 + pkg/apis/rbac/v1/doc.go | 1 + pkg/apis/rbac/v1/evaluation_helpers.go | 179 ++++++++++ pkg/apis/rbac/v1/helpers.go | 103 +++++- pkg/apis/rbac/v1/zz_generated.deepcopy.go | 21 ++ pkg/kubeapiserver/authorizer/config.go | 8 +- pkg/kubectl/cmd/auth/BUILD | 7 +- pkg/kubectl/cmd/auth/reconcile.go | 37 +- pkg/printers/internalversion/BUILD | 2 + pkg/printers/internalversion/describe.go | 26 +- pkg/registry/rbac/clusterrole/BUILD | 4 +- .../rbac/clusterrole/policybased/storage.go | 8 +- pkg/registry/rbac/clusterrole/registry.go | 50 +-- pkg/registry/rbac/clusterrolebinding/BUILD | 4 +- .../rbac/clusterrolebinding/policybased/BUILD | 2 + .../clusterrolebinding/policybased/storage.go | 16 +- .../rbac/clusterrolebinding/registry.go | 48 +-- pkg/registry/rbac/reconciliation/BUILD | 10 +- .../reconciliation/clusterrole_interfaces.go | 16 +- .../clusterrolebinding_interfaces.go | 14 +- .../rbac/reconciliation/reconcile_role.go | 22 +- .../reconciliation/reconcile_role_test.go | 40 +-- .../reconciliation/reconcile_rolebindings.go | 18 +- .../reconcile_rolebindings_test.go | 32 +- .../rbac/reconciliation/role_interfaces.go | 24 +- .../reconciliation/rolebinding_interfaces.go | 22 +- .../reconciliation/zz_generated.deepcopy.go | 10 +- pkg/registry/rbac/rest/BUILD | 4 +- pkg/registry/rbac/rest/storage_rbac.go | 18 +- pkg/registry/rbac/role/BUILD | 4 +- pkg/registry/rbac/role/policybased/storage.go | 4 +- pkg/registry/rbac/role/registry.go | 51 +-- pkg/registry/rbac/rolebinding/BUILD | 4 +- .../rbac/rolebinding/policybased/BUILD | 2 + .../rbac/rolebinding/policybased/storage.go | 16 +- pkg/registry/rbac/rolebinding/registry.go | 49 +-- pkg/registry/rbac/validation/BUILD | 6 +- .../validation/internal_version_adapter.go | 39 +++ .../rbac/validation/policy_compact.go | 12 +- .../rbac/validation/policy_compact_test.go | 63 ++-- .../rbac/validation/policy_comparator.go | 26 +- .../rbac/validation/policy_comparator_test.go | 148 ++++---- pkg/registry/rbac/validation/rule.go | 78 ++--- pkg/registry/rbac/validation/rule_test.go | 104 +++--- plugin/pkg/auth/authorizer/node/BUILD | 2 +- .../auth/authorizer/node/node_authorizer.go | 6 +- plugin/pkg/auth/authorizer/rbac/BUILD | 8 +- .../authorizer/rbac/bootstrappolicy/BUILD | 5 +- .../rbac/bootstrappolicy/controller_policy.go | 327 +++++++++--------- .../rbac/bootstrappolicy/namespace_policy.go | 75 ++-- .../authorizer/rbac/bootstrappolicy/policy.go | 301 ++++++++-------- .../rbac/bootstrappolicy/policy_test.go | 42 +-- plugin/pkg/auth/authorizer/rbac/rbac.go | 38 +- plugin/pkg/auth/authorizer/rbac/rbac_test.go | 97 +++--- .../auth/authorizer/rbac/subject_locator.go | 14 +- 
.../authorizer/rbac/subject_locator_test.go | 88 ++--- test/integration/framework/master_utils.go | 3 + 58 files changed, 1319 insertions(+), 1102 deletions(-) create mode 100644 pkg/apis/rbac/v1/evaluation_helpers.go create mode 100644 pkg/apis/rbac/v1/zz_generated.deepcopy.go create mode 100644 pkg/registry/rbac/validation/internal_version_adapter.go diff --git a/pkg/apis/rbac/helpers.go b/pkg/apis/rbac/helpers.go index 373711500b1..b7b4b78c819 100644 --- a/pkg/apis/rbac/helpers.go +++ b/pkg/apis/rbac/helpers.go @@ -21,40 +21,9 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" ) -func RoleRefGroupKind(roleRef RoleRef) schema.GroupKind { - return schema.GroupKind{Group: roleRef.APIGroup, Kind: roleRef.Kind} -} - -func VerbMatches(rule *PolicyRule, requestedVerb string) bool { - for _, ruleVerb := range rule.Verbs { - if ruleVerb == VerbAll { - return true - } - if ruleVerb == requestedVerb { - return true - } - } - - return false -} - -func APIGroupMatches(rule *PolicyRule, requestedGroup string) bool { - for _, ruleGroup := range rule.APIGroups { - if ruleGroup == APIGroupAll { - return true - } - if ruleGroup == requestedGroup { - return true - } - } - - return false -} - func ResourceMatches(rule *PolicyRule, combinedRequestedResource, requestedSubresource string) bool { for _, ruleResource := range rule.Resources { // if everything is allowed, we match @@ -83,36 +52,6 @@ func ResourceMatches(rule *PolicyRule, combinedRequestedResource, requestedSubre return false } -func ResourceNameMatches(rule *PolicyRule, requestedName string) bool { - if len(rule.ResourceNames) == 0 { - return true - } - - for _, ruleName := range rule.ResourceNames { - if ruleName == requestedName { - return true - } - } - - return false -} - -func NonResourceURLMatches(rule *PolicyRule, requestedURL string) bool { - for _, ruleURL := range rule.NonResourceURLs { - if ruleURL == NonResourceAll { - return true - } - if ruleURL == requestedURL { - return true - } - if strings.HasSuffix(ruleURL, "*") && strings.HasPrefix(requestedURL, strings.TrimRight(ruleURL, "*")) { - return true - } - } - - return false -} - // subjectsStrings returns users, groups, serviceaccounts, unknown for display purposes. func SubjectsStrings(subjects []Subject) ([]string, []string, []string, []string) { users := []string{} diff --git a/pkg/apis/rbac/v1/BUILD b/pkg/apis/rbac/v1/BUILD index 5aa5b5fc768..9eb4029fcf2 100644 --- a/pkg/apis/rbac/v1/BUILD +++ b/pkg/apis/rbac/v1/BUILD @@ -10,9 +10,11 @@ go_library( srcs = [ "defaults.go", "doc.go", + "evaluation_helpers.go", "helpers.go", "register.go", "zz_generated.conversion.go", + "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], importpath = "k8s.io/kubernetes/pkg/apis/rbac/v1", diff --git a/pkg/apis/rbac/v1/doc.go b/pkg/apis/rbac/v1/doc.go index 1668eabe3b8..5608caba705 100644 --- a/pkg/apis/rbac/v1/doc.go +++ b/pkg/apis/rbac/v1/doc.go @@ -18,6 +18,7 @@ limitations under the License. 
// +k8s:conversion-gen-external-types=k8s.io/api/rbac/v1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1 +// +k8s:deepcopy-gen=package // +groupName=rbac.authorization.k8s.io package v1 // import "k8s.io/kubernetes/pkg/apis/rbac/v1" diff --git a/pkg/apis/rbac/v1/evaluation_helpers.go b/pkg/apis/rbac/v1/evaluation_helpers.go new file mode 100644 index 00000000000..3707760bf5b --- /dev/null +++ b/pkg/apis/rbac/v1/evaluation_helpers.go @@ -0,0 +1,179 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "strings" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func RoleRefGroupKind(roleRef rbacv1.RoleRef) schema.GroupKind { + return schema.GroupKind{Group: roleRef.APIGroup, Kind: roleRef.Kind} +} + +func VerbMatches(rule *rbacv1.PolicyRule, requestedVerb string) bool { + for _, ruleVerb := range rule.Verbs { + if ruleVerb == rbacv1.VerbAll { + return true + } + if ruleVerb == requestedVerb { + return true + } + } + + return false +} + +func APIGroupMatches(rule *rbacv1.PolicyRule, requestedGroup string) bool { + for _, ruleGroup := range rule.APIGroups { + if ruleGroup == rbacv1.APIGroupAll { + return true + } + if ruleGroup == requestedGroup { + return true + } + } + + return false +} + +func ResourceMatches(rule *rbacv1.PolicyRule, combinedRequestedResource, requestedSubresource string) bool { + for _, ruleResource := range rule.Resources { + // if everything is allowed, we match + if ruleResource == rbacv1.ResourceAll { + return true + } + // if we have an exact match, we match + if ruleResource == combinedRequestedResource { + return true + } + + // We can also match a */subresource. + // if there isn't a subresource, then continue + if len(requestedSubresource) == 0 { + continue + } + // if the rule isn't in the format */subresource, then we don't match, continue + if len(ruleResource) == len(requestedSubresource)+2 && + strings.HasPrefix(ruleResource, "*/") && + strings.HasSuffix(ruleResource, requestedSubresource) { + return true + + } + } + + return false +} + +func ResourceNameMatches(rule *rbacv1.PolicyRule, requestedName string) bool { + if len(rule.ResourceNames) == 0 { + return true + } + + for _, ruleName := range rule.ResourceNames { + if ruleName == requestedName { + return true + } + } + + return false +} + +func NonResourceURLMatches(rule *rbacv1.PolicyRule, requestedURL string) bool { + for _, ruleURL := range rule.NonResourceURLs { + if ruleURL == rbacv1.NonResourceAll { + return true + } + if ruleURL == requestedURL { + return true + } + if strings.HasSuffix(ruleURL, "*") && strings.HasPrefix(requestedURL, strings.TrimRight(ruleURL, "*")) { + return true + } + } + + return false +} + +// subjectsStrings returns users, groups, serviceaccounts, unknown for display purposes. 
+func SubjectsStrings(subjects []rbacv1.Subject) ([]string, []string, []string, []string) { + users := []string{} + groups := []string{} + sas := []string{} + others := []string{} + + for _, subject := range subjects { + switch subject.Kind { + case rbacv1.ServiceAccountKind: + sas = append(sas, fmt.Sprintf("%s/%s", subject.Namespace, subject.Name)) + + case rbacv1.UserKind: + users = append(users, subject.Name) + + case rbacv1.GroupKind: + groups = append(groups, subject.Name) + + default: + others = append(others, fmt.Sprintf("%s/%s/%s", subject.Kind, subject.Namespace, subject.Name)) + } + } + + return users, groups, sas, others +} + +func String(r rbacv1.PolicyRule) string { + return "PolicyRule" + CompactString(r) +} + +// CompactString exposes a compact string representation for use in escalation error messages +func CompactString(r rbacv1.PolicyRule) string { + formatStringParts := []string{} + formatArgs := []interface{}{} + if len(r.APIGroups) > 0 { + formatStringParts = append(formatStringParts, "APIGroups:%q") + formatArgs = append(formatArgs, r.APIGroups) + } + if len(r.Resources) > 0 { + formatStringParts = append(formatStringParts, "Resources:%q") + formatArgs = append(formatArgs, r.Resources) + } + if len(r.NonResourceURLs) > 0 { + formatStringParts = append(formatStringParts, "NonResourceURLs:%q") + formatArgs = append(formatArgs, r.NonResourceURLs) + } + if len(r.ResourceNames) > 0 { + formatStringParts = append(formatStringParts, "ResourceNames:%q") + formatArgs = append(formatArgs, r.ResourceNames) + } + if len(r.Verbs) > 0 { + formatStringParts = append(formatStringParts, "Verbs:%q") + formatArgs = append(formatArgs, r.Verbs) + } + formatString := "{" + strings.Join(formatStringParts, ", ") + "}" + return fmt.Sprintf(formatString, formatArgs...) +} + +type SortableRuleSlice []rbacv1.PolicyRule + +func (s SortableRuleSlice) Len() int { return len(s) } +func (s SortableRuleSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s SortableRuleSlice) Less(i, j int) bool { + return strings.Compare(s[i].String(), s[j].String()) < 0 +} diff --git a/pkg/apis/rbac/v1/helpers.go b/pkg/apis/rbac/v1/helpers.go index 6bde41408eb..539fe85b464 100644 --- a/pkg/apis/rbac/v1/helpers.go +++ b/pkg/apis/rbac/v1/helpers.go @@ -21,9 +21,13 @@ import ( rbacv1 "k8s.io/api/rbac/v1" + "sort" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// +k8s:deepcopy-gen=false + // PolicyRuleBuilder let's us attach methods. A no-no for API types. // We use it to construct rules in code. It's more compact than trying to write them // out in a literal and allows us to perform some basic checking during construction @@ -87,9 +91,16 @@ func (r *PolicyRuleBuilder) Rule() (rbacv1.PolicyRule, error) { return rbacv1.PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) } + sort.Strings(r.PolicyRule.Resources) + sort.Strings(r.PolicyRule.ResourceNames) + sort.Strings(r.PolicyRule.APIGroups) + sort.Strings(r.PolicyRule.NonResourceURLs) + sort.Strings(r.PolicyRule.Verbs) return r.PolicyRule, nil } +// +k8s:deepcopy-gen=false + // ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. // We use it to construct bindings in code. It's more compact than trying to write them // out in a literal. 
@@ -112,14 +123,14 @@ func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { for _, group := range groups { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.GroupKind, Name: group}) + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.GroupKind, Name: group}) } return r } func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { for _, user := range users { - r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.UserKind, Name: user}) + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, rbacv1.Subject{APIGroup: rbacv1.GroupName, Kind: rbacv1.UserKind, Name: user}) } return r } @@ -146,3 +157,91 @@ func (r *ClusterRoleBindingBuilder) Binding() (rbacv1.ClusterRoleBinding, error) return r.ClusterRoleBinding, nil } + +// +k8s:deepcopy-gen=false + +// RoleBindingBuilder let's us attach methods. It is similar to +// ClusterRoleBindingBuilder above. +type RoleBindingBuilder struct { + RoleBinding rbacv1.RoleBinding +} + +// NewRoleBinding creates a RoleBinding builder that can be used +// to define the subjects of a role binding. At least one of +// the `Groups`, `Users` or `SAs` method must be called before +// calling the `Binding*` methods. +func NewRoleBinding(roleName, namespace string) *RoleBindingBuilder { + return &RoleBindingBuilder{ + RoleBinding: rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: GroupName, + Kind: "Role", + Name: roleName, + }, + }, + } +} + +func NewRoleBindingForClusterRole(roleName, namespace string) *RoleBindingBuilder { + return &RoleBindingBuilder{ + RoleBinding: rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + }, + } +} + +// Groups adds the specified groups as the subjects of the RoleBinding. +func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder { + for _, group := range groups { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.GroupKind, APIGroup: GroupName, Name: group}) + } + return r +} + +// Users adds the specified users as the subjects of the RoleBinding. +func (r *RoleBindingBuilder) Users(users ...string) *RoleBindingBuilder { + for _, user := range users { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: GroupName, Name: user}) + } + return r +} + +// SAs adds the specified service accounts as the subjects of the +// RoleBinding. +func (r *RoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *RoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +// BindingOrDie calls the binding method and panics if there is an error. +func (r *RoleBindingBuilder) BindingOrDie() rbacv1.RoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +// Binding builds and returns the RoleBinding API object from the builder +// object. 
+func (r *RoleBindingBuilder) Binding() (rbacv1.RoleBinding, error) { + if len(r.RoleBinding.Subjects) == 0 { + return rbacv1.RoleBinding{}, fmt.Errorf("subjects are required: %#v", r.RoleBinding) + } + + return r.RoleBinding, nil +} diff --git a/pkg/apis/rbac/v1/zz_generated.deepcopy.go b/pkg/apis/rbac/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..1e699e99228 --- /dev/null +++ b/pkg/apis/rbac/v1/zz_generated.deepcopy.go @@ -0,0 +1,21 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 diff --git a/pkg/kubeapiserver/authorizer/config.go b/pkg/kubeapiserver/authorizer/config.go index 7646f80bc1b..a72ba5ee63c 100644 --- a/pkg/kubeapiserver/authorizer/config.go +++ b/pkg/kubeapiserver/authorizer/config.go @@ -108,10 +108,10 @@ func (config AuthorizationConfig) New() (authorizer.Authorizer, authorizer.RuleR ruleResolvers = append(ruleResolvers, webhookAuthorizer) case modes.ModeRBAC: rbacAuthorizer := rbac.New( - &rbac.RoleGetter{Lister: config.InformerFactory.Rbac().InternalVersion().Roles().Lister()}, - &rbac.RoleBindingLister{Lister: config.InformerFactory.Rbac().InternalVersion().RoleBindings().Lister()}, - &rbac.ClusterRoleGetter{Lister: config.InformerFactory.Rbac().InternalVersion().ClusterRoles().Lister()}, - &rbac.ClusterRoleBindingLister{Lister: config.InformerFactory.Rbac().InternalVersion().ClusterRoleBindings().Lister()}, + &rbac.RoleGetter{Lister: config.VersionedInformerFactory.Rbac().V1().Roles().Lister()}, + &rbac.RoleBindingLister{Lister: config.VersionedInformerFactory.Rbac().V1().RoleBindings().Lister()}, + &rbac.ClusterRoleGetter{Lister: config.VersionedInformerFactory.Rbac().V1().ClusterRoles().Lister()}, + &rbac.ClusterRoleBindingLister{Lister: config.VersionedInformerFactory.Rbac().V1().ClusterRoleBindings().Lister()}, ) authorizers = append(authorizers, rbacAuthorizer) ruleResolvers = append(ruleResolvers, rbacAuthorizer) diff --git a/pkg/kubectl/cmd/auth/BUILD b/pkg/kubectl/cmd/auth/BUILD index a56b7c5c592..d78bc102fc8 100644 --- a/pkg/kubectl/cmd/auth/BUILD +++ b/pkg/kubectl/cmd/auth/BUILD @@ -16,12 +16,8 @@ go_library( "//build/visible_to:pkg_kubectl_cmd_auth_CONSUMERS", ], deps = [ - "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/authorization:go_default_library", - "//pkg/apis/rbac:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", "//pkg/kubectl/cmd/templates:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", @@ -31,8 +27,11 @@ go_library( "//pkg/registry/rbac/reconciliation:go_default_library", "//vendor/github.com/golang/glog:go_default_library", 
"//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/auth/reconcile.go b/pkg/kubectl/cmd/auth/reconcile.go index fd82e2ccdcd..ef62092e6f2 100644 --- a/pkg/kubectl/cmd/auth/reconcile.go +++ b/pkg/kubectl/cmd/auth/reconcile.go @@ -22,10 +22,9 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/rbac" - internalcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - internalrbacclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" + rbacv1 "k8s.io/api/rbac/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" @@ -42,8 +41,8 @@ type ReconcileOptions struct { FilenameOptions *resource.FilenameOptions Visitor resource.Visitor - RBACClient internalrbacclient.RbacInterface - NamespaceClient internalcoreclient.NamespaceInterface + RBACClient rbacv1client.RbacV1Interface + NamespaceClient corev1client.CoreV1Interface PrintObject printers.ResourcePrinterFunc @@ -104,7 +103,7 @@ func (o *ReconcileOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args } r := f.NewBuilder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). ContinueOnError(). NamespaceParam(namespace).DefaultNamespace(). FilenameParam(enforceNamespace, o.FilenameOptions). 
@@ -116,12 +115,18 @@ func (o *ReconcileOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args } o.Visitor = r - client, err := f.ClientSet() + clientConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.RBACClient, err = rbacv1client.NewForConfig(clientConfig) + if err != nil { + return err + } + o.NamespaceClient, err = corev1client.NewForConfig(clientConfig) if err != nil { return err } - o.RBACClient = client.Rbac() - o.NamespaceClient = client.Core().Namespaces() printer, err := o.PrintFlags.ToPrinter() if err != nil { @@ -161,13 +166,13 @@ func (o *ReconcileOptions) RunReconcile() error { } switch t := info.Object.(type) { - case *rbac.Role: + case *rbacv1.Role: reconcileOptions := reconciliation.ReconcileRoleOptions{ Confirm: true, RemoveExtraPermissions: false, Role: reconciliation.RoleRuleOwner{Role: t}, Client: reconciliation.RoleModifier{ - NamespaceClient: o.NamespaceClient, + NamespaceClient: o.NamespaceClient.Namespaces(), Client: o.RBACClient, }, } @@ -177,7 +182,7 @@ func (o *ReconcileOptions) RunReconcile() error { } o.PrintObject(result.Role.GetObject(), o.Out) - case *rbac.ClusterRole: + case *rbacv1.ClusterRole: reconcileOptions := reconciliation.ReconcileRoleOptions{ Confirm: true, RemoveExtraPermissions: false, @@ -192,14 +197,14 @@ func (o *ReconcileOptions) RunReconcile() error { } o.PrintObject(result.Role.GetObject(), o.Out) - case *rbac.RoleBinding: + case *rbacv1.RoleBinding: reconcileOptions := reconciliation.ReconcileRoleBindingOptions{ Confirm: true, RemoveExtraSubjects: false, RoleBinding: reconciliation.RoleBindingAdapter{RoleBinding: t}, Client: reconciliation.RoleBindingClientAdapter{ Client: o.RBACClient, - NamespaceClient: o.NamespaceClient, + NamespaceClient: o.NamespaceClient.Namespaces(), }, } result, err := reconcileOptions.Run() @@ -208,7 +213,7 @@ func (o *ReconcileOptions) RunReconcile() error { } o.PrintObject(result.RoleBinding.GetObject(), o.Out) - case *rbac.ClusterRoleBinding: + case *rbacv1.ClusterRoleBinding: reconcileOptions := reconciliation.ReconcileRoleBindingOptions{ Confirm: true, RemoveExtraSubjects: false, diff --git a/pkg/printers/internalversion/BUILD b/pkg/printers/internalversion/BUILD index c28c99c0949..aeac7a5675f 100644 --- a/pkg/printers/internalversion/BUILD +++ b/pkg/printers/internalversion/BUILD @@ -70,6 +70,7 @@ go_library( "//pkg/apis/networking:go_default_library", "//pkg/apis/policy:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/apis/storage/util:go_default_library", @@ -90,6 +91,7 @@ go_library( "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index edc5edebe50..ca414aef62f 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -35,6 +35,7 @@ import ( "github.com/fatih/camelcase" versionedextension "k8s.io/api/extensions/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" 
"k8s.io/apimachinery/pkg/api/resource" @@ -63,6 +64,7 @@ import ( "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/apis/storage" storageutil "k8s.io/kubernetes/pkg/apis/storage/util" @@ -166,10 +168,10 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]printers.Desc certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c}, storage.Kind("StorageClass"): &StorageClassDescriber{c}, policy.Kind("PodDisruptionBudget"): &PodDisruptionBudgetDescriber{c}, - rbac.Kind("Role"): &RoleDescriber{c}, - rbac.Kind("ClusterRole"): &ClusterRoleDescriber{c}, - rbac.Kind("RoleBinding"): &RoleBindingDescriber{c}, - rbac.Kind("ClusterRoleBinding"): &ClusterRoleBindingDescriber{c}, + rbac.Kind("Role"): &RoleDescriber{externalclient}, + rbac.Kind("ClusterRole"): &ClusterRoleDescriber{externalclient}, + rbac.Kind("RoleBinding"): &RoleBindingDescriber{externalclient}, + rbac.Kind("ClusterRoleBinding"): &ClusterRoleBindingDescriber{externalclient}, networking.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, scheduling.Kind("PriorityClass"): &PriorityClassDescriber{c}, } @@ -2443,7 +2445,7 @@ func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Sec // RoleDescriber generates information about a node. type RoleDescriber struct { - clientset.Interface + externalclient.Interface } func (d *RoleDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { @@ -2452,7 +2454,7 @@ func (d *RoleDescriber) Describe(namespace, name string, describerSettings print return "", err } - breakdownRules := []rbac.PolicyRule{} + breakdownRules := []rbacv1.PolicyRule{} for _, rule := range role.Rules { breakdownRules = append(breakdownRules, validation.BreakdownRule(rule)...) } @@ -2461,7 +2463,7 @@ func (d *RoleDescriber) Describe(namespace, name string, describerSettings print if err != nil { return "", err } - sort.Stable(rbac.SortableRuleSlice(compactRules)) + sort.Stable(rbacv1helpers.SortableRuleSlice(compactRules)) return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) @@ -2482,7 +2484,7 @@ func (d *RoleDescriber) Describe(namespace, name string, describerSettings print // ClusterRoleDescriber generates information about a node. type ClusterRoleDescriber struct { - clientset.Interface + externalclient.Interface } func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { @@ -2491,7 +2493,7 @@ func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSetting return "", err } - breakdownRules := []rbac.PolicyRule{} + breakdownRules := []rbacv1.PolicyRule{} for _, rule := range role.Rules { breakdownRules = append(breakdownRules, validation.BreakdownRule(rule)...) } @@ -2500,7 +2502,7 @@ func (d *ClusterRoleDescriber) Describe(namespace, name string, describerSetting if err != nil { return "", err } - sort.Stable(rbac.SortableRuleSlice(compactRules)) + sort.Stable(rbacv1helpers.SortableRuleSlice(compactRules)) return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) @@ -2538,7 +2540,7 @@ func combineResourceGroup(resource, group []string) string { // RoleBindingDescriber generates information about a node. 
type RoleBindingDescriber struct { - clientset.Interface + externalclient.Interface } func (d *RoleBindingDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { @@ -2570,7 +2572,7 @@ func (d *RoleBindingDescriber) Describe(namespace, name string, describerSetting // ClusterRoleBindingDescriber generates information about a node. type ClusterRoleBindingDescriber struct { - clientset.Interface + externalclient.Interface } func (d *ClusterRoleBindingDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { diff --git a/pkg/registry/rbac/clusterrole/BUILD b/pkg/registry/rbac/clusterrole/BUILD index 57fac87d96e..c4fda28a5fd 100644 --- a/pkg/registry/rbac/clusterrole/BUILD +++ b/pkg/registry/rbac/clusterrole/BUILD @@ -16,12 +16,12 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/apis/rbac/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", diff --git a/pkg/registry/rbac/clusterrole/policybased/storage.go b/pkg/registry/rbac/clusterrole/policybased/storage.go index 8782a679350..767673d252c 100644 --- a/pkg/registry/rbac/clusterrole/policybased/storage.go +++ b/pkg/registry/rbac/clusterrole/policybased/storage.go @@ -58,12 +58,12 @@ func (s *Storage) Create(ctx context.Context, obj runtime.Object, createValidati clusterRole := obj.(*rbac.ClusterRole) rules := clusterRole.Rules - if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { + if err := rbacregistryvalidation.ConfirmNoEscalationInternal(ctx, s.ruleResolver, rules); err != nil { return nil, apierrors.NewForbidden(groupResource, clusterRole.Name, err) } // to set the aggregation rule, since it can gather anything, requires * on *.* if hasAggregationRule(clusterRole) { - if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, fullAuthority); err != nil { + if err := rbacregistryvalidation.ConfirmNoEscalationInternal(ctx, s.ruleResolver, fullAuthority); err != nil { return nil, apierrors.NewForbidden(groupResource, clusterRole.Name, errors.New("must have cluster-admin privileges to use the aggregationRule")) } } @@ -86,12 +86,12 @@ func (s *Storage) Update(ctx context.Context, name string, obj rest.UpdatedObjec } rules := clusterRole.Rules - if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { + if err := rbacregistryvalidation.ConfirmNoEscalationInternal(ctx, s.ruleResolver, rules); err != nil { return nil, apierrors.NewForbidden(groupResource, clusterRole.Name, err) } // to change the aggregation rule, since it can gather anything and prevent tightening, requires * on *.* if hasAggregationRule(clusterRole) || hasAggregationRule(oldClusterRole) { - if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, fullAuthority); err != nil { + if err := 
rbacregistryvalidation.ConfirmNoEscalationInternal(ctx, s.ruleResolver, fullAuthority); err != nil { return nil, apierrors.NewForbidden(groupResource, clusterRole.Name, errors.New("must have cluster-admin privileges to use the aggregationRule")) } } diff --git a/pkg/registry/rbac/clusterrole/registry.go b/pkg/registry/rbac/clusterrole/registry.go index a6244ac087a..5fdc5024df9 100644 --- a/pkg/registry/rbac/clusterrole/registry.go +++ b/pkg/registry/rbac/clusterrole/registry.go @@ -19,27 +19,22 @@ package clusterrole import ( "context" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" ) // Registry is an interface for things that know how to store ClusterRoles. type Registry interface { - ListClusterRoles(ctx context.Context, options *metainternalversion.ListOptions) (*rbac.ClusterRoleList, error) - CreateClusterRole(ctx context.Context, clusterRole *rbac.ClusterRole, createValidation rest.ValidateObjectFunc) error - UpdateClusterRole(ctx context.Context, clusterRole *rbac.ClusterRole, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error - GetClusterRole(ctx context.Context, name string, options *metav1.GetOptions) (*rbac.ClusterRole, error) - DeleteClusterRole(ctx context.Context, name string) error - WatchClusterRoles(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) + GetClusterRole(ctx context.Context, name string, options *metav1.GetOptions) (*rbacv1.ClusterRole, error) } // storage puts strong typing around storage calls type storage struct { - rest.StandardStorage + rest.Getter } // NewRegistry returns a new Registry interface for the given Storage. 
Any mismatched @@ -48,40 +43,17 @@ func NewRegistry(s rest.StandardStorage) Registry { return &storage{s} } -func (s *storage) ListClusterRoles(ctx context.Context, options *metainternalversion.ListOptions) (*rbac.ClusterRoleList, error) { - obj, err := s.List(ctx, options) - if err != nil { - return nil, err - } - - return obj.(*rbac.ClusterRoleList), nil -} - -func (s *storage) CreateClusterRole(ctx context.Context, clusterRole *rbac.ClusterRole, createValidation rest.ValidateObjectFunc) error { - _, err := s.Create(ctx, clusterRole, createValidation, false) - return err -} - -func (s *storage) UpdateClusterRole(ctx context.Context, clusterRole *rbac.ClusterRole, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { - _, _, err := s.Update(ctx, clusterRole.Name, rest.DefaultUpdatedObjectInfo(clusterRole), createValidation, updateValidation) - return err -} - -func (s *storage) WatchClusterRoles(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) { - return s.Watch(ctx, options) -} - -func (s *storage) GetClusterRole(ctx context.Context, name string, options *metav1.GetOptions) (*rbac.ClusterRole, error) { +func (s *storage) GetClusterRole(ctx context.Context, name string, options *metav1.GetOptions) (*rbacv1.ClusterRole, error) { obj, err := s.Get(ctx, name, options) if err != nil { return nil, err } - return obj.(*rbac.ClusterRole), nil -} -func (s *storage) DeleteClusterRole(ctx context.Context, name string) error { - _, _, err := s.Delete(ctx, name, nil) - return err + ret := &rbacv1.ClusterRole{} + if err := rbacv1helpers.Convert_rbac_ClusterRole_To_v1_ClusterRole(obj.(*rbac.ClusterRole), ret, nil); err != nil { + return nil, err + } + return ret, nil } // AuthorizerAdapter adapts the registry to the authorizer interface @@ -89,6 +61,6 @@ type AuthorizerAdapter struct { Registry Registry } -func (a AuthorizerAdapter) GetClusterRole(name string) (*rbac.ClusterRole, error) { +func (a AuthorizerAdapter) GetClusterRole(name string) (*rbacv1.ClusterRole, error) { return a.Registry.GetClusterRole(genericapirequest.NewContext(), name, &metav1.GetOptions{}) } diff --git a/pkg/registry/rbac/clusterrolebinding/BUILD b/pkg/registry/rbac/clusterrolebinding/BUILD index 3f7f3015b01..5b327e87682 100644 --- a/pkg/registry/rbac/clusterrolebinding/BUILD +++ b/pkg/registry/rbac/clusterrolebinding/BUILD @@ -16,12 +16,12 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/apis/rbac/validation:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", diff --git a/pkg/registry/rbac/clusterrolebinding/policybased/BUILD b/pkg/registry/rbac/clusterrolebinding/policybased/BUILD index 66d61e93d13..db396f2b7ad 100644 --- a/pkg/registry/rbac/clusterrolebinding/policybased/BUILD +++ b/pkg/registry/rbac/clusterrolebinding/policybased/BUILD @@ -12,8 +12,10 @@ go_library( 
deps = [ "//pkg/apis/core/helper:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/registry/rbac:go_default_library", "//pkg/registry/rbac/validation:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/registry/rbac/clusterrolebinding/policybased/storage.go b/pkg/registry/rbac/clusterrolebinding/policybased/storage.go index f31e3a0491a..0217fcc3535 100644 --- a/pkg/registry/rbac/clusterrolebinding/policybased/storage.go +++ b/pkg/registry/rbac/clusterrolebinding/policybased/storage.go @@ -20,6 +20,7 @@ package policybased import ( "context" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -27,6 +28,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" ) @@ -59,7 +61,12 @@ func (s *Storage) Create(ctx context.Context, obj runtime.Object, createValidati return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } - rules, err := s.ruleResolver.GetRoleReferenceRules(clusterRoleBinding.RoleRef, metav1.NamespaceNone) + v1RoleRef := rbacv1.RoleRef{} + err := rbacv1helpers.Convert_rbac_RoleRef_To_v1_RoleRef(&clusterRoleBinding.RoleRef, &v1RoleRef, nil) + if err != nil { + return nil, err + } + rules, err := s.ruleResolver.GetRoleReferenceRules(v1RoleRef, metav1.NamespaceNone) if err != nil { return nil, err } @@ -88,7 +95,12 @@ func (s *Storage) Update(ctx context.Context, name string, obj rest.UpdatedObjec } // Otherwise, see if we already have all the permissions contained in the referenced clusterrole - rules, err := s.ruleResolver.GetRoleReferenceRules(clusterRoleBinding.RoleRef, metav1.NamespaceNone) + v1RoleRef := rbacv1.RoleRef{} + err := rbacv1helpers.Convert_rbac_RoleRef_To_v1_RoleRef(&clusterRoleBinding.RoleRef, &v1RoleRef, nil) + if err != nil { + return nil, err + } + rules, err := s.ruleResolver.GetRoleReferenceRules(v1RoleRef, metav1.NamespaceNone) if err != nil { return nil, err } diff --git a/pkg/registry/rbac/clusterrolebinding/registry.go b/pkg/registry/rbac/clusterrolebinding/registry.go index 02ba53bf8c8..34f5b73a9ed 100644 --- a/pkg/registry/rbac/clusterrolebinding/registry.go +++ b/pkg/registry/rbac/clusterrolebinding/registry.go @@ -19,27 +19,22 @@ package clusterrolebinding import ( "context" + rbacv1 "k8s.io/api/rbac/v1" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" ) // Registry is an interface for things that know how to store ClusterRoleBindings. 
type Registry interface { - ListClusterRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (*rbac.ClusterRoleBindingList, error) - CreateClusterRoleBinding(ctx context.Context, clusterRoleBinding *rbac.ClusterRoleBinding, createValidation rest.ValidateObjectFunc) error - UpdateClusterRoleBinding(ctx context.Context, clusterRoleBinding *rbac.ClusterRoleBinding, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error - GetClusterRoleBinding(ctx context.Context, name string, options *metav1.GetOptions) (*rbac.ClusterRoleBinding, error) - DeleteClusterRoleBinding(ctx context.Context, name string) error - WatchClusterRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) + ListClusterRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (*rbacv1.ClusterRoleBindingList, error) } // storage puts strong typing around storage calls type storage struct { - rest.StandardStorage + rest.Lister } // NewRegistry returns a new Registry interface for the given Storage. Any mismatched @@ -48,40 +43,17 @@ func NewRegistry(s rest.StandardStorage) Registry { return &storage{s} } -func (s *storage) ListClusterRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (*rbac.ClusterRoleBindingList, error) { +func (s *storage) ListClusterRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (*rbacv1.ClusterRoleBindingList, error) { obj, err := s.List(ctx, options) if err != nil { return nil, err } - return obj.(*rbac.ClusterRoleBindingList), nil -} - -func (s *storage) CreateClusterRoleBinding(ctx context.Context, clusterRoleBinding *rbac.ClusterRoleBinding, createValidation rest.ValidateObjectFunc) error { - _, err := s.Create(ctx, clusterRoleBinding, createValidation, false) - return err -} - -func (s *storage) UpdateClusterRoleBinding(ctx context.Context, clusterRoleBinding *rbac.ClusterRoleBinding, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { - _, _, err := s.Update(ctx, clusterRoleBinding.Name, rest.DefaultUpdatedObjectInfo(clusterRoleBinding), createValidation, updateValidation) - return err -} - -func (s *storage) WatchClusterRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) { - return s.Watch(ctx, options) -} - -func (s *storage) GetClusterRoleBinding(ctx context.Context, name string, options *metav1.GetOptions) (*rbac.ClusterRoleBinding, error) { - obj, err := s.Get(ctx, name, options) - if err != nil { + ret := &rbacv1.ClusterRoleBindingList{} + if err := rbacv1helpers.Convert_rbac_ClusterRoleBindingList_To_v1_ClusterRoleBindingList(obj.(*rbac.ClusterRoleBindingList), ret, nil); err != nil { return nil, err } - return obj.(*rbac.ClusterRoleBinding), nil -} - -func (s *storage) DeleteClusterRoleBinding(ctx context.Context, name string) error { - _, _, err := s.Delete(ctx, name, nil) - return err + return ret, nil } // AuthorizerAdapter adapts the registry to the authorizer interface @@ -89,13 +61,13 @@ type AuthorizerAdapter struct { Registry Registry } -func (a AuthorizerAdapter) ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error) { +func (a AuthorizerAdapter) ListClusterRoleBindings() ([]*rbacv1.ClusterRoleBinding, error) { list, err := a.Registry.ListClusterRoleBindings(genericapirequest.NewContext(), &metainternalversion.ListOptions{}) if err != nil { return nil, err } - ret := []*rbac.ClusterRoleBinding{} + ret := 
[]*rbacv1.ClusterRoleBinding{} for i := range list.Items { ret = append(ret, &list.Items[i]) } diff --git a/pkg/registry/rbac/reconciliation/BUILD b/pkg/registry/rbac/reconciliation/BUILD index 4b14eed1c69..42ff2c21014 100644 --- a/pkg/registry/rbac/reconciliation/BUILD +++ b/pkg/registry/rbac/reconciliation/BUILD @@ -15,7 +15,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core/helper:go_default_library", - "//pkg/apis/rbac:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], @@ -34,16 +34,16 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/apis/rbac:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", "//pkg/registry/rbac/validation:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", ], ) diff --git a/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go b/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go index 419cc1df26b..68dc97fd3f7 100644 --- a/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go +++ b/pkg/registry/rbac/reconciliation/clusterrole_interfaces.go @@ -17,17 +17,17 @@ limitations under the License. 
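With the registry narrowed to rest.Lister and converting to external types on the way out, a consumer such as the authorizer adapter only ever sees rbacv1 objects. A small usage sketch under that assumption (the listSubjects helper is hypothetical, not part of the patch):

import (
	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apiserver/pkg/registry/rest"
	"k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding"
)

// listSubjects wires the narrowed registry into the authorizer adapter and
// collects the subjects of every cluster role binding as rbacv1.Subject.
func listSubjects(store rest.StandardStorage) ([]rbacv1.Subject, error) {
	adapter := clusterrolebinding.AuthorizerAdapter{Registry: clusterrolebinding.NewRegistry(store)}
	bindings, err := adapter.ListClusterRoleBindings()
	if err != nil {
		return nil, err
	}
	subjects := []rbacv1.Subject{}
	for _, binding := range bindings {
		subjects = append(subjects, binding.Subjects...)
	}
	return subjects, nil
}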
package reconciliation import ( + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" ) // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/kubernetes/pkg/registry/rbac/reconciliation.RuleOwner // +k8s:deepcopy-gen:nonpointer-interfaces=true type ClusterRoleRuleOwner struct { - ClusterRole *rbac.ClusterRole + ClusterRole *rbacv1.ClusterRole } func (o ClusterRoleRuleOwner) GetObject() runtime.Object { @@ -58,24 +58,24 @@ func (o ClusterRoleRuleOwner) SetAnnotations(in map[string]string) { o.ClusterRole.Annotations = in } -func (o ClusterRoleRuleOwner) GetRules() []rbac.PolicyRule { +func (o ClusterRoleRuleOwner) GetRules() []rbacv1.PolicyRule { return o.ClusterRole.Rules } -func (o ClusterRoleRuleOwner) SetRules(in []rbac.PolicyRule) { +func (o ClusterRoleRuleOwner) SetRules(in []rbacv1.PolicyRule) { o.ClusterRole.Rules = in } -func (o ClusterRoleRuleOwner) GetAggregationRule() *rbac.AggregationRule { +func (o ClusterRoleRuleOwner) GetAggregationRule() *rbacv1.AggregationRule { return o.ClusterRole.AggregationRule } -func (o ClusterRoleRuleOwner) SetAggregationRule(in *rbac.AggregationRule) { +func (o ClusterRoleRuleOwner) SetAggregationRule(in *rbacv1.AggregationRule) { o.ClusterRole.AggregationRule = in } type ClusterRoleModifier struct { - Client internalversion.ClusterRoleInterface + Client rbacv1client.ClusterRoleInterface } func (c ClusterRoleModifier) Get(namespace, name string) (RuleOwner, error) { diff --git a/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go index aa07f107f8a..b669a595157 100644 --- a/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/clusterrolebinding_interfaces.go @@ -17,18 +17,18 @@ limitations under the License. 
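Since the reconciliation rule owners and modifiers are now built from k8s.io/api/rbac/v1 objects and client-go typed clients, callers no longer need the internal clientset at all. A rough sketch of reconciling a cluster role this way; the ReconcileRoleOptions field names and Run method are recalled from the reconciliation package and should be treated as assumptions rather than as part of this diff:

package main

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/registry/rbac/reconciliation"
)

// reconcileExampleClusterRole ensures a small read-only cluster role exists,
// driving the reconciliation package entirely through client-go v1 types.
func reconcileExampleClusterRole(cfg *restclient.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	opts := reconciliation.ReconcileRoleOptions{
		Role: reconciliation.ClusterRoleRuleOwner{ClusterRole: &rbacv1.ClusterRole{
			ObjectMeta: metav1.ObjectMeta{Name: "example-pod-reader"},
			Rules: []rbacv1.PolicyRule{
				{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"get", "list"}},
			},
		}},
		Client:  reconciliation.ClusterRoleModifier{Client: cs.RbacV1().ClusterRoles()},
		Confirm: true,
	}
	_, err = opts.Run()
	return err
}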
package reconciliation import ( + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" ) // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/kubernetes/pkg/registry/rbac/reconciliation.RoleBinding // +k8s:deepcopy-gen:nonpointer-interfaces=true type ClusterRoleBindingAdapter struct { - ClusterRoleBinding *rbac.ClusterRoleBinding + ClusterRoleBinding *rbacv1.ClusterRoleBinding } func (o ClusterRoleBindingAdapter) GetObject() runtime.Object { @@ -63,20 +63,20 @@ func (o ClusterRoleBindingAdapter) SetAnnotations(in map[string]string) { o.ClusterRoleBinding.Annotations = in } -func (o ClusterRoleBindingAdapter) GetRoleRef() rbac.RoleRef { +func (o ClusterRoleBindingAdapter) GetRoleRef() rbacv1.RoleRef { return o.ClusterRoleBinding.RoleRef } -func (o ClusterRoleBindingAdapter) GetSubjects() []rbac.Subject { +func (o ClusterRoleBindingAdapter) GetSubjects() []rbacv1.Subject { return o.ClusterRoleBinding.Subjects } -func (o ClusterRoleBindingAdapter) SetSubjects(in []rbac.Subject) { +func (o ClusterRoleBindingAdapter) SetSubjects(in []rbacv1.Subject) { o.ClusterRoleBinding.Subjects = in } type ClusterRoleBindingClientAdapter struct { - Client internalversion.ClusterRoleBindingInterface + Client rbacv1client.ClusterRoleBindingInterface } func (c ClusterRoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, error) { diff --git a/pkg/registry/rbac/reconciliation/reconcile_role.go b/pkg/registry/rbac/reconciliation/reconcile_role.go index 1ab840c759b..8197b9f7596 100644 --- a/pkg/registry/rbac/reconciliation/reconcile_role.go +++ b/pkg/registry/rbac/reconciliation/reconcile_role.go @@ -20,11 +20,11 @@ import ( "fmt" "reflect" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/apis/rbac" "k8s.io/kubernetes/pkg/registry/rbac/validation" ) @@ -51,10 +51,10 @@ type RuleOwner interface { SetLabels(map[string]string) GetAnnotations() map[string]string SetAnnotations(map[string]string) - GetRules() []rbac.PolicyRule - SetRules([]rbac.PolicyRule) - GetAggregationRule() *rbac.AggregationRule - SetAggregationRule(*rbac.AggregationRule) + GetRules() []rbacv1.PolicyRule + SetRules([]rbacv1.PolicyRule) + GetAggregationRule() *rbacv1.AggregationRule + SetAggregationRule(*rbacv1.AggregationRule) DeepCopyRuleOwner() RuleOwner } @@ -75,9 +75,9 @@ type ReconcileClusterRoleResult struct { Role RuleOwner // MissingRules contains expected rules that were missing from the currently persisted role - MissingRules []rbac.PolicyRule + MissingRules []rbacv1.PolicyRule // ExtraRules contains extra permissions the currently persisted role had - ExtraRules []rbac.PolicyRule + ExtraRules []rbacv1.PolicyRule // MissingAggregationRuleSelectors contains expected selectors that were missing from the currently persisted role MissingAggregationRuleSelectors []metav1.LabelSelector @@ -112,7 +112,7 @@ func (o *ReconcileRoleOptions) run(attempts int) (*ReconcileClusterRoleResult, e case errors.IsNotFound(err): aggregationRule := o.Role.GetAggregationRule() if aggregationRule == nil { - aggregationRule = &rbac.AggregationRule{} + aggregationRule = &rbacv1.AggregationRule{} } result = 
&ReconcileClusterRoleResult{ Role: o.Role, @@ -178,7 +178,7 @@ func (o *ReconcileRoleOptions) run(attempts int) (*ReconcileClusterRoleResult, e func computeReconciledRole(existing, expected RuleOwner, removeExtraPermissions bool) (*ReconcileClusterRoleResult, error) { result := &ReconcileClusterRoleResult{Operation: ReconcileNone} - result.Protected = (existing.GetAnnotations()[rbac.AutoUpdateAnnotationKey] == "false") + result.Protected = (existing.GetAnnotations()[rbacv1.AutoUpdateAnnotationKey] == "false") // Start with a copy of the existing object result.Role = existing.DeepCopyRuleOwner() @@ -223,7 +223,7 @@ func computeReconciledRole(existing, expected RuleOwner, removeExtraPermissions // add missing rules in the union case aggregationRule := result.Role.GetAggregationRule() if aggregationRule == nil { - aggregationRule = &rbac.AggregationRule{} + aggregationRule = &rbacv1.AggregationRule{} } aggregationRule.ClusterRoleSelectors = append(aggregationRule.ClusterRoleSelectors, result.MissingAggregationRuleSelectors...) result.Role.SetAggregationRule(aggregationRule) @@ -254,7 +254,7 @@ func merge(maps ...map[string]string) map[string]string { // aggregationRuleCovers determines whether or not the ownerSelectors cover the servantSelectors in terms of semantically // equal label selectors. // It returns whether or not the ownerSelectors cover and a list of the rules that the ownerSelectors do not cover. -func aggregationRuleCovers(ownerRule, servantRule *rbac.AggregationRule) (bool, []metav1.LabelSelector) { +func aggregationRuleCovers(ownerRule, servantRule *rbacv1.AggregationRule) (bool, []metav1.LabelSelector) { switch { case ownerRule == nil && servantRule == nil: return true, []metav1.LabelSelector{} diff --git a/pkg/registry/rbac/reconciliation/reconcile_role_test.go b/pkg/registry/rbac/reconciliation/reconcile_role_test.go index 6a7f632b484..be371279889 100644 --- a/pkg/registry/rbac/reconciliation/reconcile_role_test.go +++ b/pkg/registry/rbac/reconciliation/reconcile_role_test.go @@ -19,23 +19,23 @@ package reconciliation import ( "testing" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/apis/rbac" ) -func role(rules []rbac.PolicyRule, labels map[string]string, annotations map[string]string) *rbac.ClusterRole { - return &rbac.ClusterRole{ +func role(rules []rbacv1.PolicyRule, labels map[string]string, annotations map[string]string) *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ Rules: rules, ObjectMeta: metav1.ObjectMeta{Labels: labels, Annotations: annotations}, } } -func rules(resources ...string) []rbac.PolicyRule { - r := []rbac.PolicyRule{} +func rules(resources ...string) []rbacv1.PolicyRule { + r := []rbacv1.PolicyRule{} for _, resource := range resources { - r = append(r, rbac.PolicyRule{APIGroups: []string{""}, Verbs: []string{"get"}, Resources: []string{resource}}) + r = append(r, rbacv1.PolicyRule{APIGroups: []string{""}, Verbs: []string{"get"}, Resources: []string{resource}}) } return r } @@ -44,11 +44,11 @@ type ss map[string]string func TestComputeReconciledRoleRules(t *testing.T) { tests := map[string]struct { - expectedRole *rbac.ClusterRole - actualRole *rbac.ClusterRole + expectedRole *rbacv1.ClusterRole + actualRole *rbacv1.ClusterRole removeExtraPermissions bool - expectedReconciledRole *rbac.ClusterRole + expectedReconciledRole *rbacv1.ClusterRole expectedReconciliationNeeded bool }{ "empty": { @@ -278,14 +278,14 @@ func 
TestComputeReconciledRoleRules(t *testing.T) { } } -func aggregatedRole(aggregationRule *rbac.AggregationRule) *rbac.ClusterRole { - return &rbac.ClusterRole{ +func aggregatedRole(aggregationRule *rbacv1.AggregationRule) *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ AggregationRule: aggregationRule, } } -func aggregationrule(selectors []map[string]string) *rbac.AggregationRule { - ret := &rbac.AggregationRule{} +func aggregationrule(selectors []map[string]string) *rbacv1.AggregationRule { + ret := &rbacv1.AggregationRule{} for _, selector := range selectors { ret.ClusterRoleSelectors = append(ret.ClusterRoleSelectors, metav1.LabelSelector{MatchLabels: selector}) @@ -295,15 +295,15 @@ func aggregationrule(selectors []map[string]string) *rbac.AggregationRule { func TestComputeReconciledRoleAggregationRules(t *testing.T) { tests := map[string]struct { - expectedRole *rbac.ClusterRole - actualRole *rbac.ClusterRole + expectedRole *rbacv1.ClusterRole + actualRole *rbacv1.ClusterRole removeExtraPermissions bool - expectedReconciledRole *rbac.ClusterRole + expectedReconciledRole *rbacv1.ClusterRole expectedReconciliationNeeded bool }{ "empty": { - expectedRole: aggregatedRole(&rbac.AggregationRule{}), + expectedRole: aggregatedRole(&rbacv1.AggregationRule{}), actualRole: aggregatedRole(nil), removeExtraPermissions: true, @@ -311,8 +311,8 @@ func TestComputeReconciledRoleAggregationRules(t *testing.T) { expectedReconciliationNeeded: false, }, "empty-2": { - expectedRole: aggregatedRole(&rbac.AggregationRule{}), - actualRole: aggregatedRole(&rbac.AggregationRule{}), + expectedRole: aggregatedRole(&rbacv1.AggregationRule{}), + actualRole: aggregatedRole(&rbacv1.AggregationRule{}), removeExtraPermissions: true, expectedReconciledRole: nil, @@ -365,7 +365,7 @@ func TestComputeReconciledRoleAggregationRules(t *testing.T) { // desired role is not aggregated expectedRole: role(rules("pods", "nodes", "secrets"), nil, nil), // existing role is aggregated and has other permissions - actualRole: func() *rbac.ClusterRole { + actualRole: func() *rbacv1.ClusterRole { r := aggregatedRole(aggregationrule([]map[string]string{{"alpha": "bravo"}})) r.Rules = rules("deployments") return r diff --git a/pkg/registry/rbac/reconciliation/reconcile_rolebindings.go b/pkg/registry/rbac/reconciliation/reconcile_rolebindings.go index ac138da5846..65ca2bd1a35 100644 --- a/pkg/registry/rbac/reconciliation/reconcile_rolebindings.go +++ b/pkg/registry/rbac/reconciliation/reconcile_rolebindings.go @@ -20,10 +20,10 @@ import ( "fmt" "reflect" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/apis/rbac" ) type RoleBindingModifier interface { @@ -42,9 +42,9 @@ type RoleBinding interface { SetLabels(map[string]string) GetAnnotations() map[string]string SetAnnotations(map[string]string) - GetRoleRef() rbac.RoleRef - GetSubjects() []rbac.Subject - SetSubjects([]rbac.Subject) + GetRoleRef() rbacv1.RoleRef + GetSubjects() []rbacv1.Subject + SetSubjects([]rbacv1.Subject) DeepCopyRoleBinding() RoleBinding } @@ -67,9 +67,9 @@ type ReconcileClusterRoleBindingResult struct { RoleBinding RoleBinding // MissingSubjects contains expected subjects that were missing from the currently persisted rolebinding - MissingSubjects []rbac.Subject + MissingSubjects []rbacv1.Subject // ExtraSubjects contains extra subjects the currently persisted rolebinding had - ExtraSubjects []rbac.Subject + ExtraSubjects []rbacv1.Subject // Operation is the 
API operation required to reconcile. // If no reconciliation was needed, it is set to ReconcileNone. @@ -176,7 +176,7 @@ func (o *ReconcileRoleBindingOptions) run(attempts int) (*ReconcileClusterRoleBi func computeReconciledRoleBinding(existing, expected RoleBinding, removeExtraSubjects bool) (*ReconcileClusterRoleBindingResult, error) { result := &ReconcileClusterRoleBindingResult{Operation: ReconcileNone} - result.Protected = (existing.GetAnnotations()[rbac.AutoUpdateAnnotationKey] == "false") + result.Protected = (existing.GetAnnotations()[rbacv1.AutoUpdateAnnotationKey] == "false") // Reset the binding completely if the roleRef is different if expected.GetRoleRef() != existing.GetRoleRef() { @@ -216,7 +216,7 @@ func computeReconciledRoleBinding(existing, expected RoleBinding, removeExtraSub return result, nil } -func contains(list []rbac.Subject, item rbac.Subject) bool { +func contains(list []rbacv1.Subject, item rbacv1.Subject) bool { for _, listItem := range list { if listItem == item { return true @@ -229,7 +229,7 @@ func contains(list []rbac.Subject, item rbac.Subject) bool { // list1Only = list1 - list2 // list2Only = list2 - list1 // if both returned lists are empty, the provided lists are equal -func diffSubjectLists(list1 []rbac.Subject, list2 []rbac.Subject) (list1Only []rbac.Subject, list2Only []rbac.Subject) { +func diffSubjectLists(list1 []rbacv1.Subject, list2 []rbacv1.Subject) (list1Only []rbacv1.Subject, list2Only []rbacv1.Subject) { for _, list1Item := range list1 { if !contains(list2, list1Item) { if !contains(list1Only, list1Item) { diff --git a/pkg/registry/rbac/reconciliation/reconcile_rolebindings_test.go b/pkg/registry/rbac/reconciliation/reconcile_rolebindings_test.go index 0cdd3cf46b8..6abb09081b0 100644 --- a/pkg/registry/rbac/reconciliation/reconcile_rolebindings_test.go +++ b/pkg/registry/rbac/reconciliation/reconcile_rolebindings_test.go @@ -19,24 +19,24 @@ package reconciliation import ( "testing" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/apis/rbac" ) -func binding(roleRef rbac.RoleRef, subjects []rbac.Subject) *rbac.ClusterRoleBinding { - return &rbac.ClusterRoleBinding{RoleRef: roleRef, Subjects: subjects} +func binding(roleRef rbacv1.RoleRef, subjects []rbacv1.Subject) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{RoleRef: roleRef, Subjects: subjects} } -func ref(name string) rbac.RoleRef { - return rbac.RoleRef{Name: name} +func ref(name string) rbacv1.RoleRef { + return rbacv1.RoleRef{Name: name} } -func subject(name string) rbac.Subject { - return rbac.Subject{Name: name} +func subject(name string) rbacv1.Subject { + return rbacv1.Subject{Name: name} } -func subjects(names ...string) []rbac.Subject { - r := []rbac.Subject{} +func subjects(names ...string) []rbacv1.Subject { + r := []rbacv1.Subject{} for _, name := range names { r = append(r, subject(name)) } @@ -45,10 +45,10 @@ func subjects(names ...string) []rbac.Subject { func TestDiffObjectReferenceLists(t *testing.T) { tests := map[string]struct { - A []rbac.Subject - B []rbac.Subject - ExpectedOnlyA []rbac.Subject - ExpectedOnlyB []rbac.Subject + A []rbacv1.Subject + B []rbacv1.Subject + ExpectedOnlyA []rbacv1.Subject + ExpectedOnlyB []rbacv1.Subject }{ "empty": {}, @@ -92,11 +92,11 @@ func TestDiffObjectReferenceLists(t *testing.T) { func TestComputeUpdate(t *testing.T) { tests := map[string]struct { - ExpectedBinding *rbac.ClusterRoleBinding - ActualBinding *rbac.ClusterRoleBinding + ExpectedBinding 
*rbacv1.ClusterRoleBinding + ActualBinding *rbacv1.ClusterRoleBinding RemoveExtraSubjects bool - ExpectedUpdatedBinding *rbac.ClusterRoleBinding + ExpectedUpdatedBinding *rbacv1.ClusterRoleBinding ExpectedUpdateNeeded bool }{ "match without union": { diff --git a/pkg/registry/rbac/reconciliation/role_interfaces.go b/pkg/registry/rbac/reconciliation/role_interfaces.go index b46e9e872ea..24cb7899d37 100644 --- a/pkg/registry/rbac/reconciliation/role_interfaces.go +++ b/pkg/registry/rbac/reconciliation/role_interfaces.go @@ -17,20 +17,20 @@ limitations under the License. package reconciliation import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/rbac" - core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" ) // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/kubernetes/pkg/registry/rbac/reconciliation.RuleOwner // +k8s:deepcopy-gen:nonpointer-interfaces=true type RoleRuleOwner struct { - Role *rbac.Role + Role *rbacv1.Role } func (o RoleRuleOwner) GetObject() runtime.Object { @@ -61,24 +61,24 @@ func (o RoleRuleOwner) SetAnnotations(in map[string]string) { o.Role.Annotations = in } -func (o RoleRuleOwner) GetRules() []rbac.PolicyRule { +func (o RoleRuleOwner) GetRules() []rbacv1.PolicyRule { return o.Role.Rules } -func (o RoleRuleOwner) SetRules(in []rbac.PolicyRule) { +func (o RoleRuleOwner) SetRules(in []rbacv1.PolicyRule) { o.Role.Rules = in } -func (o RoleRuleOwner) GetAggregationRule() *rbac.AggregationRule { +func (o RoleRuleOwner) GetAggregationRule() *rbacv1.AggregationRule { return nil } -func (o RoleRuleOwner) SetAggregationRule(in *rbac.AggregationRule) { +func (o RoleRuleOwner) SetAggregationRule(in *rbacv1.AggregationRule) { } type RoleModifier struct { - Client internalversion.RolesGetter - NamespaceClient core.NamespaceInterface + Client rbacv1client.RolesGetter + NamespaceClient corev1client.NamespaceInterface } func (c RoleModifier) Get(namespace, name string) (RuleOwner, error) { @@ -90,7 +90,7 @@ func (c RoleModifier) Get(namespace, name string) (RuleOwner, error) { } func (c RoleModifier) Create(in RuleOwner) (RuleOwner, error) { - ns := &api.Namespace{ObjectMeta: metav1.ObjectMeta{Name: in.GetNamespace()}} + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: in.GetNamespace()}} if _, err := c.NamespaceClient.Create(ns); err != nil && !apierrors.IsAlreadyExists(err) { return nil, err } diff --git a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go index 126f479cb1e..23bf6b653a8 100644 --- a/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go +++ b/pkg/registry/rbac/reconciliation/rolebinding_interfaces.go @@ -17,21 +17,21 @@ limitations under the License. 
package reconciliation import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/rbac" - core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" ) // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/kubernetes/pkg/registry/rbac/reconciliation.RoleBinding // +k8s:deepcopy-gen:nonpointer-interfaces=true type RoleBindingAdapter struct { - RoleBinding *rbac.RoleBinding + RoleBinding *rbacv1.RoleBinding } func (o RoleBindingAdapter) GetObject() runtime.Object { @@ -66,21 +66,21 @@ func (o RoleBindingAdapter) SetAnnotations(in map[string]string) { o.RoleBinding.Annotations = in } -func (o RoleBindingAdapter) GetRoleRef() rbac.RoleRef { +func (o RoleBindingAdapter) GetRoleRef() rbacv1.RoleRef { return o.RoleBinding.RoleRef } -func (o RoleBindingAdapter) GetSubjects() []rbac.Subject { +func (o RoleBindingAdapter) GetSubjects() []rbacv1.Subject { return o.RoleBinding.Subjects } -func (o RoleBindingAdapter) SetSubjects(in []rbac.Subject) { +func (o RoleBindingAdapter) SetSubjects(in []rbacv1.Subject) { o.RoleBinding.Subjects = in } type RoleBindingClientAdapter struct { - Client internalversion.RoleBindingsGetter - NamespaceClient core.NamespaceInterface + Client rbacv1client.RoleBindingsGetter + NamespaceClient corev1client.NamespaceInterface } func (c RoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, error) { @@ -92,7 +92,7 @@ func (c RoleBindingClientAdapter) Get(namespace, name string) (RoleBinding, erro } func (c RoleBindingClientAdapter) Create(in RoleBinding) (RoleBinding, error) { - ns := &api.Namespace{ObjectMeta: metav1.ObjectMeta{Name: in.GetNamespace()}} + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: in.GetNamespace()}} if _, err := c.NamespaceClient.Create(ns); err != nil && !apierrors.IsAlreadyExists(err) { return nil, err } diff --git a/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go b/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go index 3347ccdfe0a..411cfbd48af 100644 --- a/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go +++ b/pkg/registry/rbac/reconciliation/zz_generated.deepcopy.go @@ -21,7 +21,7 @@ limitations under the License. package reconciliation import ( - rbac "k8s.io/kubernetes/pkg/apis/rbac" + v1 "k8s.io/api/rbac/v1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
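Both client adapters above create the target namespace with the core/v1 typed client before creating the namespaced object, tolerating AlreadyExists. A compact sketch of that step in isolation (ensureNamespace is an illustrative name, not a helper introduced by this patch):

import (
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// ensureNamespace creates the namespace if it is missing and treats an
// AlreadyExists error as success, mirroring RoleBindingClientAdapter.Create.
func ensureNamespace(nsClient corev1client.NamespaceInterface, name string) error {
	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
	if _, err := nsClient.Create(ns); err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	return nil
}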
@@ -32,7 +32,7 @@ func (in *ClusterRoleBindingAdapter) DeepCopyInto(out *ClusterRoleBindingAdapter if *in == nil { *out = nil } else { - *out = new(rbac.ClusterRoleBinding) + *out = new(v1.ClusterRoleBinding) (*in).DeepCopyInto(*out) } } @@ -62,7 +62,7 @@ func (in *ClusterRoleRuleOwner) DeepCopyInto(out *ClusterRoleRuleOwner) { if *in == nil { *out = nil } else { - *out = new(rbac.ClusterRole) + *out = new(v1.ClusterRole) (*in).DeepCopyInto(*out) } } @@ -92,7 +92,7 @@ func (in *RoleBindingAdapter) DeepCopyInto(out *RoleBindingAdapter) { if *in == nil { *out = nil } else { - *out = new(rbac.RoleBinding) + *out = new(v1.RoleBinding) (*in).DeepCopyInto(*out) } } @@ -122,7 +122,7 @@ func (in *RoleRuleOwner) DeepCopyInto(out *RoleRuleOwner) { if *in == nil { *out = nil } else { - *out = new(rbac.Role) + *out = new(v1.Role) (*in).DeepCopyInto(*out) } } diff --git a/pkg/registry/rbac/rest/BUILD b/pkg/registry/rbac/rest/BUILD index 685a8334bf1..0d84ad2f853 100644 --- a/pkg/registry/rbac/rest/BUILD +++ b/pkg/registry/rbac/rest/BUILD @@ -12,8 +12,6 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", "//pkg/registry/rbac/clusterrole:go_default_library", "//pkg/registry/rbac/clusterrole/policybased:go_default_library", "//pkg/registry/rbac/clusterrole/storage:go_default_library", @@ -43,6 +41,8 @@ go_library( "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", ], ) diff --git a/pkg/registry/rbac/rest/storage_rbac.go b/pkg/registry/rbac/rest/storage_rbac.go index 1622d067b32..53ace70eb9d 100644 --- a/pkg/registry/rbac/rest/storage_rbac.go +++ b/pkg/registry/rbac/rest/storage_rbac.go @@ -35,11 +35,11 @@ import ( "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/util/retry" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/rbac" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - rbacclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion" "k8s.io/kubernetes/pkg/registry/rbac/clusterrole" clusterrolepolicybased "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased" clusterrolestore "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage" @@ -124,10 +124,10 @@ func (p RESTStorageProvider) PostStartHook() (string, genericapiserver.PostStart } type PolicyData struct { - ClusterRoles []rbac.ClusterRole - ClusterRoleBindings []rbac.ClusterRoleBinding - Roles map[string][]rbac.Role - RoleBindings map[string][]rbac.RoleBinding + ClusterRoles []rbacapiv1.ClusterRole + ClusterRoleBindings []rbacapiv1.ClusterRoleBinding + Roles map[string][]rbacapiv1.Role + RoleBindings map[string][]rbacapiv1.RoleBinding // ClusterRolesToAggregate maps from previous clusterrole name to 
the new clusterrole name ClusterRolesToAggregate map[string]string } @@ -138,13 +138,13 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc { // starts, the roles don't initialize, and nothing works. err := wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) { - coreclientset, err := coreclient.NewForConfig(hookContext.LoopbackClientConfig) + coreclientset, err := corev1client.NewForConfig(hookContext.LoopbackClientConfig) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to initialize client: %v", err)) return false, nil } - clientset, err := rbacclient.NewForConfig(hookContext.LoopbackClientConfig) + clientset, err := rbacv1client.NewForConfig(hookContext.LoopbackClientConfig) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to initialize client: %v", err)) return false, nil @@ -303,7 +303,7 @@ func (p RESTStorageProvider) GroupName() string { // primeAggregatedClusterRoles copies roles that have transitioned to aggregated roles and may need to pick up changes // that were done to the legacy roles. -func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clusterRoleClient rbacclient.ClusterRolesGetter) error { +func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clusterRoleClient rbacv1client.ClusterRolesGetter) error { for oldName, newName := range clusterRolesToAggregate { _, err := clusterRoleClient.ClusterRoles().Get(newName, metav1.GetOptions{}) if err == nil { diff --git a/pkg/registry/rbac/role/BUILD b/pkg/registry/rbac/role/BUILD index a4fde9839b4..03c6f598e0b 100644 --- a/pkg/registry/rbac/role/BUILD +++ b/pkg/registry/rbac/role/BUILD @@ -16,12 +16,12 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/apis/rbac/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", diff --git a/pkg/registry/rbac/role/policybased/storage.go b/pkg/registry/rbac/role/policybased/storage.go index 3aa1c4f946c..447cd1178a3 100644 --- a/pkg/registry/rbac/role/policybased/storage.go +++ b/pkg/registry/rbac/role/policybased/storage.go @@ -52,7 +52,7 @@ func (s *Storage) Create(ctx context.Context, obj runtime.Object, createValidati role := obj.(*rbac.Role) rules := role.Rules - if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { + if err := rbacregistryvalidation.ConfirmNoEscalationInternal(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, role.Name, err) } return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) @@ -72,7 +72,7 @@ func (s *Storage) Update(ctx context.Context, name string, obj rest.UpdatedObjec } rules := role.Rules - if err := rbacregistryvalidation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { + if err := rbacregistryvalidation.ConfirmNoEscalationInternal(ctx, s.ruleResolver, 
rules); err != nil { return nil, errors.NewForbidden(groupResource, role.Name, err) } return obj, nil diff --git a/pkg/registry/rbac/role/registry.go b/pkg/registry/rbac/role/registry.go index 6f026b01642..30626acf4e7 100644 --- a/pkg/registry/rbac/role/registry.go +++ b/pkg/registry/rbac/role/registry.go @@ -19,27 +19,22 @@ package role import ( "context" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" ) // Registry is an interface for things that know how to store Roles. type Registry interface { - ListRoles(ctx context.Context, options *metainternalversion.ListOptions) (*rbac.RoleList, error) - CreateRole(ctx context.Context, role *rbac.Role, createValidation rest.ValidateObjectFunc) error - UpdateRole(ctx context.Context, role *rbac.Role, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error - GetRole(ctx context.Context, name string, options *metav1.GetOptions) (*rbac.Role, error) - DeleteRole(ctx context.Context, name string) error - WatchRoles(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) + GetRole(ctx context.Context, name string, options *metav1.GetOptions) (*rbacv1.Role, error) } // storage puts strong typing around storage calls type storage struct { - rest.StandardStorage + rest.Getter } // NewRegistry returns a new Registry interface for the given Storage. Any mismatched @@ -48,41 +43,17 @@ func NewRegistry(s rest.StandardStorage) Registry { return &storage{s} } -func (s *storage) ListRoles(ctx context.Context, options *metainternalversion.ListOptions) (*rbac.RoleList, error) { - obj, err := s.List(ctx, options) - if err != nil { - return nil, err - } - - return obj.(*rbac.RoleList), nil -} - -func (s *storage) CreateRole(ctx context.Context, role *rbac.Role, createValidation rest.ValidateObjectFunc) error { - _, err := s.Create(ctx, role, createValidation, false) - return err -} - -func (s *storage) UpdateRole(ctx context.Context, role *rbac.Role, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { - // TODO: any admission? 
- _, _, err := s.Update(ctx, role.Name, rest.DefaultUpdatedObjectInfo(role), createValidation, updateValidation) - return err -} - -func (s *storage) WatchRoles(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) { - return s.Watch(ctx, options) -} - -func (s *storage) GetRole(ctx context.Context, name string, options *metav1.GetOptions) (*rbac.Role, error) { +func (s *storage) GetRole(ctx context.Context, name string, options *metav1.GetOptions) (*rbacv1.Role, error) { obj, err := s.Get(ctx, name, options) if err != nil { return nil, err } - return obj.(*rbac.Role), nil -} -func (s *storage) DeleteRole(ctx context.Context, name string) error { - _, _, err := s.Delete(ctx, name, nil) - return err + ret := &rbacv1.Role{} + if err := rbacv1helpers.Convert_rbac_Role_To_v1_Role(obj.(*rbac.Role), ret, nil); err != nil { + return nil, err + } + return ret, nil } // AuthorizerAdapter adapts the registry to the authorizer interface @@ -90,6 +61,6 @@ type AuthorizerAdapter struct { Registry Registry } -func (a AuthorizerAdapter) GetRole(namespace, name string) (*rbac.Role, error) { +func (a AuthorizerAdapter) GetRole(namespace, name string) (*rbacv1.Role, error) { return a.Registry.GetRole(genericapirequest.WithNamespace(genericapirequest.NewContext(), namespace), name, &metav1.GetOptions{}) } diff --git a/pkg/registry/rbac/rolebinding/BUILD b/pkg/registry/rbac/rolebinding/BUILD index e3072531138..63ad2869973 100644 --- a/pkg/registry/rbac/rolebinding/BUILD +++ b/pkg/registry/rbac/rolebinding/BUILD @@ -16,12 +16,12 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/apis/rbac/validation:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", diff --git a/pkg/registry/rbac/rolebinding/policybased/BUILD b/pkg/registry/rbac/rolebinding/policybased/BUILD index fd05f21b463..eaca0454360 100644 --- a/pkg/registry/rbac/rolebinding/policybased/BUILD +++ b/pkg/registry/rbac/rolebinding/policybased/BUILD @@ -12,8 +12,10 @@ go_library( deps = [ "//pkg/apis/core/helper:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/registry/rbac:go_default_library", "//pkg/registry/rbac/validation:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", diff --git a/pkg/registry/rbac/rolebinding/policybased/storage.go b/pkg/registry/rbac/rolebinding/policybased/storage.go index 5adc0003bd0..b71821c0c27 100644 --- a/pkg/registry/rbac/rolebinding/policybased/storage.go +++ b/pkg/registry/rbac/rolebinding/policybased/storage.go @@ -20,6 +20,7 @@ package policybased import ( "context" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authorization/authorizer" @@ -27,6 +28,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" kapihelper "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" rbacregistry "k8s.io/kubernetes/pkg/registry/rbac" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" ) @@ -66,7 +68,12 @@ func (s *Storage) Create(ctx context.Context, obj runtime.Object, createValidati return s.StandardStorage.Create(ctx, obj, createValidation, includeUninitialized) } - rules, err := s.ruleResolver.GetRoleReferenceRules(roleBinding.RoleRef, namespace) + v1RoleRef := rbacv1.RoleRef{} + err := rbacv1helpers.Convert_rbac_RoleRef_To_v1_RoleRef(&roleBinding.RoleRef, &v1RoleRef, nil) + if err != nil { + return nil, err + } + rules, err := s.ruleResolver.GetRoleReferenceRules(v1RoleRef, namespace) if err != nil { return nil, err } @@ -102,7 +109,12 @@ func (s *Storage) Update(ctx context.Context, name string, obj rest.UpdatedObjec } // Otherwise, see if we already have all the permissions contained in the referenced role - rules, err := s.ruleResolver.GetRoleReferenceRules(roleBinding.RoleRef, namespace) + v1RoleRef := rbacv1.RoleRef{} + err := rbacv1helpers.Convert_rbac_RoleRef_To_v1_RoleRef(&roleBinding.RoleRef, &v1RoleRef, nil) + if err != nil { + return nil, err + } + rules, err := s.ruleResolver.GetRoleReferenceRules(v1RoleRef, namespace) if err != nil { return nil, err } diff --git a/pkg/registry/rbac/rolebinding/registry.go b/pkg/registry/rbac/rolebinding/registry.go index cd0fc2c746d..af78c1a4999 100644 --- a/pkg/registry/rbac/rolebinding/registry.go +++ b/pkg/registry/rbac/rolebinding/registry.go @@ -19,27 +19,22 @@ package rolebinding import ( "context" + rbacv1 "k8s.io/api/rbac/v1" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" ) // Registry is an interface for things that know how to store RoleBindings. type Registry interface { - ListRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (*rbac.RoleBindingList, error) - CreateRoleBinding(ctx context.Context, roleBinding *rbac.RoleBinding, createValidation rest.ValidateObjectFunc) error - UpdateRoleBinding(ctx context.Context, roleBinding *rbac.RoleBinding, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error - GetRoleBinding(ctx context.Context, name string, options *metav1.GetOptions) (*rbac.RoleBinding, error) - DeleteRoleBinding(ctx context.Context, name string) error - WatchRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) + ListRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (*rbacv1.RoleBindingList, error) } // storage puts strong typing around storage calls type storage struct { - rest.StandardStorage + rest.Lister } // NewRegistry returns a new Registry interface for the given Storage. 
Any mismatched @@ -48,41 +43,17 @@ func NewRegistry(s rest.StandardStorage) Registry { return &storage{s} } -func (s *storage) ListRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (*rbac.RoleBindingList, error) { +func (s *storage) ListRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (*rbacv1.RoleBindingList, error) { obj, err := s.List(ctx, options) if err != nil { return nil, err } - return obj.(*rbac.RoleBindingList), nil -} - -func (s *storage) CreateRoleBinding(ctx context.Context, roleBinding *rbac.RoleBinding, createValidation rest.ValidateObjectFunc) error { - // TODO(ericchiang): add additional validation - _, err := s.Create(ctx, roleBinding, createValidation, false) - return err -} - -func (s *storage) UpdateRoleBinding(ctx context.Context, roleBinding *rbac.RoleBinding, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc) error { - _, _, err := s.Update(ctx, roleBinding.Name, rest.DefaultUpdatedObjectInfo(roleBinding), createValidation, updateValidation) - return err -} - -func (s *storage) WatchRoleBindings(ctx context.Context, options *metainternalversion.ListOptions) (watch.Interface, error) { - return s.Watch(ctx, options) -} - -func (s *storage) GetRoleBinding(ctx context.Context, name string, options *metav1.GetOptions) (*rbac.RoleBinding, error) { - obj, err := s.Get(ctx, name, options) - if err != nil { + ret := &rbacv1.RoleBindingList{} + if err := rbacv1helpers.Convert_rbac_RoleBindingList_To_v1_RoleBindingList(obj.(*rbac.RoleBindingList), ret, nil); err != nil { return nil, err } - return obj.(*rbac.RoleBinding), nil -} - -func (s *storage) DeleteRoleBinding(ctx context.Context, name string) error { - _, _, err := s.Delete(ctx, name, nil) - return err + return ret, nil } // AuthorizerAdapter adapts the registry to the authorizer interface @@ -90,13 +61,13 @@ type AuthorizerAdapter struct { Registry Registry } -func (a AuthorizerAdapter) ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) { +func (a AuthorizerAdapter) ListRoleBindings(namespace string) ([]*rbacv1.RoleBinding, error) { list, err := a.Registry.ListRoleBindings(genericapirequest.WithNamespace(genericapirequest.NewContext(), namespace), &metainternalversion.ListOptions{}) if err != nil { return nil, err } - ret := []*rbac.RoleBinding{} + ret := []*rbacv1.RoleBinding{} for i := range list.Items { ret = append(ret, &list.Items[i]) } diff --git a/pkg/registry/rbac/validation/BUILD b/pkg/registry/rbac/validation/BUILD index ddfb6431fbd..152ae0387e1 100644 --- a/pkg/registry/rbac/validation/BUILD +++ b/pkg/registry/rbac/validation/BUILD @@ -15,7 +15,8 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -25,6 +26,7 @@ go_test( go_library( name = "go_default_library", srcs = [ + "internal_version_adapter.go", "policy_compact.go", "policy_comparator.go", "rule.go", @@ -32,7 +34,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/registry/rbac/validation", deps = [ "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", diff --git a/pkg/registry/rbac/validation/internal_version_adapter.go b/pkg/registry/rbac/validation/internal_version_adapter.go new file mode 100644 index 00000000000..bfb57242df9 --- /dev/null +++ b/pkg/registry/rbac/validation/internal_version_adapter.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "context" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" +) + +func ConfirmNoEscalationInternal(ctx context.Context, ruleResolver AuthorizationRuleResolver, inRules []rbac.PolicyRule) error { + rules := []rbacv1.PolicyRule{} + for i := range inRules { + v1Rule := rbacv1.PolicyRule{} + err := rbacv1helpers.Convert_rbac_PolicyRule_To_v1_PolicyRule(&inRules[i], &v1Rule, nil) + if err != nil { + return err + } + rules = append(rules, v1Rule) + } + + return ConfirmNoEscalation(ctx, ruleResolver, rules) +} diff --git a/pkg/registry/rbac/validation/policy_compact.go b/pkg/registry/rbac/validation/policy_compact.go index 303bbc7a07f..182657b1ca9 100644 --- a/pkg/registry/rbac/validation/policy_compact.go +++ b/pkg/registry/rbac/validation/policy_compact.go @@ -19,7 +19,7 @@ package validation import ( "reflect" - "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1 "k8s.io/api/rbac/v1" ) type simpleResource struct { @@ -31,10 +31,10 @@ type simpleResource struct { // CompactRules combines rules that contain a single APIGroup/Resource, differ only by verb, and contain no other attributes. // this is a fast check, and works well with the decomposed "missing rules" list from a Covers check. 
-func CompactRules(rules []rbac.PolicyRule) ([]rbac.PolicyRule, error) { - compacted := make([]rbac.PolicyRule, 0, len(rules)) +func CompactRules(rules []rbacv1.PolicyRule) ([]rbacv1.PolicyRule, error) { + compacted := make([]rbacv1.PolicyRule, 0, len(rules)) - simpleRules := map[simpleResource]*rbac.PolicyRule{} + simpleRules := map[simpleResource]*rbacv1.PolicyRule{} for _, rule := range rules { if resource, isSimple := isSimpleResourceRule(&rule); isSimple { if existingRule, ok := simpleRules[resource]; ok { @@ -61,7 +61,7 @@ func CompactRules(rules []rbac.PolicyRule) ([]rbac.PolicyRule, error) { } // isSimpleResourceRule returns true if the given rule contains verbs, a single resource, a single API group, at most one Resource Name, and no other values -func isSimpleResourceRule(rule *rbac.PolicyRule) (simpleResource, bool) { +func isSimpleResourceRule(rule *rbacv1.PolicyRule) (simpleResource, bool) { resource := simpleResource{} // If we have "complex" rule attributes, return early without allocations or expensive comparisons @@ -74,7 +74,7 @@ func isSimpleResourceRule(rule *rbac.PolicyRule) (simpleResource, bool) { } // Test if this rule only contains APIGroups/Resources/Verbs/ResourceNames - simpleRule := &rbac.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames} + simpleRule := &rbacv1.PolicyRule{APIGroups: rule.APIGroups, Resources: rule.Resources, Verbs: rule.Verbs, ResourceNames: rule.ResourceNames} if !reflect.DeepEqual(simpleRule, rule) { return resource, false } diff --git a/pkg/registry/rbac/validation/policy_compact_test.go b/pkg/registry/rbac/validation/policy_compact_test.go index 21492613219..4444657be7a 100644 --- a/pkg/registry/rbac/validation/policy_compact_test.go +++ b/pkg/registry/rbac/validation/policy_compact_test.go @@ -21,20 +21,21 @@ import ( "sort" "testing" - "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1 "k8s.io/api/rbac/v1" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" ) func TestCompactRules(t *testing.T) { testcases := map[string]struct { - Rules []rbac.PolicyRule - Expected []rbac.PolicyRule + Rules []rbacv1.PolicyRule + Expected []rbacv1.PolicyRule }{ "empty": { - Rules: []rbac.PolicyRule{}, - Expected: []rbac.PolicyRule{}, + Rules: []rbacv1.PolicyRule{}, + Expected: []rbacv1.PolicyRule{}, }, "simple": { - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}}, {Verbs: []string{"list"}, APIGroups: []string{""}, Resources: []string{"builds"}}, {Verbs: []string{"update", "patch"}, APIGroups: []string{""}, Resources: []string{"builds"}}, @@ -55,7 +56,7 @@ func TestCompactRules(t *testing.T) { {Verbs: nil, APIGroups: []string{""}, Resources: []string{"pods"}}, {Verbs: []string{"create"}, APIGroups: []string{""}, Resources: []string{"pods"}}, }, - Expected: []rbac.PolicyRule{ + Expected: []rbacv1.PolicyRule{ {Verbs: []string{"create", "delete"}, APIGroups: []string{"extensions"}, Resources: []string{"daemonsets"}}, {Verbs: []string{"patch"}, APIGroups: []string{"extensions"}, Resources: []string{"daemonsets"}, ResourceNames: []string{""}}, {Verbs: []string{"get", "list"}, APIGroups: []string{"extensions"}, Resources: []string{"daemonsets"}, ResourceNames: []string{"foo"}}, @@ -66,44 +67,44 @@ func TestCompactRules(t *testing.T) { }, }, "complex multi-group": { - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{"", "builds.openshift.io"}, Resources: 
[]string{"builds"}}, {Verbs: []string{"list"}, APIGroups: []string{"", "builds.openshift.io"}, Resources: []string{"builds"}}, }, - Expected: []rbac.PolicyRule{ + Expected: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{"", "builds.openshift.io"}, Resources: []string{"builds"}}, {Verbs: []string{"list"}, APIGroups: []string{"", "builds.openshift.io"}, Resources: []string{"builds"}}, }, }, "complex multi-resource": { - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds", "images"}}, {Verbs: []string{"list"}, APIGroups: []string{""}, Resources: []string{"builds", "images"}}, }, - Expected: []rbac.PolicyRule{ + Expected: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds", "images"}}, {Verbs: []string{"list"}, APIGroups: []string{""}, Resources: []string{"builds", "images"}}, }, }, "complex named-resource": { - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{"mybuild"}}, {Verbs: []string{"list"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{"mybuild2"}}, }, - Expected: []rbac.PolicyRule{ + Expected: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{"mybuild"}}, {Verbs: []string{"list"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{"mybuild2"}}, }, }, "complex non-resource": { - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, NonResourceURLs: []string{"/"}}, {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, NonResourceURLs: []string{"/foo"}}, }, - Expected: []rbac.PolicyRule{ + Expected: []rbacv1.PolicyRule{ {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, NonResourceURLs: []string{"/"}}, {Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, NonResourceURLs: []string{"/foo"}}, }, @@ -112,7 +113,7 @@ func TestCompactRules(t *testing.T) { for k, tc := range testcases { rules := tc.Rules - originalRules := make([]rbac.PolicyRule, len(tc.Rules)) + originalRules := make([]rbacv1.PolicyRule, len(tc.Rules)) for i := range tc.Rules { originalRules[i] = *tc.Rules[i].DeepCopy() } @@ -134,8 +135,8 @@ func TestCompactRules(t *testing.T) { continue } - sort.Stable(rbac.SortableRuleSlice(compacted)) - sort.Stable(rbac.SortableRuleSlice(tc.Expected)) + sort.Stable(rbacv1helpers.SortableRuleSlice(compacted)) + sort.Stable(rbacv1helpers.SortableRuleSlice(tc.Expected)) if !reflect.DeepEqual(compacted, tc.Expected) { t.Errorf("%s: Expected\n%#v\ngot\n%#v", k, tc.Expected, compacted) continue @@ -145,68 +146,68 @@ func TestCompactRules(t *testing.T) { func TestIsSimpleResourceRule(t *testing.T) { testcases := map[string]struct { - Rule rbac.PolicyRule + Rule rbacv1.PolicyRule Simple bool Resource simpleResource }{ "simple, no verbs": { - Rule: rbac.PolicyRule{Verbs: []string{}, APIGroups: []string{""}, Resources: []string{"builds"}}, + Rule: rbacv1.PolicyRule{Verbs: []string{}, APIGroups: []string{""}, Resources: []string{"builds"}}, Simple: true, Resource: simpleResource{Group: "", Resource: "builds"}, }, "simple, one verb": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}}, + 
Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}}, Simple: true, Resource: simpleResource{Group: "", Resource: "builds"}, }, "simple, one empty resource name": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{""}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{""}}, Simple: true, Resource: simpleResource{Group: "", Resource: "builds", ResourceNameExist: true, ResourceName: ""}, }, "simple, one resource name": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{"foo"}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{"foo"}}, Simple: true, Resource: simpleResource{Group: "", Resource: "builds", ResourceNameExist: true, ResourceName: "foo"}, }, "simple, multi verb": { - Rule: rbac.PolicyRule{Verbs: []string{"get", "list"}, APIGroups: []string{""}, Resources: []string{"builds"}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get", "list"}, APIGroups: []string{""}, Resources: []string{"builds"}}, Simple: true, Resource: simpleResource{Group: "", Resource: "builds"}, }, "complex, empty": { - Rule: rbac.PolicyRule{}, + Rule: rbacv1.PolicyRule{}, Simple: false, Resource: simpleResource{}, }, "complex, no group": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{}, Resources: []string{"builds"}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{}, Resources: []string{"builds"}}, Simple: false, Resource: simpleResource{}, }, "complex, multi group": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{"a", "b"}, Resources: []string{"builds"}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{"a", "b"}, Resources: []string{"builds"}}, Simple: false, Resource: simpleResource{}, }, "complex, no resource": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{}}, Simple: false, Resource: simpleResource{}, }, "complex, multi resource": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds", "images"}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds", "images"}}, Simple: false, Resource: simpleResource{}, }, "complex, resource names": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{"foo", "bar"}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, ResourceNames: []string{"foo", "bar"}}, Simple: false, Resource: simpleResource{}, }, "complex, non-resource urls": { - Rule: rbac.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, NonResourceURLs: []string{"/"}}, + Rule: rbacv1.PolicyRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"builds"}, NonResourceURLs: []string{"/"}}, Simple: false, Resource: simpleResource{}, }, diff --git a/pkg/registry/rbac/validation/policy_comparator.go b/pkg/registry/rbac/validation/policy_comparator.go index 4b2ba515814..7a0268b5e9e 100644 --- a/pkg/registry/rbac/validation/policy_comparator.go 
+++ b/pkg/registry/rbac/validation/policy_comparator.go @@ -19,23 +19,23 @@ package validation import ( "strings" - "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1 "k8s.io/api/rbac/v1" ) // Covers determines whether or not the ownerRules cover the servantRules in terms of allowed actions. // It returns whether or not the ownerRules cover and a list of the rules that the ownerRules do not cover. -func Covers(ownerRules, servantRules []rbac.PolicyRule) (bool, []rbac.PolicyRule) { +func Covers(ownerRules, servantRules []rbacv1.PolicyRule) (bool, []rbacv1.PolicyRule) { // 1. Break every servantRule into individual rule tuples: group, verb, resource, resourceName // 2. Compare the mini-rules against each owner rule. Because the breakdown is down to the most atomic level, we're guaranteed that each mini-servant rule will be either fully covered or not covered by a single owner rule // 3. Any left over mini-rules means that we are not covered and we have a nice list of them. // TODO: it might be nice to collapse the list down into something more human readable - subrules := []rbac.PolicyRule{} + subrules := []rbacv1.PolicyRule{} for _, servantRule := range servantRules { subrules = append(subrules, BreakdownRule(servantRule)...) } - uncoveredRules := []rbac.PolicyRule{} + uncoveredRules := []rbacv1.PolicyRule{} for _, subrule := range subrules { covered := false for _, ownerRule := range ownerRules { @@ -55,18 +55,18 @@ func Covers(ownerRules, servantRules []rbac.PolicyRule) (bool, []rbac.PolicyRule // BreakdownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one // resource, and one resource name -func BreakdownRule(rule rbac.PolicyRule) []rbac.PolicyRule { - subrules := []rbac.PolicyRule{} +func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule { + subrules := []rbacv1.PolicyRule{} for _, group := range rule.APIGroups { for _, resource := range rule.Resources { for _, verb := range rule.Verbs { if len(rule.ResourceNames) > 0 { for _, resourceName := range rule.ResourceNames { - subrules = append(subrules, rbac.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) } } else { - subrules = append(subrules, rbac.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) } } @@ -76,7 +76,7 @@ func BreakdownRule(rule rbac.PolicyRule) []rbac.PolicyRule { // Non-resource URLs are unique because they only combine with verbs.
for _, nonResourceURL := range rule.NonResourceURLs { for _, verb := range rule.Verbs { - subrules = append(subrules, rbac.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) + subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) } } @@ -107,7 +107,7 @@ func hasAll(set, contains []string) bool { func resourceCoversAll(setResources, coversResources []string) bool { // if we have a star or an exact match on all resources, then we match - if has(setResources, rbac.ResourceAll) || hasAll(setResources, coversResources) { + if has(setResources, rbacv1.ResourceAll) || hasAll(setResources, coversResources) { return true } @@ -155,9 +155,9 @@ func nonResourceURLCovers(ownerPath, subPath string) bool { // ruleCovers determines whether the ownerRule (which may have multiple verbs, resources, and resourceNames) covers // the subrule (which may only contain at most one verb, resource, and resourceName) -func ruleCovers(ownerRule, subRule rbac.PolicyRule) bool { - verbMatches := has(ownerRule.Verbs, rbac.VerbAll) || hasAll(ownerRule.Verbs, subRule.Verbs) - groupMatches := has(ownerRule.APIGroups, rbac.APIGroupAll) || hasAll(ownerRule.APIGroups, subRule.APIGroups) +func ruleCovers(ownerRule, subRule rbacv1.PolicyRule) bool { + verbMatches := has(ownerRule.Verbs, rbacv1.VerbAll) || hasAll(ownerRule.Verbs, subRule.Verbs) + groupMatches := has(ownerRule.APIGroups, rbacv1.APIGroupAll) || hasAll(ownerRule.APIGroups, subRule.APIGroups) resourceMatches := resourceCoversAll(ownerRule.Resources, subRule.Resources) nonResourceURLMatches := nonResourceURLsCoversAll(ownerRule.NonResourceURLs, subRule.NonResourceURLs) diff --git a/pkg/registry/rbac/validation/policy_comparator_test.go b/pkg/registry/rbac/validation/policy_comparator_test.go index b8b947f72ea..e983c2abcfa 100644 --- a/pkg/registry/rbac/validation/policy_comparator_test.go +++ b/pkg/registry/rbac/validation/policy_comparator_test.go @@ -20,65 +20,65 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1 "k8s.io/api/rbac/v1" ) type escalationTest struct { - ownerRules []rbac.PolicyRule - servantRules []rbac.PolicyRule + ownerRules []rbacv1.PolicyRule + servantRules []rbacv1.PolicyRule expectedCovered bool - expectedUncoveredRules []rbac.PolicyRule + expectedUncoveredRules []rbacv1.PolicyRule } func TestCoversExactMatch(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversSubresourceWildcard(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*/scale"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"foo/scale"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversMultipleRulesCoveringSingleRule(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ 
{APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, {APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"v1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversMultipleAPIGroupsCoveringSingleRule(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"group1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, @@ -86,22 +86,22 @@ func TestCoversMultipleAPIGroupsCoveringSingleRule(t *testing.T) { {APIGroups: []string{"group2"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"group2"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1", "group2"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversSingleAPIGroupsCoveringMultiple(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1", "group2"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"deployments"}}, {APIGroups: []string{"group1"}, Verbs: []string{"delete"}, Resources: []string{"builds"}}, {APIGroups: []string{"group1"}, Verbs: []string{"update"}, Resources: []string{"builds", "deployments"}}, @@ -111,23 +111,23 @@ func TestCoversSingleAPIGroupsCoveringMultiple(t *testing.T) { }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversMultipleRulesMissingSingleVerbResourceCombination(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments"}}, {APIGroups: []string{"v1"}, Verbs: []string{"delete"}, Resources: []string{"pods"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"delete", "update"}, Resources: []string{"builds", "deployments", "pods"}}, }, expectedCovered: false, - expectedUncoveredRules: []rbac.PolicyRule{ + expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"update"}, Resources: []string{"pods"}}, }, }.test(t) @@ -135,29 +135,29 @@ func TestCoversMultipleRulesMissingSingleVerbResourceCombination(t *testing.T) { func TestCoversAPIGroupStarCoveringMultiple(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ 
{APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"group1", "group2"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversEnumerationNotCoveringAPIGroupStar(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"dummy-group"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, expectedCovered: false, - expectedUncoveredRules: []rbac.PolicyRule{ + expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, }.test(t) @@ -165,43 +165,43 @@ func TestCoversEnumerationNotCoveringAPIGroupStar(t *testing.T) { func TestCoversAPIGroupStarCoveringStar(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"*"}, Verbs: []string{"get"}, Resources: []string{"roles"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversVerbStarCoveringMultiple(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"watch", "list"}, Resources: []string{"roles"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversEnumerationNotCoveringVerbStar(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get", "list", "watch", "create", "update", "delete", "exec"}, Resources: []string{"roles"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, expectedCovered: false, - expectedUncoveredRules: []rbac.PolicyRule{ + expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, }.test(t) @@ -209,43 +209,43 @@ func TestCoversEnumerationNotCoveringVerbStar(t *testing.T) { func TestCoversVerbStarCoveringStar(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"*"}, Resources: []string{"roles"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversResourceStarCoveringMultiple(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: 
[]string{"get"}, Resources: []string{"*"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"resourcegroup:deployments"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversEnumerationNotCoveringResourceStar(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"roles", "resourcegroup:deployments"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, expectedCovered: false, - expectedUncoveredRules: []rbac.PolicyRule{ + expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, }.test(t) @@ -253,43 +253,43 @@ func TestCoversEnumerationNotCoveringResourceStar(t *testing.T) { func TestCoversResourceStarCoveringStar(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"*"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversResourceNameEmptyCoveringMultiple(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{"foo", "bar"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversEnumerationNotCoveringResourceNameEmpty(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{"foo", "bar"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}, ResourceNames: []string{}}, }, expectedCovered: false, - expectedUncoveredRules: []rbac.PolicyRule{ + expectedUncoveredRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"pods"}}, }, }.test(t) @@ -297,43 +297,43 @@ func TestCoversEnumerationNotCoveringResourceNameEmpty(t *testing.T) { func TestCoversNonResourceURLs(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis"}, Verbs: []string{"*"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis"}, Verbs: []string{"*"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversNonResourceURLsStar(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {NonResourceURLs: 
[]string{"*"}, Verbs: []string{"*"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis", "/apis/v1", "/"}, Verbs: []string{"*"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversNonResourceURLsStarAfterPrefixDoesntCover(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis/*"}, Verbs: []string{"*"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis", "/apis/v1"}, Verbs: []string{"get"}}, }, expectedCovered: false, - expectedUncoveredRules: []rbac.PolicyRule{ + expectedUncoveredRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis"}, Verbs: []string{"get"}}, }, }.test(t) @@ -341,43 +341,43 @@ func TestCoversNonResourceURLsStarAfterPrefixDoesntCover(t *testing.T) { func TestCoversNonResourceURLsStarAfterPrefix(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis/*"}, Verbs: []string{"*"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {NonResourceURLs: []string{"/apis/v1/foo", "/apis/v1"}, Verbs: []string{"get"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversNonResourceURLsWithOtherFields(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, }, expectedCovered: true, - expectedUncoveredRules: []rbac.PolicyRule{}, + expectedUncoveredRules: []rbacv1.PolicyRule{}, }.test(t) } func TestCoversNonResourceURLsWithOtherFieldsFailure(t *testing.T) { escalationTest{ - ownerRules: []rbac.PolicyRule{ + ownerRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}}, }, - servantRules: []rbac.PolicyRule{ + servantRules: []rbacv1.PolicyRule{ {APIGroups: []string{"v1"}, Verbs: []string{"get"}, Resources: []string{"builds"}, NonResourceURLs: []string{"/apis"}}, }, expectedCovered: false, - expectedUncoveredRules: []rbac.PolicyRule{{NonResourceURLs: []string{"/apis"}, Verbs: []string{"get"}}}, + expectedUncoveredRules: []rbacv1.PolicyRule{{NonResourceURLs: []string{"/apis"}, Verbs: []string{"get"}}}, }.test(t) } @@ -393,7 +393,7 @@ func (test escalationTest) test(t *testing.T) { } } -func rulesMatch(expectedRules, actualRules []rbac.PolicyRule) bool { +func rulesMatch(expectedRules, actualRules []rbacv1.PolicyRule) bool { if len(expectedRules) != len(actualRules) { return false } diff --git a/pkg/registry/rbac/validation/rule.go b/pkg/registry/rbac/validation/rule.go index e101470ce43..366a9a97b12 100644 --- a/pkg/registry/rbac/validation/rule.go +++ b/pkg/registry/rbac/validation/rule.go @@ -23,31 +23,31 @@ import ( "github.com/golang/glog" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" genericapirequest 
"k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/kubernetes/pkg/apis/rbac" ) type AuthorizationRuleResolver interface { // GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namepsace // of the role binding, the empty string if a cluster role binding. - GetRoleReferenceRules(roleRef rbac.RoleRef, namespace string) ([]rbac.PolicyRule, error) + GetRoleReferenceRules(roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error) // RulesFor returns the list of rules that apply to a given user in a given namespace and error. If an error is returned, the slice of // PolicyRules may not be complete, but it contains all retrievable rules. This is done because policy rules are purely additive and policy determinations // can be made on the basis of those rules that are found. - RulesFor(user user.Info, namespace string) ([]rbac.PolicyRule, error) + RulesFor(user user.Info, namespace string) ([]rbacv1.PolicyRule, error) // VisitRulesFor invokes visitor() with each rule that applies to a given user in a given namespace, and each error encountered resolving those rules. // If visitor() returns false, visiting is short-circuited. - VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool) + VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool) } // ConfirmNoEscalation determines if the roles for a given user in a given namespace encompass the provided role. -func ConfirmNoEscalation(ctx context.Context, ruleResolver AuthorizationRuleResolver, rules []rbac.PolicyRule) error { +func ConfirmNoEscalation(ctx context.Context, ruleResolver AuthorizationRuleResolver, rules []rbacv1.PolicyRule) error { ruleResolutionErrors := []error{} user, ok := genericapirequest.UserFrom(ctx) @@ -82,33 +82,33 @@ func NewDefaultRuleResolver(roleGetter RoleGetter, roleBindingLister RoleBinding } type RoleGetter interface { - GetRole(namespace, name string) (*rbac.Role, error) + GetRole(namespace, name string) (*rbacv1.Role, error) } type RoleBindingLister interface { - ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) + ListRoleBindings(namespace string) ([]*rbacv1.RoleBinding, error) } type ClusterRoleGetter interface { - GetClusterRole(name string) (*rbac.ClusterRole, error) + GetClusterRole(name string) (*rbacv1.ClusterRole, error) } type ClusterRoleBindingLister interface { - ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error) + ListClusterRoleBindings() ([]*rbacv1.ClusterRoleBinding, error) } -func (r *DefaultRuleResolver) RulesFor(user user.Info, namespace string) ([]rbac.PolicyRule, error) { +func (r *DefaultRuleResolver) RulesFor(user user.Info, namespace string) ([]rbacv1.PolicyRule, error) { visitor := &ruleAccumulator{} r.VisitRulesFor(user, namespace, visitor.visit) return visitor.rules, utilerrors.NewAggregate(visitor.errors) } type ruleAccumulator struct { - rules []rbac.PolicyRule + rules []rbacv1.PolicyRule errors []error } -func (r *ruleAccumulator) visit(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool { +func (r *ruleAccumulator) visit(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool { if rule != nil { r.rules = append(r.rules, *rule) } @@ -118,9 +118,9 @@ func (r *ruleAccumulator) visit(source fmt.Stringer, rule *rbac.PolicyRule, err return true } -func describeSubject(s *rbac.Subject, bindingNamespace string) string { +func 
describeSubject(s *rbacv1.Subject, bindingNamespace string) string { switch s.Kind { - case rbac.ServiceAccountKind: + case rbacv1.ServiceAccountKind: if len(s.Namespace) > 0 { return fmt.Sprintf("%s %q", s.Kind, s.Name+"/"+s.Namespace) } @@ -131,8 +131,8 @@ func describeSubject(s *rbac.Subject, bindingNamespace string) string { } type clusterRoleBindingDescriber struct { - binding *rbac.ClusterRoleBinding - subject *rbac.Subject + binding *rbacv1.ClusterRoleBinding + subject *rbacv1.Subject } func (d *clusterRoleBindingDescriber) String() string { @@ -145,8 +145,8 @@ func (d *clusterRoleBindingDescriber) String() string { } type roleBindingDescriber struct { - binding *rbac.RoleBinding - subject *rbac.Subject + binding *rbacv1.RoleBinding + subject *rbacv1.Subject } func (d *roleBindingDescriber) String() string { @@ -158,7 +158,7 @@ func (d *roleBindingDescriber) String() string { ) } -func (r *DefaultRuleResolver) VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool) { +func (r *DefaultRuleResolver) VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool) { if clusterRoleBindings, err := r.clusterRoleBindingLister.ListClusterRoleBindings(); err != nil { if !visitor(nil, nil, err) { return @@ -219,16 +219,16 @@ func (r *DefaultRuleResolver) VisitRulesFor(user user.Info, namespace string, vi } // GetRoleReferenceRules attempts to resolve the RoleBinding or ClusterRoleBinding. -func (r *DefaultRuleResolver) GetRoleReferenceRules(roleRef rbac.RoleRef, bindingNamespace string) ([]rbac.PolicyRule, error) { - switch kind := rbac.RoleRefGroupKind(roleRef); kind { - case rbac.Kind("Role"): +func (r *DefaultRuleResolver) GetRoleReferenceRules(roleRef rbacv1.RoleRef, bindingNamespace string) ([]rbacv1.PolicyRule, error) { + switch roleRef.Kind { + case "Role": role, err := r.roleGetter.GetRole(bindingNamespace, roleRef.Name) if err != nil { return nil, err } return role.Rules, nil - case rbac.Kind("ClusterRole"): + case "ClusterRole": clusterRole, err := r.clusterRoleGetter.GetClusterRole(roleRef.Name) if err != nil { return nil, err @@ -236,13 +236,13 @@ func (r *DefaultRuleResolver) GetRoleReferenceRules(roleRef rbac.RoleRef, bindin return clusterRole.Rules, nil default: - return nil, fmt.Errorf("unsupported role reference kind: %q", kind) + return nil, fmt.Errorf("unsupported role reference kind: %q", roleRef.Kind) } } // appliesTo returns whether any of the bindingSubjects applies to the specified subject, // and if true, the index of the first subject that applies -func appliesTo(user user.Info, bindingSubjects []rbac.Subject, namespace string) (int, bool) { +func appliesTo(user user.Info, bindingSubjects []rbacv1.Subject, namespace string) (int, bool) { for i, bindingSubject := range bindingSubjects { if appliesToUser(user, bindingSubject, namespace) { return i, true @@ -251,15 +251,15 @@ func appliesTo(user user.Info, bindingSubjects []rbac.Subject, namespace string) return 0, false } -func appliesToUser(user user.Info, subject rbac.Subject, namespace string) bool { +func appliesToUser(user user.Info, subject rbacv1.Subject, namespace string) bool { switch subject.Kind { - case rbac.UserKind: + case rbacv1.UserKind: return user.GetName() == subject.Name - case rbac.GroupKind: + case rbacv1.GroupKind: return has(user.GetGroups(), subject.Name) - case rbac.ServiceAccountKind: + case rbacv1.ServiceAccountKind: // default the namespace to namespace we're working 
in if its available. This allows rolebindings that reference // SAs in th local namespace to avoid having to qualify them. saNamespace := namespace @@ -276,7 +276,7 @@ func appliesToUser(user user.Info, subject rbac.Subject, namespace string) bool } // NewTestRuleResolver returns a rule resolver from lists of role objects. -func NewTestRuleResolver(roles []*rbac.Role, roleBindings []*rbac.RoleBinding, clusterRoles []*rbac.ClusterRole, clusterRoleBindings []*rbac.ClusterRoleBinding) (AuthorizationRuleResolver, *StaticRoles) { +func NewTestRuleResolver(roles []*rbacv1.Role, roleBindings []*rbacv1.RoleBinding, clusterRoles []*rbacv1.ClusterRole, clusterRoleBindings []*rbacv1.ClusterRoleBinding) (AuthorizationRuleResolver, *StaticRoles) { r := StaticRoles{ roles: roles, roleBindings: roleBindings, @@ -292,13 +292,13 @@ func newMockRuleResolver(r *StaticRoles) AuthorizationRuleResolver { // StaticRoles is a rule resolver that resolves from lists of role objects. type StaticRoles struct { - roles []*rbac.Role - roleBindings []*rbac.RoleBinding - clusterRoles []*rbac.ClusterRole - clusterRoleBindings []*rbac.ClusterRoleBinding + roles []*rbacv1.Role + roleBindings []*rbacv1.RoleBinding + clusterRoles []*rbacv1.ClusterRole + clusterRoleBindings []*rbacv1.ClusterRoleBinding } -func (r *StaticRoles) GetRole(namespace, name string) (*rbac.Role, error) { +func (r *StaticRoles) GetRole(namespace, name string) (*rbacv1.Role, error) { if len(namespace) == 0 { return nil, errors.New("must provide namespace when getting role") } @@ -310,7 +310,7 @@ func (r *StaticRoles) GetRole(namespace, name string) (*rbac.Role, error) { return nil, errors.New("role not found") } -func (r *StaticRoles) GetClusterRole(name string) (*rbac.ClusterRole, error) { +func (r *StaticRoles) GetClusterRole(name string) (*rbacv1.ClusterRole, error) { for _, clusterRole := range r.clusterRoles { if clusterRole.Name == name { return clusterRole, nil @@ -319,12 +319,12 @@ func (r *StaticRoles) GetClusterRole(name string) (*rbac.ClusterRole, error) { return nil, errors.New("clusterrole not found") } -func (r *StaticRoles) ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) { +func (r *StaticRoles) ListRoleBindings(namespace string) ([]*rbacv1.RoleBinding, error) { if len(namespace) == 0 { return nil, errors.New("must provide namespace when listing role bindings") } - roleBindingList := []*rbac.RoleBinding{} + roleBindingList := []*rbacv1.RoleBinding{} for _, roleBinding := range r.roleBindings { if roleBinding.Namespace != namespace { continue @@ -335,6 +335,6 @@ func (r *StaticRoles) ListRoleBindings(namespace string) ([]*rbac.RoleBinding, e return roleBindingList, nil } -func (r *StaticRoles) ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error) { +func (r *StaticRoles) ListClusterRoleBindings() ([]*rbacv1.ClusterRoleBinding, error) { return r.clusterRoleBindings, nil } diff --git a/pkg/registry/rbac/validation/rule_test.go b/pkg/registry/rbac/validation/rule_test.go index 1a176126bd5..b892f501a36 100644 --- a/pkg/registry/rbac/validation/rule_test.go +++ b/pkg/registry/rbac/validation/rule_test.go @@ -23,14 +23,14 @@ import ( "sort" "testing" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/kubernetes/pkg/apis/rbac" ) // compute a hash of a policy rule so we can sort in a deterministic order -func hashOf(p rbac.PolicyRule) string { +func hashOf(p rbacv1.PolicyRule) string { hash := fnv.New32() 
writeStrings := func(slis ...[]string) { for _, sli := range slis { @@ -44,68 +44,68 @@ func hashOf(p rbac.PolicyRule) string { } // byHash sorts a set of policy rules by a hash of its fields -type byHash []rbac.PolicyRule +type byHash []rbacv1.PolicyRule func (b byHash) Len() int { return len(b) } func (b byHash) Less(i, j int) bool { return hashOf(b[i]) < hashOf(b[j]) } func (b byHash) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func TestDefaultRuleResolver(t *testing.T) { - ruleReadPods := rbac.PolicyRule{ + ruleReadPods := rbacv1.PolicyRule{ Verbs: []string{"GET", "WATCH"}, APIGroups: []string{"v1"}, Resources: []string{"pods"}, } - ruleReadServices := rbac.PolicyRule{ + ruleReadServices := rbacv1.PolicyRule{ Verbs: []string{"GET", "WATCH"}, APIGroups: []string{"v1"}, Resources: []string{"services"}, } - ruleWriteNodes := rbac.PolicyRule{ + ruleWriteNodes := rbacv1.PolicyRule{ Verbs: []string{"PUT", "CREATE", "UPDATE"}, APIGroups: []string{"v1"}, Resources: []string{"nodes"}, } - ruleAdmin := rbac.PolicyRule{ + ruleAdmin := rbacv1.PolicyRule{ Verbs: []string{"*"}, APIGroups: []string{"*"}, Resources: []string{"*"}, } staticRoles1 := StaticRoles{ - roles: []*rbac.Role{ + roles: []*rbacv1.Role{ { ObjectMeta: metav1.ObjectMeta{Namespace: "namespace1", Name: "readthings"}, - Rules: []rbac.PolicyRule{ruleReadPods, ruleReadServices}, + Rules: []rbacv1.PolicyRule{ruleReadPods, ruleReadServices}, }, }, - clusterRoles: []*rbac.ClusterRole{ + clusterRoles: []*rbacv1.ClusterRole{ { ObjectMeta: metav1.ObjectMeta{Name: "cluster-admin"}, - Rules: []rbac.PolicyRule{ruleAdmin}, + Rules: []rbacv1.PolicyRule{ruleAdmin}, }, { ObjectMeta: metav1.ObjectMeta{Name: "write-nodes"}, - Rules: []rbac.PolicyRule{ruleWriteNodes}, + Rules: []rbacv1.PolicyRule{ruleWriteNodes}, }, }, - roleBindings: []*rbac.RoleBinding{ + roleBindings: []*rbacv1.RoleBinding{ { ObjectMeta: metav1.ObjectMeta{Namespace: "namespace1"}, - Subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "foobar"}, - {Kind: rbac.GroupKind, Name: "group1"}, + Subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "foobar"}, + {Kind: rbacv1.GroupKind, Name: "group1"}, }, - RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: "readthings"}, + RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "Role", Name: "readthings"}, }, }, - clusterRoleBindings: []*rbac.ClusterRoleBinding{ + clusterRoleBindings: []*rbacv1.ClusterRoleBinding{ { - Subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "admin"}, - {Kind: rbac.GroupKind, Name: "admin"}, + Subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "admin"}, + {Kind: rbacv1.GroupKind, Name: "admin"}, }, - RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: "cluster-admin"}, + RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: "cluster-admin"}, }, }, } @@ -116,13 +116,13 @@ func TestDefaultRuleResolver(t *testing.T) { // For a given context, what are the rules that apply? user user.Info namespace string - effectiveRules []rbac.PolicyRule + effectiveRules []rbacv1.PolicyRule }{ { StaticRoles: staticRoles1, user: &user.DefaultInfo{Name: "foobar"}, namespace: "namespace1", - effectiveRules: []rbac.PolicyRule{ruleReadPods, ruleReadServices}, + effectiveRules: []rbacv1.PolicyRule{ruleReadPods, ruleReadServices}, }, { StaticRoles: staticRoles1, @@ -134,7 +134,7 @@ func TestDefaultRuleResolver(t *testing.T) { StaticRoles: staticRoles1, // Same as above but without a namespace. Only cluster rules should apply. 
user: &user.DefaultInfo{Name: "foobar", Groups: []string{"admin"}}, - effectiveRules: []rbac.PolicyRule{ruleAdmin}, + effectiveRules: []rbacv1.PolicyRule{ruleAdmin}, }, { StaticRoles: staticRoles1, @@ -164,7 +164,7 @@ func TestDefaultRuleResolver(t *testing.T) { func TestAppliesTo(t *testing.T) { tests := []struct { - subjects []rbac.Subject + subjects []rbacv1.Subject user user.Info namespace string appliesTo bool @@ -172,8 +172,8 @@ func TestAppliesTo(t *testing.T) { testCase string }{ { - subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "foobar"}, + subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "foobar"}, }, user: &user.DefaultInfo{Name: "foobar"}, appliesTo: true, @@ -181,9 +181,9 @@ func TestAppliesTo(t *testing.T) { testCase: "single subject that matches username", }, { - subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "barfoo"}, - {Kind: rbac.UserKind, Name: "foobar"}, + subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "barfoo"}, + {Kind: rbacv1.UserKind, Name: "foobar"}, }, user: &user.DefaultInfo{Name: "foobar"}, appliesTo: true, @@ -191,18 +191,18 @@ func TestAppliesTo(t *testing.T) { testCase: "multiple subjects, one that matches username", }, { - subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "barfoo"}, - {Kind: rbac.UserKind, Name: "foobar"}, + subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "barfoo"}, + {Kind: rbacv1.UserKind, Name: "foobar"}, }, user: &user.DefaultInfo{Name: "zimzam"}, appliesTo: false, testCase: "multiple subjects, none that match username", }, { - subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "barfoo"}, - {Kind: rbac.GroupKind, Name: "foobar"}, + subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "barfoo"}, + {Kind: rbacv1.GroupKind, Name: "foobar"}, }, user: &user.DefaultInfo{Name: "zimzam", Groups: []string{"foobar"}}, appliesTo: true, @@ -210,9 +210,9 @@ func TestAppliesTo(t *testing.T) { testCase: "multiple subjects, one that match group", }, { - subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "barfoo"}, - {Kind: rbac.GroupKind, Name: "foobar"}, + subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "barfoo"}, + {Kind: rbacv1.GroupKind, Name: "foobar"}, }, user: &user.DefaultInfo{Name: "zimzam", Groups: []string{"foobar"}}, namespace: "namespace1", @@ -221,10 +221,10 @@ func TestAppliesTo(t *testing.T) { testCase: "multiple subjects, one that match group, should ignore namespace", }, { - subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "barfoo"}, - {Kind: rbac.GroupKind, Name: "foobar"}, - {Kind: rbac.ServiceAccountKind, Namespace: "kube-system", Name: "default"}, + subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "barfoo"}, + {Kind: rbacv1.GroupKind, Name: "foobar"}, + {Kind: rbacv1.ServiceAccountKind, Namespace: "kube-system", Name: "default"}, }, user: &user.DefaultInfo{Name: "system:serviceaccount:kube-system:default"}, namespace: "default", @@ -233,8 +233,8 @@ func TestAppliesTo(t *testing.T) { testCase: "multiple subjects with a service account that matches", }, { - subjects: []rbac.Subject{ - {Kind: rbac.UserKind, Name: "*"}, + subjects: []rbacv1.Subject{ + {Kind: rbacv1.UserKind, Name: "*"}, }, user: &user.DefaultInfo{Name: "foobar"}, namespace: "default", @@ -242,9 +242,9 @@ func TestAppliesTo(t *testing.T) { testCase: "* user subject name doesn't match all users", }, { - subjects: []rbac.Subject{ - {Kind: rbac.GroupKind, Name: user.AllAuthenticated}, - {Kind: rbac.GroupKind, Name: user.AllUnauthenticated}, + subjects: 
[]rbacv1.Subject{ + {Kind: rbacv1.GroupKind, Name: user.AllAuthenticated}, + {Kind: rbacv1.GroupKind, Name: user.AllUnauthenticated}, }, user: &user.DefaultInfo{Name: "foobar", Groups: []string{user.AllAuthenticated}}, namespace: "default", @@ -253,9 +253,9 @@ func TestAppliesTo(t *testing.T) { testCase: "binding to all authenticated and unauthenticated subjects matches authenticated user", }, { - subjects: []rbac.Subject{ - {Kind: rbac.GroupKind, Name: user.AllAuthenticated}, - {Kind: rbac.GroupKind, Name: user.AllUnauthenticated}, + subjects: []rbacv1.Subject{ + {Kind: rbacv1.GroupKind, Name: user.AllAuthenticated}, + {Kind: rbacv1.GroupKind, Name: user.AllUnauthenticated}, }, user: &user.DefaultInfo{Name: "system:anonymous", Groups: []string{user.AllUnauthenticated}}, namespace: "default", diff --git a/plugin/pkg/auth/authorizer/node/BUILD b/plugin/pkg/auth/authorizer/node/BUILD index 8618c563482..5d73814efc1 100644 --- a/plugin/pkg/auth/authorizer/node/BUILD +++ b/plugin/pkg/auth/authorizer/node/BUILD @@ -42,7 +42,6 @@ go_library( "//pkg/api/persistentvolume:go_default_library", "//pkg/api/pod:go_default_library", "//pkg/apis/core:go_default_library", - "//pkg/apis/rbac:go_default_library", "//pkg/apis/storage:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", "//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library", @@ -52,6 +51,7 @@ go_library( "//third_party/forked/gonum/graph/simple:go_default_library", "//third_party/forked/gonum/graph/traverse:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/api/storage/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", diff --git a/plugin/pkg/auth/authorizer/node/node_authorizer.go b/plugin/pkg/auth/authorizer/node/node_authorizer.go index 0239ababd3e..371df74a8b6 100644 --- a/plugin/pkg/auth/authorizer/node/node_authorizer.go +++ b/plugin/pkg/auth/authorizer/node/node_authorizer.go @@ -21,11 +21,11 @@ import ( "github.com/golang/glog" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authorization/authorizer" utilfeature "k8s.io/apiserver/pkg/util/feature" api "k8s.io/kubernetes/pkg/apis/core" - rbacapi "k8s.io/kubernetes/pkg/apis/rbac" storageapi "k8s.io/kubernetes/pkg/apis/storage" "k8s.io/kubernetes/pkg/auth/nodeidentifier" "k8s.io/kubernetes/pkg/features" @@ -49,14 +49,14 @@ import ( type NodeAuthorizer struct { graph *Graph identifier nodeidentifier.NodeIdentifier - nodeRules []rbacapi.PolicyRule + nodeRules []rbacv1.PolicyRule // allows overriding for testing features utilfeature.FeatureGate } // NewAuthorizer returns a new node authorizer -func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacapi.PolicyRule) authorizer.Authorizer { +func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacv1.PolicyRule) authorizer.Authorizer { return &NodeAuthorizer{ graph: graph, identifier: identifier, diff --git a/plugin/pkg/auth/authorizer/rbac/BUILD b/plugin/pkg/auth/authorizer/rbac/BUILD index 258c860c5f1..cc7848ece89 100644 --- a/plugin/pkg/auth/authorizer/rbac/BUILD +++ b/plugin/pkg/auth/authorizer/rbac/BUILD @@ -14,14 +14,15 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac", deps = [ - "//pkg/apis/rbac:go_default_library", - 
"//pkg/client/listers/rbac/internalversion:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/registry/rbac/validation:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/client-go/listers/rbac/v1:go_default_library", ], ) @@ -33,9 +34,10 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/registry/rbac/validation:go_default_library", "//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD index 4c891015f9e..83b98d2211c 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD @@ -15,9 +15,10 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy", deps = [ - "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/features:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/rbac/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -37,8 +38,8 @@ go_test( "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/install:go_default_library", - "//pkg/apis/rbac:go_default_library", "//pkg/apis/rbac/install:go_default_library", + "//pkg/apis/rbac/v1:go_default_library", "//pkg/registry/rbac/validation:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index af7e16edcc1..5dfbaa6f23b 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -21,15 +21,16 @@ import ( "github.com/golang/glog" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" - rbac "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" "k8s.io/kubernetes/pkg/features" ) const saRolePrefix = "system:controller:" -func addControllerRole(controllerRoles *[]rbac.ClusterRole, controllerRoleBindings *[]rbac.ClusterRoleBinding, role rbac.ClusterRole) { +func addControllerRole(controllerRoles *[]rbacv1.ClusterRole, controllerRoleBindings *[]rbacv1.ClusterRoleBinding, role rbacv1.ClusterRole) { if !strings.HasPrefix(role.Name, saRolePrefix) { glog.Fatalf(`role %q must start with %q`, role.Name, saRolePrefix) } @@ -44,298 +45,298 @@ func 
addControllerRole(controllerRoles *[]rbac.ClusterRole, controllerRoleBindin addClusterRoleLabel(*controllerRoles) *controllerRoleBindings = append(*controllerRoleBindings, - rbac.NewClusterBinding(role.Name).SAs("kube-system", role.Name[len(saRolePrefix):]).BindingOrDie()) + rbacv1helpers.NewClusterBinding(role.Name).SAs("kube-system", role.Name[len(saRolePrefix):]).BindingOrDie()) addClusterRoleBindingLabel(*controllerRoleBindings) } -func eventsRule() rbac.PolicyRule { - return rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie() +func eventsRule() rbacv1.PolicyRule { + return rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie() } -func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) { +func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) { // controllerRoles is a slice of roles used for controllers - controllerRoles := []rbac.ClusterRole{} + controllerRoles := []rbacv1.ClusterRole{} // controllerRoleBindings is a slice of roles used for controllers - controllerRoleBindings := []rbac.ClusterRoleBinding{} + controllerRoleBindings := []rbacv1.ClusterRoleBinding{} - addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbac.ClusterRole { - role := rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole { + role := rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, } if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { - role.Rules = append(role.Rules, rbac.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()) + role.Rules = append(role.Rules, rbacv1helpers.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()) } return role }()) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "clusterrole-aggregation-controller"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // this controller must have full permissions to allow it to mutate any role in any way - rbac.NewRule("*").Groups("*").Resources("*").RuleOrDie(), - rbac.NewRule("*").URLs("*").RuleOrDie(), + rbacv1helpers.NewRule("*").Groups("*").Resources("*").RuleOrDie(), + rbacv1helpers.NewRule("*").URLs("*").RuleOrDie(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ 
+ addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cronjob-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "create", "update", "delete", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(), - rbac.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(), - rbac.NewRule("update").Groups(batchGroup).Resources("cronjobs/finalizers").RuleOrDie(), - rbac.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "delete", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/finalizers").RuleOrDie(), + rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "daemon-set-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("daemonsets").RuleOrDie(), - rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/status").RuleOrDie(), - rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/finalizers").RuleOrDie(), - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(), - rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "create", "delete", "update", "patch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("daemonsets").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/status").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/finalizers").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "create", "delete", "update", "patch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "deployment-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(), - 
rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/status").RuleOrDie(), - rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/finalizers").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/status").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/finalizers").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), // TODO: remove "update" once // https://github.com/kubernetes/kubernetes/issues/36897 is resolved. - rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "disruption-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(), - rbac.NewRule("update").Groups(policyGroup).Resources("poddisruptionbudgets/status").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(policyGroup).Resources("poddisruptionbudgets/status").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpoint-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(), - rbac.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), - rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", 
"watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(), eventsRule(), }, }) if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "expand-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), - rbac.NewRule("update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), + rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), // glusterfs - rbac.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(), - rbac.NewRule("get").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), - rbac.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(), eventsRule(), }, }) } - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // the GC controller needs to run list/watches, selective gets, and updates against any resource - rbac.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "horizontal-pod-autoscaler"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch").Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), - rbac.NewRule("update").Groups(autoscalingGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(), - rbac.NewRule("get", "update").Groups("*").Resources("*/scale").RuleOrDie(), - rbac.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch").Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), + 
rbacv1helpers.NewRule("update").Groups(autoscalingGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(), + rbacv1helpers.NewRule("get", "update").Groups("*").Resources("*/scale").RuleOrDie(), + rbacv1helpers.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(), // TODO: restrict this to the appropriate namespace - rbac.NewRule("get").Groups(legacyGroup).Resources("services/proxy").Names("https:heapster:", "http:heapster:").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services/proxy").Names("https:heapster:", "http:heapster:").RuleOrDie(), // allow listing resource metrics and custom metrics - rbac.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(), - rbac.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(), + rbacv1helpers.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "job-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("jobs").RuleOrDie(), - rbac.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(), - rbac.NewRule("update").Groups(batchGroup).Resources("jobs/finalizers").RuleOrDie(), - rbac.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("jobs").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("jobs/finalizers").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "namespace-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("namespaces").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("namespaces/finalize", "namespaces/status").RuleOrDie(), - rbac.NewRule("get", "list", "delete", "deletecollection").Groups("*").Resources("*").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("namespaces").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("namespaces/finalize", "namespaces/status").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "delete", "deletecollection").Groups("*").Resources("*").RuleOrDie(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "node-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), 
+ Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), // used for pod eviction - rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), - rbac.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), + rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "persistent-volume-binder"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes/status").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(), - rbac.NewRule("list", "watch", "get", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes/status").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "get", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), // glusterfs - rbac.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(), - rbac.NewRule("get", "create", "delete").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), - rbac.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(), + rbacv1helpers.NewRule("get", "create", "delete").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(), // openstack - rbac.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(), // recyclerClient.WatchPod - rbac.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(), + rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pod-garbage-collector"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), - 
rbac.NewRule("list").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("list").Groups(legacyGroup).Resources("nodes").RuleOrDie(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replicaset-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), - rbac.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/status").RuleOrDie(), - rbac.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/finalizers").RuleOrDie(), - rbac.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/status").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/finalizers").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replication-controller"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // 1.0 controllers needed get, update, so without these old controllers break on new servers - rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/status").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/finalizers").RuleOrDie(), - rbac.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/status").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/finalizers").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "resourcequota-controller"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // quota can count quota on anything for reconciliation, so it needs full viewing powers - rbac.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("resourcequotas/status").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(), + 
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("resourcequotas/status").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "route-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-account-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("services/status").RuleOrDie(), - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("services/status").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "statefulset-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(), - rbac.NewRule("update").Groups(appsGroup).Resources("statefulsets/status").RuleOrDie(), - rbac.NewRule("update").Groups(appsGroup).Resources("statefulsets/finalizers").RuleOrDie(), - rbac.NewRule("get", "create", "delete", "update", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(), - rbac.NewRule("get", "create", "delete", "update", "patch", "list", "watch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(), - rbac.NewRule("get", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(appsGroup).Resources("statefulsets/status").RuleOrDie(), + 
rbacv1helpers.NewRule("update").Groups(appsGroup).Resources("statefulsets/finalizers").RuleOrDie(), + rbacv1helpers.NewRule("get", "create", "delete", "update", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("get", "create", "delete", "update", "patch", "list", "watch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(), + rbacv1helpers.NewRule("get", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ttl-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("update", "patch", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("update", "patch", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "certificate-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "delete").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), - rbac.NewRule("update").Groups(certificatesGroup).Resources("certificatesigningrequests/status", "certificatesigningrequests/approval").RuleOrDie(), - rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(certificatesGroup).Resources("certificatesigningrequests/status", "certificatesigningrequests/approval").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pvc-protection-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), - rbac.NewRule("list", "watch", "get").Groups(legacyGroup).Resources("pods").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch", "get").Groups(legacyGroup).Resources("pods").RuleOrDie(), eventsRule(), }, }) - addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{ + addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pv-protection-controller"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), eventsRule(), }, }) @@ -344,13 +345,13 @@ func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) { } // 
ControllerRoles returns the cluster roles used by controllers -func ControllerRoles() []rbac.ClusterRole { +func ControllerRoles() []rbacv1.ClusterRole { controllerRoles, _ := buildControllerRoles() return controllerRoles } // ControllerRoleBindings returns the role bindings used by controllers -func ControllerRoleBindings() []rbac.ClusterRoleBinding { +func ControllerRoleBindings() []rbacv1.ClusterRoleBinding { _, controllerRoleBindings := buildControllerRoles() return controllerRoleBindings } diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go index 16590fffc84..fefe26d2ceb 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go @@ -21,19 +21,20 @@ import ( "github.com/golang/glog" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - rbac "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" ) var ( // namespaceRoles is a map of namespace to slice of roles to create - namespaceRoles = map[string][]rbac.Role{} + namespaceRoles = map[string][]rbacv1.Role{} // namespaceRoleBindings is a map of namespace to slice of roleBindings to create - namespaceRoleBindings = map[string][]rbac.RoleBinding{} + namespaceRoleBindings = map[string][]rbacv1.RoleBinding{} ) -func addNamespaceRole(namespace string, role rbac.Role) { +func addNamespaceRole(namespace string, role rbacv1.Role) { if !strings.HasPrefix(namespace, "kube-") { glog.Fatalf(`roles can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace) } @@ -51,7 +52,7 @@ func addNamespaceRole(namespace string, role rbac.Role) { namespaceRoles[namespace] = existingRoles } -func addNamespaceRoleBinding(namespace string, roleBinding rbac.RoleBinding) { +func addNamespaceRoleBinding(namespace string, roleBinding rbacv1.RoleBinding) { if !strings.HasPrefix(namespace, "kube-") { glog.Fatalf(`rolebindings can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace) } @@ -70,86 +71,86 @@ func addNamespaceRoleBinding(namespace string, roleBinding rbac.RoleBinding) { } func init() { - addNamespaceRole(metav1.NamespaceSystem, rbac.Role{ + addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{ // role for finding authentication config info for starting a server ObjectMeta: metav1.ObjectMeta{Name: "extension-apiserver-authentication-reader"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // this particular config map is exposed and contains authentication configuration information - rbac.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(), }, }) - addNamespaceRole(metav1.NamespaceSystem, rbac.Role{ + addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{ // role for the bootstrap signer to be able to inspect kube-system secrets ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "bootstrap-signer"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets").RuleOrDie(), }, }) - addNamespaceRole(metav1.NamespaceSystem, rbac.Role{ + 
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{ // role for the cloud providers to access/create kube-system configmaps // Deprecated starting Kubernetes 1.10 and will be deleted according to GA deprecation policy. ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cloud-provider"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), }, }) - addNamespaceRole(metav1.NamespaceSystem, rbac.Role{ + addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{ // role for the token-cleaner to be able to remove secrets, but only in kube-system ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "token-cleaner"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(), eventsRule(), }, }) // TODO: Create util on Role+Binding for leader locking if more cases evolve. - addNamespaceRole(metav1.NamespaceSystem, rbac.Role{ + addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{ // role for the leader locking on supplied configmap ObjectMeta: metav1.ObjectMeta{Name: "system::leader-locking-kube-controller-manager"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), - rbac.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-controller-manager").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), + rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-controller-manager").RuleOrDie(), }, }) - addNamespaceRole(metav1.NamespaceSystem, rbac.Role{ + addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{ // role for the leader locking on supplied configmap ObjectMeta: metav1.ObjectMeta{Name: "system::leader-locking-kube-scheduler"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), - rbac.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-scheduler").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), + rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-scheduler").RuleOrDie(), }, }) addNamespaceRoleBinding(metav1.NamespaceSystem, - rbac.NewRoleBinding("system::leader-locking-kube-controller-manager", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-controller-manager").BindingOrDie()) + rbacv1helpers.NewRoleBinding("system::leader-locking-kube-controller-manager", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-controller-manager").BindingOrDie()) addNamespaceRoleBinding(metav1.NamespaceSystem, - rbac.NewRoleBinding("system::leader-locking-kube-scheduler", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-scheduler").BindingOrDie()) + rbacv1helpers.NewRoleBinding("system::leader-locking-kube-scheduler", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-scheduler").BindingOrDie()) addNamespaceRoleBinding(metav1.NamespaceSystem, - rbac.NewRoleBinding(saRolePrefix+"bootstrap-signer", 
metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie()) + rbacv1helpers.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie()) // cloud-provider is deprecated starting Kubernetes 1.10 and will be deleted according to GA deprecation policy. addNamespaceRoleBinding(metav1.NamespaceSystem, - rbac.NewRoleBinding(saRolePrefix+"cloud-provider", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "cloud-provider").BindingOrDie()) + rbacv1helpers.NewRoleBinding(saRolePrefix+"cloud-provider", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "cloud-provider").BindingOrDie()) addNamespaceRoleBinding(metav1.NamespaceSystem, - rbac.NewRoleBinding(saRolePrefix+"token-cleaner", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "token-cleaner").BindingOrDie()) + rbacv1helpers.NewRoleBinding(saRolePrefix+"token-cleaner", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "token-cleaner").BindingOrDie()) - addNamespaceRole(metav1.NamespacePublic, rbac.Role{ + addNamespaceRole(metav1.NamespacePublic, rbacv1.Role{ // role for the bootstrap signer to be able to write its configmap ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "bootstrap-signer"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("configmaps").Names("cluster-info").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("configmaps").Names("cluster-info").RuleOrDie(), eventsRule(), }, }) addNamespaceRoleBinding(metav1.NamespacePublic, - rbac.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespacePublic).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie()) + rbacv1helpers.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespacePublic).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie()) } // NamespaceRoles returns a map of namespace to slice of roles to create -func NamespaceRoles() map[string][]rbac.Role { +func NamespaceRoles() map[string][]rbacv1.Role { return namespaceRoles } // NamespaceRoleBindings returns a map of namespace to slice of roles to create -func NamespaceRoleBindings() map[string][]rbac.RoleBinding { +func NamespaceRoleBindings() map[string][]rbacv1.RoleBinding { return namespaceRoleBindings } diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index b1f3c564b86..5aed8c6819f 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -17,12 +17,13 @@ limitations under the License. 
package bootstrappolicy import ( + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authentication/user" utilfeature "k8s.io/apiserver/pkg/util/feature" - rbac "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" "k8s.io/kubernetes/pkg/features" ) @@ -32,7 +33,7 @@ var ( ReadUpdate = []string{"get", "list", "watch", "update", "patch"} Label = map[string]string{"kubernetes.io/bootstrapping": "rbac-defaults"} - Annotation = map[string]string{rbac.AutoUpdateAnnotationKey: "true"} + Annotation = map[string]string{rbacv1.AutoUpdateAnnotationKey: "true"} ) const ( @@ -78,105 +79,105 @@ func addDefaultMetadata(obj runtime.Object) { metadata.SetAnnotations(annotations) } -func addClusterRoleLabel(roles []rbac.ClusterRole) { +func addClusterRoleLabel(roles []rbacv1.ClusterRole) { for i := range roles { addDefaultMetadata(&roles[i]) } return } -func addClusterRoleBindingLabel(rolebindings []rbac.ClusterRoleBinding) { +func addClusterRoleBindingLabel(rolebindings []rbacv1.ClusterRoleBinding) { for i := range rolebindings { addDefaultMetadata(&rolebindings[i]) } return } -func NodeRules() []rbac.PolicyRule { - nodePolicyRules := []rbac.PolicyRule{ +func NodeRules() []rbacv1.PolicyRule { + nodePolicyRules := []rbacv1.PolicyRule{ // Needed to check API access. These creates are non-mutating - rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), - rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(), // Needed to build serviceLister, to populate env vars for services - rbac.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(), // Nodes can register Node API objects and report status. // Use the NodeRestriction admission plugin to limit a node to creating/updating its own API object. - rbac.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), - rbac.NewRule("update", "patch", "delete").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), + rbacv1helpers.NewRule("update", "patch", "delete").Groups(legacyGroup).Resources("nodes").RuleOrDie(), // TODO: restrict to the bound node as creator in the NodeRestrictions admission plugin - rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(), + rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(), // TODO: restrict to pods scheduled on the bound node once field selectors are supported by list/watch authorization - rbac.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(), // Needed for the node to create/delete mirror pods. 
// Use the NodeRestriction admission plugin to limit a node to creating/deleting mirror pods bound to itself. - rbac.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), // Needed for the node to report status of pods it is running. // Use the NodeRestriction admission plugin to limit a node to updating status of pods bound to itself. - rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), // Needed for the node to create pod evictions. // Use the NodeRestriction admission plugin to limit a node to creating evictions for pods bound to itself. - rbac.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(), // Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs // Needed for configmap volume and envs // Use the Node authorization mode to limit a node to get secrets/configmaps referenced by pods bound to itself. - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(), // Needed for persistent volumes // Use the Node authorization mode to limit a node to get pv/pvc objects referenced by pods bound to itself. - rbac.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(), // TODO: add to the Node authorizer and restrict to endpoints referenced by pods or PVs bound to the node // Needed for glusterfs volumes - rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), // Used to create a certificatesigningrequest for a node-specific client certificate, and watch // for it to be signed. This allows the kubelet to rotate it's own certificate. - rbac.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), + rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), } if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { // Use the Node authorization mode to limit a node to update status of pvc objects referenced by pods bound to itself. // Use the NodeRestriction admission plugin to limit a node to just update the status stanza. 
- pvcStatusPolicyRule := rbac.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie() + pvcStatusPolicyRule := rbacv1helpers.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie() nodePolicyRules = append(nodePolicyRules, pvcStatusPolicyRule) } if utilfeature.DefaultFeatureGate.Enabled(features.TokenRequest) { // Use the Node authorization to limit a node to create tokens for service accounts running on that node // Use the NodeRestriction admission plugin to limit a node to create tokens bound to pods on that node - tokenRequestRule := rbac.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie() + tokenRequestRule := rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie() nodePolicyRules = append(nodePolicyRules, tokenRequestRule) } // CSI if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { - volAttachRule := rbac.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie() + volAttachRule := rbacv1helpers.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie() nodePolicyRules = append(nodePolicyRules, volAttachRule) } return nodePolicyRules } // ClusterRoles returns the cluster roles to bootstrap an API server with -func ClusterRoles() []rbac.ClusterRole { - roles := []rbac.ClusterRole{ +func ClusterRoles() []rbacv1.ClusterRole { + roles := []rbacv1.ClusterRole{ { // a "root" role which can do absolutely anything ObjectMeta: metav1.ObjectMeta{Name: "cluster-admin"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("*").Groups("*").Resources("*").RuleOrDie(), - rbac.NewRule("*").URLs("*").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("*").Groups("*").Resources("*").RuleOrDie(), + rbacv1helpers.NewRule("*").URLs("*").RuleOrDie(), }, }, { // a role which provides just enough power to determine if the server is ready and discover API versions for negotiation ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get").URLs( + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get").URLs( "/healthz", "/version", "/version/", // remove once swagger 1.2 support is removed "/swaggerapi", "/swaggerapi/*", @@ -192,16 +193,16 @@ func ClusterRoles() []rbac.ClusterRole { { // a role which provides minimal resource access to allow a "normal" user to learn information about themselves ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // TODO add future selfsubjectrulesreview, project request APIs, project listing APIs - rbac.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(), }, }, { // a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users. 
ObjectMeta: metav1.ObjectMeta{Name: "admin"}, - AggregationRule: &rbac.AggregationRule{ + AggregationRule: &rbacv1.AggregationRule{ ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}}, }, }, @@ -210,7 +211,7 @@ func ClusterRoles() []rbac.ClusterRole { // It does not grant powers for "privileged" resources which are domain of the system: `/status` // subresources or `quota`/`limits` which are used to control namespaces ObjectMeta: metav1.ObjectMeta{Name: "edit"}, - AggregationRule: &rbac.AggregationRule{ + AggregationRule: &rbacv1.AggregationRule{ ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}}, }, }, @@ -218,46 +219,46 @@ func ClusterRoles() []rbac.ClusterRole { // a role for namespace level viewing. It grants Read-only access to non-escalating resources in // a namespace. ObjectMeta: metav1.ObjectMeta{Name: "view"}, - AggregationRule: &rbac.AggregationRule{ + AggregationRule: &rbacv1.AggregationRule{ ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}}}, }, }, { // a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users. ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-admin", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}, - Rules: []rbac.PolicyRule{ - rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets").RuleOrDie(), - rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events", + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events", "pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(), // read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an // indicator of which namespaces you have access to. 
- rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), - rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), + rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources( + rbacv1helpers.NewRule(ReadWrite...).Groups(appsGroup).Resources( "statefulsets", "statefulsets/scale", "daemonsets", "deployments", "deployments/scale", "deployments/rollback", "replicasets", "replicasets/scale").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", + rbacv1helpers.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", "deployments/rollback", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale", "networkpolicies").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), // additional admin powers - rbac.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(), }, }, { @@ -265,75 +266,75 @@ func ClusterRoles() []rbac.ClusterRole { // It does not grant powers for "privileged" resources which are domain of the system: `/status` // subresources or `quota`/`limits` which are used to control namespaces ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}, - Rules: []rbac.PolicyRule{ - rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets").RuleOrDie(), - rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events", + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", 
"resourcequotas", "bindings", "events", "pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(), // read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an // indicator of which namespaces you have access to. - rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), - rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), + rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources( + rbacv1helpers.NewRule(ReadWrite...).Groups(appsGroup).Resources( "statefulsets", "statefulsets/scale", "daemonsets", "deployments", "deployments/scale", "deployments/rollback", "replicasets", "replicasets/scale").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", + rbacv1helpers.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", "deployments/rollback", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale", "networkpolicies").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), - rbac.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), + rbacv1helpers.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), }, }, { // a role for namespace level viewing. It grants Read-only access to non-escalating resources in // a namespace. ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}}, - Rules: []rbac.PolicyRule{ - rbac.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", "services", "endpoints", "persistentvolumeclaims", "configmaps").RuleOrDie(), - rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events", + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events", "pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(), // read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an // indicator of which namespaces you have access to. 
- rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), - rbac.NewRule(Read...).Groups(appsGroup).Resources( + rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources( "statefulsets", "statefulsets/scale", "daemonsets", "deployments", "deployments/scale", "replicasets", "replicasets/scale").RuleOrDie(), - rbac.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), - rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(), - rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", + rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale", "networkpolicies").RuleOrDie(), - rbac.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), - rbac.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(), }, }, { // a role to use for heapster's connections back to the API server ObjectMeta: metav1.ObjectMeta{Name: "system:heapster"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(), - rbac.NewRule(Read...).Groups(extensionsGroup).Resources("deployments").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("deployments").RuleOrDie(), }, }, { @@ -345,19 +346,19 @@ func ClusterRoles() []rbac.ClusterRole { // a role to use for node-problem-detector access. It does not get bound to default location since // deployment locations can reasonably vary. 
ObjectMeta: metav1.ObjectMeta{Name: "system:node-problem-detector"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), eventsRule(), }, }, { // a role to use for setting up a proxy ObjectMeta: metav1.ObjectMeta{Name: "system:node-proxier"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // Used to build serviceLister - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), - rbac.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(), eventsRule(), }, @@ -365,98 +366,98 @@ func ClusterRoles() []rbac.ClusterRole { { // a role to use for full access to the kubelet API ObjectMeta: metav1.ObjectMeta{Name: "system:kubelet-api-admin"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // Allow read-only access to the Node API objects - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), // Allow all API calls to the nodes - rbac.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/spec", "nodes/stats", "nodes/log").RuleOrDie(), + rbacv1helpers.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/spec", "nodes/stats", "nodes/log").RuleOrDie(), }, }, { // a role to use for bootstrapping a node's client certificates ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // used to create a certificatesigningrequest for a node-specific client certificate, and watch for it to be signed - rbac.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), + rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), }, }, { // a role to use for allowing authentication and authorization delegation ObjectMeta: metav1.ObjectMeta{Name: "system:auth-delegator"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // These creates are non-mutating - rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), - rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(), }, }, { // a role to use for the API registry, summarization, and proxy handling ObjectMeta: metav1.ObjectMeta{Name: "system:kube-aggregator"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ // it needs to see all services so that it knows whether the ones it points to exist or not - 
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), }, }, { // a role to use for bootstrapping the kube-controller-manager so it can create the shared informers // service accounts, and secrets that we need to create separate identities for other controllers ObjectMeta: metav1.ObjectMeta{Name: "system:kube-controller-manager"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ eventsRule(), - rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(), - rbac.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(), - rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts").RuleOrDie(), - rbac.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(), + rbacv1helpers.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts").RuleOrDie(), + rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(), // Needed to check API access. These creates are non-mutating - rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), // Needed for all shared informers - rbac.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(), + rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(), }, }, { // a role to use for the kube-scheduler ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"}, - Rules: []rbac.PolicyRule{ + Rules: []rbacv1.PolicyRule{ eventsRule(), // this is for leaderlease access // TODO: scope this to the kube-system namespace - rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), - rbac.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), + rbacv1helpers.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(), // fundamental resources - rbac.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), - rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(), - rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(), + rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), // things that select pods - rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(), - rbac.NewRule(Read...).Groups(appsGroup, 
extensionsGroup).Resources("replicasets").RuleOrDie(), - rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(), // things that pods use or applies to them - rbac.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), - rbac.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(), }, }, { // a role to use for the kube-dns pod ObjectMeta: metav1.ObjectMeta{Name: "system:kube-dns"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("endpoints", "services").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("endpoints", "services").RuleOrDie(), }, }, { // a role for an external/out-of-tree persistent volume provisioner ObjectMeta: metav1.ObjectMeta{Name: "system:persistent-volume-provisioner"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), // update is needed in addition to read access for setting lock annotations on PVCs - rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), - rbac.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(), // Needed for watching provisioning success and failure events - rbac.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(), + rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(), eventsRule(), }, @@ -464,52 +465,52 @@ func ClusterRoles() []rbac.ClusterRole { { // a role for the csi external provisioner ObjectMeta: metav1.ObjectMeta{Name: "system:csi-external-provisioner"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("create", "delete", "list", "watch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), - rbac.NewRule("list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create", "delete", "list", "watch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), + rbacv1helpers.NewRule("list", 
"watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(), }, }, { // a role for the csi external attacher ObjectMeta: metav1.ObjectMeta{Name: "system:csi-external-attacher"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), - rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "update", "patch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie(), - rbac.NewRule("get", "list", "watch", "create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(), }, }, { ObjectMeta: metav1.ObjectMeta{Name: "system:aws-cloud-provider"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("get", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("get", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), eventsRule(), }, }, { // a role making the csrapprover controller approve a node client CSR ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:nodeclient"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/nodeclient").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/nodeclient").RuleOrDie(), }, }, { // a role making the csrapprover controller approve a node client CSR requested by the node itself ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeclient").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeclient").RuleOrDie(), }, }, } if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { - roles = append(roles, rbac.ClusterRole{ + roles = append(roles, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "system:volume-scheduler"}, - Rules: []rbac.PolicyRule{ - rbac.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), - rbac.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(), + Rules: []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(), }, }) } @@ -521,27 +522,27 @@ func ClusterRoles() []rbac.ClusterRole { const systemNodeRoleName = "system:node" // ClusterRoleBindings return default rolebindings to the default roles -func ClusterRoleBindings() []rbac.ClusterRoleBinding { - 
rolebindings := []rbac.ClusterRoleBinding{ - rbac.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(), - rbac.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(), - rbac.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(), - rbac.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(), - rbac.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(), - rbac.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(), - rbac.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(), - rbac.NewClusterBinding("system:aws-cloud-provider").SAs("kube-system", "aws-cloud-provider").BindingOrDie(), +func ClusterRoleBindings() []rbacv1.ClusterRoleBinding { + rolebindings := []rbacv1.ClusterRoleBinding{ + rbacv1helpers.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(), + rbacv1helpers.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(), + rbacv1helpers.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(), + rbacv1helpers.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(), + rbacv1helpers.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(), + rbacv1helpers.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(), + rbacv1helpers.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(), + rbacv1helpers.NewClusterBinding("system:aws-cloud-provider").SAs("kube-system", "aws-cloud-provider").BindingOrDie(), // This default binding of the system:node role to the system:nodes group is deprecated in 1.7 with the availability of the Node authorizer. // This leaves the binding, but with an empty set of subjects, so that tightening reconciliation can remove the subject. 
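Similarly, a hedged sketch of the ClusterRoleBinding that a NewClusterBinding(...).Users(...) chain is expected to produce, using the subject shape that appears later in this patch's tests; labels added elsewhere (e.g. by addClusterRoleBindingLabel) are not shown:

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
)

func main() {
	// Built with the helper, as ClusterRoleBindings() does for kube-proxy.
	built := rbacv1helpers.NewClusterBinding("system:node-proxier").Users("system:kube-proxy").BindingOrDie()

	// Roughly the object expected back (an assumption based on the fields used in this patch).
	want := rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "system:node-proxier"},
		RoleRef:    rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: "system:node-proxier"},
		Subjects: []rbacv1.Subject{
			{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "system:kube-proxy"},
		},
	}

	fmt.Println(built.RoleRef == want.RoleRef) // expected to print: true
}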
{ ObjectMeta: metav1.ObjectMeta{Name: systemNodeRoleName}, - RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName}, + RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName}, }, } if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { - rolebindings = append(rolebindings, rbac.NewClusterBinding("system:volume-scheduler").Users(user.KubeScheduler).BindingOrDie()) + rolebindings = append(rolebindings, rbacv1helpers.NewClusterBinding("system:volume-scheduler").Users(user.KubeScheduler).BindingOrDie()) } addClusterRoleBindingLabel(rolebindings) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy_test.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy_test.go index 830d92576f9..1b024c44143 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy_test.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy_test.go @@ -34,8 +34,8 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" _ "k8s.io/kubernetes/pkg/apis/core/install" - "k8s.io/kubernetes/pkg/apis/rbac" _ "k8s.io/kubernetes/pkg/apis/rbac/install" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy" ) @@ -43,12 +43,12 @@ import ( // semanticRoles is a few enumerated roles for which the relationships are well established // and we want to maintain symmetric roles type semanticRoles struct { - admin *rbac.ClusterRole - edit *rbac.ClusterRole - view *rbac.ClusterRole + admin *rbacv1.ClusterRole + edit *rbacv1.ClusterRole + view *rbacv1.ClusterRole } -func getSemanticRoles(roles []rbac.ClusterRole) semanticRoles { +func getSemanticRoles(roles []rbacv1.ClusterRole) semanticRoles { ret := semanticRoles{} for i := range roles { role := roles[i] @@ -81,10 +81,10 @@ func TestCovers(t *testing.T) { // additionalAdminPowers is the list of powers that we expect to be different than the editor role. 
// one resource per rule to make the "does not already contain" check easy -var additionalAdminPowers = []rbac.PolicyRule{ - rbac.NewRule("create").Groups("authorization.k8s.io").Resources("localsubjectaccessreviews").RuleOrDie(), - rbac.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(), - rbac.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("roles").RuleOrDie(), +var additionalAdminPowers = []rbacv1.PolicyRule{ + rbacv1helpers.NewRule("create").Groups("authorization.k8s.io").Resources("localsubjectaccessreviews").RuleOrDie(), + rbacv1helpers.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(), + rbacv1helpers.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("roles").RuleOrDie(), } func TestAdminEditRelationship(t *testing.T) { @@ -92,7 +92,7 @@ func TestAdminEditRelationship(t *testing.T) { // confirm that the edit role doesn't already have extra powers for _, rule := range additionalAdminPowers { - if covers, _ := rbacregistryvalidation.Covers(semanticRoles.edit.Rules, []rbac.PolicyRule{rule}); covers { + if covers, _ := rbacregistryvalidation.Covers(semanticRoles.edit.Rules, []rbacv1.PolicyRule{rule}); covers { t.Errorf("edit has extra powers: %#v", rule) } } @@ -109,19 +109,19 @@ func TestAdminEditRelationship(t *testing.T) { // viewEscalatingNamespaceResources is the list of rules that would allow privilege escalation attacks based on // ability to view (GET) them -var viewEscalatingNamespaceResources = []rbac.PolicyRule{ - rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/attach").RuleOrDie(), - rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/proxy").RuleOrDie(), - rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/exec").RuleOrDie(), - rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/portforward").RuleOrDie(), - rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("secrets").RuleOrDie(), - rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("services/proxy").RuleOrDie(), +var viewEscalatingNamespaceResources = []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/attach").RuleOrDie(), + rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/proxy").RuleOrDie(), + rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/exec").RuleOrDie(), + rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/portforward").RuleOrDie(), + rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("secrets").RuleOrDie(), + rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("services/proxy").RuleOrDie(), } // ungettableResources is the list of rules that don't allow to view (GET) them // this is purposefully separate list to distinguish from escalating privs -var ungettableResources = []rbac.PolicyRule{ - rbac.NewRule(bootstrappolicy.Read...).Groups("apps", "extensions").Resources("deployments/rollback").RuleOrDie(), +var ungettableResources = []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("apps", "extensions").Resources("deployments/rollback").RuleOrDie(), } func TestEditViewRelationship(t *testing.T) { @@ -143,7 +143,7 @@ func TestEditViewRelationship(t *testing.T) { // confirm that the view role doesn't already have extra powers for _, rule := range viewEscalatingNamespaceResources { 
- if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbac.PolicyRule{rule}); covers { + if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbacv1.PolicyRule{rule}); covers { t.Errorf("view has extra powers: %#v", rule) } } @@ -151,7 +151,7 @@ func TestEditViewRelationship(t *testing.T) { // confirm that the view role doesn't have ungettable resources for _, rule := range ungettableResources { - if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbac.PolicyRule{rule}); covers { + if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbacv1.PolicyRule{rule}); covers { t.Errorf("view has ungettable resource: %#v", rule) } } diff --git a/plugin/pkg/auth/authorizer/rbac/rbac.go b/plugin/pkg/auth/authorizer/rbac/rbac.go index 122a10b2f3a..a0f173c393b 100644 --- a/plugin/pkg/auth/authorizer/rbac/rbac.go +++ b/plugin/pkg/auth/authorizer/rbac/rbac.go @@ -18,18 +18,18 @@ limitations under the License. package rbac import ( + "bytes" "fmt" "github.com/golang/glog" - "bytes" - + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/kubernetes/pkg/apis/rbac" - rbaclisters "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion" + rbaclisters "k8s.io/client-go/listers/rbac/v1" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" ) @@ -38,12 +38,12 @@ type RequestToRuleMapper interface { // Any rule returned is still valid, since rules are deny by default. If you can pass with the rules // supplied, you do not have to fail the request. If you cannot, you should indicate the error along // with your denial. - RulesFor(subject user.Info, namespace string) ([]rbac.PolicyRule, error) + RulesFor(subject user.Info, namespace string) ([]rbacv1.PolicyRule, error) // VisitRulesFor invokes visitor() with each rule that applies to a given user in a given namespace, // and each error encountered resolving those rules. Rule may be nil if err is non-nil. // If visitor() returns false, visiting is short-circuited. 
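Since the matcher helpers in this file now take v1 rules directly, a small usage sketch of RuleAllows with the signature shown in this diff; the attribute values are made up for illustration:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authorization/authorizer"
	rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
)

func main() {
	rule := rbacv1helpers.NewRule("get", "list").Groups("").Resources("pods").RuleOrDie()

	attrs := authorizer.AttributesRecord{
		Verb:            "get",
		APIGroup:        "",
		Resource:        "pods",
		Namespace:       "ns1",
		ResourceRequest: true,
	}

	// RuleAllows checks verb, API group, resource (including subresource) and resource name.
	fmt.Println(rbac.RuleAllows(attrs, &rule)) // expected to print: true
}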
- VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool) + VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool) } type RBACAuthorizer struct { @@ -59,7 +59,7 @@ type authorizingVisitor struct { errors []error } -func (v *authorizingVisitor) visit(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool { +func (v *authorizingVisitor) visit(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool { if rule != nil && RuleAllows(v.requestAttributes, rule) { v.allowed = true v.reason = fmt.Sprintf("RBAC: allowed by %s", source.String()) @@ -164,7 +164,7 @@ func New(roles rbacregistryvalidation.RoleGetter, roleBindings rbacregistryvalid return authorizer } -func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbac.PolicyRule) bool { +func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbacv1.PolicyRule) bool { for i := range rules { if RuleAllows(requestAttributes, &rules[i]) { return true @@ -174,28 +174,28 @@ func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbac.PolicyRul return false } -func RuleAllows(requestAttributes authorizer.Attributes, rule *rbac.PolicyRule) bool { +func RuleAllows(requestAttributes authorizer.Attributes, rule *rbacv1.PolicyRule) bool { if requestAttributes.IsResourceRequest() { combinedResource := requestAttributes.GetResource() if len(requestAttributes.GetSubresource()) > 0 { combinedResource = requestAttributes.GetResource() + "/" + requestAttributes.GetSubresource() } - return rbac.VerbMatches(rule, requestAttributes.GetVerb()) && - rbac.APIGroupMatches(rule, requestAttributes.GetAPIGroup()) && - rbac.ResourceMatches(rule, combinedResource, requestAttributes.GetSubresource()) && - rbac.ResourceNameMatches(rule, requestAttributes.GetName()) + return rbacv1helpers.VerbMatches(rule, requestAttributes.GetVerb()) && + rbacv1helpers.APIGroupMatches(rule, requestAttributes.GetAPIGroup()) && + rbacv1helpers.ResourceMatches(rule, combinedResource, requestAttributes.GetSubresource()) && + rbacv1helpers.ResourceNameMatches(rule, requestAttributes.GetName()) } - return rbac.VerbMatches(rule, requestAttributes.GetVerb()) && - rbac.NonResourceURLMatches(rule, requestAttributes.GetPath()) + return rbacv1helpers.VerbMatches(rule, requestAttributes.GetVerb()) && + rbacv1helpers.NonResourceURLMatches(rule, requestAttributes.GetPath()) } type RoleGetter struct { Lister rbaclisters.RoleLister } -func (g *RoleGetter) GetRole(namespace, name string) (*rbac.Role, error) { +func (g *RoleGetter) GetRole(namespace, name string) (*rbacv1.Role, error) { return g.Lister.Roles(namespace).Get(name) } @@ -203,7 +203,7 @@ type RoleBindingLister struct { Lister rbaclisters.RoleBindingLister } -func (l *RoleBindingLister) ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) { +func (l *RoleBindingLister) ListRoleBindings(namespace string) ([]*rbacv1.RoleBinding, error) { return l.Lister.RoleBindings(namespace).List(labels.Everything()) } @@ -211,7 +211,7 @@ type ClusterRoleGetter struct { Lister rbaclisters.ClusterRoleLister } -func (g *ClusterRoleGetter) GetClusterRole(name string) (*rbac.ClusterRole, error) { +func (g *ClusterRoleGetter) GetClusterRole(name string) (*rbacv1.ClusterRole, error) { return g.Lister.Get(name) } @@ -219,6 +219,6 @@ type ClusterRoleBindingLister struct { Lister rbaclisters.ClusterRoleBindingLister } -func (l *ClusterRoleBindingLister) ListClusterRoleBindings() 
([]*rbac.ClusterRoleBinding, error) { +func (l *ClusterRoleBindingLister) ListClusterRoleBindings() ([]*rbacv1.ClusterRoleBinding, error) { return l.Lister.List(labels.Everything()) } diff --git a/plugin/pkg/auth/authorizer/rbac/rbac_test.go b/plugin/pkg/auth/authorizer/rbac/rbac_test.go index d4425a60476..1b2a8e1ef32 100644 --- a/plugin/pkg/auth/authorizer/rbac/rbac_test.go +++ b/plugin/pkg/auth/authorizer/rbac/rbac_test.go @@ -21,16 +21,17 @@ import ( "strings" "testing" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy" ) -func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbac.PolicyRule { - return rbac.PolicyRule{ +func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbacv1.PolicyRule { + return rbacv1.PolicyRule{ Verbs: strings.Split(verbs, ","), APIGroups: strings.Split(apiGroups, ","), Resources: strings.Split(resources, ","), @@ -38,12 +39,12 @@ func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbac.PolicyRul } } -func newRole(name, namespace string, rules ...rbac.PolicyRule) *rbac.Role { - return &rbac.Role{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Rules: rules} +func newRole(name, namespace string, rules ...rbacv1.PolicyRule) *rbacv1.Role { + return &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Rules: rules} } -func newClusterRole(name string, rules ...rbac.PolicyRule) *rbac.ClusterRole { - return &rbac.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: name}, Rules: rules} +func newClusterRole(name string, rules ...rbacv1.PolicyRule) *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: name}, Rules: rules} } const ( @@ -51,26 +52,26 @@ const ( bindToClusterRole uint16 = 0x1 ) -func newClusterRoleBinding(roleName string, subjects ...string) *rbac.ClusterRoleBinding { - r := &rbac.ClusterRoleBinding{ +func newClusterRoleBinding(roleName string, subjects ...string) *rbacv1.ClusterRoleBinding { + r := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{}, - RoleRef: rbac.RoleRef{ - APIGroup: rbac.GroupName, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, Kind: "ClusterRole", // ClusterRoleBindings can only refer to ClusterRole Name: roleName, }, } - r.Subjects = make([]rbac.Subject, len(subjects)) + r.Subjects = make([]rbacv1.Subject, len(subjects)) for i, subject := range subjects { split := strings.SplitN(subject, ":", 2) r.Subjects[i].Kind, r.Subjects[i].Name = split[0], split[1] switch r.Subjects[i].Kind { - case rbac.ServiceAccountKind: + case rbacv1.ServiceAccountKind: r.Subjects[i].APIGroup = "" - case rbac.UserKind, rbac.GroupKind: - r.Subjects[i].APIGroup = rbac.GroupName + case rbacv1.UserKind, rbacv1.GroupKind: + r.Subjects[i].APIGroup = rbacv1.GroupName default: panic(fmt.Errorf("invalid kind %s", r.Subjects[i].Kind)) } @@ -78,26 +79,26 @@ func newClusterRoleBinding(roleName string, subjects ...string) *rbac.ClusterRol return r } -func newRoleBinding(namespace, roleName string, bindType uint16, subjects ...string) *rbac.RoleBinding { - r := &rbac.RoleBinding{ObjectMeta: metav1.ObjectMeta{Namespace: namespace}} +func newRoleBinding(namespace, roleName string, bindType uint16, subjects 
...string) *rbacv1.RoleBinding { + r := &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Namespace: namespace}} switch bindType { case bindToRole: - r.RoleRef = rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: roleName} + r.RoleRef = rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "Role", Name: roleName} case bindToClusterRole: - r.RoleRef = rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: roleName} + r.RoleRef = rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: roleName} } - r.Subjects = make([]rbac.Subject, len(subjects)) + r.Subjects = make([]rbacv1.Subject, len(subjects)) for i, subject := range subjects { split := strings.SplitN(subject, ":", 2) r.Subjects[i].Kind, r.Subjects[i].Name = split[0], split[1] switch r.Subjects[i].Kind { - case rbac.ServiceAccountKind: + case rbacv1.ServiceAccountKind: r.Subjects[i].APIGroup = "" - case rbac.UserKind, rbac.GroupKind: - r.Subjects[i].APIGroup = rbac.GroupName + case rbacv1.UserKind, rbacv1.GroupKind: + r.Subjects[i].APIGroup = rbacv1.GroupName default: panic(fmt.Errorf("invalid kind %s", r.Subjects[i].Kind)) } @@ -136,19 +137,19 @@ func (d *defaultAttributes) GetPath() string { return "" } func TestAuthorizer(t *testing.T) { tests := []struct { - roles []*rbac.Role - roleBindings []*rbac.RoleBinding - clusterRoles []*rbac.ClusterRole - clusterRoleBindings []*rbac.ClusterRoleBinding + roles []*rbacv1.Role + roleBindings []*rbacv1.RoleBinding + clusterRoles []*rbacv1.ClusterRole + clusterRoleBindings []*rbacv1.ClusterRoleBinding shouldPass []authorizer.Attributes shouldFail []authorizer.Attributes }{ { - clusterRoles: []*rbac.ClusterRole{ + clusterRoles: []*rbacv1.ClusterRole{ newClusterRole("admin", newRule("*", "*", "*", "*")), }, - roleBindings: []*rbac.RoleBinding{ + roleBindings: []*rbacv1.RoleBinding{ newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"), }, shouldPass: []authorizer.Attributes{ @@ -167,12 +168,12 @@ func TestAuthorizer(t *testing.T) { }, { // Non-resource-url tests - clusterRoles: []*rbac.ClusterRole{ + clusterRoles: []*rbacv1.ClusterRole{ newClusterRole("non-resource-url-getter", newRule("get", "", "", "/apis")), newClusterRole("non-resource-url", newRule("*", "", "", "/apis")), newClusterRole("non-resource-url-prefix", newRule("get", "", "", "/apis/*")), }, - clusterRoleBindings: []*rbac.ClusterRoleBinding{ + clusterRoleBindings: []*rbacv1.ClusterRoleBinding{ newClusterRoleBinding("non-resource-url-getter", "User:foo", "Group:bar"), newClusterRoleBinding("non-resource-url", "User:admin", "Group:admin"), newClusterRoleBinding("non-resource-url-prefix", "User:prefixed", "Group:prefixed"), @@ -208,10 +209,10 @@ func TestAuthorizer(t *testing.T) { }, { // test subresource resolution - clusterRoles: []*rbac.ClusterRole{ + clusterRoles: []*rbacv1.ClusterRole{ newClusterRole("admin", newRule("*", "*", "pods", "*")), }, - roleBindings: []*rbac.RoleBinding{ + roleBindings: []*rbacv1.RoleBinding{ newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"), }, shouldPass: []authorizer.Attributes{ @@ -223,13 +224,13 @@ func TestAuthorizer(t *testing.T) { }, { // test subresource resolution - clusterRoles: []*rbac.ClusterRole{ + clusterRoles: []*rbacv1.ClusterRole{ newClusterRole("admin", newRule("*", "*", "pods/status", "*"), newRule("*", "*", "*/scale", "*"), ), }, - roleBindings: []*rbac.RoleBinding{ + roleBindings: []*rbacv1.RoleBinding{ newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"), }, shouldPass: 
[]authorizer.Attributes{ @@ -263,13 +264,13 @@ func TestAuthorizer(t *testing.T) { func TestRuleMatches(t *testing.T) { tests := []struct { name string - rule rbac.PolicyRule + rule rbacv1.PolicyRule requestsToExpected map[authorizer.AttributesRecord]bool }{ { name: "star verb, exact match other", - rule: rbac.NewRule("*").Groups("group1").Resources("resource1").RuleOrDie(), + rule: rbacv1helpers.NewRule("*").Groups("group1").Resources("resource1").RuleOrDie(), requestsToExpected: map[authorizer.AttributesRecord]bool{ resourceRequest("verb1").Group("group1").Resource("resource1").New(): true, resourceRequest("verb1").Group("group2").Resource("resource1").New(): false, @@ -283,7 +284,7 @@ func TestRuleMatches(t *testing.T) { }, { name: "star group, exact match other", - rule: rbac.NewRule("verb1").Groups("*").Resources("resource1").RuleOrDie(), + rule: rbacv1helpers.NewRule("verb1").Groups("*").Resources("resource1").RuleOrDie(), requestsToExpected: map[authorizer.AttributesRecord]bool{ resourceRequest("verb1").Group("group1").Resource("resource1").New(): true, resourceRequest("verb1").Group("group2").Resource("resource1").New(): true, @@ -297,7 +298,7 @@ func TestRuleMatches(t *testing.T) { }, { name: "star resource, exact match other", - rule: rbac.NewRule("verb1").Groups("group1").Resources("*").RuleOrDie(), + rule: rbacv1helpers.NewRule("verb1").Groups("group1").Resources("*").RuleOrDie(), requestsToExpected: map[authorizer.AttributesRecord]bool{ resourceRequest("verb1").Group("group1").Resource("resource1").New(): true, resourceRequest("verb1").Group("group2").Resource("resource1").New(): false, @@ -311,7 +312,7 @@ func TestRuleMatches(t *testing.T) { }, { name: "tuple expansion", - rule: rbac.NewRule("verb1", "verb2").Groups("group1", "group2").Resources("resource1", "resource2").RuleOrDie(), + rule: rbacv1helpers.NewRule("verb1", "verb2").Groups("group1", "group2").Resources("resource1", "resource2").RuleOrDie(), requestsToExpected: map[authorizer.AttributesRecord]bool{ resourceRequest("verb1").Group("group1").Resource("resource1").New(): true, resourceRequest("verb1").Group("group2").Resource("resource1").New(): true, @@ -325,7 +326,7 @@ func TestRuleMatches(t *testing.T) { }, { name: "subresource expansion", - rule: rbac.NewRule("*").Groups("*").Resources("resource1/subresource1").RuleOrDie(), + rule: rbacv1helpers.NewRule("*").Groups("*").Resources("resource1/subresource1").RuleOrDie(), requestsToExpected: map[authorizer.AttributesRecord]bool{ resourceRequest("verb1").Group("group1").Resource("resource1").Subresource("subresource1").New(): true, resourceRequest("verb1").Group("group2").Resource("resource1").Subresource("subresource2").New(): false, @@ -339,7 +340,7 @@ func TestRuleMatches(t *testing.T) { }, { name: "star nonresource, exact match other", - rule: rbac.NewRule("verb1").URLs("*").RuleOrDie(), + rule: rbacv1helpers.NewRule("verb1").URLs("*").RuleOrDie(), requestsToExpected: map[authorizer.AttributesRecord]bool{ nonresourceRequest("verb1").URL("/foo").New(): true, nonresourceRequest("verb1").URL("/foo/bar").New(): true, @@ -355,7 +356,7 @@ func TestRuleMatches(t *testing.T) { }, { name: "star nonresource subpath", - rule: rbac.NewRule("verb1").URLs("/foo/*").RuleOrDie(), + rule: rbacv1helpers.NewRule("verb1").URLs("/foo/*").RuleOrDie(), requestsToExpected: map[authorizer.AttributesRecord]bool{ nonresourceRequest("verb1").URL("/foo").New(): false, nonresourceRequest("verb1").URL("/foo/bar").New(): true, @@ -371,7 +372,7 @@ func TestRuleMatches(t *testing.T) { }, { name: 
"star verb, exact nonresource", - rule: rbac.NewRule("*").URLs("/foo", "/foo/bar/one").RuleOrDie(), + rule: rbacv1helpers.NewRule("*").URLs("/foo", "/foo/bar/one").RuleOrDie(), requestsToExpected: map[authorizer.AttributesRecord]bool{ nonresourceRequest("verb1").URL("/foo").New(): true, nonresourceRequest("verb1").URL("/foo/bar").New(): false, @@ -441,19 +442,19 @@ func (r *requestAttributeBuilder) New() authorizer.AttributesRecord { } func BenchmarkAuthorize(b *testing.B) { - bootstrapRoles := []rbac.ClusterRole{} + bootstrapRoles := []rbacv1.ClusterRole{} bootstrapRoles = append(bootstrapRoles, bootstrappolicy.ControllerRoles()...) bootstrapRoles = append(bootstrapRoles, bootstrappolicy.ClusterRoles()...) - bootstrapBindings := []rbac.ClusterRoleBinding{} + bootstrapBindings := []rbacv1.ClusterRoleBinding{} bootstrapBindings = append(bootstrapBindings, bootstrappolicy.ClusterRoleBindings()...) bootstrapBindings = append(bootstrapBindings, bootstrappolicy.ControllerRoleBindings()...) - clusterRoles := []*rbac.ClusterRole{} + clusterRoles := []*rbacv1.ClusterRole{} for i := range bootstrapRoles { clusterRoles = append(clusterRoles, &bootstrapRoles[i]) } - clusterRoleBindings := []*rbac.ClusterRoleBinding{} + clusterRoleBindings := []*rbacv1.ClusterRoleBinding{} for i := range bootstrapBindings { clusterRoleBindings = append(clusterRoleBindings, &bootstrapBindings[i]) } diff --git a/plugin/pkg/auth/authorizer/rbac/subject_locator.go b/plugin/pkg/auth/authorizer/rbac/subject_locator.go index 0f5f413b9a3..cdd327e5b7c 100644 --- a/plugin/pkg/auth/authorizer/rbac/subject_locator.go +++ b/plugin/pkg/auth/authorizer/rbac/subject_locator.go @@ -18,21 +18,21 @@ limitations under the License. package rbac import ( + rbacv1 "k8s.io/api/rbac/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/kubernetes/pkg/apis/rbac" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" ) type RoleToRuleMapper interface { // GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namespace // of the role binding, the empty string if a cluster role binding. - GetRoleReferenceRules(roleRef rbac.RoleRef, namespace string) ([]rbac.PolicyRule, error) + GetRoleReferenceRules(roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error) } type SubjectLocator interface { - AllowedSubjects(attributes authorizer.Attributes) ([]rbac.Subject, error) + AllowedSubjects(attributes authorizer.Attributes) ([]rbacv1.Subject, error) } var _ = SubjectLocator(&SubjectAccessEvaluator{}) @@ -59,10 +59,10 @@ func NewSubjectAccessEvaluator(roles rbacregistryvalidation.RoleGetter, roleBind // AllowedSubjects returns the subjects that can perform an action and any errors encountered while computing the list. // It is possible to have both subjects and errors returned if some rolebindings couldn't be resolved, but others could be. 
-func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.Attributes) ([]rbac.Subject, error) { - subjects := []rbac.Subject{{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}} +func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.Attributes) ([]rbacv1.Subject, error) { + subjects := []rbacv1.Subject{{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}} if len(r.superUser) > 0 { - subjects = append(subjects, rbac.Subject{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: r.superUser}) + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: r.superUser}) } errorlist := []error{} @@ -104,7 +104,7 @@ func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.At } } - dedupedSubjects := []rbac.Subject{} + dedupedSubjects := []rbacv1.Subject{} for _, subject := range subjects { found := false for _, curr := range dedupedSubjects { diff --git a/plugin/pkg/auth/authorizer/rbac/subject_locator_test.go b/plugin/pkg/auth/authorizer/rbac/subject_locator_test.go index 1182a28b548..d798494885a 100644 --- a/plugin/pkg/auth/authorizer/rbac/subject_locator_test.go +++ b/plugin/pkg/auth/authorizer/rbac/subject_locator_test.go @@ -20,24 +20,24 @@ import ( "reflect" "testing" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" - "k8s.io/kubernetes/pkg/apis/rbac" rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation" ) func TestSubjectLocator(t *testing.T) { type actionToSubjects struct { action authorizer.Attributes - subjects []rbac.Subject + subjects []rbacv1.Subject } tests := []struct { name string - roles []*rbac.Role - roleBindings []*rbac.RoleBinding - clusterRoles []*rbac.ClusterRole - clusterRoleBindings []*rbac.ClusterRoleBinding + roles []*rbacv1.Role + roleBindings []*rbacv1.RoleBinding + clusterRoles []*rbacv1.ClusterRole + clusterRoleBindings []*rbacv1.ClusterRoleBinding superUser string @@ -45,42 +45,42 @@ func TestSubjectLocator(t *testing.T) { }{ { name: "no super user, star matches star", - clusterRoles: []*rbac.ClusterRole{ + clusterRoles: []*rbacv1.ClusterRole{ newClusterRole("admin", newRule("*", "*", "*", "*")), }, - clusterRoleBindings: []*rbac.ClusterRoleBinding{ + clusterRoleBindings: []*rbacv1.ClusterRoleBinding{ newClusterRoleBinding("admin", "User:super-admin", "Group:super-admins"), }, - roleBindings: []*rbac.RoleBinding{ + roleBindings: []*rbacv1.RoleBinding{ newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"), }, actionsToSubjects: []actionToSubjects{ { &defaultAttributes{"", "", "get", "Pods", "", "ns1", ""}, - []rbac.Subject{ - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"}, - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "admin"}, - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "admins"}, + []rbacv1.Subject{ + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"}, + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "admin"}, + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "admins"}, }, }, 
{ // cluster role matches star in namespace &defaultAttributes{"", "", "*", "Pods", "", "*", ""}, - []rbac.Subject{ - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"}, - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"}, + []rbacv1.Subject{ + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"}, + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"}, }, }, { // empty ns &defaultAttributes{"", "", "*", "Pods", "", "", ""}, - []rbac.Subject{ - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"}, - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"}, + []rbacv1.Subject{ + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"}, + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"}, }, }, }, @@ -88,48 +88,48 @@ func TestSubjectLocator(t *testing.T) { { name: "super user, local roles work", superUser: "foo", - clusterRoles: []*rbac.ClusterRole{ + clusterRoles: []*rbacv1.ClusterRole{ newClusterRole("admin", newRule("*", "*", "*", "*")), }, - clusterRoleBindings: []*rbac.ClusterRoleBinding{ + clusterRoleBindings: []*rbacv1.ClusterRoleBinding{ newClusterRoleBinding("admin", "User:super-admin", "Group:super-admins"), }, - roles: []*rbac.Role{ + roles: []*rbacv1.Role{ newRole("admin", "ns1", newRule("get", "*", "Pods", "*")), }, - roleBindings: []*rbac.RoleBinding{ + roleBindings: []*rbacv1.RoleBinding{ newRoleBinding("ns1", "admin", bindToRole, "User:admin", "Group:admins"), }, actionsToSubjects: []actionToSubjects{ { &defaultAttributes{"", "", "get", "Pods", "", "ns1", ""}, - []rbac.Subject{ - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"}, - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "admin"}, - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "admins"}, + []rbacv1.Subject{ + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"}, + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "admin"}, + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "admins"}, }, }, { // verb matchies correctly &defaultAttributes{"", "", "create", "Pods", "", "ns1", ""}, - []rbac.Subject{ - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"}, - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"}, + []rbacv1.Subject{ + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"}, + {Kind: rbacv1.UserKind, APIGroup: 
rbacv1.GroupName, Name: "super-admin"}, + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"}, }, }, { // binding only works in correct ns &defaultAttributes{"", "", "get", "Pods", "", "ns2", ""}, - []rbac.Subject{ - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"}, - {Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"}, - {Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"}, + []rbacv1.Subject{ + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"}, + {Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"}, + {Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"}, }, }, }, diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 387d3bb4d47..f7d6e50d9cd 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -199,6 +199,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv closeFn() glog.Fatal(err) } + var lastHealthContent []byte err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { result := privilegedClient.Get().AbsPath("/healthz").Do() status := 0 @@ -206,10 +207,12 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv if status == 200 { return true, nil } + lastHealthContent, _ = result.Raw() return false, nil }) if err != nil { closeFn() + glog.Errorf("last health content: %q", string(lastHealthContent)) glog.Fatal(err) } From ff743c7f4f85e7277dc137937f3616c6e0d93a04 Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 22 May 2018 08:16:53 -0400 Subject: [PATCH 093/307] generated --- pkg/apis/rbac/v1/zz_generated.deepcopy.go | 75 ++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) diff --git a/pkg/apis/rbac/v1/zz_generated.deepcopy.go b/pkg/apis/rbac/v1/zz_generated.deepcopy.go index 1e699e99228..0a68a23434f 100644 --- a/pkg/apis/rbac/v1/zz_generated.deepcopy.go +++ b/pkg/apis/rbac/v1/zz_generated.deepcopy.go @@ -16,6 +16,79 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by conversion-gen. DO NOT EDIT. +// Code generated by deepcopy-gen. DO NOT EDIT. package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingBuilder) DeepCopyInto(out *ClusterRoleBindingBuilder) { + *out = *in + in.ClusterRoleBinding.DeepCopyInto(&out.ClusterRoleBinding) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingBuilder. +func (in *ClusterRoleBindingBuilder) DeepCopy() *ClusterRoleBindingBuilder { + if in == nil { + return nil + } + out := new(ClusterRoleBindingBuilder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRuleBuilder) DeepCopyInto(out *PolicyRuleBuilder) { + *out = *in + in.PolicyRule.DeepCopyInto(&out.PolicyRule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRuleBuilder. 
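These generated deepcopy functions let builder values be copied without sharing slices; a small sketch, assuming the builder exposes its embedded PolicyRule field as the DeepCopyInto body above implies:

package main

import (
	"fmt"

	rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
)

func main() {
	original := rbacv1helpers.NewRule("get").Groups("").Resources("pods")

	// DeepCopy returns an independent builder, so mutating the clone's slices
	// should not leak back into the original.
	clone := original.DeepCopy()
	clone.PolicyRule.Resources = append(clone.PolicyRule.Resources, "services")

	fmt.Println(original.PolicyRule.Resources) // expected: [pods]
	fmt.Println(clone.PolicyRule.Resources)    // expected: [pods services]
}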
+func (in *PolicyRuleBuilder) DeepCopy() *PolicyRuleBuilder { + if in == nil { + return nil + } + out := new(PolicyRuleBuilder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingBuilder) DeepCopyInto(out *RoleBindingBuilder) { + *out = *in + in.RoleBinding.DeepCopyInto(&out.RoleBinding) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingBuilder. +func (in *RoleBindingBuilder) DeepCopy() *RoleBindingBuilder { + if in == nil { + return nil + } + out := new(RoleBindingBuilder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in SortableRuleSlice) DeepCopyInto(out *SortableRuleSlice) { + { + in := &in + *out = make(SortableRuleSlice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SortableRuleSlice. +func (in SortableRuleSlice) DeepCopy() SortableRuleSlice { + if in == nil { + return nil + } + out := new(SortableRuleSlice) + in.DeepCopyInto(out) + return *out +} From 76794643c574a0ac4d9efb51515404f3b423feea Mon Sep 17 00:00:00 2001 From: David Eads Date: Fri, 18 May 2018 08:12:55 -0400 Subject: [PATCH 094/307] add wait --- build/visible_to/BUILD | 3 + hack/.golint_failures | 1 + hack/make-rules/test-cmd-util.sh | 6 +- pkg/kubectl/cmd/BUILD | 2 + pkg/kubectl/cmd/apply.go | 6 +- pkg/kubectl/cmd/apply_test.go | 10 + pkg/kubectl/cmd/cmd.go | 2 + pkg/kubectl/cmd/delete.go | 52 +- pkg/kubectl/cmd/delete_flags.go | 6 +- pkg/kubectl/cmd/replace.go | 6 +- pkg/kubectl/cmd/run.go | 2 +- pkg/kubectl/cmd/run_test.go | 4 +- pkg/kubectl/cmd/wait/BUILD | 60 +++ pkg/kubectl/cmd/wait/fakeresourcefinder.go | 54 ++ pkg/kubectl/cmd/wait/flags.go | 114 +++++ pkg/kubectl/cmd/wait/wait.go | 330 ++++++++++++ pkg/kubectl/cmd/wait/wait_test.go | 477 ++++++++++++++++++ .../genericclioptions/resource/interfaces.go | 12 + .../genericclioptions/resource/visitor.go | 12 - 19 files changed, 1135 insertions(+), 24 deletions(-) create mode 100644 pkg/kubectl/cmd/wait/BUILD create mode 100644 pkg/kubectl/cmd/wait/fakeresourcefinder.go create mode 100644 pkg/kubectl/cmd/wait/flags.go create mode 100644 pkg/kubectl/cmd/wait/wait.go create mode 100644 pkg/kubectl/cmd/wait/wait_test.go diff --git a/build/visible_to/BUILD b/build/visible_to/BUILD index 752599ee075..8e883787192 100644 --- a/build/visible_to/BUILD +++ b/build/visible_to/BUILD @@ -177,6 +177,7 @@ package_group( "//pkg/kubectl/cmd/templates", "//pkg/kubectl/cmd/util", "//pkg/kubectl/cmd/util/sanity", + "//pkg/kubectl/cmd/wait", ], ) @@ -196,6 +197,7 @@ package_group( "//pkg/kubectl/cmd/get", "//pkg/kubectl/cmd/rollout", "//pkg/kubectl/cmd/set", + "//pkg/kubectl/cmd/wait", "//pkg/kubectl/explain", ], ) @@ -230,6 +232,7 @@ package_group( "//pkg/kubectl/cmd/testing", "//pkg/kubectl/cmd/util", "//pkg/kubectl/cmd/util/editor", + "//pkg/kubectl/cmd/wait", ], ) diff --git a/hack/.golint_failures b/hack/.golint_failures index e0d0e58722d..2b2798e46a7 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -150,6 +150,7 @@ pkg/kubectl/cmd/util pkg/kubectl/cmd/util/editor pkg/kubectl/cmd/util/jsonmerge pkg/kubectl/cmd/util/sanity +pkg/kubectl/cmd/wait pkg/kubectl/genericclioptions pkg/kubectl/genericclioptions/printers 
pkg/kubectl/genericclioptions/resource diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index bba8d4656cc..216bb51f951 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -2382,7 +2382,11 @@ run_namespace_tests() { # Post-condition: namespace 'my-namespace' is created. kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace' # Clean up - kubectl delete namespace my-namespace + kubectl delete namespace my-namespace --wait=false + # make sure that wait properly waits for finalization + kubectl wait --for=delete ns/my-namespace + output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}") + kube::test::if_has_string "${output_message}" ' not found' ###################### # Pods in Namespaces # diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index 44c1e63bc5f..ca7d1d13cc8 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -77,6 +77,7 @@ go_library( "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/cmd/util/editor:go_default_library", "//pkg/kubectl/cmd/util/openapi:go_default_library", + "//pkg/kubectl/cmd/wait:go_default_library", "//pkg/kubectl/explain:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", "//pkg/kubectl/genericclioptions/printers:go_default_library", @@ -264,6 +265,7 @@ filegroup( "//pkg/kubectl/cmd/testdata/edit:all-srcs", "//pkg/kubectl/cmd/testing:all-srcs", "//pkg/kubectl/cmd/util:all-srcs", + "//pkg/kubectl/cmd/wait:all-srcs", ], tags = ["automanaged"], visibility = [ diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index bf112f3db3a..96b0298d288 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -207,7 +207,11 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } - o.DeleteOptions = o.DeleteFlags.ToOptions(o.IOStreams) + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + o.DeleteOptions = o.DeleteFlags.ToOptions(dynamicClient, o.IOStreams) o.ShouldIncludeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, o.Prune) o.OpenAPISchema, _ = f.OpenAPISchema() diff --git a/pkg/kubectl/cmd/apply_test.go b/pkg/kubectl/cmd/apply_test.go index 283266f5381..52593f2391b 100644 --- a/pkg/kubectl/cmd/apply_test.go +++ b/pkg/kubectl/cmd/apply_test.go @@ -523,6 +523,7 @@ func TestApplyObject(t *testing.T) { } tf.OpenAPISchemaFunc = fn tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) @@ -587,6 +588,7 @@ func TestApplyObjectOutput(t *testing.T) { } tf.OpenAPISchemaFunc = fn tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) @@ -648,6 +650,7 @@ func TestApplyRetry(t *testing.T) { } tf.OpenAPISchemaFunc = fn tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) @@ -697,6 +700,7 @@ func TestApplyNonExistObject(t *testing.T) { }), } tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) @@ -749,6 +753,7 @@ func TestApplyEmptyPatch(t *testing.T) { }), } tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() // 1. 
apply non exist object ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -823,6 +828,7 @@ func testApplyMultipleObjects(t *testing.T, asList bool) { } tf.OpenAPISchemaFunc = fn tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) @@ -923,6 +929,7 @@ func TestApplyNULLPreservation(t *testing.T) { } tf.OpenAPISchemaFunc = fn tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) @@ -989,6 +996,7 @@ func TestUnstructuredApply(t *testing.T) { } tf.OpenAPISchemaFunc = fn tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) @@ -1054,6 +1062,7 @@ func TestUnstructuredIdempotentApply(t *testing.T) { } tf.OpenAPISchemaFunc = fn tf.Namespace = "test" + tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) @@ -1223,6 +1232,7 @@ func TestForceApply(t *testing.T) { tf := cmdtesting.NewTestFactory() defer tf.Cleanup() + tf.ClientConfigVal = defaultClientConfig() tf.UnstructuredClient = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 623b0e6e400..4dad3727855 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/cmd/set" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/cmd/wait" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "github.com/spf13/cobra" @@ -362,6 +363,7 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { NewCmdApply("kubectl", f, ioStreams), NewCmdPatch(f, ioStreams), NewCmdReplace(f, ioStreams), + wait.NewCmdWait(f, ioStreams), NewCmdConvert(f, ioStreams), }, }, diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 8773d2e3b40..6fac3d34c2f 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -21,15 +21,18 @@ import ( "strings" "time" + "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + kubectlwait "k8s.io/kubernetes/pkg/kubectl/cmd/wait" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/util/i18n" @@ -106,8 +109,9 @@ type DeleteOptions struct { Output string - Mapper meta.RESTMapper - Result *resource.Result + DynamicClient dynamic.Interface + Mapper meta.RESTMapper + Result *resource.Result genericclioptions.IOStreams } @@ -122,7 +126,7 @@ func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra Long: delete_long, Example: delete_example, Run: func(cmd *cobra.Command, args []string) { - o := deleteFlags.ToOptions(streams) + o := deleteFlags.ToOptions(nil, streams) if err := o.Complete(f, args, cmd); err != nil { 
cmdutil.CheckErr(err) } @@ -138,6 +142,8 @@ func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra deleteFlags.AddFlags(cmd) + cmd.Flags().Bool("wait", true, `If true, wait for resources to be gone before returning. This waits for finalizers.`) + cmdutil.AddIncludeUninitializedFlag(cmd) return cmd } @@ -167,6 +173,9 @@ func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Co o.WaitForDeletion = true o.GracePeriod = 1 } + if b, err := cmd.Flags().GetBool("wait"); err == nil { + o.WaitForDeletion = b + } o.Reaper = f.Reaper @@ -194,6 +203,11 @@ func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Co return err } + o.DynamicClient, err = f.DynamicClient() + if err != nil { + return err + } + return nil } @@ -300,8 +314,38 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { } if found == 0 { fmt.Fprintf(o.Out, "No resources found\n") + return nil } - return nil + if !o.WaitForDeletion { + return nil + } + // if we don't have a dynamic client, we don't want to wait. Eventually when delete is cleaned up, this will likely + // drop out. + if o.DynamicClient == nil { + return nil + } + + effectiveTimeout := o.Timeout + if effectiveTimeout == 0 { + // if we requested to wait forever, set it to a week. + effectiveTimeout = 168 * time.Hour + } + waitOptions := kubectlwait.WaitOptions{ + ResourceFinder: kubectlwait.ResourceFinderForResult(o.Result), + DynamicClient: o.DynamicClient, + Timeout: effectiveTimeout, + + Printer: kubectlwait.NewDiscardingPrinter(), + ConditionFn: kubectlwait.IsDeleted, + IOStreams: o.IOStreams, + } + err = waitOptions.RunWait() + if errors.IsForbidden(err) { + // if we're forbidden from waiting, we shouldn't fail. + glog.V(1).Info(err) + return nil + } + return err } func (o *DeleteOptions) cascadingDeleteResource(info *resource.Info) error { diff --git a/pkg/kubectl/cmd/delete_flags.go b/pkg/kubectl/cmd/delete_flags.go index 6e65f166964..1a5a6db3e2c 100644 --- a/pkg/kubectl/cmd/delete_flags.go +++ b/pkg/kubectl/cmd/delete_flags.go @@ -21,6 +21,7 @@ import ( "github.com/spf13/cobra" + "k8s.io/client-go/dynamic" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" @@ -72,9 +73,10 @@ type DeleteFlags struct { Output *string } -func (f *DeleteFlags) ToOptions(streams genericclioptions.IOStreams) *DeleteOptions { +func (f *DeleteFlags) ToOptions(dynamicClient dynamic.Interface, streams genericclioptions.IOStreams) *DeleteOptions { options := &DeleteOptions{ - IOStreams: streams, + DynamicClient: dynamicClient, + IOStreams: streams, } // add filename options diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index 70131091f57..aca20204846 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -150,7 +150,11 @@ func (o *ReplaceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] return printer.PrintObj(obj, o.Out) } - deleteOpts := o.DeleteFlags.ToOptions(o.IOStreams) + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + deleteOpts := o.DeleteFlags.ToOptions(dynamicClient, o.IOStreams) //Replace will create a resource if it doesn't exist already, so ignore not found error deleteOpts.IgnoreNotFound = true diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index e19fea9f546..bd7d4edb721 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -223,7 +223,7 @@ func (o *RunOptions) Complete(f cmdutil.Factory, cmd 
*cobra.Command) error { return printer.PrintObj(obj, o.Out) } - deleteOpts := o.DeleteFlags.ToOptions(o.IOStreams) + deleteOpts := o.DeleteFlags.ToOptions(o.DynamicClient, o.IOStreams) deleteOpts.IgnoreNotFound = true deleteOpts.WaitForDeletion = false deleteOpts.GracePeriod = -1 diff --git a/pkg/kubectl/cmd/run_test.go b/pkg/kubectl/cmd/run_test.go index a628412afb0..b4a6f6850e6 100644 --- a/pkg/kubectl/cmd/run_test.go +++ b/pkg/kubectl/cmd/run_test.go @@ -207,7 +207,7 @@ func TestRunArgsFollowDashRules(t *testing.T) { deleteFlags := NewDeleteFlags("to use to replace the resource.") opts := &RunOptions{ PrintFlags: printFlags, - DeleteOptions: deleteFlags.ToOptions(genericclioptions.NewTestIOStreamsDiscard()), + DeleteOptions: deleteFlags.ToOptions(nil, genericclioptions.NewTestIOStreamsDiscard()), IOStreams: genericclioptions.NewTestIOStreamsDiscard(), @@ -376,7 +376,7 @@ func TestGenerateService(t *testing.T) { deleteFlags := NewDeleteFlags("to use to replace the resource.") opts := &RunOptions{ PrintFlags: printFlags, - DeleteOptions: deleteFlags.ToOptions(genericclioptions.NewTestIOStreamsDiscard()), + DeleteOptions: deleteFlags.ToOptions(nil, genericclioptions.NewTestIOStreamsDiscard()), IOStreams: ioStreams, diff --git a/pkg/kubectl/cmd/wait/BUILD b/pkg/kubectl/cmd/wait/BUILD new file mode 100644 index 00000000000..621977eda94 --- /dev/null +++ b/pkg/kubectl/cmd/wait/BUILD @@ -0,0 +1,60 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "fakeresourcefinder.go", + "flags.go", + "wait.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/wait", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kubectl/cmd/util:go_default_library", + "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", + "//pkg/kubectl/genericclioptions/resource:go_default_library", + "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["wait_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/resource:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + 
"//vendor/k8s.io/client-go/dynamic/fake:go_default_library", + "//vendor/k8s.io/client-go/testing:go_default_library", + ], +) diff --git a/pkg/kubectl/cmd/wait/fakeresourcefinder.go b/pkg/kubectl/cmd/wait/fakeresourcefinder.go new file mode 100644 index 00000000000..591dea27ef4 --- /dev/null +++ b/pkg/kubectl/cmd/wait/fakeresourcefinder.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" +) + +// NewSimpleResourceFinder builds a super simple ResourceFinder that just iterates over the objects you provided +func NewSimpleResourceFinder(infos ...*resource.Info) ResourceFinder { + return &fakeResourceFinder{ + Infos: infos, + } +} + +type fakeResourceFinder struct { + Infos []*resource.Info +} + +// Do implements the interface +func (f *fakeResourceFinder) Do() resource.Visitor { + return &fakeResourceResult{ + Infos: f.Infos, + } +} + +type fakeResourceResult struct { + Infos []*resource.Info +} + +// Visit just iterates over info +func (r *fakeResourceResult) Visit(fn resource.VisitorFunc) error { + for _, info := range r.Infos { + err := fn(info, nil) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/kubectl/cmd/wait/flags.go b/pkg/kubectl/cmd/wait/flags.go new file mode 100644 index 00000000000..824a8af8db6 --- /dev/null +++ b/pkg/kubectl/cmd/wait/flags.go @@ -0,0 +1,114 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" +) + +// ResourceBuilderFlags are flags for finding resources +type ResourceBuilderFlags struct { + FilenameOptions resource.FilenameOptions + + LabelSelector string + FieldSelector string + AllNamespaces bool + Namespace string + ExplicitNamespace bool + + // TODO add conditional support. These are false for now. 
+ All bool + Local bool +} + +// NewResourceBuilderFlags returns a default ResourceBuilderFlags +func NewResourceBuilderFlags() *ResourceBuilderFlags { + return &ResourceBuilderFlags{ + FilenameOptions: resource.FilenameOptions{ + Recursive: true, + }, + } +} + +// AddFlags registers flags for finding resources +func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { + flagset.StringSliceVarP(&o.FilenameOptions.Filenames, "filename", "f", o.FilenameOptions.Filenames, "Filename, directory, or URL to files identifying the resource.") + annotations := make([]string, 0, len(resource.FileExtensions)) + for _, ext := range resource.FileExtensions { + annotations = append(annotations, strings.TrimLeft(ext, ".")) + } + flagset.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) + flagset.BoolVar(&o.FilenameOptions.Recursive, "recursive", o.FilenameOptions.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") + + flagset.StringVarP(&o.LabelSelector, "selector", "l", o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") + flagset.StringVar(&o.FieldSelector, "field-selector", o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") + flagset.BoolVar(&o.AllNamespaces, "all-namespaces", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.") +} + +// ToBuilder gives you back a resource finder to visit resources that are located +func (o *ResourceBuilderFlags) ToBuilder(restClientGetter genericclioptions.RESTClientGetter, resources []string) ResourceFinder { + namespace, enforceNamespace, namespaceErr := restClientGetter.ToRawKubeConfigLoader().Namespace() + + return &ResourceFindBuilderWrapper{ + builder: resource.NewBuilder(restClientGetter). + Unstructured(). + NamespaceParam(namespace).DefaultNamespace(). + FilenameParam(enforceNamespace, &o.FilenameOptions). + LabelSelectorParam(o.LabelSelector). + FieldSelectorParam(o.FieldSelector). + ResourceTypeOrNameArgs(o.All, resources...). + Latest(). + Flatten(). + AddError(namespaceErr), + } +} + +// ResourceFindBuilderWrapper wraps a builder in an interface +type ResourceFindBuilderWrapper struct { + builder *resource.Builder +} + +// Do finds you resources to check +func (b *ResourceFindBuilderWrapper) Do() resource.Visitor { + return b.builder.Do() +} + +// ResourceFinder allows mocking the resource builder +// TODO resource builders needs to become more interfacey +type ResourceFinder interface { + Do() resource.Visitor +} + +// ResourceFinderFunc is a handy way to make a ResourceFinder +type ResourceFinderFunc func() resource.Visitor + +// Do implements ResourceFinder +func (fn ResourceFinderFunc) Do() resource.Visitor { + return fn() +} + +// ResourceFinderForResult skins a visitor for re-use as a ResourceFinder +func ResourceFinderForResult(result resource.Visitor) ResourceFinder { + return ResourceFinderFunc(func() resource.Visitor { + return result + }) +} diff --git a/pkg/kubectl/cmd/wait/wait.go b/pkg/kubectl/cmd/wait/wait.go new file mode 100644 index 00000000000..6838a8b91d1 --- /dev/null +++ b/pkg/kubectl/cmd/wait/wait.go @@ -0,0 +1,330 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "fmt" + "io" + "strings" + "time" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" +) + +// WaitFlags directly reflect the information that CLI is gathering via flags. They will be converted to Options, which +// reflect the runtime requirements for the command. This structure reduces the transformation to wiring and makes +// the logic itself easy to unit test +type WaitFlags struct { + RESTClientGetter genericclioptions.RESTClientGetter + PrintFlags *genericclioptions.PrintFlags + ResourceBuilderFlags *ResourceBuilderFlags + + Timeout time.Duration + ForCondition string + + genericclioptions.IOStreams +} + +// NewWaitFlags returns a default WaitFlags +func NewWaitFlags(restClientGetter genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *WaitFlags { + return &WaitFlags{ + RESTClientGetter: restClientGetter, + PrintFlags: genericclioptions.NewPrintFlags("condition met"), + ResourceBuilderFlags: NewResourceBuilderFlags(), + + Timeout: 30 * time.Second, + + IOStreams: streams, + } +} + +// NewCmdWait returns a cobra command for waiting +func NewCmdWait(restClientGetter genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *cobra.Command { + flags := NewWaitFlags(restClientGetter, streams) + + cmd := &cobra.Command{ + Use: "wait resource.group/name [--for=delete|--for condition=available]", + DisableFlagsInUseLine: true, + Short: "Wait for one condition on one or many resources", + Run: func(cmd *cobra.Command, args []string) { + o, err := flags.ToOptions(args) + cmdutil.CheckErr(err) + err = o.RunWait() + cmdutil.CheckErr(err) + }, + SuggestFor: []string{"list", "ps"}, + } + + flags.AddFlags(cmd) + + return cmd +} + +// AddFlags registers flags for a cli +func (flags *WaitFlags) AddFlags(cmd *cobra.Command) { + flags.PrintFlags.AddFlags(cmd) + flags.ResourceBuilderFlags.AddFlags(cmd.Flags()) + + cmd.Flags().DurationVar(&flags.Timeout, "timeout", flags.Timeout, "The length of time to wait before giving up. 
Zero means check once and don't wait, negative means wait for a week.") + cmd.Flags().StringVar(&flags.ForCondition, "for", flags.ForCondition, "The condition to wait on: [delete|condition=condition-name].") +} + +// ToOptions converts from CLI inputs to runtime inputs +func (flags *WaitFlags) ToOptions(args []string) (*WaitOptions, error) { + printer, err := flags.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + builder := flags.ResourceBuilderFlags.ToBuilder(flags.RESTClientGetter, args) + clientConfig, err := flags.RESTClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + conditionFn, err := conditionFuncFor(flags.ForCondition) + if err != nil { + return nil, err + } + + effectiveTimeout := flags.Timeout + if effectiveTimeout < 0 { + effectiveTimeout = 168 * time.Hour + } + + o := &WaitOptions{ + ResourceFinder: builder, + DynamicClient: dynamicClient, + Timeout: effectiveTimeout, + + Printer: printer, + ConditionFn: conditionFn, + IOStreams: flags.IOStreams, + } + + return o, nil +} + +func conditionFuncFor(condition string) (ConditionFunc, error) { + if strings.ToLower(condition) == "delete" { + return IsDeleted, nil + } + if strings.HasPrefix(condition, "condition=") { + conditionName := condition[len("condition="):] + return ConditionalWait{ + conditionName: conditionName, + // TODO allow specifying a false + conditionStatus: "true", + }.IsConditionMet, nil + } + + return nil, fmt.Errorf("unrecognized condition: %q", condition) +} + +// WaitOptions is a set of options that allows you to wait. This is the object that reflects the runtime needs of a wait +// command, making the logic itself easy to unit test with our existing mocks.
+type WaitOptions struct { + ResourceFinder ResourceFinder + DynamicClient dynamic.Interface + Timeout time.Duration + + Printer printers.ResourcePrinter + ConditionFn ConditionFunc + genericclioptions.IOStreams +} + +// ConditionFunc is the interface for providing condition checks +type ConditionFunc func(info *resource.Info, o *WaitOptions) (finalObject runtime.Object, done bool, err error) + +// RunWait runs the waiting logic +func (o *WaitOptions) RunWait() error { + return o.ResourceFinder.Do().Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + finalObject, success, err := o.ConditionFn(info, o) + if success { + o.Printer.PrintObj(finalObject, o.Out) + return nil + } + if err == nil { + return fmt.Errorf("%v unsatisfied for unknown reason", finalObject) + } + return err + }) +} + +// IsDeleted is a condition func for waiting for something to be deleted +func IsDeleted(info *resource.Info, o *WaitOptions) (runtime.Object, bool, error) { + endTime := time.Now().Add(o.Timeout) + for { + gottenObj, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Get(info.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return info.Object, true, nil + } + if err != nil { + // TODO this could do something slightly fancier if we wish + return info.Object, false, err + } + + watchOptions := metav1.ListOptions{} + watchOptions.FieldSelector = "metadata.name=" + info.Name + watchOptions.ResourceVersion = gottenObj.GetResourceVersion() + objWatch, err := o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(watchOptions) + if err != nil { + return gottenObj, false, err + } + + timeout :=
endTime.Sub(time.Now()) + if timeout < 0 { + // we're out of time + return gottenObj, false, wait.ErrWaitTimeout + } + watchEvent, err := watch.Until(o.Timeout, objWatch, w.isConditionMet) + switch { + case err == nil: + return watchEvent.Object, true, nil + case err == watch.ErrWatchClosed: + continue + case err == wait.ErrWaitTimeout: + if watchEvent != nil { + return watchEvent.Object, false, wait.ErrWaitTimeout + } + return gottenObj, false, wait.ErrWaitTimeout + default: + return gottenObj, false, err + } + } +} + +func (w ConditionalWait) checkCondition(obj *unstructured.Unstructured) (bool, error) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil { + return false, err + } + if !found { + return false, nil + } + for _, conditionUncast := range conditions { + condition := conditionUncast.(map[string]interface{}) + name, found, err := unstructured.NestedString(condition, "type") + if !found || err != nil || strings.ToLower(name) != strings.ToLower(w.conditionName) { + continue + } + status, found, err := unstructured.NestedString(condition, "status") + if !found || err != nil { + continue + } + return strings.ToLower(status) == strings.ToLower(w.conditionStatus), nil + } + + return false, nil +} + +func (w ConditionalWait) isConditionMet(event watch.Event) (bool, error) { + if event.Type == watch.Deleted { + // this will chain back out, result in another get and an return false back up the chain + return false, nil + } + obj := event.Object.(*unstructured.Unstructured) + return w.checkCondition(obj) +} + +// NewDiscardingPrinter is a printer that discards all objects +// TODO use the real discarding printer from a different pull I just opened. +func NewDiscardingPrinter() printers.ResourcePrinterFunc { + return printers.ResourcePrinterFunc(func(runtime.Object, io.Writer) error { + return nil + }) +} diff --git a/pkg/kubectl/cmd/wait/wait_test.go b/pkg/kubectl/cmd/wait/wait_test.go new file mode 100644 index 00000000000..b26f3060a94 --- /dev/null +++ b/pkg/kubectl/cmd/wait/wait_test.go @@ -0,0 +1,477 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package wait + +import ( + "testing" + + "time" + + "strings" + + "github.com/davecgh/go-spew/spew" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + dynamicfakeclient "k8s.io/client-go/dynamic/fake" + clienttesting "k8s.io/client-go/testing" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" +) + +func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]interface{}{ + "namespace": namespace, + "name": name, + }, + }, + } +} + +func addCondition(in *unstructured.Unstructured, name, status string) *unstructured.Unstructured { + conditions, _, _ := unstructured.NestedSlice(in.Object, "status", "conditions") + conditions = append(conditions, map[string]interface{}{ + "type": name, + "status": status, + }) + unstructured.SetNestedSlice(in.Object, conditions, "status", "conditions") + return in +} + +func TestWaitForDeletion(t *testing.T) { + scheme := runtime.NewScheme() + + tests := []struct { + name string + info *resource.Info + fakeClient func() *dynamicfakeclient.FakeDynamicClient + timeout time.Duration + + expectedErr string + validateActions func(t *testing.T, actions []clienttesting.Action) + }{ + { + name: "missing on get", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + return dynamicfakeclient.NewSimpleDynamicClient(scheme) + }, + timeout: 10 * time.Second, + + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 1 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + }, + }, + { + name: "times out", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + fakeClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeClient.PrependReactor("get", "theresource", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + return true, newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), nil + }) + return fakeClient + }, + timeout: 1 * time.Second, + + expectedErr: wait.ErrWaitTimeout.Error(), + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + }, + }, + { + name: "handles watch close out", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + 
fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + fakeClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeClient.PrependReactor("get", "theresource", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + return true, newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), nil + }) + count := 0 + fakeClient.PrependWatchReactor("theresource", func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) { + if count == 0 { + count++ + fakeWatch := watch.NewRaceFreeFake() + go func() { + time.Sleep(100 * time.Millisecond) + fakeWatch.Stop() + }() + return true, fakeWatch, nil + } + fakeWatch := watch.NewRaceFreeFake() + return true, fakeWatch, nil + }) + return fakeClient + }, + timeout: 3 * time.Second, + + expectedErr: wait.ErrWaitTimeout.Error(), + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 4 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + if !actions[2].Matches("get", "theresource") || actions[2].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[3].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + }, + }, + { + name: "handles watch delete", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + fakeClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeClient.PrependReactor("get", "theresource", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + return true, newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), nil + }) + fakeClient.PrependWatchReactor("theresource", func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) { + fakeWatch := watch.NewRaceFreeFake() + fakeWatch.Action(watch.Deleted, newUnstructured("group/version", "TheKind", "ns-foo", "name-foo")) + return true, fakeWatch, nil + }) + return fakeClient + }, + timeout: 10 * time.Second, + + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeClient := test.fakeClient() + o := &WaitOptions{ + ResourceFinder: NewSimpleResourceFinder(test.info), + DynamicClient: fakeClient, + Timeout: test.timeout, + + Printer: NewDiscardingPrinter(), + ConditionFn: IsDeleted, + IOStreams: genericclioptions.NewTestIOStreamsDiscard(), + } + err := o.RunWait() + switch { + case err == nil && len(test.expectedErr) == 0: + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing: %q", test.expectedErr) + case err != nil && len(test.expectedErr) != 0: + if !strings.Contains(err.Error(), test.expectedErr) { + t.Fatalf("expected %q, got %q", test.expectedErr, err.Error()) 
+ } + } + + test.validateActions(t, fakeClient.Actions()) + }) + } +} + +func TestWaitForCondition(t *testing.T) { + scheme := runtime.NewScheme() + + tests := []struct { + name string + info *resource.Info + fakeClient func() *dynamicfakeclient.FakeDynamicClient + timeout time.Duration + + expectedErr string + validateActions func(t *testing.T, actions []clienttesting.Action) + }{ + { + name: "present on get", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + fakeClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeClient.PrependReactor("get", "theresource", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + return true, addCondition( + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + "the-condition", "status-value", + ), nil + }) + return fakeClient + }, + timeout: 10 * time.Second, + + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 1 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + }, + }, + { + name: "times out", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + fakeClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeClient.PrependReactor("get", "theresource", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + return true, addCondition( + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + "some-other-condition", "status-value", + ), nil + }) + return fakeClient + }, + timeout: 1 * time.Second, + + expectedErr: wait.ErrWaitTimeout.Error(), + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + }, + }, + { + name: "handles watch close out", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + fakeClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeClient.PrependReactor("get", "theresource", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + return true, newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), nil + }) + count := 0 + fakeClient.PrependWatchReactor("theresource", func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) { + if count == 0 { + count++ + fakeWatch := watch.NewRaceFreeFake() + go func() { + time.Sleep(100 * time.Millisecond) + fakeWatch.Stop() + }() + return true, fakeWatch, nil + } + fakeWatch := watch.NewRaceFreeFake() + return true, fakeWatch, nil + }) + return fakeClient + }, + timeout: 3 * time.Second, + + expectedErr: 
wait.ErrWaitTimeout.Error(), + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 4 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + if !actions[2].Matches("get", "theresource") || actions[2].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[3].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + }, + }, + { + name: "handles watch condition change", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + fakeClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeClient.PrependReactor("get", "theresource", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + return true, newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), nil + }) + fakeClient.PrependWatchReactor("theresource", func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) { + fakeWatch := watch.NewRaceFreeFake() + fakeWatch.Action(watch.Modified, addCondition( + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + "the-condition", "status-value", + )) + return true, fakeWatch, nil + }) + return fakeClient + }, + timeout: 10 * time.Second, + + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + }, + }, + { + name: "handles watch created", + info: &resource.Info{ + Mapping: &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "theresource"}, + }, + Name: "name-foo", + Namespace: "ns-foo", + }, + fakeClient: func() *dynamicfakeclient.FakeDynamicClient { + fakeClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeClient.PrependWatchReactor("theresource", func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) { + fakeWatch := watch.NewRaceFreeFake() + fakeWatch.Action(watch.Added, addCondition( + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + "the-condition", "status-value", + )) + return true, fakeWatch, nil + }) + return fakeClient + }, + timeout: 10 * time.Second, + + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "theresource") || actions[0].(clienttesting.GetAction).GetName() != "name-foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("watch", "theresource") { + t.Error(spew.Sdump(actions)) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeClient := test.fakeClient() + o := &WaitOptions{ + ResourceFinder: NewSimpleResourceFinder(test.info), + DynamicClient: fakeClient, + Timeout: test.timeout, + + Printer: NewDiscardingPrinter(), + ConditionFn: ConditionalWait{conditionName: "the-condition", 
conditionStatus: "status-value"}.IsConditionMet, + IOStreams: genericclioptions.NewTestIOStreamsDiscard(), + } + err := o.RunWait() + switch { + case err == nil && len(test.expectedErr) == 0: + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing: %q", test.expectedErr) + case err != nil && len(test.expectedErr) != 0: + if !strings.Contains(err.Error(), test.expectedErr) { + t.Fatalf("expected %q, got %q", test.expectedErr, err.Error()) + } + } + + test.validateActions(t, fakeClient.Actions()) + }) + } +} diff --git a/pkg/kubectl/genericclioptions/resource/interfaces.go b/pkg/kubectl/genericclioptions/resource/interfaces.go index 1a26ec9cc84..6179481a5d8 100644 --- a/pkg/kubectl/genericclioptions/resource/interfaces.go +++ b/pkg/kubectl/genericclioptions/resource/interfaces.go @@ -83,3 +83,15 @@ func (c *clientOptions) Put() *rest.Request { type ContentValidator interface { ValidateBytes(data []byte) error } + +// Visitor lets clients walk a list of resources. +type Visitor interface { + Visit(VisitorFunc) error +} + +// VisitorFunc implements the Visitor interface for a matching function. +// If there was a problem walking a list of resources, the incoming error +// will describe the problem and the function can decide how to handle that error. +// A nil returned indicates to accept an error to continue loops even when errors happen. +// This is useful for ignoring certain kinds of errors or aggregating errors in some way. +type VisitorFunc func(*Info, error) error diff --git a/pkg/kubectl/genericclioptions/resource/visitor.go b/pkg/kubectl/genericclioptions/resource/visitor.go index 55031a470bd..e83d02aa422 100644 --- a/pkg/kubectl/genericclioptions/resource/visitor.go +++ b/pkg/kubectl/genericclioptions/resource/visitor.go @@ -45,18 +45,6 @@ const ( stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" ) -// Visitor lets clients walk a list of resources. -type Visitor interface { - Visit(VisitorFunc) error -} - -// VisitorFunc implements the Visitor interface for a matching function. -// If there was a problem walking a list of resources, the incoming error -// will describe the problem and the function can decide how to handle that error. -// A nil returned indicates to accept an error to continue loops even when errors happen. -// This is useful for ignoring certain kinds of errors or aggregating errors in some way. -type VisitorFunc func(*Info, error) error - // Watchable describes a resource that can be watched for changes that occur on the server, // beginning after the provided resource version. 
type Watchable interface { From 0d0863ea1010a9b5d69416a8fab94b939b69725b Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 21 May 2018 11:51:03 -0400 Subject: [PATCH 095/307] generated --- docs/.generated_docs | 3 +++ docs/man/man1/kubectl-wait.1 | 3 +++ docs/user-guide/kubectl/kubectl_wait.md | 3 +++ docs/yaml/kubectl/kubectl_wait.yaml | 3 +++ 4 files changed, 12 insertions(+) create mode 100644 docs/man/man1/kubectl-wait.1 create mode 100644 docs/user-guide/kubectl/kubectl_wait.md create mode 100644 docs/yaml/kubectl/kubectl_wait.yaml diff --git a/docs/.generated_docs b/docs/.generated_docs index 7a4aefa192b..ae0fe4f2c45 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -258,6 +258,7 @@ docs/man/man1/kubectl-top-pod.1 docs/man/man1/kubectl-top.1 docs/man/man1/kubectl-uncordon.1 docs/man/man1/kubectl-version.1 +docs/man/man1/kubectl-wait.1 docs/man/man1/kubectl.1 docs/man/man1/kubelet.1 docs/user-guide/kubectl/kubectl.md @@ -357,6 +358,7 @@ docs/user-guide/kubectl/kubectl_top_node.md docs/user-guide/kubectl/kubectl_top_pod.md docs/user-guide/kubectl/kubectl_uncordon.md docs/user-guide/kubectl/kubectl_version.md +docs/user-guide/kubectl/kubectl_wait.md docs/yaml/kubectl/kubectl.yaml docs/yaml/kubectl/kubectl_alpha.yaml docs/yaml/kubectl/kubectl_annotate.yaml @@ -400,3 +402,4 @@ docs/yaml/kubectl/kubectl_taint.yaml docs/yaml/kubectl/kubectl_top.yaml docs/yaml/kubectl/kubectl_uncordon.yaml docs/yaml/kubectl/kubectl_version.yaml +docs/yaml/kubectl/kubectl_wait.yaml diff --git a/docs/man/man1/kubectl-wait.1 b/docs/man/man1/kubectl-wait.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubectl-wait.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/user-guide/kubectl/kubectl_wait.md b/docs/user-guide/kubectl/kubectl_wait.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/user-guide/kubectl/kubectl_wait.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/yaml/kubectl/kubectl_wait.yaml b/docs/yaml/kubectl/kubectl_wait.yaml new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/yaml/kubectl/kubectl_wait.yaml @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. 
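The patches above add a kubectl wait command and teach kubectl delete to wait for finalization, which is what the test-cmd-util.sh hunk at the top of this section exercises. A minimal usage sketch of the new flow follows; the namespace and deployment names are hypothetical, and "available" stands in for whatever condition the target resource actually reports in status.conditions:

  kubectl delete namespace my-namespace --wait=false            # returns without waiting for finalizers
  kubectl wait --for=delete ns/my-namespace --timeout=60s       # blocks until the namespace is actually gone
  kubectl wait --for=condition=available deployment/my-deploy --timeout=60s   # blocks until the condition reports status True

Per the flag help added in wait.go, a zero --timeout checks once without waiting, and a negative value is treated as roughly a week.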
From 49258593c34c377592865e2adfb3c01cd4c34bbd Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 22 May 2018 08:46:51 -0400 Subject: [PATCH 096/307] add a discarding printer for testing and delegation --- pkg/kubectl/cmd/delete.go | 3 +- pkg/kubectl/cmd/wait/BUILD | 1 + pkg/kubectl/cmd/wait/wait.go | 9 ------ pkg/kubectl/cmd/wait/wait_test.go | 5 ++-- pkg/kubectl/genericclioptions/printers/BUILD | 1 + .../genericclioptions/printers/discard.go | 30 +++++++++++++++++++ .../genericclioptions/printers/interface.go | 5 ++++ 7 files changed, 42 insertions(+), 12 deletions(-) create mode 100644 pkg/kubectl/genericclioptions/printers/discard.go diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 6fac3d34c2f..30b1872e3af 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -34,6 +34,7 @@ import ( cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" kubectlwait "k8s.io/kubernetes/pkg/kubectl/cmd/wait" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/util/i18n" ) @@ -335,7 +336,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { DynamicClient: o.DynamicClient, Timeout: effectiveTimeout, - Printer: kubectlwait.NewDiscardingPrinter(), + Printer: printers.NewDiscardingPrinter(), ConditionFn: kubectlwait.IsDeleted, IOStreams: o.IOStreams, } diff --git a/pkg/kubectl/cmd/wait/BUILD b/pkg/kubectl/cmd/wait/BUILD index 621977eda94..ce1253f61eb 100644 --- a/pkg/kubectl/cmd/wait/BUILD +++ b/pkg/kubectl/cmd/wait/BUILD @@ -46,6 +46,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/pkg/kubectl/cmd/wait/wait.go b/pkg/kubectl/cmd/wait/wait.go index 6838a8b91d1..e75d5f418dc 100644 --- a/pkg/kubectl/cmd/wait/wait.go +++ b/pkg/kubectl/cmd/wait/wait.go @@ -18,7 +18,6 @@ package wait import ( "fmt" - "io" "strings" "time" @@ -320,11 +319,3 @@ func (w ConditionalWait) isConditionMet(event watch.Event) (bool, error) { obj := event.Object.(*unstructured.Unstructured) return w.checkCondition(obj) } - -// NewDiscardingPrinter is a printer that discards all objects -// TODO use the real discarding printer from a different pull I just opened. 
-func NewDiscardingPrinter() printers.ResourcePrinterFunc { - return printers.ResourcePrinterFunc(func(runtime.Object, io.Writer) error { - return nil - }) -} diff --git a/pkg/kubectl/cmd/wait/wait_test.go b/pkg/kubectl/cmd/wait/wait_test.go index b26f3060a94..6ef63357bd5 100644 --- a/pkg/kubectl/cmd/wait/wait_test.go +++ b/pkg/kubectl/cmd/wait/wait_test.go @@ -34,6 +34,7 @@ import ( dynamicfakeclient "k8s.io/client-go/dynamic/fake" clienttesting "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" ) @@ -222,7 +223,7 @@ func TestWaitForDeletion(t *testing.T) { DynamicClient: fakeClient, Timeout: test.timeout, - Printer: NewDiscardingPrinter(), + Printer: printers.NewDiscardingPrinter(), ConditionFn: IsDeleted, IOStreams: genericclioptions.NewTestIOStreamsDiscard(), } @@ -454,7 +455,7 @@ func TestWaitForCondition(t *testing.T) { DynamicClient: fakeClient, Timeout: test.timeout, - Printer: NewDiscardingPrinter(), + Printer: printers.NewDiscardingPrinter(), ConditionFn: ConditionalWait{conditionName: "the-condition", conditionStatus: "status-value"}.IsConditionMet, IOStreams: genericclioptions.NewTestIOStreamsDiscard(), } diff --git a/pkg/kubectl/genericclioptions/printers/BUILD b/pkg/kubectl/genericclioptions/printers/BUILD index f62c55053ea..c29044dc639 100644 --- a/pkg/kubectl/genericclioptions/printers/BUILD +++ b/pkg/kubectl/genericclioptions/printers/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "discard.go", "interface.go", "json.go", "name.go", diff --git a/pkg/kubectl/genericclioptions/printers/discard.go b/pkg/kubectl/genericclioptions/printers/discard.go new file mode 100644 index 00000000000..cd934976da7 --- /dev/null +++ b/pkg/kubectl/genericclioptions/printers/discard.go @@ -0,0 +1,30 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDiscardingPrinter is a printer that discards all objects +func NewDiscardingPrinter() ResourcePrinterFunc { + return ResourcePrinterFunc(func(runtime.Object, io.Writer) error { + return nil + }) +} diff --git a/pkg/kubectl/genericclioptions/printers/interface.go b/pkg/kubectl/genericclioptions/printers/interface.go index a42a136a98d..b59a935fcab 100644 --- a/pkg/kubectl/genericclioptions/printers/interface.go +++ b/pkg/kubectl/genericclioptions/printers/interface.go @@ -25,6 +25,11 @@ import ( // ResourcePrinterFunc is a function that can print objects type ResourcePrinterFunc func(runtime.Object, io.Writer) error +// PrintObj implements ResourcePrinter +func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error { + return fn(obj, w) +} + // ResourcePrinter is an interface that knows how to print runtime objects. 
type ResourcePrinter interface { // Print receives a runtime object, formats it and prints it to a writer. From 043f66b86ed2218f57dcfbad8288d25f9b6d33cf Mon Sep 17 00:00:00 2001 From: David Eads Date: Mon, 21 May 2018 16:01:08 -0400 Subject: [PATCH 097/307] remove portsforobject from factory --- pkg/kubectl/cmd/autoscale.go | 6 +- pkg/kubectl/cmd/expose.go | 5 +- pkg/kubectl/cmd/util/factory.go | 23 ------- pkg/kubectl/cmd/util/factory_client_access.go | 29 --------- pkg/kubectl/cmd/util/factory_test.go | 31 ---------- pkg/kubectl/polymorphichelpers/BUILD | 5 ++ .../polymorphichelpers/canbeautoscaled.go | 37 +++++++++++ pkg/kubectl/polymorphichelpers/interface.go | 13 ++++ .../polymorphichelpers/portsforobject.go | 62 +++++++++++++++++++ .../polymorphichelpers/portsforobject_test.go | 54 ++++++++++++++++ 10 files changed, 177 insertions(+), 88 deletions(-) create mode 100644 pkg/kubectl/polymorphichelpers/canbeautoscaled.go create mode 100644 pkg/kubectl/polymorphichelpers/portsforobject.go create mode 100644 pkg/kubectl/polymorphichelpers/portsforobject_test.go diff --git a/pkg/kubectl/cmd/autoscale.go b/pkg/kubectl/cmd/autoscale.go index eb0054dfbf2..b9282e86dbd 100644 --- a/pkg/kubectl/cmd/autoscale.go +++ b/pkg/kubectl/cmd/autoscale.go @@ -24,7 +24,6 @@ import ( autoscalingv1 "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" autoscalingv1client "k8s.io/client-go/kubernetes/typed/autoscaling/v1" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl" @@ -33,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" ) @@ -73,7 +73,7 @@ type AutoscaleOptions struct { namespace string dryRun bool builder *resource.Builder - canBeAutoscaled func(kind schema.GroupKind) error + canBeAutoscaled polymorphichelpers.CanBeAutoscaledFunc generatorFunc func(string, *meta.RESTMapping) (kubectl.StructuredGenerator, error) HPAClient autoscalingv1client.HorizontalPodAutoscalersGetter @@ -132,7 +132,7 @@ func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args o.dryRun = cmdutil.GetFlagBool(cmd, "dry-run") o.createAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) o.builder = f.NewBuilder() - o.canBeAutoscaled = f.CanBeAutoscaled + o.canBeAutoscaled = polymorphichelpers.CanBeAutoscaledFn o.args = args o.RecordFlags.Complete(f.Command(cmd, false)) diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index daecc86ca09..e4e7e7490dd 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" ) @@ -94,7 +95,7 @@ type ExposeServiceOptions struct { CanBeExposed func(kind schema.GroupKind) error ClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error) MapBasedSelectorForObject func(runtime.Object) (string, error) - PortsForObject func(runtime.Object) ([]string, error) + PortsForObject polymorphichelpers.PortsForObjectFunc ProtocolsForObject func(runtime.Object) (map[string]string, 
error) Namespace string @@ -193,7 +194,7 @@ func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) e o.CanBeExposed = f.CanBeExposed o.ClientForMapping = f.ClientForMapping o.MapBasedSelectorForObject = f.MapBasedSelectorForObject - o.PortsForObject = f.PortsForObject + o.PortsForObject = polymorphichelpers.PortsForObjectFn o.ProtocolsForObject = f.ProtocolsForObject o.Mapper, err = f.ToRESTMapper() if err != nil { diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 9af233887c1..bc549c455e1 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -82,8 +82,6 @@ type ClientAccessFactory interface { // new set-based selector is provided, an error is returned if the selector cannot be converted to a // map-based selector MapBasedSelectorForObject(object runtime.Object) (string, error) - // PortsForObject returns the ports associated with the provided object - PortsForObject(object runtime.Object) ([]string, error) // ProtocolsForObject returns the mapping associated with the provided object ProtocolsForObject(object runtime.Object) (map[string]string, error) @@ -111,8 +109,6 @@ type ClientAccessFactory interface { Generators(cmdName string) map[string]kubectl.Generator // Check whether the kind of resources could be exposed CanBeExposed(kind schema.GroupKind) error - // Check whether the kind of resources could be autoscaled - CanBeAutoscaled(kind schema.GroupKind) error } // ObjectMappingFactory holds the second level of factory methods. These functions depend upon ClientAccessFactory methods. @@ -178,16 +174,6 @@ func makePortsString(ports []api.ServicePort, useNodePort bool) string { return strings.Join(pieces, ",") } -func getPorts(spec api.PodSpec) []string { - result := []string{} - for _, container := range spec.Containers { - for _, port := range container.Ports { - result = append(result, strconv.Itoa(int(port.ContainerPort))) - } - } - return result -} - func getProtocols(spec api.PodSpec) map[string]string { result := make(map[string]string) for _, container := range spec.Containers { @@ -198,15 +184,6 @@ func getProtocols(spec api.PodSpec) map[string]string { return result } -// Extracts the ports exposed by a service from the given service spec. -func getServicePorts(spec api.ServiceSpec) []string { - result := []string{} - for _, servicePort := range spec.Ports { - result = append(result, strconv.Itoa(int(servicePort.Port))) - } - return result -} - // Extracts the protocols exposed by a service from the given service spec. 
func getServiceProtocols(spec api.ServiceSpec) map[string]string { result := make(map[string]string) diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index 9fb932d262c..1cffea783f3 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -160,24 +160,6 @@ func (f *ring0Factory) MapBasedSelectorForObject(object runtime.Object) (string, } } -func (f *ring0Factory) PortsForObject(object runtime.Object) ([]string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return getPorts(t.Spec.Template.Spec), nil - case *api.Pod: - return getPorts(t.Spec), nil - case *api.Service: - return getServicePorts(t.Spec), nil - case *extensions.Deployment: - return getPorts(t.Spec.Template.Spec), nil - case *extensions.ReplicaSet: - return getPorts(t.Spec.Template.Spec), nil - default: - return nil, fmt.Errorf("cannot extract ports from %T", object) - } -} - func (f *ring0Factory) ProtocolsForObject(object runtime.Object) (map[string]string, error) { // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) switch t := object.(type) { @@ -488,17 +470,6 @@ func (f *ring0Factory) CanBeExposed(kind schema.GroupKind) error { return nil } -func (f *ring0Factory) CanBeAutoscaled(kind schema.GroupKind) error { - switch kind { - case api.Kind("ReplicationController"), extensions.Kind("ReplicaSet"), - extensions.Kind("Deployment"), apps.Kind("Deployment"), apps.Kind("ReplicaSet"): - // nothing to do here - default: - return fmt.Errorf("cannot autoscale a %v", kind) - } - return nil -} - // this method exists to help us find the points still relying on internal types. func InternalVersionDecoder() runtime.Decoder { return legacyscheme.Codecs.UniversalDecoder() diff --git a/pkg/kubectl/cmd/util/factory_test.go b/pkg/kubectl/cmd/util/factory_test.go index 8f9b120dde4..360091f46c7 100644 --- a/pkg/kubectl/cmd/util/factory_test.go +++ b/pkg/kubectl/cmd/util/factory_test.go @@ -28,37 +28,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" ) -func TestPortsForObject(t *testing.T) { - f := NewFactory(genericclioptions.NewTestConfigFlags()) - - pod := &api.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, - Spec: api.PodSpec{ - Containers: []api.Container{ - { - Ports: []api.ContainerPort{ - { - ContainerPort: 101, - }, - }, - }, - }, - }, - } - - expected := sets.NewString("101") - ports, err := f.PortsForObject(pod) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - got := sets.NewString(ports...) - - if !expected.Equal(got) { - t.Fatalf("Ports mismatch! 
Expected %v, got %v", expected, got) - } -} - func TestProtocolsForObject(t *testing.T) { f := NewFactory(genericclioptions.NewTestConfigFlags()) diff --git a/pkg/kubectl/polymorphichelpers/BUILD b/pkg/kubectl/polymorphichelpers/BUILD index 1feaaf523b1..9848dc98c24 100644 --- a/pkg/kubectl/polymorphichelpers/BUILD +++ b/pkg/kubectl/polymorphichelpers/BUILD @@ -4,10 +4,12 @@ go_library( name = "go_default_library", srcs = [ "attachablepodforobject.go", + "canbeautoscaled.go", "helpers.go", "historyviewer.go", "interface.go", "logsforobject.go", + "portsforobject.go", "statusviewer.go", "updatepodspec.go", ], @@ -36,6 +38,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", @@ -47,6 +50,7 @@ go_test( srcs = [ "helpers_test.go", "logsforobject_test.go", + "portsforobject_test.go", ], embed = [":go_default_library"], deps = [ @@ -63,6 +67,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", ], diff --git a/pkg/kubectl/polymorphichelpers/canbeautoscaled.go b/pkg/kubectl/polymorphichelpers/canbeautoscaled.go new file mode 100644 index 00000000000..6009f2b5733 --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/canbeautoscaled.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/apps" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func canBeAutoscaled(kind schema.GroupKind) error { + switch kind { + case api.Kind("ReplicationController"), extensions.Kind("ReplicaSet"), + extensions.Kind("Deployment"), apps.Kind("Deployment"), apps.Kind("ReplicaSet"): + // nothing to do here + default: + return fmt.Errorf("cannot autoscale a %v", kind) + } + return nil +} diff --git a/pkg/kubectl/polymorphichelpers/interface.go b/pkg/kubectl/polymorphichelpers/interface.go index 3029730742e..bf0c230ce27 100644 --- a/pkg/kubectl/polymorphichelpers/interface.go +++ b/pkg/kubectl/polymorphichelpers/interface.go @@ -22,6 +22,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl" @@ -58,3 +59,15 @@ type UpdatePodSpecForObjectFunc func(obj runtime.Object, fn func(*v1.PodSpec) er // UpdatePodSpecForObjectFn gives a way to easily override the function for unit testing if needed var UpdatePodSpecForObjectFn UpdatePodSpecForObjectFunc = updatePodSpecForObject + +// PortsForObjectFunc returns the ports associated with the provided object +type PortsForObjectFunc func(object runtime.Object) ([]string, error) + +// PortsForObjectFn gives a way to easily override the function for unit testing if needed +var PortsForObjectFn PortsForObjectFunc = portsForObject + +// CanBeAutoscaledFunc checks whether the kind of resources could be autoscaled +type CanBeAutoscaledFunc func(kind schema.GroupKind) error + +// CanBeAutoscaledFn gives a way to easily override the function for unit testing if needed +var CanBeAutoscaledFn CanBeAutoscaledFunc = canBeAutoscaled diff --git a/pkg/kubectl/polymorphichelpers/portsforobject.go b/pkg/kubectl/polymorphichelpers/portsforobject.go new file mode 100644 index 00000000000..48318d30a98 --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/portsforobject.go @@ -0,0 +1,62 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func portsForObject(object runtime.Object) ([]string, error) { + switch t := object.(type) { + case *api.ReplicationController: + return getPorts(t.Spec.Template.Spec), nil + case *api.Pod: + return getPorts(t.Spec), nil + case *api.Service: + return getServicePorts(t.Spec), nil + case *extensions.Deployment: + return getPorts(t.Spec.Template.Spec), nil + case *extensions.ReplicaSet: + return getPorts(t.Spec.Template.Spec), nil + default: + return nil, fmt.Errorf("cannot extract ports from %T", object) + } +} + +func getPorts(spec api.PodSpec) []string { + result := []string{} + for _, container := range spec.Containers { + for _, port := range container.Ports { + result = append(result, strconv.Itoa(int(port.ContainerPort))) + } + } + return result +} + +// Extracts the ports exposed by a service from the given service spec. +func getServicePorts(spec api.ServiceSpec) []string { + result := []string{} + for _, servicePort := range spec.Ports { + result = append(result, strconv.Itoa(int(servicePort.Port))) + } + return result +} diff --git a/pkg/kubectl/polymorphichelpers/portsforobject_test.go b/pkg/kubectl/polymorphichelpers/portsforobject_test.go new file mode 100644 index 00000000000..3bd05b1f803 --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/portsforobject_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + api "k8s.io/kubernetes/pkg/apis/core" +) + +func TestPortsForObject(t *testing.T) { + pod := &api.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Ports: []api.ContainerPort{ + { + ContainerPort: 101, + }, + }, + }, + }, + }, + } + + expected := sets.NewString("101") + ports, err := portsForObject(pod) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + got := sets.NewString(ports...) + + if !expected.Equal(got) { + t.Fatalf("Ports mismatch! Expected %v, got %v", expected, got) + } +} From 3e53e99aae1d6de6e63788525822fa9e8bf27bb9 Mon Sep 17 00:00:00 2001 From: Andy Xie Date: Tue, 22 May 2018 23:49:50 +0800 Subject: [PATCH 098/307] log bad format git version --- hack/lib/version.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/lib/version.sh b/hack/lib/version.sh index cac14eddb4a..5096ac3ab5e 100644 --- a/hack/lib/version.sh +++ b/hack/lib/version.sh @@ -99,7 +99,7 @@ kube::version::get_version_vars() { # If KUBE_GIT_VERSION is not a valid Semantic Version, then refuse to build. if ! 
[[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then - echo "KUBE_GIT_VERSION should be a valid Semantic Version" + echo "KUBE_GIT_VERSION should be a valid Semantic Version. Current value: ${KUBE_GIT_VERSION}" echo "Please see more details here: https://semver.org" exit 1 fi From 0d3e85608fa24d26886dcc7b21556a646b4238a6 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Mon, 21 May 2018 15:27:11 -0400 Subject: [PATCH 099/307] move f.Command out of the factory --- pkg/kubectl/cmd/annotate.go | 2 +- pkg/kubectl/cmd/apply.go | 2 +- pkg/kubectl/cmd/autoscale.go | 2 +- pkg/kubectl/cmd/create/create.go | 2 +- pkg/kubectl/cmd/expose.go | 2 +- pkg/kubectl/cmd/label.go | 4 +- pkg/kubectl/cmd/patch.go | 2 +- pkg/kubectl/cmd/replace.go | 2 +- pkg/kubectl/cmd/run.go | 2 +- pkg/kubectl/cmd/scale.go | 2 +- pkg/kubectl/cmd/set/set_image.go | 2 +- pkg/kubectl/cmd/set/set_resources.go | 4 +- pkg/kubectl/cmd/set/set_selector.go | 3 +- pkg/kubectl/cmd/set/set_serviceaccount.go | 4 +- pkg/kubectl/cmd/util/editor/editoptions.go | 2 +- pkg/kubectl/cmd/util/factory.go | 6 --- pkg/kubectl/cmd/util/factory_client_access.go | 37 -------------- pkg/kubectl/genericclioptions/record_flags.go | 51 ++++++++++++++++++- 18 files changed, 68 insertions(+), 63 deletions(-) diff --git a/pkg/kubectl/cmd/annotate.go b/pkg/kubectl/cmd/annotate.go index 608398d20fa..20e12bd2b3f 100644 --- a/pkg/kubectl/cmd/annotate.go +++ b/pkg/kubectl/cmd/annotate.go @@ -157,7 +157,7 @@ func NewCmdAnnotate(parent string, f cmdutil.Factory, ioStreams genericclioption func (o *AnnotateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index bf112f3db3a..be27fc9b278 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -201,7 +201,7 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { } var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/autoscale.go b/pkg/kubectl/cmd/autoscale.go index eb0054dfbf2..e867fb9521d 100644 --- a/pkg/kubectl/cmd/autoscale.go +++ b/pkg/kubectl/cmd/autoscale.go @@ -134,7 +134,7 @@ func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args o.builder = f.NewBuilder() o.canBeAutoscaled = f.CanBeAutoscaled o.args = args - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { diff --git a/pkg/kubectl/cmd/create/create.go b/pkg/kubectl/cmd/create/create.go index 72fa60bd547..1be7fc47562 100644 --- a/pkg/kubectl/cmd/create/create.go +++ b/pkg/kubectl/cmd/create/create.go @@ -178,7 +178,7 @@ func (o *CreateOptions) ValidateArgs(cmd *cobra.Command, args []string) error { func (o *CreateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index daecc86ca09..fea106bf94e 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -177,7 +177,7 @@ func (o *ExposeServiceOptions) Complete(f 
cmdutil.Factory, cmd *cobra.Command) e } o.PrintObj = printer.PrintObj - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index 58de43af8ca..ce22d4af0bf 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -27,13 +27,13 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" @@ -159,7 +159,7 @@ func NewCmdLabel(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr func (o *LabelOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/patch.go b/pkg/kubectl/cmd/patch.go index e5b3fc3d9b4..f773632e18c 100644 --- a/pkg/kubectl/cmd/patch.go +++ b/pkg/kubectl/cmd/patch.go @@ -134,7 +134,7 @@ func NewCmdPatch(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr func (o *PatchOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index 70131091f57..02f6c581b2f 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -133,7 +133,7 @@ func NewCmdReplace(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr func (o *ReplaceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index e19fea9f546..5cb87731061 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -193,7 +193,7 @@ func addRunFlags(cmd *cobra.Command, opt *RunOptions) { func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index c7c908a6537..aa5640e32dd 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -145,7 +145,7 @@ func NewCmdScale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobr func (o *ScaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 540e40bebc9..2e79c77b386 100644 --- a/pkg/kubectl/cmd/set/set_image.go 
+++ b/pkg/kubectl/cmd/set/set_image.go @@ -129,7 +129,7 @@ func NewCmdImage(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. func (o *SetImageOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 1089b3f61f5..1ff8ad40793 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -21,9 +21,9 @@ import ( "strings" "github.com/spf13/cobra" - "k8s.io/api/core/v1" "github.com/golang/glog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -147,7 +147,7 @@ func NewCmdResources(f cmdutil.Factory, streams genericclioptions.IOStreams) *co func (o *SetResourcesOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index ce8cca2a16f..7468af37e9b 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -21,6 +21,7 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -122,7 +123,7 @@ func NewCmdSelector(f cmdutil.Factory, streams genericclioptions.IOStreams) *cob func (o *SetSelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index e33e12ac3b7..fc39516bad5 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -20,9 +20,9 @@ import ( "errors" "fmt" + "github.com/golang/glog" "github.com/spf13/cobra" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -121,7 +121,7 @@ func NewCmdServiceAccount(f cmdutil.Factory, streams genericclioptions.IOStreams func (o *SetServiceAccountOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/util/editor/editoptions.go b/pkg/kubectl/cmd/util/editor/editoptions.go index 990ae53e58a..abf1e7ef2a3 100644 --- a/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/pkg/kubectl/cmd/util/editor/editoptions.go @@ -109,7 +109,7 @@ type editPrinterOptions struct { func (o *EditOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Command) error { var err error - o.RecordFlags.Complete(f.Command(cmd, false)) + o.RecordFlags.Complete(cmd) o.Recorder, err = o.RecordFlags.ToRecorder() if err != nil { return err diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 9af233887c1..16e5170921f 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -21,8 +21,6 @@ 
import ( "strconv" "strings" - "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -87,10 +85,6 @@ type ClientAccessFactory interface { // ProtocolsForObject returns the mapping associated with the provided object ProtocolsForObject(object runtime.Object) (map[string]string, error) - // Command will stringify and return all environment arguments ie. a command run by a client - // using the factory. - Command(cmd *cobra.Command, showSecrets bool) string - // SuggestedPodTemplateResources returns a list of resource types that declare a pod template SuggestedPodTemplateResources() []schema.GroupResource diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index 9fb932d262c..2d114485593 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -22,12 +22,6 @@ import ( "errors" "fmt" "io" - "os" - "path/filepath" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -196,37 +190,6 @@ func (f *ring0Factory) ProtocolsForObject(object runtime.Object) (map[string]str } } -// Set showSecrets false to filter out stuff like secrets. -func (f *ring0Factory) Command(cmd *cobra.Command, showSecrets bool) string { - if len(os.Args) == 0 { - return "" - } - - flags := "" - parseFunc := func(flag *pflag.Flag, value string) error { - flags = flags + " --" + flag.Name - if set, ok := flag.Annotations["classified"]; showSecrets || !ok || len(set) == 0 { - flags = flags + "=" + value - } else { - flags = flags + "=CLASSIFIED" - } - return nil - } - var err error - err = cmd.Flags().ParseAll(os.Args[1:], parseFunc) - if err != nil || !cmd.Flags().Parsed() { - return "" - } - - args := "" - if arguments := cmd.Flags().Args(); len(arguments) > 0 { - args = " " + strings.Join(arguments, " ") - } - - base := filepath.Base(os.Args[0]) - return base + args + flags -} - func (f *ring0Factory) SuggestedPodTemplateResources() []schema.GroupResource { return []schema.GroupResource{ {Resource: "replicationcontroller"}, diff --git a/pkg/kubectl/genericclioptions/record_flags.go b/pkg/kubectl/genericclioptions/record_flags.go index 1a32d8b05ce..faf250d53cb 100644 --- a/pkg/kubectl/genericclioptions/record_flags.go +++ b/pkg/kubectl/genericclioptions/record_flags.go @@ -17,8 +17,13 @@ limitations under the License. package genericclioptions import ( + "os" + "path/filepath" + "strings" + "github.com/evanphx/json-patch" "github.com/spf13/cobra" + "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -60,12 +65,21 @@ func (f *RecordFlags) ToRecorder() (Recorder, error) { } // Complete is called before the command is run, but after it is invoked to finish the state of the struct before use. -func (f *RecordFlags) Complete(changeCause string) error { +func (f *RecordFlags) Complete(cmd *cobra.Command) error { if f == nil { return nil } - f.changeCause = changeCause + f.changeCause = parseCommandArguments(cmd) + return nil +} + +func (f *RecordFlags) CompleteWithChangeCause(cause string) error { + if f == nil { + return nil + } + + f.changeCause = cause return nil } @@ -150,3 +164,36 @@ func (r *ChangeCauseRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, return jsonpatch.CreateMergePatch(oldData, newData) } + +// parseCommandArguments will stringify and return all environment arguments ie. 
a command run by a client +// using the factory. +// Set showSecrets false to filter out stuff like secrets. +func parseCommandArguments(cmd *cobra.Command) string { + if len(os.Args) == 0 { + return "" + } + + flags := "" + parseFunc := func(flag *pflag.Flag, value string) error { + flags = flags + " --" + flag.Name + if set, ok := flag.Annotations["classified"]; !ok || len(set) == 0 { + flags = flags + "=" + value + } else { + flags = flags + "=CLASSIFIED" + } + return nil + } + var err error + err = cmd.Flags().ParseAll(os.Args[1:], parseFunc) + if err != nil || !cmd.Flags().Parsed() { + return "" + } + + args := "" + if arguments := cmd.Flags().Args(); len(arguments) > 0 { + args = " " + strings.Join(arguments, " ") + } + + base := filepath.Base(os.Args[0]) + return base + args + flags +} From 62a1532d518dbd09bae69e997c88cec30bcd5d6b Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Mon, 21 May 2018 19:05:07 -0700 Subject: [PATCH 100/307] Remove some completed TODOs --- api/openapi-spec/swagger.json | 4 ++-- api/swagger-spec/v1.json | 4 ++-- .../v1beta1/definitions.html | 2 +- .../scheduling.k8s.io/v1beta1/operations.html | 2 +- docs/api-reference/v1/definitions.html | 4 ++-- pkg/apis/core/types.go | 19 ++----------------- .../src/k8s.io/api/core/v1/generated.proto | 7 ++----- staging/src/k8s.io/api/core/v1/types.go | 7 ++----- .../core/v1/types_swagger_doc_generated.go | 4 ++-- 9 files changed, 16 insertions(+), 37 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b8fa0e77484..a2dd9539f13 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -76513,11 +76513,11 @@ "type": "string" }, "resourceVersion": { - "description": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec.", + "description": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", "type": "string" }, "uid": { - "description": "UID is the metadata.UID of the referenced ConfigMap. This field is currently reqired in Node.Spec.", + "description": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", "type": "string" } } diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 05388e592d6..7ac54ed33b1 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -18743,11 +18743,11 @@ }, "uid": { "type": "string", - "description": "UID is the metadata.UID of the referenced ConfigMap. This field is currently reqired in Node.Spec." + "description": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status." }, "resourceVersion": { "type": "string", - "description": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec." + "description": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status." }, "kubeletConfigKey": { "type": "string", diff --git a/docs/api-reference/scheduling.k8s.io/v1beta1/definitions.html b/docs/api-reference/scheduling.k8s.io/v1beta1/definitions.html index f1dd21bda95..4b8adef5819 100755 --- a/docs/api-reference/scheduling.k8s.io/v1beta1/definitions.html +++ b/docs/api-reference/scheduling.k8s.io/v1beta1/definitions.html @@ -1368,7 +1368,7 @@ Examples:
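The record_flags.go change above folds the old factory Command() helper into parseCommandArguments, so the change-cause string and its secret filtering now live entirely in genericclioptions. A minimal, self-contained sketch of that masking rule, using only the pflag annotation check visible in the patch (the flag set and values here are made up for illustration):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// maskClassified mirrors the parseFunc closure in parseCommandArguments above:
// flags carrying a "classified" annotation are recorded as CLASSIFIED instead
// of their real value, so secrets never end up in the change-cause annotation.
func maskClassified(flag *pflag.Flag, value string) string {
	if set, ok := flag.Annotations["classified"]; ok && len(set) > 0 {
		return " --" + flag.Name + "=CLASSIFIED"
	}
	return " --" + flag.Name + "=" + value
}

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.String("token", "", "bearer token")

	flag := fs.Lookup("token")
	flag.Annotations = map[string][]string{"classified": {"true"}}

	fmt.Println(maskClassified(flag, "s3cr3t")) // " --token=CLASSIFIED"
}

Note that RecordFlags.Complete now takes the *cobra.Command itself, while the new CompleteWithChangeCause keeps a string-based entry point for callers that compute the cause on their own.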
diff --git a/docs/api-reference/scheduling.k8s.io/v1beta1/operations.html b/docs/api-reference/scheduling.k8s.io/v1beta1/operations.html index c70d1a259d2..8c9c1566c9d 100755 --- a/docs/api-reference/scheduling.k8s.io/v1beta1/operations.html +++ b/docs/api-reference/scheduling.k8s.io/v1beta1/operations.html @@ -1785,7 +1785,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; } diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index c82d349a170..ac244a75f50 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -3705,14 +3705,14 @@ Examples:
- + - + diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index 4284fbd2d4e..54c23ab7194 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -3269,27 +3269,12 @@ type ConfigMapNodeConfigSource struct { Name string // UID is the metadata.UID of the referenced ConfigMap. - // This field is currently reqired in Node.Spec. - // TODO(#61643): This field will be forbidden in Node.Spec when #61643 is resolved. - // #61643 changes the behavior of dynamic Kubelet config to respect - // ConfigMap updates, and thus removes the ability to pin the Spec to a given UID. - // TODO(#56896): This field will be required in Node.Status when #56896 is resolved. - // #63314 (the PR that resolves #56896) adds a structured status to the Node - // object for reporting information about the config. This status requires UID - // and ResourceVersion, so that it represents a fully-explicit description of - // the configuration in use, while (see previous TODO) the Spec will be - // restricted to namespace/name in #61643. + // This field is forbidden in Node.Spec, and required in Node.Status. // +optional UID types.UID // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. - // This field is forbidden in Node.Spec. - // TODO(#56896): This field will be required in Node.Status when #56896 is resolved. - // #63314 (the PR that resolves #56896) adds a structured status to the Node - // object for reporting information about the config. This status requires UID - // and ResourceVersion, so that it represents a fully-explicit description of - // the configuration in use, while (see previous TODO) the Spec will be - // restricted to namespace/name in #61643. + // This field is forbidden in Node.Spec, and required in Node.Status. // +optional ResourceVersion string diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index c1f260c659b..a92d7392c6e 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -450,15 +450,12 @@ message ConfigMapNodeConfigSource { optional string name = 2; // UID is the metadata.UID of the referenced ConfigMap. - // This field is currently reqired in Node.Spec. - // TODO(#61643): This field will be forbidden in Node.Spec when #61643 is resolved. - // TODO(#56896): This field will be required in Node.Status when #56896 is resolved. + // This field is forbidden in Node.Spec, and required in Node.Status. // +optional optional string uid = 3; // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. - // This field is forbidden in Node.Spec. - // TODO(#56896): This field will be required in Node.Status when #56896 is resolved. + // This field is forbidden in Node.Spec, and required in Node.Status. // +optional optional string resourceVersion = 4; diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 47344bb0498..e46e9db0a7f 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -3666,15 +3666,12 @@ type ConfigMapNodeConfigSource struct { Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // UID is the metadata.UID of the referenced ConfigMap. - // This field is currently reqired in Node.Spec. - // TODO(#61643): This field will be forbidden in Node.Spec when #61643 is resolved. - // TODO(#56896): This field will be required in Node.Status when #56896 is resolved. 
+ // This field is forbidden in Node.Spec, and required in Node.Status. // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. - // This field is forbidden in Node.Spec. - // TODO(#56896): This field will be required in Node.Status when #56896 is resolved. + // This field is forbidden in Node.Spec, and required in Node.Status. // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 6316816abcd..567f37dd22d 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -266,8 +266,8 @@ var map_ConfigMapNodeConfigSource = map[string]string{ "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.", "namespace": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", "name": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", - "uid": "UID is the metadata.UID of the referenced ConfigMap. This field is currently reqired in Node.Spec.", - "resourceVersion": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec.", + "uid": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "resourceVersion": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", "kubeletConfigKey": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.", } From 98bc39dcd5fd384bc03b62fce9029b452aecae90 Mon Sep 17 00:00:00 2001 From: Jacob Gillespie Date: Tue, 22 May 2018 12:40:20 -0700 Subject: [PATCH 101/307] Add Logf message for skipped succeeded pods --- test/e2e/framework/util.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 73dc9de60ff..4373d970701 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -704,7 +704,8 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN case res && err == nil: nOk++ case pod.Status.Phase == v1.PodSucceeded: - // pod status is succeeded, it doesn't make sense to wait for this pod + Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name) + // it doesn't make sense to wait for this pod continue case pod.Status.Phase != v1.PodFailed: Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase) From 4eb665784844b0aa8f6a8ac9b17c5d713f6e9a4a Mon Sep 17 00:00:00 2001 From: Rohit Ramkumar Date: Tue, 22 May 2018 11:07:25 -0700 Subject: [PATCH 102/307] Stub out BackendService check in Ingress upgrade test. 
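The upgrade test compares a snapshot of GCP resources taken before the master upgrade with one taken after; until GLBC v1.2.0 is released, backend services legitimately differ across the upgrade, so the test clears that list on both snapshots before the reflect-based comparison. A small sketch of the pattern, with the struct and field names assumed purely for illustration (the real type lives in test/e2e/upgrades/ingress.go):

package main

import (
	"fmt"
	"reflect"
)

// gcpResourceStore stands in for the test's resource snapshot; only the
// shape matters for this sketch.
type gcpResourceStore struct {
	BeList  []string // backend services, ignored until GLBC v1.2.0
	SslList []string // SSL certificates
}

func main() {
	pre := &gcpResourceStore{BeList: []string{"be-1"}, SslList: []string{"cert-a"}}
	post := &gcpResourceStore{BeList: []string{"be-1", "be-2"}, SslList: []string{"cert-a"}}

	// Nil out the field on both sides so the comparison skips it entirely,
	// mirroring what the change below does for BeList.
	pre.BeList, post.BeList = nil, nil

	fmt.Println(reflect.DeepEqual(pre, post)) // true: only the ignored field differed
}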
--- test/e2e/upgrades/ingress.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/e2e/upgrades/ingress.go b/test/e2e/upgrades/ingress.go index 715302f23e3..855e8dcabf6 100644 --- a/test/e2e/upgrades/ingress.go +++ b/test/e2e/upgrades/ingress.go @@ -212,6 +212,10 @@ func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{} postUpgradeResourceStore.SslList = nil } + // TODO(rramkumar): Remove this when GLBC v1.2.0 is released. + t.resourceStore.BeList = nil + postUpgradeResourceStore.BeList = nil + framework.ExpectNoError(compareGCPResourceStores(t.resourceStore, postUpgradeResourceStore, func(v1 reflect.Value, v2 reflect.Value) error { i1 := v1.Interface() i2 := v2.Interface() From e32a15558b8f3d2e5138f4593161d0eced438cd4 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Mon, 19 Mar 2018 16:47:20 -0700 Subject: [PATCH 103/307] Use apps/v1 in Deployment controller. --- cmd/kube-controller-manager/app/BUILD | 1 - cmd/kube-controller-manager/app/apps.go | 18 ++ cmd/kube-controller-manager/app/extensions.go | 45 --- pkg/controller/BUILD | 3 +- pkg/controller/controller_ref_manager.go | 13 +- pkg/controller/controller_ref_manager_test.go | 4 +- pkg/controller/controller_utils.go | 18 +- pkg/controller/controller_utils_test.go | 10 +- pkg/controller/deployment/BUILD | 7 +- .../deployment/deployment_controller.go | 74 ++--- .../deployment/deployment_controller_test.go | 77 +++-- pkg/controller/deployment/progress.go | 32 +- pkg/controller/deployment/progress_test.go | 116 +++---- pkg/controller/deployment/recreate.go | 10 +- pkg/controller/deployment/recreate_test.go | 32 +- pkg/controller/deployment/rollback.go | 55 +++- pkg/controller/deployment/rolling.go | 12 +- pkg/controller/deployment/rolling_test.go | 18 +- pkg/controller/deployment/sync.go | 80 ++--- pkg/controller/deployment/sync_test.go | 104 +++--- pkg/controller/deployment/util/BUILD | 8 +- .../deployment/util/deployment_util.go | 125 +++---- .../deployment/util/deployment_util_test.go | 306 +++++++++--------- .../deployment/util/replicaset_util.go | 12 +- 24 files changed, 591 insertions(+), 589 deletions(-) delete mode 100644 cmd/kube-controller-manager/app/extensions.go diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index 74462342012..64c82a719a2 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -11,7 +11,6 @@ go_library( "cloudproviders.go", "controllermanager.go", "core.go", - "extensions.go", "import_known_versions.go", "plugins.go", "policy.go", diff --git a/cmd/kube-controller-manager/app/apps.go b/cmd/kube-controller-manager/app/apps.go index 143367e3e9d..7525f174bbe 100644 --- a/cmd/kube-controller-manager/app/apps.go +++ b/cmd/kube-controller-manager/app/apps.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/controller/daemon" + "k8s.io/kubernetes/pkg/controller/deployment" "k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/pkg/controller/statefulset" ) @@ -73,3 +74,20 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) { ).Run(int(ctx.ComponentConfig.ReplicaSetController.ConcurrentRSSyncs), ctx.Stop) return true, nil } + +func startDeploymentController(ctx ControllerContext) (bool, error) { + if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}] { + return false, nil + } + dc, err := deployment.NewDeploymentController( + ctx.InformerFactory.Apps().V1().Deployments(), + 
ctx.InformerFactory.Apps().V1().ReplicaSets(), + ctx.InformerFactory.Core().V1().Pods(), + ctx.ClientBuilder.ClientOrDie("deployment-controller"), + ) + if err != nil { + return true, fmt.Errorf("error creating Deployment controller: %v", err) + } + go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop) + return true, nil +} diff --git a/cmd/kube-controller-manager/app/extensions.go b/cmd/kube-controller-manager/app/extensions.go deleted file mode 100644 index 6cca85bab6e..00000000000 --- a/cmd/kube-controller-manager/app/extensions.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package app implements a server that runs a set of active -// components. This includes replication controllers, service endpoints and -// nodes. -// -package app - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/controller/deployment" -) - -func startDeploymentController(ctx ControllerContext) (bool, error) { - if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] { - return false, nil - } - dc, err := deployment.NewDeploymentController( - ctx.InformerFactory.Extensions().V1beta1().Deployments(), - ctx.InformerFactory.Extensions().V1beta1().ReplicaSets(), - ctx.InformerFactory.Core().V1().Pods(), - ctx.ClientBuilder.ClientOrDie("deployment-controller"), - ) - if err != nil { - return true, fmt.Errorf("error creating Deployment controller: %v", err) - } - go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop) - return true, nil -} diff --git a/pkg/controller/BUILD b/pkg/controller/BUILD index 3c9438c63fe..d2d5ed451f2 100644 --- a/pkg/controller/BUILD +++ b/pkg/controller/BUILD @@ -19,8 +19,8 @@ go_test( "//pkg/controller/testutil:go_default_library", "//pkg/securitycontext:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -64,7 +64,6 @@ go_library( "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/controller_ref_manager.go b/pkg/controller/controller_ref_manager.go index 21d7aa302ea..6cf2ac18946 100644 --- a/pkg/controller/controller_ref_manager.go +++ 
b/pkg/controller/controller_ref_manager.go @@ -23,7 +23,6 @@ import ( "github.com/golang/glog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -301,18 +300,18 @@ func NewReplicaSetControllerRefManager( // If the error is nil, either the reconciliation succeeded, or no // reconciliation was necessary. The list of ReplicaSets that you now own is // returned. -func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, error) { - var claimed []*extensions.ReplicaSet +func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) { + var claimed []*apps.ReplicaSet var errlist []error match := func(obj metav1.Object) bool { return m.Selector.Matches(labels.Set(obj.GetLabels())) } adopt := func(obj metav1.Object) error { - return m.AdoptReplicaSet(obj.(*extensions.ReplicaSet)) + return m.AdoptReplicaSet(obj.(*apps.ReplicaSet)) } release := func(obj metav1.Object) error { - return m.ReleaseReplicaSet(obj.(*extensions.ReplicaSet)) + return m.ReleaseReplicaSet(obj.(*apps.ReplicaSet)) } for _, rs := range sets { @@ -330,7 +329,7 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.Rep // AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns // the error if the patching fails. -func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaSet) error { +func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) error { if err := m.CanAdopt(); err != nil { return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err) } @@ -345,7 +344,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *extensions.ReplicaS // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. 
-func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *extensions.ReplicaSet) error { +func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error { glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID) diff --git a/pkg/controller/controller_ref_manager_test.go b/pkg/controller/controller_ref_manager_test.go index 4e6acb7cf38..fe878176430 100644 --- a/pkg/controller/controller_ref_manager_test.go +++ b/pkg/controller/controller_ref_manager_test.go @@ -20,8 +20,8 @@ import ( "reflect" "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" @@ -52,7 +52,7 @@ func newPod(podName string, label map[string]string, owner metav1.Object) *v1.Po }, } if owner != nil { - pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, v1beta1.SchemeGroupVersion.WithKind("Fake"))} + pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, apps.SchemeGroupVersion.WithKind("Fake"))} } return pod } diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index cc3f5828008..fb00a7567ad 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -25,8 +25,8 @@ import ( "sync/atomic" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -818,18 +818,18 @@ func IsPodActive(p *v1.Pod) bool { } // FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods. -func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet { - activeFilter := func(rs *extensions.ReplicaSet) bool { +func FilterActiveReplicaSets(replicaSets []*apps.ReplicaSet) []*apps.ReplicaSet { + activeFilter := func(rs *apps.ReplicaSet) bool { return rs != nil && *(rs.Spec.Replicas) > 0 } return FilterReplicaSets(replicaSets, activeFilter) } -type filterRS func(rs *extensions.ReplicaSet) bool +type filterRS func(rs *apps.ReplicaSet) bool // FilterReplicaSets returns replica sets that are filtered by filterFn (all returned ones should match filterFn). -func FilterReplicaSets(RSes []*extensions.ReplicaSet, filterFn filterRS) []*extensions.ReplicaSet { - var filtered []*extensions.ReplicaSet +func FilterReplicaSets(RSes []*apps.ReplicaSet, filterFn filterRS) []*apps.ReplicaSet { + var filtered []*apps.ReplicaSet for i := range RSes { if filterFn(RSes[i]) { filtered = append(filtered, RSes[i]) @@ -859,7 +859,7 @@ func (o ControllersByCreationTimestamp) Less(i, j int) bool { } // ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker. 
-type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet +type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) } func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } @@ -872,7 +872,7 @@ func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool { // ReplicaSetsBySizeOlder sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from old to new replica sets. -type ReplicaSetsBySizeOlder []*extensions.ReplicaSet +type ReplicaSetsBySizeOlder []*apps.ReplicaSet func (o ReplicaSetsBySizeOlder) Len() int { return len(o) } func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] } @@ -885,7 +885,7 @@ func (o ReplicaSetsBySizeOlder) Less(i, j int) bool { // ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from new to old replica sets. -type ReplicaSetsBySizeNewer []*extensions.ReplicaSet +type ReplicaSetsBySizeNewer []*apps.ReplicaSet func (o ReplicaSetsBySizeNewer) Len() int { return len(o) } func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] } diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index e6ecef86b3c..857b8b46fef 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -27,8 +27,8 @@ import ( "testing" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -122,8 +122,8 @@ func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.Replica } } -func newReplicaSet(name string, replicas int) *extensions.ReplicaSet { - return &extensions.ReplicaSet{ +func newReplicaSet(name string, replicas int) *apps.ReplicaSet { + return &apps.ReplicaSet{ TypeMeta: metav1.TypeMeta{APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), @@ -131,7 +131,7 @@ func newReplicaSet(name string, replicas int) *extensions.ReplicaSet { Namespace: metav1.NamespaceDefault, ResourceVersion: "18", }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: func() *int32 { i := int32(replicas); return &i }(), Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Template: v1.PodTemplateSpec{ @@ -417,7 +417,7 @@ func TestSortingActivePods(t *testing.T) { } func TestActiveReplicaSetsFiltering(t *testing.T) { - var replicaSets []*extensions.ReplicaSet + var replicaSets []*apps.ReplicaSet replicaSets = append(replicaSets, newReplicaSet("zero", 0)) replicaSets = append(replicaSets, nil) replicaSets = append(replicaSets, newReplicaSet("foo", 1)) diff --git a/pkg/controller/deployment/BUILD b/pkg/controller/deployment/BUILD index da5d39a8348..05339b46622 100644 --- a/pkg/controller/deployment/BUILD +++ b/pkg/controller/deployment/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/util/labels:go_default_library", "//pkg/util/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -32,13 +33,13 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", @@ -64,13 +65,13 @@ go_test( "//pkg/apis/batch/install:go_default_library", "//pkg/apis/certificates/install:go_default_library", "//pkg/apis/core/install:go_default_library", - "//pkg/apis/extensions/install:go_default_library", "//pkg/apis/policy/install:go_default_library", "//pkg/apis/rbac/install:go_default_library", "//pkg/apis/settings/install:go_default_library", "//pkg/apis/storage/install:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/deployment/util:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index ecff7d5142d..8b14efddb22 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -27,21 +27,21 @@ import ( "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + appsinformers "k8s.io/client-go/informers/apps/v1" coreinformers "k8s.io/client-go/informers/core/v1" - extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + appslisters "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" - extensionslisters "k8s.io/client-go/listers/extensions/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" @@ -60,7 +60,7 @@ const ( ) // controllerKind contains the schema.GroupVersionKind for this controller type. -var controllerKind = extensions.SchemeGroupVersion.WithKind("Deployment") +var controllerKind = apps.SchemeGroupVersion.WithKind("Deployment") // DeploymentController is responsible for synchronizing Deployment objects stored // in the system with actual running replica sets and pods. 
@@ -73,12 +73,12 @@ type DeploymentController struct { // To allow injection of syncDeployment for testing. syncHandler func(dKey string) error // used for unit testing - enqueueDeployment func(deployment *extensions.Deployment) + enqueueDeployment func(deployment *apps.Deployment) // dLister can list/get deployments from the shared informer's store - dLister extensionslisters.DeploymentLister + dLister appslisters.DeploymentLister // rsLister can list/get replica sets from the shared informer's store - rsLister extensionslisters.ReplicaSetLister + rsLister appslisters.ReplicaSetLister // podLister can list/get pods from the shared informer's store podLister corelisters.PodLister @@ -97,7 +97,7 @@ type DeploymentController struct { } // NewDeploymentController creates a new DeploymentController. -func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) { +func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) @@ -164,27 +164,27 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) { } func (dc *DeploymentController) addDeployment(obj interface{}) { - d := obj.(*extensions.Deployment) + d := obj.(*apps.Deployment) glog.V(4).Infof("Adding deployment %s", d.Name) dc.enqueueDeployment(d) } func (dc *DeploymentController) updateDeployment(old, cur interface{}) { - oldD := old.(*extensions.Deployment) - curD := cur.(*extensions.Deployment) + oldD := old.(*apps.Deployment) + curD := cur.(*apps.Deployment) glog.V(4).Infof("Updating deployment %s", oldD.Name) dc.enqueueDeployment(curD) } func (dc *DeploymentController) deleteDeployment(obj interface{}) { - d, ok := obj.(*extensions.Deployment) + d, ok := obj.(*apps.Deployment) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) return } - d, ok = tombstone.Obj.(*extensions.Deployment) + d, ok = tombstone.Obj.(*apps.Deployment) if !ok { utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Deployment %#v", obj)) return @@ -196,7 +196,7 @@ func (dc *DeploymentController) deleteDeployment(obj interface{}) { // addReplicaSet enqueues the deployment that manages a ReplicaSet when the ReplicaSet is created. func (dc *DeploymentController) addReplicaSet(obj interface{}) { - rs := obj.(*extensions.ReplicaSet) + rs := obj.(*apps.ReplicaSet) if rs.DeletionTimestamp != nil { // On a restart of the controller manager, it's possible for an object to @@ -230,7 +230,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) { // getDeploymentsForReplicaSet returns a list of Deployments that potentially // match a ReplicaSet. 
-func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *extensions.ReplicaSet) []*extensions.Deployment { +func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet) []*apps.Deployment { deployments, err := dc.dLister.GetDeploymentsForReplicaSet(rs) if err != nil || len(deployments) == 0 { return nil @@ -250,11 +250,11 @@ func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *extensions.Repli // updateReplicaSet figures out what deployment(s) manage a ReplicaSet when the ReplicaSet // is updated and wake them up. If the anything of the ReplicaSets have changed, we need to -// awaken both the old and new deployments. old and cur must be *extensions.ReplicaSet +// awaken both the old and new deployments. old and cur must be *apps.ReplicaSet // types. func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) { - curRS := cur.(*extensions.ReplicaSet) - oldRS := old.(*extensions.ReplicaSet) + curRS := cur.(*apps.ReplicaSet) + oldRS := old.(*apps.ReplicaSet) if curRS.ResourceVersion == oldRS.ResourceVersion { // Periodic resync will send update events for all known replica sets. // Two different versions of the same replica set will always have different RVs. @@ -298,10 +298,10 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) { } // deleteReplicaSet enqueues the deployment that manages a ReplicaSet when -// the ReplicaSet is deleted. obj could be an *extensions.ReplicaSet, or +// the ReplicaSet is deleted. obj could be an *apps.ReplicaSet, or // a DeletionFinalStateUnknown marker item. func (dc *DeploymentController) deleteReplicaSet(obj interface{}) { - rs, ok := obj.(*extensions.ReplicaSet) + rs, ok := obj.(*apps.ReplicaSet) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains @@ -313,7 +313,7 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) { utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) return } - rs, ok = tombstone.Obj.(*extensions.ReplicaSet) + rs, ok = tombstone.Obj.(*apps.ReplicaSet) if !ok { utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ReplicaSet %#v", obj)) return @@ -354,9 +354,9 @@ func (dc *DeploymentController) deletePod(obj interface{}) { } } glog.V(4).Infof("Pod %s deleted.", pod.Name) - if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == extensions.RecreateDeploymentStrategyType { + if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType { // Sync if this Deployment now has no more Pods. 
- rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.ExtensionsV1beta1())) + rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1())) if err != nil { return } @@ -374,7 +374,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) { } } -func (dc *DeploymentController) enqueue(deployment *extensions.Deployment) { +func (dc *DeploymentController) enqueue(deployment *apps.Deployment) { key, err := controller.KeyFunc(deployment) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err)) @@ -384,7 +384,7 @@ func (dc *DeploymentController) enqueue(deployment *extensions.Deployment) { dc.queue.Add(key) } -func (dc *DeploymentController) enqueueRateLimited(deployment *extensions.Deployment) { +func (dc *DeploymentController) enqueueRateLimited(deployment *apps.Deployment) { key, err := controller.KeyFunc(deployment) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err)) @@ -395,7 +395,7 @@ func (dc *DeploymentController) enqueueRateLimited(deployment *extensions.Deploy } // enqueueAfter will enqueue a deployment after the provided amount of time. -func (dc *DeploymentController) enqueueAfter(deployment *extensions.Deployment, after time.Duration) { +func (dc *DeploymentController) enqueueAfter(deployment *apps.Deployment, after time.Duration) { key, err := controller.KeyFunc(deployment) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err)) @@ -406,16 +406,16 @@ func (dc *DeploymentController) enqueueAfter(deployment *extensions.Deployment, } // getDeploymentForPod returns the deployment managing the given Pod. -func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *extensions.Deployment { +func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deployment { // Find the owning replica set - var rs *extensions.ReplicaSet + var rs *apps.ReplicaSet var err error controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { // No controller owns this Pod. return nil } - if controllerRef.Kind != extensions.SchemeGroupVersion.WithKind("ReplicaSet").Kind { + if controllerRef.Kind != apps.SchemeGroupVersion.WithKind("ReplicaSet").Kind { // Not a pod owned by a replica set. return nil } @@ -436,7 +436,7 @@ func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *extensions.Dep // resolveControllerRef returns the controller referenced by a ControllerRef, // or nil if the ControllerRef could not be resolved to a matching controller // of the correct Kind. -func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.Deployment { +func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.Deployment { // We can't look up by UID, so look up by Name and then verify UID. // Don't even try to look up by Name if it's the wrong Kind. if controllerRef.Kind != controllerKind.Kind { @@ -494,7 +494,7 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) { // getReplicaSetsForDeployment uses ControllerRefManager to reconcile // ControllerRef by adopting and orphaning. // It returns the list of ReplicaSets that this Deployment should manage. 
-func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deployment) ([]*extensions.ReplicaSet, error) { +func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment) ([]*apps.ReplicaSet, error) { // List all ReplicaSets to find those we own but that no longer match our // selector. They will be orphaned by ClaimReplicaSets(). rsList, err := dc.rsLister.ReplicaSets(d.Namespace).List(labels.Everything()) @@ -508,7 +508,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deploy // If any adoptions are attempted, we should first recheck for deletion with // an uncached quorum read sometime after listing ReplicaSets (see #42639). canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) { - fresh, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) + fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -525,7 +525,7 @@ func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deploy // // It returns a map from ReplicaSet UID to a list of Pods controlled by that RS, // according to the Pod's ControllerRef. -func (dc *DeploymentController) getPodMapForDeployment(d *extensions.Deployment, rsList []*extensions.ReplicaSet) (map[types.UID]*v1.PodList, error) { +func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID]*v1.PodList, error) { // Get all Pods that potentially belong to this Deployment. selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) if err != nil { @@ -586,7 +586,7 @@ func (dc *DeploymentController) syncDeployment(key string) error { dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.") if d.Status.ObservedGeneration < d.Generation { d.Status.ObservedGeneration = d.Generation - dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) } return nil } @@ -625,7 +625,7 @@ func (dc *DeploymentController) syncDeployment(key string) error { // rollback is not re-entrant in case the underlying replica sets are updated with a new // revision so we should ensure that we won't proceed to update replica sets until we // make sure that the deployment has cleaned up its rollback spec in subsequent enqueues. 
- if d.Spec.RollbackTo != nil { + if getRollbackTo(d) != nil { return dc.rollback(d, rsList, podMap) } @@ -638,9 +638,9 @@ func (dc *DeploymentController) syncDeployment(key string) error { } switch d.Spec.Strategy.Type { - case extensions.RecreateDeploymentStrategyType: + case apps.RecreateDeploymentStrategyType: return dc.rolloutRecreate(d, rsList, podMap) - case extensions.RollingUpdateDeploymentStrategyType: + case apps.RollingUpdateDeploymentStrategyType: return dc.rolloutRolling(d, rsList, podMap) } return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type) diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go index a75f8daf8ee..6b289fe0d14 100644 --- a/pkg/controller/deployment/deployment_controller_test.go +++ b/pkg/controller/deployment/deployment_controller_test.go @@ -20,6 +20,7 @@ import ( "strconv" "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,7 +39,6 @@ import ( _ "k8s.io/kubernetes/pkg/apis/batch/install" _ "k8s.io/kubernetes/pkg/apis/certificates/install" _ "k8s.io/kubernetes/pkg/apis/core/install" - _ "k8s.io/kubernetes/pkg/apis/extensions/install" _ "k8s.io/kubernetes/pkg/apis/policy/install" _ "k8s.io/kubernetes/pkg/apis/rbac/install" _ "k8s.io/kubernetes/pkg/apis/settings/install" @@ -52,14 +52,14 @@ var ( noTimestamp = metav1.Time{} ) -func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *extensions.ReplicaSet { - return &extensions.ReplicaSet{ +func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *apps.ReplicaSet { + return &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, CreationTimestamp: timestamp, Namespace: metav1.NamespaceDefault, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: func() *int32 { i := int32(replicas); return &i }(), Selector: &metav1.LabelSelector{MatchLabels: selector}, Template: v1.PodTemplateSpec{}, @@ -67,27 +67,27 @@ func rs(name string, replicas int, selector map[string]string, timestamp metav1. 
} } -func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *extensions.ReplicaSet { +func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *apps.ReplicaSet { rs := rs(name, specReplicas, selector, noTimestamp) - rs.Status = extensions.ReplicaSetStatus{ + rs.Status = apps.ReplicaSetStatus{ Replicas: int32(statusReplicas), } return rs } -func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment { - d := extensions.Deployment{ - TypeMeta: metav1.TypeMeta{APIVersion: "extensions/v1beta1"}, +func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment { + d := apps.Deployment{ + TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1"}, ObjectMeta: metav1.ObjectMeta{ UID: uuid.NewUUID(), Name: name, Namespace: metav1.NamespaceDefault, Annotations: make(map[string]string), }, - Spec: extensions.DeploymentSpec{ - Strategy: extensions.DeploymentStrategy{ - Type: extensions.RollingUpdateDeploymentStrategyType, - RollingUpdate: &extensions.RollingUpdateDeployment{ + Spec: apps.DeploymentSpec{ + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(), MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(), }, @@ -118,8 +118,8 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu return &d } -func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet { - return &extensions.ReplicaSet{ +func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet { + return &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, UID: uuid.NewUUID(), @@ -127,7 +127,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi Labels: d.Spec.Selector.MatchLabels, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: d.Spec.Selector, Replicas: func() *int32 { i := int32(replicas); return &i }(), Template: d.Spec.Template, @@ -135,7 +135,7 @@ func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensi } } -func getKey(d *extensions.Deployment, t *testing.T) string { +func getKey(d *apps.Deployment, t *testing.T) string { if key, err := controller.KeyFunc(d); err != nil { t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err) return "" @@ -149,8 +149,8 @@ type fixture struct { client *fake.Clientset // Objects to put in the store. - dLister []*extensions.Deployment - rsLister []*extensions.ReplicaSet + dLister []*apps.Deployment + rsLister []*apps.ReplicaSet podLister []*v1.Pod // Actions expected to happen on the client. 
Objects from here are also @@ -159,23 +159,23 @@ type fixture struct { objects []runtime.Object } -func (f *fixture) expectGetDeploymentAction(d *extensions.Deployment) { +func (f *fixture) expectGetDeploymentAction(d *apps.Deployment) { action := core.NewGetAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d.Name) f.actions = append(f.actions, action) } -func (f *fixture) expectUpdateDeploymentStatusAction(d *extensions.Deployment) { +func (f *fixture) expectUpdateDeploymentStatusAction(d *apps.Deployment) { action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d) action.Subresource = "status" f.actions = append(f.actions, action) } -func (f *fixture) expectUpdateDeploymentAction(d *extensions.Deployment) { +func (f *fixture) expectUpdateDeploymentAction(d *apps.Deployment) { action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d) f.actions = append(f.actions, action) } -func (f *fixture) expectCreateRSAction(rs *extensions.ReplicaSet) { +func (f *fixture) expectCreateRSAction(rs *apps.ReplicaSet) { f.actions = append(f.actions, core.NewCreateAction(schema.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs)) } @@ -189,7 +189,7 @@ func newFixture(t *testing.T) *fixture { func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory, error) { f.client = fake.NewSimpleClientset(f.objects...) informers := informers.NewSharedInformerFactory(f.client, controller.NoResyncPeriodFunc()) - c, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), f.client) + c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), f.client) if err != nil { return nil, nil, err } @@ -198,10 +198,10 @@ func (f *fixture) newController() (*DeploymentController, informers.SharedInform c.rsListerSynced = alwaysReady c.podListerSynced = alwaysReady for _, d := range f.dLister { - informers.Extensions().V1beta1().Deployments().Informer().GetIndexer().Add(d) + informers.Apps().V1().Deployments().Informer().GetIndexer().Add(d) } for _, rs := range f.rsLister { - informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) + informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs) } for _, pod := range f.podLister { informers.Core().V1().Pods().Informer().GetIndexer().Add(pod) @@ -344,20 +344,19 @@ func TestReentrantRollback(t *testing.T) { f := newFixture(t) d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - - d.Spec.RollbackTo = &extensions.RollbackConfig{Revision: 0} d.Annotations = map[string]string{util.RevisionAnnotation: "2"} + setRollbackTo(d, &extensions.RollbackConfig{Revision: 0}) f.dLister = append(f.dLister, d) rs1 := newReplicaSet(d, "deploymentrs-old", 0) rs1.Annotations = map[string]string{util.RevisionAnnotation: "1"} one := int64(1) rs1.Spec.Template.Spec.TerminationGracePeriodSeconds = &one - rs1.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] = "hash" + rs1.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = "hash" rs2 := newReplicaSet(d, "deploymentrs-new", 1) rs2.Annotations = map[string]string{util.RevisionAnnotation: "2"} - rs2.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] = "hash" + rs2.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = 
"hash" f.rsLister = append(f.rsLister, rs1, rs2) f.objects = append(f.objects, d, rs1, rs2) @@ -375,7 +374,7 @@ func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) { f := newFixture(t) foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType rs := newReplicaSet(foo, "foo-1", 1) pod := generatePodFromRS(rs) @@ -388,7 +387,7 @@ func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) { t.Fatalf("error creating Deployment controller: %v", err) } enqueued := false - c.enqueueDeployment = func(d *extensions.Deployment) { + c.enqueueDeployment = func(d *apps.Deployment) { if d.Name == "foo" { enqueued = true } @@ -408,7 +407,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) { f := newFixture(t) foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType rs1 := newReplicaSet(foo, "foo-1", 1) rs2 := newReplicaSet(foo, "foo-1", 1) pod1 := generatePodFromRS(rs1) @@ -424,7 +423,7 @@ func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) { t.Fatalf("error creating Deployment controller: %v", err) } enqueued := false - c.enqueueDeployment = func(d *extensions.Deployment) { + c.enqueueDeployment = func(d *apps.Deployment) { if d.Name == "foo" { enqueued = true } @@ -445,7 +444,7 @@ func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testi f := newFixture(t) foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType rs1 := newReplicaSet(foo, "foo-1", 1) rs2 := newReplicaSet(foo, "foo-2", 2) rs2.OwnerReferences = nil @@ -460,7 +459,7 @@ func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testi t.Fatalf("error creating Deployment controller: %v", err) } enqueued := false - c.enqueueDeployment = func(d *extensions.Deployment) { + c.enqueueDeployment = func(d *apps.Deployment) { if d.Name == "foo" { enqueued = true } @@ -481,7 +480,7 @@ func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t f := newFixture(t) foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) - foo.Spec.Strategy.Type = extensions.RecreateDeploymentStrategyType + foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType rs1 := newReplicaSet(foo, "foo-1", 1) rs2 := newReplicaSet(foo, "foo-2", 2) rs2.OwnerReferences = nil @@ -499,7 +498,7 @@ func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t t.Fatalf("error creating Deployment controller: %v", err) } enqueued := false - c.enqueueDeployment = func(d *extensions.Deployment) { + c.enqueueDeployment = func(d *apps.Deployment) { if d.Name == "foo" { enqueued = true } @@ -972,7 +971,7 @@ func bumpResourceVersion(obj metav1.Object) { } // generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template -func generatePodFromRS(rs *extensions.ReplicaSet) *v1.Pod { +func generatePodFromRS(rs *apps.ReplicaSet) *v1.Pod { trueVar := true return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/deployment/progress.go b/pkg/controller/deployment/progress.go index 1cd25ed1fff..cfe35ebab4d 100644 --- a/pkg/controller/deployment/progress.go +++ 
b/pkg/controller/deployment/progress.go @@ -23,8 +23,8 @@ import ( "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller/deployment/util" ) @@ -32,18 +32,18 @@ import ( // cases this helper will run that cannot be prevented from the scaling detection, // for example a resync of the deployment after it was scaled up. In those cases, // we shouldn't try to estimate any progress. -func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error { +func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error { newStatus := calculateStatus(allRSs, newRS, d) // If there is no progressDeadlineSeconds set, remove any Progressing condition. if d.Spec.ProgressDeadlineSeconds == nil { - util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing) + util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing) } // If there is only one replica set that is active then that means we are not running // a new rollout and this is a resync where we don't need to estimate any progress. // In such a case, we should simply not estimate any progress for this deployment. - currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) + currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) isCompleteDeployment := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason // Check for progress only if there is a progress deadline set and the latest rollout // hasn't completed yet. @@ -56,7 +56,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name) } - condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg) + condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg) util.SetDeploymentCondition(&newStatus, *condition) case util.DeploymentProgressing(d, &newStatus): @@ -66,7 +66,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q is progressing.", newRS.Name) } - condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg) + condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg) // Update the current Progressing condition or add a new one if it doesn't exist. // If a Progressing condition with status=true already exists, we should update // everything but lastTransitionTime. 
SetDeploymentCondition already does that but @@ -78,7 +78,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe if currentCond.Status == v1.ConditionTrue { condition.LastTransitionTime = currentCond.LastTransitionTime } - util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing) + util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing) } util.SetDeploymentCondition(&newStatus, *condition) @@ -89,7 +89,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q has timed out progressing.", newRS.Name) } - condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg) + condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg) util.SetDeploymentCondition(&newStatus, *condition) } } @@ -100,7 +100,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe // There will be only one ReplicaFailure condition on the replica set. util.SetDeploymentCondition(&newStatus, replicaFailureCond[0]) } else { - util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentReplicaFailure) + util.RemoveDeploymentCondition(&newStatus, apps.DeploymentReplicaFailure) } // Do not update if there is nothing new to add. @@ -112,17 +112,17 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) + _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } // getReplicaFailures will convert replica failure conditions from replica sets // to deployment conditions. -func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) []extensions.DeploymentCondition { - var conditions []extensions.DeploymentCondition +func (dc *DeploymentController) getReplicaFailures(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) []apps.DeploymentCondition { + var conditions []apps.DeploymentCondition if newRS != nil { for _, c := range newRS.Status.Conditions { - if c.Type != extensions.ReplicaSetReplicaFailure { + if c.Type != apps.ReplicaSetReplicaFailure { continue } conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c)) @@ -141,7 +141,7 @@ func (dc *DeploymentController) getReplicaFailures(allRSs []*extensions.ReplicaS } for _, c := range rs.Status.Conditions { - if c.Type != extensions.ReplicaSetReplicaFailure { + if c.Type != apps.ReplicaSetReplicaFailure { continue } conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c)) @@ -156,8 +156,8 @@ var nowFn = func() time.Time { return time.Now() } // requeueStuckDeployment checks whether the provided deployment needs to be synced for a progress // check. It returns the time after the deployment will be requeued for the progress check, 0 if it // will be requeued now, or -1 if it does not need to be requeued. 
-func (dc *DeploymentController) requeueStuckDeployment(d *extensions.Deployment, newStatus extensions.DeploymentStatus) time.Duration { - currentCond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) +func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newStatus apps.DeploymentStatus) time.Duration { + currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) // Can't estimate progress if there is no deadline in the spec or progressing condition in the current status. if d.Spec.ProgressDeadlineSeconds == nil || currentCond == nil { return time.Duration(-1) diff --git a/pkg/controller/deployment/progress_test.go b/pkg/controller/deployment/progress_test.go index 978b21469fb..da0aa2d67df 100644 --- a/pkg/controller/deployment/progress_test.go +++ b/pkg/controller/deployment/progress_test.go @@ -20,16 +20,16 @@ import ( "testing" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/controller/deployment/util" ) -func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) extensions.DeploymentStatus { - return extensions.DeploymentStatus{ +func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) apps.DeploymentStatus { + return apps.DeploymentStatus{ Replicas: replicas, UpdatedReplicas: updatedReplicas, AvailableReplicas: availableReplicas, @@ -37,16 +37,16 @@ func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) ext } // assumes the retuned deployment is always observed - not needed to be tested here. -func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []extensions.DeploymentCondition) *extensions.Deployment { - d := &extensions.Deployment{ +func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []apps.DeploymentCondition) *apps.Deployment { + d := &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "progress-test", }, - Spec: extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ ProgressDeadlineSeconds: pds, Replicas: &replicas, - Strategy: extensions.DeploymentStrategy{ - Type: extensions.RecreateDeploymentStrategyType, + Strategy: apps.DeploymentStrategy{ + Type: apps.RecreateDeploymentStrategyType, }, }, Status: newDeploymentStatus(statusReplicas, updatedReplicas, availableReplicas), @@ -56,9 +56,9 @@ func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, av } // helper to create RS with given availableReplicas -func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *extensions.ReplicaSet { +func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *apps.ReplicaSet { rs := rs(name, specReplicas, nil, metav1.Time{}) - rs.Status = extensions.ReplicaSetStatus{ + rs.Status = apps.ReplicaSetStatus{ Replicas: int32(statusReplicas), AvailableReplicas: int32(availableReplicas), } @@ -67,16 +67,16 @@ func newRSWithAvailable(name string, specReplicas, statusReplicas, availableRepl func TestRequeueStuckDeployment(t *testing.T) { pds := int32(60) - failed := []extensions.DeploymentCondition{ + failed := []apps.DeploymentCondition{ { - Type: extensions.DeploymentProgressing, + Type: apps.DeploymentProgressing, Status: v1.ConditionFalse, Reason: util.TimedOutReason, }, } - 
stuck := []extensions.DeploymentCondition{ + stuck := []apps.DeploymentCondition{ { - Type: extensions.DeploymentProgressing, + Type: apps.DeploymentProgressing, Status: v1.ConditionTrue, LastUpdateTime: metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC), }, @@ -84,8 +84,8 @@ func TestRequeueStuckDeployment(t *testing.T) { tests := []struct { name string - d *extensions.Deployment - status extensions.DeploymentStatus + d *apps.Deployment + status apps.DeploymentStatus nowFn func() time.Time expected time.Duration }{ @@ -178,20 +178,20 @@ func TestRequeueStuckDeployment(t *testing.T) { func TestSyncRolloutStatus(t *testing.T) { pds := int32(60) testTime := metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC) - failedTimedOut := extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + failedTimedOut := apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionFalse, Reason: util.TimedOutReason, } - newRSAvailable := extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + newRSAvailable := apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionTrue, Reason: util.NewRSAvailableReason, LastUpdateTime: testTime, LastTransitionTime: testTime, } - replicaSetUpdated := extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + replicaSetUpdated := apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionTrue, Reason: util.ReplicaSetUpdatedReason, LastUpdateTime: testTime, @@ -200,10 +200,10 @@ func TestSyncRolloutStatus(t *testing.T) { tests := []struct { name string - d *extensions.Deployment - allRSs []*extensions.ReplicaSet - newRS *extensions.ReplicaSet - conditionType extensions.DeploymentConditionType + d *apps.Deployment + allRSs []*apps.ReplicaSet + newRS *apps.ReplicaSet + conditionType apps.DeploymentConditionType conditionStatus v1.ConditionStatus conditionReason string lastUpdate metav1.Time @@ -211,15 +211,15 @@ func TestSyncRolloutStatus(t *testing.T) { }{ { name: "General: remove Progressing condition and do not estimate progress if deployment has no Progress Deadline", - d: currentDeployment(nil, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, + d: currentDeployment(nil, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, newRS: newRSWithAvailable("foo", 3, 2, 2), }, { name: "General: do not estimate progress of deployment with only one active ReplicaSet", - d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{newRSAvailable}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)}, - conditionType: extensions.DeploymentProgressing, + d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{newRSAvailable}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)}, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, lastUpdate: testTime, @@ -227,83 +227,83 @@ func TestSyncRolloutStatus(t *testing.T) { }, { name: "DeploymentProgressing: dont update lastTransitionTime if deployment already has Progressing=True", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}), + allRSs: 
[]*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.ReplicaSetUpdatedReason, lastTransition: testTime, }, { name: "DeploymentProgressing: update everything if deployment has Progressing=False", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{failedTimedOut}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.ReplicaSetUpdatedReason, }, { name: "DeploymentProgressing: create Progressing condition if it does not exist", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.ReplicaSetUpdatedReason, }, { name: "DeploymentComplete: dont update lastTransitionTime if deployment already has Progressing=True", - d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{replicaSetUpdated}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 3, 3), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, lastTransition: testTime, }, { name: "DeploymentComplete: update everything if deployment has Progressing=False", - d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{failedTimedOut}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{failedTimedOut}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 3, 3), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, }, { name: "DeploymentComplete: create Progressing condition if it does not exist", - d: currentDeployment(&pds, 3, 3, 3, 3, []extensions.DeploymentCondition{}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 3, 3), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, }, { name: "DeploymentComplete: defend against NPE when newRS=nil", - d: currentDeployment(&pds, 0, 3, 3, 3, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)}, - conditionType: extensions.DeploymentProgressing, + d: currentDeployment(&pds, 0, 3, 3, 3, 
[]apps.DeploymentCondition{replicaSetUpdated}), + allRSs: []*apps.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)}, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionTrue, conditionReason: util.NewRSAvailableReason, }, { name: "DeploymentTimedOut: update status if rollout exceeds Progress Deadline", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{replicaSetUpdated}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionFalse, conditionReason: util.TimedOutReason, }, { name: "DeploymentTimedOut: do not update status if deployment has existing timedOut condition", - d: currentDeployment(&pds, 3, 2, 2, 2, []extensions.DeploymentCondition{failedTimedOut}), - allRSs: []*extensions.ReplicaSet{}, + d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}), + allRSs: []*apps.ReplicaSet{}, newRS: newRSWithAvailable("foo", 3, 2, 2), - conditionType: extensions.DeploymentProgressing, + conditionType: apps.DeploymentProgressing, conditionStatus: v1.ConditionFalse, conditionReason: util.TimedOutReason, lastUpdate: testTime, diff --git a/pkg/controller/deployment/recreate.go b/pkg/controller/deployment/recreate.go index b6f01ef5460..04403978d5b 100644 --- a/pkg/controller/deployment/recreate.go +++ b/pkg/controller/deployment/recreate.go @@ -17,15 +17,15 @@ limitations under the License. package deployment import ( + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/deployment/util" ) // rolloutRecreate implements the logic for recreating a replica set. -func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { // Don't create a new RS if not already existed, so that we avoid scaling up before scaling down. newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) if err != nil { @@ -74,7 +74,7 @@ func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList } // scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate". -func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { +func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (bool, error) { scaled := false for i := range oldRSs { rs := oldRSs[i] @@ -95,7 +95,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*ext } // oldPodsRunning returns whether there are old pods running or any of the old ReplicaSets thinks that it runs pods. 
-func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) bool { +func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) bool { if oldPods := util.GetActualReplicaCountForReplicaSets(oldRSs); oldPods > 0 { return true } @@ -123,7 +123,7 @@ func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSe } // scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate". -func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { +func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) { scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment) return scaled, err } diff --git a/pkg/controller/deployment/recreate_test.go b/pkg/controller/deployment/recreate_test.go index d81435c759e..dc9d3b04aa4 100644 --- a/pkg/controller/deployment/recreate_test.go +++ b/pkg/controller/deployment/recreate_test.go @@ -20,8 +20,8 @@ import ( "fmt" "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" @@ -33,7 +33,7 @@ import ( func TestScaleDownOldReplicaSets(t *testing.T) { tests := []struct { oldRSSizes []int - d *extensions.Deployment + d *apps.Deployment }{ { oldRSSizes: []int{3}, @@ -45,7 +45,7 @@ func TestScaleDownOldReplicaSets(t *testing.T) { t.Logf("running scenario %d", i) test := tests[i] - var oldRSs []*extensions.ReplicaSet + var oldRSs []*apps.ReplicaSet var expected []runtime.Object for n, size := range test.oldRSSizes { @@ -58,14 +58,14 @@ func TestScaleDownOldReplicaSets(t *testing.T) { rsCopy.Spec.Replicas = &zero expected = append(expected, rsCopy) - if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*extensions.ReplicaSet).Spec.Replicas) { + if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*apps.ReplicaSet).Spec.Replicas) { t.Errorf("broken test - original and expected RS have the same size") } } kc := fake.NewSimpleClientset(expected...) 
informers := informers.NewSharedInformerFactory(kc, controller.NoResyncPeriodFunc()) - c, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), kc) + c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), kc) if err != nil { t.Fatalf("error creating Deployment controller: %v", err) } @@ -86,8 +86,8 @@ func TestOldPodsRunning(t *testing.T) { tests := []struct { name string - newRS *extensions.ReplicaSet - oldRSs []*extensions.ReplicaSet + newRS *apps.ReplicaSet + oldRSs []*apps.ReplicaSet podMap map[types.UID]*v1.PodList hasOldPodsRunning bool @@ -98,23 +98,23 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with running pods", - oldRSs: []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")}, + oldRSs: []*apps.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")}, podMap: podMapWithUIDs([]string{"some-uid", "other-uid"}), hasOldPodsRunning: true, }, { name: "old RSs without pods but with non-zero status replicas", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)}, hasOldPodsRunning: true, }, { name: "old RSs without pods or non-zero status replicas", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, hasOldPodsRunning: false, }, { name: "old RSs with zero status replicas but pods in terminal state are present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -135,7 +135,7 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with zero status replicas but pod in unknown phase present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -151,7 +151,7 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with zero status replicas with pending pod present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -167,7 +167,7 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with zero status replicas with running pod present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -183,7 +183,7 @@ func TestOldPodsRunning(t *testing.T) { }, { name: "old RSs with zero status replicas but pods in terminal state and pending are present", - oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, + oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)}, podMap: map[types.UID]*v1.PodList{ "uid-1": { Items: []v1.Pod{ @@ -225,7 +225,7 @@ func TestOldPodsRunning(t *testing.T) { } } -func rsWithUID(uid string) *extensions.ReplicaSet { +func rsWithUID(uid string) *apps.ReplicaSet { d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) rs := newReplicaSet(d, fmt.Sprintf("foo-%s", uid), 0) rs.UID = types.UID(uid) diff --git 
a/pkg/controller/deployment/rollback.go b/pkg/controller/deployment/rollback.go index 826185afc33..97e3b3027ec 100644 --- a/pkg/controller/deployment/rollback.go +++ b/pkg/controller/deployment/rollback.go @@ -18,9 +18,11 @@ package deployment import ( "fmt" + "strconv" "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/types" @@ -28,17 +30,17 @@ import ( ) // rollback the deployment to the specified revision. In any case cleanup the rollback spec. -func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true) if err != nil { return err } allRSs := append(allOldRSs, newRS) - toRevision := &d.Spec.RollbackTo.Revision + rollbackTo := getRollbackTo(d) // If rollback revision is 0, rollback to the last revision - if *toRevision == 0 { - if *toRevision = deploymentutil.LastRevision(allRSs); *toRevision == 0 { + if rollbackTo.Revision == 0 { + if rollbackTo.Revision = deploymentutil.LastRevision(allRSs); rollbackTo.Revision == 0 { // If we still can't find the last revision, gives up rollback dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.") // Gives up rollback @@ -51,14 +53,14 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err) continue } - if v == *toRevision { + if v == rollbackTo.Revision { glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v) // rollback by copying podTemplate.Spec from the replica set // revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call // no-op if the spec matches current deployment's podTemplate.Spec performedRollback, err := dc.rollbackToTemplate(d, rs) if performedRollback && err == nil { - dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, *toRevision)) + dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, rollbackTo.Revision)) } return err } @@ -71,7 +73,7 @@ func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*ext // rollbackToTemplate compares the templates of the provided deployment and replica set and // updates the deployment with the replica set template in case they are different. It also // cleans up the rollback spec so subsequent requeues of the deployment won't end up in here. 
-func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs *extensions.ReplicaSet) (bool, error) { +func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) { performedRollback := false if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) { glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec) @@ -98,20 +100,49 @@ func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs return performedRollback, dc.updateDeploymentAndClearRollbackTo(d) } -func (dc *DeploymentController) emitRollbackWarningEvent(d *extensions.Deployment, reason, message string) { +func (dc *DeploymentController) emitRollbackWarningEvent(d *apps.Deployment, reason, message string) { dc.eventRecorder.Eventf(d, v1.EventTypeWarning, reason, message) } -func (dc *DeploymentController) emitRollbackNormalEvent(d *extensions.Deployment, message string) { +func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, message string) { dc.eventRecorder.Eventf(d, v1.EventTypeNormal, deploymentutil.RollbackDone, message) } // updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and update the input deployment // It is assumed that the caller will have updated the deployment template appropriately (in case // we want to rollback). -func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *extensions.Deployment) error { +func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error { glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name) - d.Spec.RollbackTo = nil - _, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Update(d) + setRollbackTo(d, nil) + _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d) return err } + +// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped. +func getRollbackTo(d *apps.Deployment) *extensions.RollbackConfig { + // Extract the annotation used for round-tripping the deprecated RollbackTo field. + revision := d.Annotations[apps.DeprecatedRollbackTo] + if revision == "" { + return nil + } + revision64, err := strconv.ParseInt(revision, 10, 64) + if err != nil { + // If it's invalid, ignore it. + return nil + } + return &extensions.RollbackConfig{ + Revision: revision64, + } +} + +// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped. +func setRollbackTo(d *apps.Deployment, rollbackTo *extensions.RollbackConfig) { + if rollbackTo == nil { + delete(d.Annotations, apps.DeprecatedRollbackTo) + return + } + if d.Annotations == nil { + d.Annotations = make(map[string]string) + } + d.Annotations[apps.DeprecatedRollbackTo] = strconv.FormatInt(rollbackTo.Revision, 10) +} diff --git a/pkg/controller/deployment/rolling.go b/pkg/controller/deployment/rolling.go index 598928366d7..132981fa3e6 100644 --- a/pkg/controller/deployment/rolling.go +++ b/pkg/controller/deployment/rolling.go @@ -21,8 +21,8 @@ import ( "sort" "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/integer" "k8s.io/kubernetes/pkg/controller" @@ -30,7 +30,7 @@ import ( ) // rolloutRolling implements the logic for rolling a new replica set. 
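// It scales the new ReplicaSet up and the old ReplicaSets down within the
// bounds allowed by maxSurge and maxUnavailable, and finishes by syncing the
// rollout status.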
-func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true) if err != nil { return err @@ -67,7 +67,7 @@ func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList return dc.syncRolloutStatus(allRSs, newRS, d) } -func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { +func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) { if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) { // Scaling not required. return false, nil @@ -85,7 +85,7 @@ func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.Repl return scaled, err } -func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { +func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) { oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs) if oldPodsCount == 0 { // Can't scale down further @@ -154,7 +154,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep } // cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted. -func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) { +func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment, maxCleanupCount int32) ([]*apps.ReplicaSet, int32, error) { sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) // Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order // such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will @@ -191,7 +191,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re // scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate". // Need check maxUnavailable to ensure availability -func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int32, error) { +func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (int32, error) { maxUnavailable := deploymentutil.MaxUnavailable(*deployment) // Check if we can scale down. 
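The scale-down helpers in the hunk above (reconcileOldReplicaSets and scaleDownOldReplicaSetsForRollingUpdate) both gate on maxUnavailable so that a rolling update never drops below the availability floor. A minimal, self-contained sketch of that bound follows; the names and signature are made up for illustration, and it works on plain counts rather than the controller's ReplicaSet objects and deploymentutil helpers:

package main

import "fmt"

// maxScaleDown is an illustrative helper (not part of the patch): it returns
// how many old replicas may be removed right now without dropping below the
// availability floor implied by maxUnavailable.
func maxScaleDown(desiredReplicas, maxUnavailable, availablePods int32) int32 {
	minAvailable := desiredReplicas - maxUnavailable
	if allowed := availablePods - minAvailable; allowed > 0 {
		return allowed
	}
	// Already at or below the floor; nothing may be scaled down yet.
	return 0
}

func main() {
	// 10 desired replicas with maxUnavailable=2 means at least 8 pods must stay
	// available; with 9 currently available, only 1 old replica can go now.
	fmt.Println(maxScaleDown(10, 2, 9))
}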
diff --git a/pkg/controller/deployment/rolling_test.go b/pkg/controller/deployment/rolling_test.go index 5f870e40b5f..b0feb42b40b 100644 --- a/pkg/controller/deployment/rolling_test.go +++ b/pkg/controller/deployment/rolling_test.go @@ -19,7 +19,7 @@ package deployment import ( "testing" - extensions "k8s.io/api/extensions/v1beta1" + apps "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" @@ -82,7 +82,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) { t.Logf("executing scenario %d", i) newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp) oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp) - allRSs := []*extensions.ReplicaSet{newRS, oldRS} + allRSs := []*apps.ReplicaSet{newRS, oldRS} maxUnavailable := intstr.FromInt(0) deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"}) fake := fake.Clientset{} @@ -109,7 +109,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) { t.Errorf("expected 1 action during scale, got: %v", fake.Actions()) continue } - updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet) + updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*apps.ReplicaSet) if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a { t.Errorf("expected update to %d replicas, got %d", e, a) } @@ -187,8 +187,8 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) { newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS) oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp) oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS) - oldRSs := []*extensions.ReplicaSet{oldRS} - allRSs := []*extensions.ReplicaSet{oldRS, newRS} + oldRSs := []*apps.ReplicaSet{oldRS} + allRSs := []*apps.ReplicaSet{oldRS, newRS} maxSurge := intstr.FromInt(0) deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector) fakeClientset := fake.Clientset{} @@ -255,7 +255,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) { t.Logf("executing scenario %d", i) oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp) oldRS.Status.AvailableReplicas = int32(test.readyPods) - oldRSs := []*extensions.ReplicaSet{oldRS} + oldRSs := []*apps.ReplicaSet{oldRS} maxSurge := intstr.FromInt(2) maxUnavailable := intstr.FromInt(2) deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil) @@ -330,8 +330,8 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing t.Logf("executing scenario %d", i) oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp) oldRS.Status.AvailableReplicas = int32(test.readyPods) - allRSs := []*extensions.ReplicaSet{oldRS} - oldRSs := []*extensions.ReplicaSet{oldRS} + allRSs := []*apps.ReplicaSet{oldRS} + oldRSs := []*apps.ReplicaSet{oldRS} maxSurge := intstr.FromInt(0) deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"}) fakeClientset := fake.Clientset{} @@ -371,7 +371,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing t.Errorf("expected an update action") continue } - updated := updateAction.GetObject().(*extensions.ReplicaSet) + updated := updateAction.GetObject().(*apps.ReplicaSet) if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a { t.Errorf("expected update to 
%d replicas, got %d", e, a) } diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index 42891c1b340..a49938bba9b 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -23,8 +23,8 @@ import ( "strconv" "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -35,7 +35,7 @@ import ( ) // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions. -func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) if err != nil { return err @@ -47,7 +47,7 @@ func (dc *DeploymentController) syncStatusOnly(d *extensions.Deployment, rsList // sync is responsible for reconciling deployments on scaling events or when they // are paused. -func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error { +func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) if err != nil { return err @@ -59,7 +59,7 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi } // Clean up the deployment when it's paused and no rollback is in flight. - if d.Spec.Paused && d.Spec.RollbackTo == nil { + if d.Spec.Paused && getRollbackTo(d) == nil { if err := dc.cleanupDeployment(oldRSs, d); err != nil { return err } @@ -72,11 +72,11 @@ func (dc *DeploymentController) sync(d *extensions.Deployment, rsList []*extensi // checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition. // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments // that were paused for longer than progressDeadlineSeconds. -func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) error { +func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error { if d.Spec.ProgressDeadlineSeconds == nil { return nil } - cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) + cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) if cond != nil && cond.Reason == deploymentutil.TimedOutReason { // If we have reported lack of progress, do not overwrite it with a paused condition. 
return nil @@ -85,11 +85,11 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) needsUpdate := false if d.Spec.Paused && !pausedCondExists { - condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused") + condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } else if !d.Spec.Paused && pausedCondExists { - condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed") + condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } @@ -99,7 +99,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) } var err error - d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) return err } @@ -115,7 +115,7 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) // // Note that currently the deployment controller is using caches to avoid querying the server for reads. // This may lead to stale reads of replica sets, thus incorrect deployment status. -func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { +func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) { _, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList) // Get new replica set with the updated revision number @@ -132,7 +132,7 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *extensions.D // 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes. // 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. // Note that the pod-template-hash will be added to adopted RSes and pods. 
-func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) { +func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) { existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList) // Calculate the max revision number among all old RSes @@ -152,7 +152,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds - return dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) + return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) } // Should use the revision in existingNewRS's annotation, since it set by before @@ -160,17 +160,17 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // If no other Progressing condition has been recorded and we need to estimate the progress // of this deployment then it is likely that old users started caring about progress. In that // case we need to take into account the first time we noticed their new replica set. - cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) + cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) if d.Spec.ProgressDeadlineSeconds != nil && cond == nil { msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name) - condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg) + condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg) deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } if needsUpdate { var err error - if d, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d); err != nil { + if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil { return nil, err } } @@ -184,19 +184,19 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // new ReplicaSet does not exist, create one. newRSTemplate := *d.Spec.Template.DeepCopy() podTemplateSpecHash := fmt.Sprintf("%d", controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount)) - newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) + newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Add podTemplateHash label to selector. 
- newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) + newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Create new ReplicaSet - newRS := extensions.ReplicaSet{ + newRS := apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ // Make the name deterministic, to ensure idempotence Name: d.Name + "-" + rand.SafeEncodeString(podTemplateSpecHash), Namespace: d.Namespace, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: new(int32), MinReadySeconds: d.Spec.MinReadySeconds, Selector: newRSSelector, @@ -216,7 +216,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // hash collisions. If there is any other error, we need to report it in the status of // the Deployment. alreadyExists := false - createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS) + createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS) switch { // We may end up hitting this due to a slow cache or a fast resync of the Deployment. case errors.IsAlreadyExists(err): @@ -248,7 +248,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis *d.Status.CollisionCount++ // Update the collisionCount for the Deployment and let it requeue by returning the original // error. - _, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) if dErr == nil { glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount) } @@ -256,12 +256,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis case err != nil: msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err) if d.Spec.ProgressDeadlineSeconds != nil { - cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg) + cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg) deploymentutil.SetDeploymentCondition(&d.Status, *cond) // We don't really care about this error at this point, since we have a bigger issue to report. // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account // these reasons as well. 
Related issue: https://github.com/kubernetes/kubernetes/issues/18568 - _, _ = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + _, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) } dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) return nil, err @@ -273,12 +273,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision) if !alreadyExists && d.Spec.ProgressDeadlineSeconds != nil { msg := fmt.Sprintf("Created new replica set %q", createdRS.Name) - condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg) + condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg) deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } if needsUpdate { - _, err = dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d) + _, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d) } return createdRS, err } @@ -288,7 +288,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis // have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable // replicas in the event of a problem with the rolled out template. Should run only on scaling events or // when a deployment is paused and not during the normal rollout process. -func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) error { +func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error { // If there is only one active replica set then we should scale that up to the full count of the // deployment. If there is no active replica set, then we should scale up the newest replica set. 
if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil { @@ -386,7 +386,7 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS * return nil } -func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { +func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) { // No need to scale if *(rs.Spec.Replicas) == newScale { return false, rs, nil @@ -401,7 +401,7 @@ func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.Rep return scaled, newRS, err } -func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment, scalingOperation string) (bool, *extensions.ReplicaSet, error) { +func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) { sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale @@ -413,7 +413,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc rsCopy := rs.DeepCopy() *(rsCopy.Spec.Replicas) = newScale deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment)) - rs, err = dc.client.ExtensionsV1beta1().ReplicaSets(rsCopy.Namespace).Update(rsCopy) + rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy) if err == nil && sizeNeedsUpdate { scaled = true dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) @@ -425,13 +425,13 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc // cleanupDeployment is responsible for cleaning up a deployment ie. retains all but the latest N old replica sets // where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept // around by default 1) for historical reasons and 2) for the ability to rollback a deployment. -func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error { +func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error { if deployment.Spec.RevisionHistoryLimit == nil { return nil } // Avoid deleting replica set with deletion timestamp set - aliveFilter := func(rs *extensions.ReplicaSet) bool { + aliveFilter := func(rs *apps.ReplicaSet) bool { return rs != nil && rs.ObjectMeta.DeletionTimestamp == nil } cleanableRSes := controller.FilterReplicaSets(oldRSs, aliveFilter) @@ -451,7 +451,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe continue } glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name) - if err := dc.client.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { + if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { // Return error instead of aggregating and continuing DELETEs on the theory // that we may be overloading the api server. 
return err @@ -462,7 +462,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe } // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary -func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error { +func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error { newStatus := calculateStatus(allRSs, newRS, d) if reflect.DeepEqual(d.Status, newStatus) { @@ -471,12 +471,12 @@ func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.Replic newDeployment := d newDeployment.Status = newStatus - _, err := dc.client.ExtensionsV1beta1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) + _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } // calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets. -func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) extensions.DeploymentStatus { +func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) apps.DeploymentStatus { availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs) totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) unavailableReplicas := totalReplicas - availableReplicas @@ -486,11 +486,11 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS unavailableReplicas = 0 } - status := extensions.DeploymentStatus{ + status := apps.DeploymentStatus{ // TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value. 
ObservedGeneration: deployment.Generation, Replicas: deploymentutil.GetActualReplicaCountForReplicaSets(allRSs), - UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}), + UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*apps.ReplicaSet{newRS}), ReadyReplicas: deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs), AvailableReplicas: availableReplicas, UnavailableReplicas: unavailableReplicas, @@ -504,10 +504,10 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS } if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) { - minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.") + minAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.") deploymentutil.SetDeploymentCondition(&status, *minAvailability) } else { - noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.") + noMinAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.") deploymentutil.SetDeploymentCondition(&status, *noMinAvailability) } @@ -519,7 +519,7 @@ func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaS // // rsList should come from getReplicaSetsForDeployment(d). // podMap should come from getPodMapForDeployment(d, rsList). 
-func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) { +func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false) if err != nil { return false, err diff --git a/pkg/controller/deployment/sync_test.go b/pkg/controller/deployment/sync_test.go index 6f5cc96b344..4cd4c0ab2d9 100644 --- a/pkg/controller/deployment/sync_test.go +++ b/pkg/controller/deployment/sync_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - extensions "k8s.io/api/extensions/v1beta1" + apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/informers" @@ -41,7 +41,7 @@ func TestScale(t *testing.T) { oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC) olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC) - var updatedTemplate = func(replicas int) *extensions.Deployment { + var updatedTemplate = func(replicas int) *apps.Deployment { d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"}) d.Spec.Template.Labels["another"] = "label" return d @@ -49,14 +49,14 @@ func TestScale(t *testing.T) { tests := []struct { name string - deployment *extensions.Deployment - oldDeployment *extensions.Deployment + deployment *apps.Deployment + oldDeployment *apps.Deployment - newRS *extensions.ReplicaSet - oldRSs []*extensions.ReplicaSet + newRS *apps.ReplicaSet + oldRSs []*apps.ReplicaSet - expectedNew *extensions.ReplicaSet - expectedOld []*extensions.ReplicaSet + expectedNew *apps.ReplicaSet + expectedOld []*apps.ReplicaSet wasntUpdated map[string]bool desiredReplicasAnnotations map[string]int32 @@ -67,10 +67,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil), newRS: rs("foo-v1", 10, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{}, + oldRSs: []*apps.ReplicaSet{}, expectedNew: rs("foo-v1", 12, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{}, + expectedOld: []*apps.ReplicaSet{}, }, { name: "normal scaling event: 10 -> 5", @@ -78,10 +78,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil), newRS: rs("foo-v1", 10, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{}, + oldRSs: []*apps.ReplicaSet{}, expectedNew: rs("foo-v1", 5, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{}, + expectedOld: []*apps.ReplicaSet{}, }, { name: "proportional scaling: 5 -> 10", @@ -89,10 +89,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil), newRS: rs("foo-v2", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 4, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, }, { name: "proportional scaling: 5 -> 3", @@ -100,10 +100,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil), newRS: rs("foo-v2", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 1, nil, newTimestamp), - 
expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)}, }, { name: "proportional scaling: 9 -> 4", @@ -111,10 +111,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil), newRS: rs("foo-v2", 8, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 4, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)}, }, { name: "proportional scaling: 7 -> 10", @@ -122,10 +122,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 3, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, }, { name: "proportional scaling: 13 -> 8", @@ -133,10 +133,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 1, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, }, // Scales up the new replica set. { @@ -145,10 +145,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil), newRS: rs("foo-v3", 1, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 2, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, }, // Scales down the older replica set. { @@ -157,10 +157,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil), newRS: rs("foo-v3", 1, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 1, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, }, // Scales up the latest replica set first. 
{ @@ -169,10 +169,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil), newRS: nil, - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, expectedNew: nil, - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)}, }, // Scales down to zero { @@ -181,10 +181,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil), newRS: rs("foo-v3", 3, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 0, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, }, // Scales up from zero { @@ -193,10 +193,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil), newRS: rs("foo-v3", 0, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 6, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)}, wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true}, }, // Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 ) @@ -208,10 +208,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil), newRS: rs("foo-v3", 2, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, expectedNew: rs("foo-v3", 2, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)}, wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true}, desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)}, @@ -222,10 +222,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, intOrStrP(2), nil, nil), newRS: rs("foo-v2", 6, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 11, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)}, }, { name: "change both surge and size", @@ -233,10 +233,10 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, intOrStrP(3), nil, nil), 
newRS: rs("foo-v2", 5, nil, newTimestamp), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 22, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)}, }, { name: "change both size and template", @@ -244,25 +244,25 @@ func TestScale(t *testing.T) { oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}), newRS: nil, - oldRSs: []*extensions.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)}, expectedNew: nil, - expectedOld: []*extensions.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)}, }, { name: "saturated but broken new replica set does not affect old pods", deployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil), oldDeployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil), - newRS: func() *extensions.ReplicaSet { + newRS: func() *apps.ReplicaSet { rs := rs("foo-v2", 2, nil, newTimestamp) rs.Status.AvailableReplicas = 0 return rs }(), - oldRSs: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, + oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, expectedNew: rs("foo-v2", 2, nil, newTimestamp), - expectedOld: []*extensions.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, + expectedOld: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)}, }, } @@ -313,7 +313,7 @@ func TestScale(t *testing.T) { } // Get all the UPDATE actions and update nameToSize with all the updated sizes. for _, action := range fake.Actions() { - rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet) + rs := action.(testclient.UpdateAction).GetObject().(*apps.ReplicaSet) if !test.wasntUpdated[rs.Name] { nameToSize[rs.Name] = *(rs.Spec.Replicas) } @@ -345,12 +345,12 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { alreadyDeleted.DeletionTimestamp = &now tests := []struct { - oldRSs []*extensions.ReplicaSet + oldRSs []*apps.ReplicaSet revisionHistoryLimit int32 expectedDeletions int }{ { - oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ newRSWithStatus("foo-1", 0, 0, selector), newRSWithStatus("foo-2", 0, 0, selector), newRSWithStatus("foo-3", 0, 0, selector), @@ -360,7 +360,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { }, { // Only delete the replica set with Spec.Replicas = Status.Replicas = 0. 
- oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ newRSWithStatus("foo-1", 0, 0, selector), newRSWithStatus("foo-2", 0, 1, selector), newRSWithStatus("foo-3", 1, 0, selector), @@ -371,7 +371,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { }, { - oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ newRSWithStatus("foo-1", 0, 0, selector), newRSWithStatus("foo-2", 0, 0, selector), }, @@ -379,7 +379,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { expectedDeletions: 2, }, { - oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ newRSWithStatus("foo-1", 1, 1, selector), newRSWithStatus("foo-2", 1, 1, selector), }, @@ -387,7 +387,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { expectedDeletions: 0, }, { - oldRSs: []*extensions.ReplicaSet{ + oldRSs: []*apps.ReplicaSet{ alreadyDeleted, }, revisionHistoryLimit: 0, @@ -401,7 +401,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { fake := &fake.Clientset{} informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc()) - controller, err := NewDeploymentController(informers.Extensions().V1beta1().Deployments(), informers.Extensions().V1beta1().ReplicaSets(), informers.Core().V1().Pods(), fake) + controller, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), fake) if err != nil { t.Fatalf("error creating Deployment controller: %v", err) } @@ -411,7 +411,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) { controller.rsListerSynced = alwaysReady controller.podListerSynced = alwaysReady for _, rs := range test.oldRSs { - informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs) + informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs) } stopCh := make(chan struct{}) diff --git a/pkg/controller/deployment/util/BUILD b/pkg/controller/deployment/util/BUILD index eabed9df65a..e3bd8440119 100644 --- a/pkg/controller/deployment/util/BUILD +++ b/pkg/controller/deployment/util/BUILD @@ -19,8 +19,8 @@ go_library( "//pkg/controller:go_default_library", "//pkg/util/labels:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -29,10 +29,10 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library", "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", "//vendor/k8s.io/client-go/util/retry:go_default_library", ], @@ -48,8 +48,8 @@ go_test( deps = [ "//pkg/controller:go_default_library", "//pkg/util/hash:go_default_library", 
+ "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index d5bbabfdd79..a435e5eacef 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -25,8 +25,8 @@ import ( "github.com/golang/glog" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +34,7 @@ import ( "k8s.io/apimachinery/pkg/types" intstrutil "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" - extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" "k8s.io/client-go/util/integer" internalextensions "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/controller" @@ -98,8 +98,8 @@ const ( ) // NewDeploymentCondition creates a new deployment condition. -func NewDeploymentCondition(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *extensions.DeploymentCondition { - return &extensions.DeploymentCondition{ +func NewDeploymentCondition(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition { + return &apps.DeploymentCondition{ Type: condType, Status: status, LastUpdateTime: metav1.Now(), @@ -110,7 +110,7 @@ func NewDeploymentCondition(condType extensions.DeploymentConditionType, status } // GetDeploymentCondition returns the condition with the provided type. -func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensions.DeploymentConditionType) *extensions.DeploymentCondition { +func GetDeploymentCondition(status apps.DeploymentStatus, condType apps.DeploymentConditionType) *apps.DeploymentCondition { for i := range status.Conditions { c := status.Conditions[i] if c.Type == condType { @@ -122,7 +122,7 @@ func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensi // SetDeploymentCondition updates the deployment to include the provided condition. If the condition that // we are about to add already exists and has the same status and reason then we are not going to update. -func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) { +func SetDeploymentCondition(status *apps.DeploymentStatus, condition apps.DeploymentCondition) { currentCond := GetDeploymentCondition(*status, condition.Type) if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { return @@ -136,13 +136,13 @@ func SetDeploymentCondition(status *extensions.DeploymentStatus, condition exten } // RemoveDeploymentCondition removes the deployment condition with the provided type. 
-func RemoveDeploymentCondition(status *extensions.DeploymentStatus, condType extensions.DeploymentConditionType) { +func RemoveDeploymentCondition(status *apps.DeploymentStatus, condType apps.DeploymentConditionType) { status.Conditions = filterOutCondition(status.Conditions, condType) } // filterOutCondition returns a new slice of deployment conditions without conditions with the provided type. -func filterOutCondition(conditions []extensions.DeploymentCondition, condType extensions.DeploymentConditionType) []extensions.DeploymentCondition { - var newConditions []extensions.DeploymentCondition +func filterOutCondition(conditions []apps.DeploymentCondition, condType apps.DeploymentConditionType) []apps.DeploymentCondition { + var newConditions []apps.DeploymentCondition for _, c := range conditions { if c.Type == condType { continue @@ -154,9 +154,9 @@ func filterOutCondition(conditions []extensions.DeploymentCondition, condType ex // ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition. // Useful for promoting replica set failure conditions into deployments. -func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentConditionType(cond.Type), +func ReplicaSetToDeploymentCondition(cond apps.ReplicaSetCondition) apps.DeploymentCondition { + return apps.DeploymentCondition{ + Type: apps.DeploymentConditionType(cond.Type), Status: cond.Status, LastTransitionTime: cond.LastTransitionTime, LastUpdateTime: cond.LastTransitionTime, @@ -166,7 +166,7 @@ func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extens } // SetDeploymentRevision updates the revision for a deployment. -func SetDeploymentRevision(deployment *extensions.Deployment, revision string) bool { +func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool { updated := false if deployment.Annotations == nil { @@ -181,7 +181,7 @@ func SetDeploymentRevision(deployment *extensions.Deployment, revision string) b } // MaxRevision finds the highest revision in the replica sets -func MaxRevision(allRSs []*extensions.ReplicaSet) int64 { +func MaxRevision(allRSs []*apps.ReplicaSet) int64 { max := int64(0) for _, rs := range allRSs { if v, err := Revision(rs); err != nil { @@ -195,7 +195,7 @@ func MaxRevision(allRSs []*extensions.ReplicaSet) int64 { } // LastRevision finds the second max revision number in all replica sets (the last revision) -func LastRevision(allRSs []*extensions.ReplicaSet) int64 { +func LastRevision(allRSs []*apps.ReplicaSet) int64 { max, secMax := int64(0), int64(0) for _, rs := range allRSs { if v, err := Revision(rs); err != nil { @@ -226,7 +226,7 @@ func Revision(obj runtime.Object) (int64, error) { // SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and // copying required deployment annotations to it; it returns true if replica set's annotation is changed. 
-func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, newRevision string, exists bool) bool { +func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool) bool { // First, copy deployment's annotations (except for apply and revision annotations) annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS) // Then, update replica set's revision annotation @@ -283,6 +283,7 @@ var annotationsToSkip = map[string]bool{ RevisionHistoryAnnotation: true, DesiredReplicasAnnotation: true, MaxReplicasAnnotation: true, + apps.DeprecatedRollbackTo: true, } // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key @@ -295,7 +296,7 @@ func skipCopyAnnotation(key string) bool { // copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations, // and returns true if replica set's annotation is changed. // Note that apply and revision annotations are not copied. -func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool { +func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps.ReplicaSet) bool { rsAnnotationsChanged := false if rs.Annotations == nil { rs.Annotations = make(map[string]string) @@ -316,7 +317,7 @@ func copyDeploymentAnnotationsToReplicaSet(deployment *extensions.Deployment, rs // SetDeploymentAnnotationsTo sets deployment's annotations as given RS's annotations. // This action should be done if and only if the deployment is rolling back to this rs. // Note that apply and revision annotations are not changed. -func SetDeploymentAnnotationsTo(deployment *extensions.Deployment, rollbackToRS *extensions.ReplicaSet) { +func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) { deployment.Annotations = getSkippedAnnotations(deployment.Annotations) for k, v := range rollbackToRS.Annotations { if !skipCopyAnnotation(k) { @@ -337,7 +338,7 @@ func getSkippedAnnotations(annotations map[string]string) map[string]string { // FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active // replica set. If there are more active replica sets, then we should proportionally scale them. 
-func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) *extensions.ReplicaSet { +func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet { if newRS == nil && len(oldRSs) == 0 { return nil } @@ -360,15 +361,15 @@ func FindActiveOrLatest(newRS *extensions.ReplicaSet, oldRSs []*extensions.Repli } // GetDesiredReplicasAnnotation returns the number of desired replicas -func GetDesiredReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) { +func GetDesiredReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) { return getIntFromAnnotation(rs, DesiredReplicasAnnotation) } -func getMaxReplicasAnnotation(rs *extensions.ReplicaSet) (int32, bool) { +func getMaxReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) { return getIntFromAnnotation(rs, MaxReplicasAnnotation) } -func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int32, bool) { +func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool) { annotationValue, ok := rs.Annotations[annotationKey] if !ok { return int32(0), false @@ -382,7 +383,7 @@ func getIntFromAnnotation(rs *extensions.ReplicaSet, annotationKey string) (int3 } // SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations -func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool { +func SetReplicasAnnotations(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool { updated := false if rs.Annotations == nil { rs.Annotations = make(map[string]string) @@ -401,7 +402,7 @@ func SetReplicasAnnotations(rs *extensions.ReplicaSet, desiredReplicas, maxRepli } // AnnotationsNeedUpdate return true if ReplicasAnnotations need to be updated -func ReplicasAnnotationsNeedUpdate(rs *extensions.ReplicaSet, desiredReplicas, maxReplicas int32) bool { +func ReplicasAnnotationsNeedUpdate(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool { if rs.Annotations == nil { return true } @@ -417,7 +418,7 @@ func ReplicasAnnotationsNeedUpdate(rs *extensions.ReplicaSet, desiredReplicas, m } // MaxUnavailable returns the maximum unavailable pods a rolling deployment can take. -func MaxUnavailable(deployment extensions.Deployment) int32 { +func MaxUnavailable(deployment apps.Deployment) int32 { if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { return int32(0) } @@ -430,7 +431,7 @@ func MaxUnavailable(deployment extensions.Deployment) int32 { } // MinAvailable returns the minimum available pods of a given deployment -func MinAvailable(deployment *extensions.Deployment) int32 { +func MinAvailable(deployment *apps.Deployment) int32 { if !IsRollingUpdate(deployment) { return int32(0) } @@ -438,7 +439,7 @@ func MinAvailable(deployment *extensions.Deployment) int32 { } // MaxSurge returns the maximum surge pods a rolling deployment can take. -func MaxSurge(deployment extensions.Deployment) int32 { +func MaxSurge(deployment apps.Deployment) int32 { if !IsRollingUpdate(&deployment) { return int32(0) } @@ -450,7 +451,7 @@ func MaxSurge(deployment extensions.Deployment) int32 { // GetProportion will estimate the proportion for the provided replica set using 1. the current size // of the parent deployment, 2. the replica count that needs be added on the replica sets of the // deployment, and 3. the total replicas added in the replica sets of the deployment so far. 
-func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { +func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { return int32(0) } @@ -472,7 +473,7 @@ func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymen // getReplicaSetFraction estimates the fraction of replicas a replica set can have in // 1. a scaling event during a rollout or 2. when scaling a paused deployment. -func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 { +func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 { // If we are scaling down to zero then the fraction of this replica set is its whole size (negative) if *(d.Spec.Replicas) == int32(0) { return -*(rs.Spec.Replicas) @@ -497,7 +498,7 @@ func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) in // GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. // The third returned value is the new replica set, and it may be nil if it doesn't exist yet. -func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, *extensions.ReplicaSet, error) { +func GetAllReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) { rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, nil, nil, err @@ -509,7 +510,7 @@ func GetAllReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex // GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { +func GetOldReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) { rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, nil, err @@ -520,7 +521,7 @@ func GetOldReplicaSets(deployment *extensions.Deployment, c extensionsv1beta1.Ex // GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. // Returns nil if the new replica set doesn't exist yet. 
-func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.ExtensionsV1beta1Interface) (*extensions.ReplicaSet, error) { +func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) { rsList, err := ListReplicaSets(deployment, RsListFromClient(c)) if err != nil { return nil, err @@ -529,13 +530,13 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c extensionsv1beta1.Ext } // RsListFromClient returns an rsListFunc that wraps the given client. -func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc { - return func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { +func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc { + return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) { rsList, err := c.ReplicaSets(namespace).List(options) if err != nil { return nil, err } - var ret []*extensions.ReplicaSet + var ret []*apps.ReplicaSet for i := range rsList.Items { ret = append(ret, &rsList.Items[i]) } @@ -544,14 +545,14 @@ func RsListFromClient(c extensionsv1beta1.ExtensionsV1beta1Interface) RsListFunc } // TODO: switch this to full namespacers -type RsListFunc func(string, metav1.ListOptions) ([]*extensions.ReplicaSet, error) +type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error) type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error) // ListReplicaSets returns a slice of RSes the given deployment targets. // Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), // because only the controller itself should do that. // However, it does filter out anything whose ControllerRef doesn't match. -func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([]*extensions.ReplicaSet, error) { +func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) { // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830. namespace := deployment.Namespace @@ -565,7 +566,7 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList RsListFunc) ([ return nil, err } // Only include those whose ControllerRef matches the Deployment. - owned := make([]*extensions.ReplicaSet, 0, len(all)) + owned := make([]*apps.ReplicaSet, 0, len(all)) for _, rs := range all { if metav1.IsControlledBy(rs, deployment) { owned = append(owned, rs) @@ -603,7 +604,7 @@ func ListReplicaSetsInternal(deployment *internalextensions.Deployment, getRSLis // Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan), // because only the controller itself should do that. // However, it does filter out anything whose ControllerRef doesn't match. 
-func ListPods(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) { +func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) { namespace := deployment.Namespace selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) if err != nil { @@ -640,13 +641,13 @@ func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() // Remove hash labels from template.Labels before comparing - delete(t1Copy.Labels, extensions.DefaultDeploymentUniqueLabelKey) - delete(t2Copy.Labels, extensions.DefaultDeploymentUniqueLabelKey) + delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) + delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey) return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) } // FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). -func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) *extensions.ReplicaSet { +func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet { sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList)) for i := range rsList { if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) { @@ -663,9 +664,9 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.R // FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet) { - var requiredRSs []*extensions.ReplicaSet - var allRSs []*extensions.ReplicaSet +func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet) ([]*apps.ReplicaSet, []*apps.ReplicaSet) { + var requiredRSs []*apps.ReplicaSet + var allRSs []*apps.ReplicaSet newRS := FindNewReplicaSet(deployment, rsList) for _, rs := range rsList { // Filter out new replica set @@ -681,17 +682,17 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions. } // SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. -func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template v1.PodTemplateSpec) *extensions.Deployment { +func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment { deployment.Spec.Template.ObjectMeta = template.ObjectMeta deployment.Spec.Template.Spec = template.Spec deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( deployment.Spec.Template.ObjectMeta.Labels, - extensions.DefaultDeploymentUniqueLabelKey) + apps.DefaultDeploymentUniqueLabelKey) return deployment } // GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets. 
-func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { +func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { totalReplicas := int32(0) for _, rs := range replicaSets { if rs != nil { @@ -702,7 +703,7 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { } // GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. -func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { +func GetActualReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { totalActualReplicas := int32(0) for _, rs := range replicaSets { if rs != nil { @@ -713,7 +714,7 @@ func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) i } // GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets. -func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { +func GetReadyReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { totalReadyReplicas := int32(0) for _, rs := range replicaSets { if rs != nil { @@ -724,7 +725,7 @@ func GetReadyReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) in } // GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets. -func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { +func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 { totalAvailableReplicas := int32(0) for _, rs := range replicaSets { if rs != nil { @@ -735,13 +736,13 @@ func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet } // IsRollingUpdate returns true if the strategy type is a rolling update. -func IsRollingUpdate(deployment *extensions.Deployment) bool { - return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType +func IsRollingUpdate(deployment *apps.Deployment) bool { + return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType } // DeploymentComplete considers a deployment to be complete once all of its desired replicas // are updated and available, and no old pods are running. -func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { +func DeploymentComplete(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) && newStatus.Replicas == *(deployment.Spec.Replicas) && newStatus.AvailableReplicas == *(deployment.Spec.Replicas) && @@ -752,7 +753,7 @@ func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions // current with the new status of the deployment that the controller is observing. More specifically, // when new pods are scaled up or become ready or available, or old pods are scaled down, then we // consider the deployment is progressing. 
-func DeploymentProgressing(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { +func DeploymentProgressing(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { oldStatus := deployment.Status // Old replicas that need to be scaled down @@ -771,7 +772,7 @@ var nowFn = func() time.Time { return time.Now() } // DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress // is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already // exists. -func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { +func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool { if deployment.Spec.ProgressDeadlineSeconds == nil { return false } @@ -779,7 +780,7 @@ func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions // Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress. // If it's already set with a TimedOutReason reason, we have already timed out, no need to check // again. - condition := GetDeploymentCondition(*newStatus, extensions.DeploymentProgressing) + condition := GetDeploymentCondition(*newStatus, apps.DeploymentProgressing) if condition == nil { return false } @@ -817,9 +818,9 @@ func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions // When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. // 1) The new RS is saturated: newRS's replicas == deployment's replicas // 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas -func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) { +func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) (int32, error) { switch deployment.Spec.Strategy.Type { - case extensions.RollingUpdateDeploymentStrategyType: + case apps.RollingUpdateDeploymentStrategyType: // Check if we can scale up. maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true) if err != nil { @@ -837,7 +838,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re // Do not exceed the number of desired replicas. scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas)))) return *(newRS.Spec.Replicas) + scaleUpCount, nil - case extensions.RecreateDeploymentStrategyType: + case apps.RecreateDeploymentStrategyType: return *(deployment.Spec.Replicas), nil default: return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) @@ -848,7 +849,7 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re // Both the deployment and the replica set have to believe this replica set can own all of the desired // replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet // need to be available. 
-func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) bool { +func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool { if rs == nil { return false } @@ -864,7 +865,7 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b // WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. // Returns error if polling timesout. -func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { +func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { // TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface. return wait.PollImmediate(interval, timeout, func() (bool, error) { deployment, err := getDeploymentFunc() diff --git a/pkg/controller/deployment/util/deployment_util_test.go b/pkg/controller/deployment/util/deployment_util_test.go index 0f71f8ae763..1d90e848d0d 100644 --- a/pkg/controller/deployment/util/deployment_util_test.go +++ b/pkg/controller/deployment/util/deployment_util_test.go @@ -25,8 +25,8 @@ import ( "testing" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -53,7 +53,7 @@ func addListPodsReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Cl } func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { - rsList, ok := obj.(*extensions.ReplicaSetList) + rsList, ok := obj.(*apps.ReplicaSetList) fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { name := action.(core.GetAction).GetName() if ok { @@ -71,7 +71,7 @@ func addGetRSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clien func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset { fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - obj := action.(core.UpdateAction).GetObject().(*extensions.ReplicaSet) + obj := action.(core.UpdateAction).GetObject().(*apps.ReplicaSet) return true, obj, nil }) return fakeClient @@ -85,13 +85,13 @@ func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset { return fakeClient } -func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet { - return extensions.ReplicaSet{ +func generateRSWithLabel(labels map[string]string, image string) apps.ReplicaSet { + return apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: names.SimpleNameGenerator.GenerateName("replicaset"), Labels: labels, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: func(i int32) *int32 { return &i }(1), Selector: &metav1.LabelSelector{MatchLabels: labels}, Template: v1.PodTemplateSpec{ @@ -113,10 +113,10 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl } } -func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference { +func newDControllerRef(d *apps.Deployment) *metav1.OwnerReference { isController := true return &metav1.OwnerReference{ - APIVersion: "extensions/v1beta1", + APIVersion: "apps/v1", Kind: 
"Deployment", Name: d.GetName(), UID: d.GetUID(), @@ -125,16 +125,16 @@ func newDControllerRef(d *extensions.Deployment) *metav1.OwnerReference { } // generateRS creates a replica set, with the input deployment's template as its template -func generateRS(deployment extensions.Deployment) extensions.ReplicaSet { +func generateRS(deployment apps.Deployment) apps.ReplicaSet { template := deployment.Spec.Template.DeepCopy() - return extensions.ReplicaSet{ + return apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ UID: randomUID(), Name: names.SimpleNameGenerator.GenerateName("replicaset"), Labels: template.Labels, OwnerReferences: []metav1.OwnerReference{*newDControllerRef(&deployment)}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Replicas: new(int32), Template: *template, Selector: &metav1.LabelSelector{MatchLabels: template.Labels}, @@ -147,15 +147,15 @@ func randomUID() types.UID { } // generateDeployment creates a deployment, with the input image as its template -func generateDeployment(image string) extensions.Deployment { +func generateDeployment(image string) apps.Deployment { podLabels := map[string]string{"name": image} terminationSec := int64(30) - return extensions.Deployment{ + return apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: image, Annotations: make(map[string]string), }, - Spec: extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ Replicas: func(i int32) *int32 { return &i }(1), Selector: &metav1.LabelSelector{MatchLabels: podLabels}, Template: v1.PodTemplateSpec{ @@ -188,14 +188,14 @@ func TestGetNewRS(t *testing.T) { tests := []struct { Name string objs []runtime.Object - expected *extensions.ReplicaSet + expected *apps.ReplicaSet }{ { "No new ReplicaSet", []runtime.Object{ &v1.PodList{}, - &extensions.ReplicaSetList{ - Items: []extensions.ReplicaSet{ + &apps.ReplicaSetList{ + Items: []apps.ReplicaSet{ generateRS(generateDeployment("foo")), generateRS(generateDeployment("bar")), }, @@ -207,8 +207,8 @@ func TestGetNewRS(t *testing.T) { "Has new ReplicaSet", []runtime.Object{ &v1.PodList{}, - &extensions.ReplicaSetList{ - Items: []extensions.ReplicaSet{ + &apps.ReplicaSetList{ + Items: []apps.ReplicaSet{ generateRS(generateDeployment("foo")), generateRS(generateDeployment("bar")), generateRS(generateDeployment("abc")), @@ -228,7 +228,7 @@ func TestGetNewRS(t *testing.T) { fakeClient = addListRSReactor(fakeClient, test.objs[1]) fakeClient = addUpdatePodsReactor(fakeClient) fakeClient = addUpdateRSReactor(fakeClient) - rs, err := GetNewReplicaSet(&newDeployment, fakeClient.ExtensionsV1beta1()) + rs, err := GetNewReplicaSet(&newDeployment, fakeClient.AppsV1()) if err != nil { t.Errorf("In test case %s, got unexpected error %v", test.Name, err) } @@ -262,13 +262,13 @@ func TestGetOldRSs(t *testing.T) { tests := []struct { Name string objs []runtime.Object - expected []*extensions.ReplicaSet + expected []*apps.ReplicaSet }{ { "No old ReplicaSets", []runtime.Object{ - &extensions.ReplicaSetList{ - Items: []extensions.ReplicaSet{ + &apps.ReplicaSetList{ + Items: []apps.ReplicaSet{ generateRS(generateDeployment("foo")), newRS, generateRS(generateDeployment("bar")), @@ -280,8 +280,8 @@ func TestGetOldRSs(t *testing.T) { { "Has old ReplicaSet", []runtime.Object{ - &extensions.ReplicaSetList{ - Items: []extensions.ReplicaSet{ + &apps.ReplicaSetList{ + Items: []apps.ReplicaSet{ oldRS2, oldRS, existedRS, @@ -291,7 +291,7 @@ func TestGetOldRSs(t *testing.T) { }, }, }, - []*extensions.ReplicaSet{&oldRS, &oldRS2}, + []*apps.ReplicaSet{&oldRS, &oldRS2}, }, } @@ 
-301,7 +301,7 @@ func TestGetOldRSs(t *testing.T) { fakeClient = addListRSReactor(fakeClient, test.objs[0]) fakeClient = addGetRSReactor(fakeClient, test.objs[0]) fakeClient = addUpdateRSReactor(fakeClient) - _, rss, err := GetOldReplicaSets(&newDeployment, fakeClient.ExtensionsV1beta1()) + _, rss, err := GetOldReplicaSets(&newDeployment, fakeClient.AppsV1()) if err != nil { t.Errorf("In test case %s, got unexpected error %v", test.Name, err) } @@ -340,56 +340,56 @@ func TestEqualIgnoreHash(t *testing.T) { }{ { "Same spec, same labels", - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), true, }, { "Same spec, only pod-template-hash label value is different", - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), true, }, { "Same spec, the former doesn't have pod-template-hash label", generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), true, }, { "Same spec, the label is different, the former doesn't have pod-template-hash label, same number of labels", generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2"}), false, }, { "Same spec, the label is different, the latter doesn't have pod-template-hash label, same number of labels", - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1"}), generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), false, }, { "Same spec, the label is different, and the pod-template-hash label value is the same", - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, 
map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), false, }, { "Different spec, same labels", - generatePodTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), - generatePodTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), false, }, { "Different spec, different pod-template-hash label value", - generatePodTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), - generatePodTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), + generatePodTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generatePodTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), false, }, { "Different spec, the former doesn't have pod-template-hash label", generatePodTemplateSpec("foo-1", "foo-node-1", map[string]string{}, map[string]string{"something": "else"}), - generatePodTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{extensions.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), + generatePodTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{apps.DefaultDeploymentUniqueLabelKey: "value-2", "something": "else"}), false, }, { @@ -431,11 +431,11 @@ func TestFindNewReplicaSet(t *testing.T) { deployment := generateDeployment("nginx") newRS := generateRS(deployment) - newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "hash" + newRS.Labels[apps.DefaultDeploymentUniqueLabelKey] = "hash" newRS.CreationTimestamp = later newRSDup := generateRS(deployment) - newRSDup.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "different-hash" + newRSDup.Labels[apps.DefaultDeploymentUniqueLabelKey] = "different-hash" newRSDup.CreationTimestamp = now oldDeployment := generateDeployment("nginx") @@ -445,26 +445,26 @@ func TestFindNewReplicaSet(t *testing.T) { tests := []struct { Name string - deployment extensions.Deployment - rsList []*extensions.ReplicaSet - expected *extensions.ReplicaSet + deployment apps.Deployment + rsList []*apps.ReplicaSet + expected *apps.ReplicaSet }{ { Name: "Get new ReplicaSet with the same template as Deployment spec but different pod-template-hash value", deployment: deployment, - rsList: []*extensions.ReplicaSet{&newRS, 
&oldRS}, + rsList: []*apps.ReplicaSet{&newRS, &oldRS}, expected: &newRS, }, { Name: "Get the oldest new ReplicaSet when there are more than one ReplicaSet with the same template", deployment: deployment, - rsList: []*extensions.ReplicaSet{&newRS, &oldRS, &newRSDup}, + rsList: []*apps.ReplicaSet{&newRS, &oldRS, &newRSDup}, expected: &newRSDup, }, { Name: "Get nil new ReplicaSet", deployment: deployment, - rsList: []*extensions.ReplicaSet{&oldRS}, + rsList: []*apps.ReplicaSet{&oldRS}, expected: nil, }, } @@ -486,11 +486,11 @@ func TestFindOldReplicaSets(t *testing.T) { deployment := generateDeployment("nginx") newRS := generateRS(deployment) *(newRS.Spec.Replicas) = 1 - newRS.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "hash" + newRS.Labels[apps.DefaultDeploymentUniqueLabelKey] = "hash" newRS.CreationTimestamp = later newRSDup := generateRS(deployment) - newRSDup.Labels[extensions.DefaultDeploymentUniqueLabelKey] = "different-hash" + newRSDup.Labels[apps.DefaultDeploymentUniqueLabelKey] = "different-hash" newRSDup.CreationTimestamp = now oldDeployment := generateDeployment("nginx") @@ -501,37 +501,37 @@ func TestFindOldReplicaSets(t *testing.T) { tests := []struct { Name string - deployment extensions.Deployment - rsList []*extensions.ReplicaSet + deployment apps.Deployment + rsList []*apps.ReplicaSet podList *v1.PodList - expected []*extensions.ReplicaSet - expectedRequire []*extensions.ReplicaSet + expected []*apps.ReplicaSet + expectedRequire []*apps.ReplicaSet }{ { Name: "Get old ReplicaSets", deployment: deployment, - rsList: []*extensions.ReplicaSet{&newRS, &oldRS}, - expected: []*extensions.ReplicaSet{&oldRS}, + rsList: []*apps.ReplicaSet{&newRS, &oldRS}, + expected: []*apps.ReplicaSet{&oldRS}, expectedRequire: nil, }, { Name: "Get old ReplicaSets with no new ReplicaSet", deployment: deployment, - rsList: []*extensions.ReplicaSet{&oldRS}, - expected: []*extensions.ReplicaSet{&oldRS}, + rsList: []*apps.ReplicaSet{&oldRS}, + expected: []*apps.ReplicaSet{&oldRS}, expectedRequire: nil, }, { Name: "Get old ReplicaSets with two new ReplicaSets, only the oldest new ReplicaSet is seen as new ReplicaSet", deployment: deployment, - rsList: []*extensions.ReplicaSet{&oldRS, &newRS, &newRSDup}, - expected: []*extensions.ReplicaSet{&oldRS, &newRS}, - expectedRequire: []*extensions.ReplicaSet{&newRS}, + rsList: []*apps.ReplicaSet{&oldRS, &newRS, &newRSDup}, + expected: []*apps.ReplicaSet{&oldRS, &newRS}, + expectedRequire: []*apps.ReplicaSet{&newRS}, }, { Name: "Get empty old ReplicaSets", deployment: deployment, - rsList: []*extensions.ReplicaSet{&newRS}, + rsList: []*apps.ReplicaSet{&newRS}, expected: nil, expectedRequire: nil, }, @@ -554,7 +554,7 @@ func TestFindOldReplicaSets(t *testing.T) { } // equal compares the equality of two ReplicaSet slices regardless of their ordering -func equal(rss1, rss2 []*extensions.ReplicaSet) bool { +func equal(rss1, rss2 []*apps.ReplicaSet) bool { if reflect.DeepEqual(rss1, rss2) { return true } @@ -583,19 +583,19 @@ func TestGetReplicaCountForReplicaSets(t *testing.T) { tests := []struct { Name string - sets []*extensions.ReplicaSet + sets []*apps.ReplicaSet expectedCount int32 expectedActual int32 }{ { "1:2 Replicas", - []*extensions.ReplicaSet{&rs1}, + []*apps.ReplicaSet{&rs1}, 1, 2, }, { "3:5 Replicas", - []*extensions.ReplicaSet{&rs1, &rs2}, + []*apps.ReplicaSet{&rs1, &rs2}, 3, 5, }, @@ -679,7 +679,7 @@ func TestResolveFenceposts(t *testing.T) { func TestNewRSNewReplicas(t *testing.T) { tests := []struct { Name string - strategyType 
extensions.DeploymentStrategyType + strategyType apps.DeploymentStrategyType depReplicas int32 newRSReplicas int32 maxSurge int @@ -687,17 +687,17 @@ func TestNewRSNewReplicas(t *testing.T) { }{ { "can not scale up - to newRSReplicas", - extensions.RollingUpdateDeploymentStrategyType, + apps.RollingUpdateDeploymentStrategyType, 1, 5, 1, 5, }, { "scale up - to depReplicas", - extensions.RollingUpdateDeploymentStrategyType, + apps.RollingUpdateDeploymentStrategyType, 6, 2, 10, 6, }, { "recreate - to depReplicas", - extensions.RecreateDeploymentStrategyType, + apps.RecreateDeploymentStrategyType, 3, 1, 1, 3, }, } @@ -709,8 +709,8 @@ func TestNewRSNewReplicas(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { *(newDeployment.Spec.Replicas) = test.depReplicas - newDeployment.Spec.Strategy = extensions.DeploymentStrategy{Type: test.strategyType} - newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{ + newDeployment.Spec.Strategy = apps.DeploymentStrategy{Type: test.strategyType} + newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i) return &x @@ -721,7 +721,7 @@ func TestNewRSNewReplicas(t *testing.T) { }(test.maxSurge), } *(newRC.Spec.Replicas) = test.newRSReplicas - rs, err := NewRSNewReplicas(&newDeployment, []*extensions.ReplicaSet{&rs5}, &newRC) + rs, err := NewRSNewReplicas(&newDeployment, []*apps.ReplicaSet{&rs5}, &newRC) if err != nil { t.Errorf("In test case %s, got unexpected error %v", test.Name, err) } @@ -733,33 +733,33 @@ func TestNewRSNewReplicas(t *testing.T) { } var ( - condProgressing = func() extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + condProgressing = func() apps.DeploymentCondition { + return apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionFalse, Reason: "ForSomeReason", } } - condProgressing2 = func() extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentProgressing, + condProgressing2 = func() apps.DeploymentCondition { + return apps.DeploymentCondition{ + Type: apps.DeploymentProgressing, Status: v1.ConditionTrue, Reason: "BecauseItIs", } } - condAvailable = func() extensions.DeploymentCondition { - return extensions.DeploymentCondition{ - Type: extensions.DeploymentAvailable, + condAvailable = func() apps.DeploymentCondition { + return apps.DeploymentCondition{ + Type: apps.DeploymentAvailable, Status: v1.ConditionTrue, Reason: "AwesomeController", } } - status = func() *extensions.DeploymentStatus { - return &extensions.DeploymentStatus{ - Conditions: []extensions.DeploymentCondition{condProgressing(), condAvailable()}, + status = func() *apps.DeploymentStatus { + return &apps.DeploymentStatus{ + Conditions: []apps.DeploymentCondition{condProgressing(), condAvailable()}, } } ) @@ -770,8 +770,8 @@ func TestGetCondition(t *testing.T) { tests := []struct { name string - status extensions.DeploymentStatus - condType extensions.DeploymentConditionType + status apps.DeploymentStatus + condType apps.DeploymentConditionType expected bool }{ @@ -779,7 +779,7 @@ func TestGetCondition(t *testing.T) { name: "condition exists", status: *exampleStatus, - condType: extensions.DeploymentAvailable, + condType: apps.DeploymentAvailable, expected: true, }, @@ -787,7 +787,7 @@ func TestGetCondition(t *testing.T) { name: "condition does not exist", status: *exampleStatus, - condType: 
extensions.DeploymentReplicaFailure, + condType: apps.DeploymentReplicaFailure, expected: false, }, @@ -808,23 +808,23 @@ func TestSetCondition(t *testing.T) { tests := []struct { name string - status *extensions.DeploymentStatus - cond extensions.DeploymentCondition + status *apps.DeploymentStatus + cond apps.DeploymentCondition - expectedStatus *extensions.DeploymentStatus + expectedStatus *apps.DeploymentStatus }{ { name: "set for the first time", - status: &extensions.DeploymentStatus{}, + status: &apps.DeploymentStatus{}, cond: condAvailable(), - expectedStatus: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condAvailable()}}, + expectedStatus: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condAvailable()}}, }, { name: "simple set", - status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}}, + status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}}, cond: condAvailable(), expectedStatus: status(), @@ -832,10 +832,10 @@ func TestSetCondition(t *testing.T) { { name: "overwrite", - status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}}, + status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}}, cond: condProgressing2(), - expectedStatus: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing2()}}, + expectedStatus: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing2()}}, }, } @@ -853,32 +853,32 @@ func TestRemoveCondition(t *testing.T) { tests := []struct { name string - status *extensions.DeploymentStatus - condType extensions.DeploymentConditionType + status *apps.DeploymentStatus + condType apps.DeploymentConditionType - expectedStatus *extensions.DeploymentStatus + expectedStatus *apps.DeploymentStatus }{ { name: "remove from empty status", - status: &extensions.DeploymentStatus{}, - condType: extensions.DeploymentProgressing, + status: &apps.DeploymentStatus{}, + condType: apps.DeploymentProgressing, - expectedStatus: &extensions.DeploymentStatus{}, + expectedStatus: &apps.DeploymentStatus{}, }, { name: "simple remove", - status: &extensions.DeploymentStatus{Conditions: []extensions.DeploymentCondition{condProgressing()}}, - condType: extensions.DeploymentProgressing, + status: &apps.DeploymentStatus{Conditions: []apps.DeploymentCondition{condProgressing()}}, + condType: apps.DeploymentProgressing, - expectedStatus: &extensions.DeploymentStatus{}, + expectedStatus: &apps.DeploymentStatus{}, }, { name: "doesn't remove anything", status: status(), - condType: extensions.DeploymentReplicaFailure, + condType: apps.DeploymentReplicaFailure, expectedStatus: status(), }, @@ -895,19 +895,19 @@ func TestRemoveCondition(t *testing.T) { } func TestDeploymentComplete(t *testing.T) { - deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *extensions.Deployment { - return &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *apps.Deployment { + return &apps.Deployment{ + Spec: apps.DeploymentSpec{ Replicas: &desired, - Strategy: extensions.DeploymentStrategy{ - RollingUpdate: &extensions.RollingUpdateDeployment{ + Strategy: apps.DeploymentStrategy{ + RollingUpdate: &apps.RollingUpdateDeployment{ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)), 
MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)), }, - Type: extensions.RollingUpdateDeploymentStrategyType, + Type: apps.RollingUpdateDeploymentStrategyType, }, }, - Status: extensions.DeploymentStatus{ + Status: apps.DeploymentStatus{ Replicas: current, UpdatedReplicas: updated, AvailableReplicas: available, @@ -918,7 +918,7 @@ func TestDeploymentComplete(t *testing.T) { tests := []struct { name string - d *extensions.Deployment + d *apps.Deployment expected bool }{ @@ -972,9 +972,9 @@ func TestDeploymentComplete(t *testing.T) { } func TestDeploymentProgressing(t *testing.T) { - deployment := func(current, updated, ready, available int32) *extensions.Deployment { - return &extensions.Deployment{ - Status: extensions.DeploymentStatus{ + deployment := func(current, updated, ready, available int32) *apps.Deployment { + return &apps.Deployment{ + Status: apps.DeploymentStatus{ Replicas: current, UpdatedReplicas: updated, ReadyReplicas: ready, @@ -982,8 +982,8 @@ func TestDeploymentProgressing(t *testing.T) { }, } } - newStatus := func(current, updated, ready, available int32) extensions.DeploymentStatus { - return extensions.DeploymentStatus{ + newStatus := func(current, updated, ready, available int32) apps.DeploymentStatus { + return apps.DeploymentStatus{ Replicas: current, UpdatedReplicas: updated, ReadyReplicas: ready, @@ -994,8 +994,8 @@ func TestDeploymentProgressing(t *testing.T) { tests := []struct { name string - d *extensions.Deployment - newStatus extensions.DeploymentStatus + d *apps.Deployment + newStatus apps.DeploymentStatus expected bool }{ @@ -1075,13 +1075,13 @@ func TestDeploymentTimedOut(t *testing.T) { timeFn := func(min, sec int) time.Time { return time.Date(2016, 1, 1, 0, min, sec, 0, time.UTC) } - deployment := func(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason string, pds *int32, from time.Time) extensions.Deployment { - return extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + deployment := func(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason string, pds *int32, from time.Time) apps.Deployment { + return apps.Deployment{ + Spec: apps.DeploymentSpec{ ProgressDeadlineSeconds: pds, }, - Status: extensions.DeploymentStatus{ - Conditions: []extensions.DeploymentCondition{ + Status: apps.DeploymentStatus{ + Conditions: []apps.DeploymentCondition{ { Type: condType, Status: status, @@ -1096,7 +1096,7 @@ func TestDeploymentTimedOut(t *testing.T) { tests := []struct { name string - d extensions.Deployment + d apps.Deployment nowFn func() time.Time expected bool @@ -1104,28 +1104,28 @@ func TestDeploymentTimedOut(t *testing.T) { { name: "no progressDeadlineSeconds specified - no timeout", - d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)), + d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)), nowFn: func() time.Time { return timeFn(1, 20) }, expected: false, }, { name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s", - d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 9)), + d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 9)), nowFn: func() time.Time { return timeFn(1, 20) }, expected: true, }, { name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:11 => 9s", - d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 11)), + d: 
deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &ten, timeFn(1, 11)), nowFn: func() time.Time { return timeFn(1, 20) }, expected: false, }, { name: "previous status was a complete deployment", - d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, NewRSAvailableReason, nil, time.Time{}), + d: deployment(apps.DeploymentProgressing, v1.ConditionTrue, NewRSAvailableReason, nil, time.Time{}), expected: false, }, } @@ -1141,23 +1141,23 @@ func TestDeploymentTimedOut(t *testing.T) { } func TestMaxUnavailable(t *testing.T) { - deployment := func(replicas int32, maxUnavailable intstr.IntOrString) extensions.Deployment { - return extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + deployment := func(replicas int32, maxUnavailable intstr.IntOrString) apps.Deployment { + return apps.Deployment{ + Spec: apps.DeploymentSpec{ Replicas: func(i int32) *int32 { return &i }(replicas), - Strategy: extensions.DeploymentStrategy{ - RollingUpdate: &extensions.RollingUpdateDeployment{ + Strategy: apps.DeploymentStrategy{ + RollingUpdate: &apps.RollingUpdateDeployment{ MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)), MaxUnavailable: &maxUnavailable, }, - Type: extensions.RollingUpdateDeploymentStrategyType, + Type: apps.RollingUpdateDeploymentStrategyType, }, }, } } tests := []struct { name string - deployment extensions.Deployment + deployment apps.Deployment expected int32 }{ { @@ -1182,10 +1182,10 @@ func TestMaxUnavailable(t *testing.T) { }, { name: "maxUnavailable with Recreate deployment strategy", - deployment: extensions.Deployment{ - Spec: extensions.DeploymentSpec{ - Strategy: extensions.DeploymentStrategy{ - Type: extensions.RecreateDeploymentStrategyType, + deployment: apps.Deployment{ + Spec: apps.DeploymentSpec{ + Strategy: apps.DeploymentStrategy{ + Type: apps.RecreateDeploymentStrategyType, }, }, }, @@ -1285,14 +1285,14 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { tests := []struct { name string - replicaSet *extensions.ReplicaSet + replicaSet *apps.ReplicaSet expected bool }{ { name: "test Annotations nil", - replicaSet: &extensions.ReplicaSet{ + replicaSet: &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"}, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -1300,13 +1300,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test desiredReplicas update", - replicaSet: &extensions.ReplicaSet{ + replicaSet: &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "test", Annotations: map[string]string{DesiredReplicasAnnotation: "8", MaxReplicasAnnotation: maxReplicas}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -1314,13 +1314,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test maxReplicas update", - replicaSet: &extensions.ReplicaSet{ + replicaSet: &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "test", Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: "16"}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -1328,13 +1328,13 @@ func TestReplicasAnnotationsNeedUpdate(t *testing.T) { }, { name: "test needn't update", - replicaSet: 
&extensions.ReplicaSet{ + replicaSet: &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "test", Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: maxReplicas}, }, - Spec: extensions.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, diff --git a/pkg/controller/deployment/util/replicaset_util.go b/pkg/controller/deployment/util/replicaset_util.go index 83e68a0a209..bf05a59278a 100644 --- a/pkg/controller/deployment/util/replicaset_util.go +++ b/pkg/controller/deployment/util/replicaset_util.go @@ -19,21 +19,21 @@ package util import ( "github.com/golang/glog" - extensions "k8s.io/api/extensions/v1beta1" + apps "k8s.io/api/apps/v1" errorsutil "k8s.io/apimachinery/pkg/util/errors" - unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - extensionslisters "k8s.io/client-go/listers/extensions/v1beta1" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" + appslisters "k8s.io/client-go/listers/apps/v1" "k8s.io/client-go/util/retry" ) // TODO: use client library instead when it starts to support update retries // see https://github.com/kubernetes/kubernetes/issues/21479 -type updateRSFunc func(rs *extensions.ReplicaSet) error +type updateRSFunc func(rs *apps.ReplicaSet) error // UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored. // The returned bool value can be used to tell if the RS is actually updated. -func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rsLister extensionslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*extensions.ReplicaSet, error) { - var rs *extensions.ReplicaSet +func UpdateRSWithRetries(rsClient appsclient.ReplicaSetInterface, rsLister appslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*apps.ReplicaSet, error) { + var rs *apps.ReplicaSet retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error { var err error From 1c531fc970befe887f2ba26a29d7eb8577d59eb1 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Tue, 20 Mar 2018 09:45:19 -0700 Subject: [PATCH 104/307] kubectl: Use apps/v1 Deployment/ReplicaSet. This is necessary since kubectl shares code with the controllers, and the controllers have been updated to use apps/v1. 
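For most call sites in kubectl the change is mechanical: the imported API package and the typed client group move from extensions/v1beta1 to apps/v1 while the call shape stays the same. A minimal before/after sketch, assuming a caller that already holds a kubernetes.Interface named client (the variable, namespace, and object name are illustrative only, not taken from this patch):

    import (
        apps "k8s.io/api/apps/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // Before this patch: reads went through the extensions/v1beta1 typed client, e.g.
    //   d, err := client.ExtensionsV1beta1().Deployments("default").Get("web", metav1.GetOptions{})

    // After this patch: the same read, typed against apps/v1.
    func getDeployment(client kubernetes.Interface) (*apps.Deployment, error) {
        return client.AppsV1().Deployments("default").Get("web", metav1.GetOptions{})
    }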
--- pkg/kubectl/BUILD | 2 - pkg/kubectl/history.go | 6 +- pkg/kubectl/rollback.go | 10 +- pkg/kubectl/rollout_status.go | 22 +-- pkg/kubectl/rollout_status_test.go | 135 ++++++++---------- pkg/printers/internalversion/BUILD | 3 +- pkg/printers/internalversion/describe.go | 12 +- pkg/printers/internalversion/describe_test.go | 10 +- 8 files changed, 90 insertions(+), 110 deletions(-) diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index 0c667cb3a78..15a6901fc87 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -177,8 +177,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", diff --git a/pkg/kubectl/history.go b/pkg/kubectl/history.go index 9344e194ad8..4fcd4e526d7 100644 --- a/pkg/kubectl/history.go +++ b/pkg/kubectl/history.go @@ -102,12 +102,12 @@ type DeploymentHistoryViewer struct { // ViewHistory returns a revision-to-replicaset map as the revision history of a deployment // TODO: this should be a describer func (h *DeploymentHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { - versionedExtensionsClient := h.c.ExtensionsV1beta1() - deployment, err := versionedExtensionsClient.Deployments(namespace).Get(name, metav1.GetOptions{}) + versionedAppsClient := h.c.AppsV1() + deployment, err := versionedAppsClient.Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("failed to retrieve deployment %s: %v", name, err) } - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, versionedExtensionsClient) + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, versionedAppsClient) if err != nil { return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", name, err) } diff --git a/pkg/kubectl/rollback.go b/pkg/kubectl/rollback.go index 10ac083bebb..2d984d0d66e 100644 --- a/pkg/kubectl/rollback.go +++ b/pkg/kubectl/rollback.go @@ -26,7 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extv1beta1 "k8s.io/api/extensions/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -115,10 +115,10 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m if d.Spec.Paused { return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume deployment/%s' and try again", d.Name) } - deploymentRollback := &extv1beta1.DeploymentRollback{ + deploymentRollback := &extensionsv1beta1.DeploymentRollback{ Name: d.Name, UpdatedAnnotations: updatedAnnotations, - RollbackTo: extv1beta1.RollbackConfig{ + RollbackTo: extensionsv1beta1.RollbackConfig{ Revision: toRevision, }, } @@ -184,12 +184,12 @@ func isRollbackEvent(e *api.Event) (bool, string) { } func simpleDryRun(deployment *extensions.Deployment, c kubernetes.Interface, toRevision int64) (string, error) { - externalDeployment := &extv1beta1.Deployment{} + externalDeployment := &appsv1.Deployment{} if err := 
legacyscheme.Scheme.Convert(deployment, externalDeployment, nil); err != nil { return "", fmt.Errorf("failed to convert deployment, %v", err) } - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(externalDeployment, c.ExtensionsV1beta1()) + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(externalDeployment, c.AppsV1()) if err != nil { return "", fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err) } diff --git a/pkg/kubectl/rollout_status.go b/pkg/kubectl/rollout_status.go index 24fcdf568f5..b65b91fd4eb 100644 --- a/pkg/kubectl/rollout_status.go +++ b/pkg/kubectl/rollout_status.go @@ -19,12 +19,12 @@ package kubectl import ( "fmt" + appsv1 "k8s.io/api/apps/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes" - clientappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" - clientextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/controller/deployment/util" ) @@ -38,28 +38,28 @@ type StatusViewer interface { func StatusViewerFor(kind schema.GroupKind, c kubernetes.Interface) (StatusViewer, error) { switch kind { case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), apps.Kind("Deployment"): - return &DeploymentStatusViewer{c.ExtensionsV1beta1()}, nil + return &DeploymentStatusViewer{c.AppsV1()}, nil case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), apps.Kind("DaemonSet"): - return &DaemonSetStatusViewer{c.ExtensionsV1beta1()}, nil + return &DaemonSetStatusViewer{c.AppsV1()}, nil case apps.Kind("StatefulSet"): - return &StatefulSetStatusViewer{c.AppsV1beta1()}, nil + return &StatefulSetStatusViewer{c.AppsV1()}, nil } return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) } // DeploymentStatusViewer implements the StatusViewer interface. type DeploymentStatusViewer struct { - c clientextensionsv1beta1.DeploymentsGetter + c clientappsv1.DeploymentsGetter } // DaemonSetStatusViewer implements the StatusViewer interface. type DaemonSetStatusViewer struct { - c clientextensionsv1beta1.DaemonSetsGetter + c clientappsv1.DaemonSetsGetter } // StatefulSetStatusViewer implements the StatusViewer interface. type StatefulSetStatusViewer struct { - c clientappsv1beta1.StatefulSetsGetter + c clientappsv1.StatefulSetsGetter } // Status returns a message describing deployment status, and a bool value indicating if the status is considered done. 
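As a rough usage sketch of the viewer wiring above (the import aliases, clientset variable, and the "default"/"web" namespace and name are assumptions for illustration, not code from this patch), a caller resolves a viewer by GroupKind and asks it for rollout status; after this change the Deployment viewer is backed by the apps/v1 client:

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/kubernetes/pkg/kubectl"
    )

    func printDeploymentRollout(clientset kubernetes.Interface) error {
        // Resolve a status viewer for the apps/Deployment GroupKind.
        viewer, err := kubectl.StatusViewerFor(
            appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), clientset)
        if err != nil {
            return err
        }
        // Status returns a progress message and whether the rollout is considered done.
        // Revision 0 means "check the current rollout" rather than a specific revision.
        msg, done, err := viewer.Status("default", "web", 0)
        if err != nil {
            return err
        }
        fmt.Print(msg)
        if !done {
            return fmt.Errorf("rollout of deployment %q is still in progress", "web")
        }
        return nil
    }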
@@ -78,7 +78,7 @@ func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64) } } if deployment.Generation <= deployment.Status.ObservedGeneration { - cond := util.GetDeploymentCondition(deployment.Status, extensionsv1beta1.DeploymentProgressing) + cond := util.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing) if cond != nil && cond.Reason == util.TimedOutReason { return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", name) } @@ -104,7 +104,7 @@ func (s *DaemonSetStatusViewer) Status(namespace, name string, revision int64) ( if err != nil { return "", false, err } - if daemon.Spec.UpdateStrategy.Type != extensionsv1beta1.RollingUpdateDaemonSetStrategyType { + if daemon.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType { return "", true, fmt.Errorf("Status is available only for RollingUpdate strategy type") } if daemon.Generation <= daemon.Status.ObservedGeneration { @@ -128,7 +128,7 @@ func (s *StatefulSetStatusViewer) Status(namespace, name string, revision int64) if sts.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType { return "", true, fmt.Errorf("%s updateStrategy does not have a Status`", apps.OnDeleteStatefulSetStrategyType) } - if sts.Status.ObservedGeneration == nil || sts.Generation > *sts.Status.ObservedGeneration { + if sts.Status.ObservedGeneration == 0 || sts.Generation > sts.Status.ObservedGeneration { return "Waiting for statefulset spec update to be observed...\n", false, nil } if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas { diff --git a/pkg/kubectl/rollout_status_test.go b/pkg/kubectl/rollout_status_test.go index 38fd2348f6d..729709a7af3 100644 --- a/pkg/kubectl/rollout_status_test.go +++ b/pkg/kubectl/rollout_status_test.go @@ -20,9 +20,8 @@ import ( "fmt" "testing" - apps "k8s.io/api/apps/v1beta1" + apps "k8s.io/api/apps/v1" api "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" ) @@ -31,14 +30,14 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { tests := []struct { generation int64 specReplicas int32 - status extensions.DeploymentStatus + status apps.DeploymentStatus msg string done bool }{ { generation: 0, specReplicas: 1, - status: extensions.DeploymentStatus{ + status: apps.DeploymentStatus{ ObservedGeneration: 1, Replicas: 1, UpdatedReplicas: 0, @@ -52,7 +51,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { { generation: 1, specReplicas: 1, - status: extensions.DeploymentStatus{ + status: apps.DeploymentStatus{ ObservedGeneration: 1, Replicas: 2, UpdatedReplicas: 1, @@ -66,7 +65,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { { generation: 1, specReplicas: 2, - status: extensions.DeploymentStatus{ + status: apps.DeploymentStatus{ ObservedGeneration: 1, Replicas: 2, UpdatedReplicas: 2, @@ -80,7 +79,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { { generation: 1, specReplicas: 2, - status: extensions.DeploymentStatus{ + status: apps.DeploymentStatus{ ObservedGeneration: 1, Replicas: 2, UpdatedReplicas: 2, @@ -94,7 +93,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { { generation: 2, specReplicas: 2, - status: extensions.DeploymentStatus{ + status: apps.DeploymentStatus{ ObservedGeneration: 1, Replicas: 2, UpdatedReplicas: 2, @@ -108,19 +107,19 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { } for _, test := range tests { - d := &extensions.Deployment{ + d := &apps.Deployment{ ObjectMeta: 
metav1.ObjectMeta{ Namespace: "bar", Name: "foo", UID: "8764ae47-9092-11e4-8393-42010af018ff", Generation: test.generation, }, - Spec: extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ Replicas: &test.specReplicas, }, Status: test.status, } - client := fake.NewSimpleClientset(d).Extensions() + client := fake.NewSimpleClientset(d).Apps() dsv := &DeploymentStatusViewer{c: client} msg, done, err := dsv.Status("bar", "foo", 0) if err != nil { @@ -143,13 +142,13 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { func TestDaemonSetStatusViewerStatus(t *testing.T) { tests := []struct { generation int64 - status extensions.DaemonSetStatus + status apps.DaemonSetStatus msg string done bool }{ { generation: 0, - status: extensions.DaemonSetStatus{ + status: apps.DaemonSetStatus{ ObservedGeneration: 1, UpdatedNumberScheduled: 0, DesiredNumberScheduled: 1, @@ -161,7 +160,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) { }, { generation: 1, - status: extensions.DaemonSetStatus{ + status: apps.DaemonSetStatus{ ObservedGeneration: 1, UpdatedNumberScheduled: 2, DesiredNumberScheduled: 2, @@ -173,7 +172,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) { }, { generation: 1, - status: extensions.DaemonSetStatus{ + status: apps.DaemonSetStatus{ ObservedGeneration: 1, UpdatedNumberScheduled: 2, DesiredNumberScheduled: 2, @@ -185,7 +184,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) { }, { generation: 2, - status: extensions.DaemonSetStatus{ + status: apps.DaemonSetStatus{ ObservedGeneration: 1, UpdatedNumberScheduled: 2, DesiredNumberScheduled: 2, @@ -200,21 +199,21 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) { for i := range tests { test := tests[i] t.Logf("testing scenario %d", i) - d := &extensions.DaemonSet{ + d := &apps.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Namespace: "bar", Name: "foo", UID: "8764ae47-9092-11e4-8393-42010af018ff", Generation: test.generation, }, - Spec: extensions.DaemonSetSpec{ - UpdateStrategy: extensions.DaemonSetUpdateStrategy{ - Type: extensions.RollingUpdateDaemonSetStrategyType, + Spec: apps.DaemonSetSpec{ + UpdateStrategy: apps.DaemonSetUpdateStrategy{ + Type: apps.RollingUpdateDaemonSetStrategyType, }, }, Status: test.status, } - client := fake.NewSimpleClientset(d).Extensions() + client := fake.NewSimpleClientset(d).Apps() dsv := &DaemonSetStatusViewer{c: client} msg, done, err := dsv.Status("bar", "foo", 0) if err != nil { @@ -249,14 +248,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) { generation: 1, strategy: apps.StatefulSetUpdateStrategy{Type: apps.OnDeleteStatefulSetStrategyType}, status: apps.StatefulSetStatus{ - ObservedGeneration: func() *int64 { - generation := int64(1) - return &generation - }(), - Replicas: 0, - ReadyReplicas: 1, - CurrentReplicas: 0, - UpdatedReplicas: 0, + ObservedGeneration: 1, + Replicas: 0, + ReadyReplicas: 1, + CurrentReplicas: 0, + UpdatedReplicas: 0, }, msg: "", @@ -268,14 +264,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) { generation: 2, strategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}, status: apps.StatefulSetStatus{ - ObservedGeneration: func() *int64 { - generation := int64(1) - return &generation - }(), - Replicas: 3, - ReadyReplicas: 3, - CurrentReplicas: 3, - UpdatedReplicas: 0, + ObservedGeneration: 1, + Replicas: 3, + ReadyReplicas: 3, + CurrentReplicas: 3, + UpdatedReplicas: 0, }, msg: "Waiting for statefulset spec update to be observed...\n", @@ -287,14 +280,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) 
{ generation: 1, strategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}, status: apps.StatefulSetStatus{ - ObservedGeneration: func() *int64 { - generation := int64(2) - return &generation - }(), - Replicas: 3, - ReadyReplicas: 2, - CurrentReplicas: 3, - UpdatedReplicas: 0, + ObservedGeneration: 2, + Replicas: 3, + ReadyReplicas: 2, + CurrentReplicas: 3, + UpdatedReplicas: 0, }, msg: fmt.Sprintf("Waiting for %d pods to be ready...\n", 1), @@ -310,14 +300,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) { return &apps.RollingUpdateStatefulSetStrategy{Partition: &partition} }()}, status: apps.StatefulSetStatus{ - ObservedGeneration: func() *int64 { - generation := int64(2) - return &generation - }(), - Replicas: 3, - ReadyReplicas: 3, - CurrentReplicas: 2, - UpdatedReplicas: 1, + ObservedGeneration: 2, + Replicas: 3, + ReadyReplicas: 3, + CurrentReplicas: 2, + UpdatedReplicas: 1, }, msg: fmt.Sprintf("partitioned roll out complete: %d new pods have been updated...\n", 1), @@ -333,14 +320,11 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) { return &apps.RollingUpdateStatefulSetStrategy{Partition: &partition} }()}, status: apps.StatefulSetStatus{ - ObservedGeneration: func() *int64 { - generation := int64(2) - return &generation - }(), - Replicas: 3, - ReadyReplicas: 3, - CurrentReplicas: 3, - UpdatedReplicas: 0, + ObservedGeneration: 2, + Replicas: 3, + ReadyReplicas: 3, + CurrentReplicas: 3, + UpdatedReplicas: 0, }, msg: fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", 0, 1), @@ -352,16 +336,13 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) { generation: 1, strategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType}, status: apps.StatefulSetStatus{ - ObservedGeneration: func() *int64 { - generation := int64(2) - return &generation - }(), - Replicas: 3, - ReadyReplicas: 3, - CurrentReplicas: 3, - UpdatedReplicas: 3, - CurrentRevision: "foo", - UpdateRevision: "foo", + ObservedGeneration: 2, + Replicas: 3, + ReadyReplicas: 3, + CurrentReplicas: 3, + UpdatedReplicas: 3, + CurrentRevision: "foo", + UpdateRevision: "foo", }, msg: fmt.Sprintf("statefulset rolling update complete %d pods at revision %s...\n", 3, "foo"), @@ -375,7 +356,7 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) { s.Status = test.status s.Spec.UpdateStrategy = test.strategy s.Generation = test.generation - client := fake.NewSimpleClientset(s).AppsV1beta1() + client := fake.NewSimpleClientset(s).AppsV1() dsv := &StatefulSetStatusViewer{c: client} msg, done, err := dsv.Status(s.Namespace, s.Name, 0) if test.err && err == nil { @@ -394,19 +375,19 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) { } func TestDaemonSetStatusViewerStatusWithWrongUpdateStrategyType(t *testing.T) { - d := &extensions.DaemonSet{ + d := &apps.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Namespace: "bar", Name: "foo", UID: "8764ae47-9092-11e4-8393-42010af018ff", }, - Spec: extensions.DaemonSetSpec{ - UpdateStrategy: extensions.DaemonSetUpdateStrategy{ - Type: extensions.OnDeleteDaemonSetStrategyType, + Spec: apps.DaemonSetSpec{ + UpdateStrategy: apps.DaemonSetUpdateStrategy{ + Type: apps.OnDeleteDaemonSetStrategyType, }, }, } - client := fake.NewSimpleClientset(d).Extensions() + client := fake.NewSimpleClientset(d).Apps() dsv := &DaemonSetStatusViewer{c: client} msg, done, err := dsv.Status("bar", "foo", 0) errMsg := "Status is available only for RollingUpdate strategy type" diff --git 
a/pkg/printers/internalversion/BUILD b/pkg/printers/internalversion/BUILD index 4b7da526817..e28ee5404eb 100644 --- a/pkg/printers/internalversion/BUILD +++ b/pkg/printers/internalversion/BUILD @@ -32,8 +32,8 @@ go_test( "//pkg/printers:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -86,6 +86,7 @@ go_library( "//pkg/util/slice:go_default_library", "//vendor/github.com/fatih/camelcase:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/apps/v1beta1:go_default_library", "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index ca414aef62f..14b11b39e65 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -34,7 +34,7 @@ import ( "github.com/fatih/camelcase" - versionedextension "k8s.io/api/extensions/v1beta1" + appsv1 "k8s.io/api/apps/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -3084,7 +3084,7 @@ type DeploymentDescriber struct { } func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (string, error) { - d, err := dd.external.ExtensionsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{}) + d, err := dd.external.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { return "", err } @@ -3105,7 +3105,7 @@ func (dd *DeploymentDescriber) Describe(namespace, name string, describerSetting return describeDeployment(d, selector, internalDeployment, events, dd) } -func describeDeployment(d *versionedextension.Deployment, selector labels.Selector, internalDeployment *extensions.Deployment, events *api.EventList, dd *DeploymentDescriber) (string, error) { +func describeDeployment(d *appsv1.Deployment, selector labels.Selector, internalDeployment *extensions.Deployment, events *api.EventList, dd *DeploymentDescriber) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", d.ObjectMeta.Name) @@ -3129,10 +3129,10 @@ func describeDeployment(d *versionedextension.Deployment, selector labels.Select w.Write(LEVEL_1, "%v \t%v\t%v\n", c.Type, c.Status, c.Reason) } } - oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.external.ExtensionsV1beta1()) + oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd.external.AppsV1()) if err == nil { w.Write(LEVEL_0, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs)) - var newRSs []*versionedextension.ReplicaSet + var newRSs []*appsv1.ReplicaSet if newRS != nil { newRSs = append(newRSs, newRS) } @@ -3146,7 +3146,7 @@ func describeDeployment(d *versionedextension.Deployment, selector labels.Select }) } -func printReplicaSetsByLabels(matchingRSs []*versionedextension.ReplicaSet) string { +func printReplicaSetsByLabels(matchingRSs []*appsv1.ReplicaSet) string { // Format the matching ReplicaSets into 
strings. rsStrings := make([]string, 0, len(matchingRSs)) for _, rs := range matchingRSs { diff --git a/pkg/printers/internalversion/describe_test.go b/pkg/printers/internalversion/describe_test.go index f289b5ac668..37472877e19 100644 --- a/pkg/printers/internalversion/describe_test.go +++ b/pkg/printers/internalversion/describe_test.go @@ -25,8 +25,8 @@ import ( "testing" "time" + appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -1345,12 +1345,12 @@ func TestPersistentVolumeClaimDescriber(t *testing.T) { func TestDescribeDeployment(t *testing.T) { fake := fake.NewSimpleClientset() - versionedFake := versionedfake.NewSimpleClientset(&v1beta1.Deployment{ + versionedFake := versionedfake.NewSimpleClientset(&appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "foo", }, - Spec: v1beta1.DeploymentSpec{ + Spec: appsv1.DeploymentSpec{ Replicas: utilpointer.Int32Ptr(1), Selector: &metav1.LabelSelector{}, Template: v1.PodTemplateSpec{ @@ -1977,12 +1977,12 @@ func TestDescribeEvents(t *testing.T) { }, "DeploymentDescriber": &DeploymentDescriber{ fake.NewSimpleClientset(events), - versionedfake.NewSimpleClientset(&v1beta1.Deployment{ + versionedfake.NewSimpleClientset(&appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", Namespace: "foo", }, - Spec: v1beta1.DeploymentSpec{ + Spec: appsv1.DeploymentSpec{ Replicas: utilpointer.Int32Ptr(1), Selector: &metav1.LabelSelector{}, }, From a6a5190494e1f53fed22d61e27da4fe9d64e0fff Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Tue, 20 Mar 2018 10:06:17 -0700 Subject: [PATCH 105/307] test/e2e: Use apps/v1 Deployment/ReplicaSet. This must be done at the same time as the controller update, since they share code. 
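
For reference, a minimal sketch of the apps/v1 construction pattern these tests move to, assuming hypothetical helper names and test-scope variables (c, ns, replicas, podLabels, template) that stand in for the framework's own; the key behavioral difference is that apps/v1 requires Spec.Selector to be set explicitly, whereas extensions/v1beta1 defaulted it from the pod template labels:

    import (
        apps "k8s.io/api/apps/v1"
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // createSampleDeployment is a hypothetical helper mirroring the pattern used in this patch.
    func createSampleDeployment(c clientset.Interface, ns string, replicas int32,
        podLabels map[string]string, template v1.PodTemplateSpec) (*apps.Deployment, error) {
        d := &apps.Deployment{
            ObjectMeta: metav1.ObjectMeta{Name: "sample-deployment"},
            Spec: apps.DeploymentSpec{
                Replicas: &replicas,
                // apps/v1 requires an explicit selector; it must match the template labels.
                Selector: &metav1.LabelSelector{MatchLabels: podLabels},
                Strategy: apps.DeploymentStrategy{Type: apps.RollingUpdateDeploymentStrategyType},
                Template: template,
            },
        }
        // Create through the typed apps/v1 client instead of ExtensionsV1beta1().
        return c.AppsV1().Deployments(ns).Create(d)
    }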
--- test/e2e/apimachinery/BUILD | 1 + test/e2e/apimachinery/aggregator.go | 14 +- test/e2e/apimachinery/webhook.go | 14 +- test/e2e/apps/BUILD | 1 + test/e2e/apps/deployment.go | 149 +++++++++--------- test/e2e/apps/replica_set.go | 28 ---- test/e2e/auth/BUILD | 1 + test/e2e/auth/audit.go | 18 +-- test/e2e/framework/BUILD | 2 +- test/e2e/framework/deployment_util.go | 66 ++++---- test/e2e/framework/rs_util.go | 21 ++- test/e2e/storage/mounted_volume_resize.go | 6 +- test/e2e/storage/vsphere/BUILD | 2 +- .../vsphere/vsphere_volume_node_poweroff.go | 6 +- test/e2e/upgrades/apps/deployments.go | 16 +- test/utils/deployment.go | 70 ++++---- 16 files changed, 196 insertions(+), 219 deletions(-) diff --git a/test/e2e/apimachinery/BUILD b/test/e2e/apimachinery/BUILD index c965e28b0ac..8d2164afae9 100644 --- a/test/e2e/apimachinery/BUILD +++ b/test/e2e/apimachinery/BUILD @@ -39,6 +39,7 @@ go_library( "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/authorization/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/batch/v1beta1:go_default_library", diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index d7924800bed..bd3a674de0c 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -24,8 +24,8 @@ import ( "strings" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -87,7 +87,7 @@ func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientse // delete the APIService first to avoid causing discovery errors _ = aggrclient.ApiregistrationV1beta1().APIServices().Delete("v1alpha1.wardle.k8s.io", nil) - _ = client.ExtensionsV1beta1().Deployments(namespace).Delete("sample-apiserver", nil) + _ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver", nil) _ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil) _ = client.CoreV1().Services(namespace).Delete("sample-api", nil) _ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil) @@ -171,14 +171,14 @@ func TestSampleAPIServer(f *framework.Framework, image string) { Image: etcdImage, }, } - d := &extensions.Deployment{ + d := &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: deploymentName, }, - Spec: extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ Replicas: &replicas, - Strategy: extensions.DeploymentStrategy{ - Type: extensions.RollingUpdateDeploymentStrategyType, + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -192,7 +192,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) { }, }, } - deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d) + deployment, err := client.AppsV1().Deployments(namespace).Create(d) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, 
deploymentName, namespace) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 5dbda0239c3..2e3582a1afe 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -23,8 +23,8 @@ import ( "time" "k8s.io/api/admissionregistration/v1beta1" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -263,14 +263,14 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert Image: image, }, } - d := &extensions.Deployment{ + d := &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: deploymentName, }, - Spec: extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ Replicas: &replicas, - Strategy: extensions.DeploymentStrategy{ - Type: extensions.RollingUpdateDeploymentStrategyType, + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -284,7 +284,7 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert }, }, } - deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d) + deployment, err := client.AppsV1().Deployments(namespace).Create(d) framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace) By("Wait for the deployment to be ready") err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image) @@ -976,7 +976,7 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig func cleanWebhookTest(client clientset.Interface, namespaceName string) { _ = client.CoreV1().Services(namespaceName).Delete(serviceName, nil) - _ = client.ExtensionsV1beta1().Deployments(namespaceName).Delete(deploymentName, nil) + _ = client.AppsV1().Deployments(namespaceName).Delete(deploymentName, nil) _ = client.CoreV1().Secrets(namespaceName).Delete(secretName, nil) _ = client.RbacV1beta1().RoleBindings("kube-system").Delete(roleBindingName, nil) } diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index 66310531f3f..593b05aae77 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -25,6 +25,7 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index a881f4747d6..8ffd6bf74a0 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -25,6 +25,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -36,7 +37,7 @@ import ( "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" scaleclient "k8s.io/client-go/scale" - extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" + appsinternal "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/kubectl" @@ -51,7 +52,7 @@ const ( ) var ( - nilRs *extensions.ReplicaSet + nilRs *apps.ReplicaSet ) var _ = SIGDescribe("Deployment", func() { @@ -101,7 +102,7 @@ var _ = SIGDescribe("Deployment", func() { }) func failureTrap(c clientset.Interface, ns string) { - deployments, err := c.ExtensionsV1beta1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("Could not list Deployments in namespace %q: %v", ns, err) return @@ -110,7 +111,7 @@ func failureTrap(c clientset.Interface, ns string) { d := deployments.Items[i] framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d)) - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1()) + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1()) if err != nil { framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err) return @@ -127,7 +128,7 @@ func failureTrap(c clientset.Interface, ns string) { return } framework.Logf("Log out all the ReplicaSets if there is no deployment created") - rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err) return @@ -160,11 +161,11 @@ func newDeploymentRollback(name string, annotations map[string]string, revision } func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, scaleClient scaleclient.ScalesGetter, ns, deploymentName string) { - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) framework.Logf("Deleting deployment %s", deploymentName) - reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient, scaleClient) + reaper, err := kubectl.ReaperFor(appsinternal.Kind("Deployment"), internalClient, scaleClient) Expect(err).NotTo(HaveOccurred()) timeout := 1 * time.Minute @@ -172,14 +173,14 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte Expect(err).NotTo(HaveOccurred()) framework.Logf("Ensuring deployment %s was deleted", deploymentName) - _, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) + _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) Expect(err).To(HaveOccurred()) Expect(errors.IsNotFound(err)).To(BeTrue()) framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) Expect(err).NotTo(HaveOccurred()) options := 
metav1.ListOptions{LabelSelector: selector.String()} - rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options) + rss, err := c.AppsV1().ReplicaSets(ns).List(options) Expect(err).NotTo(HaveOccurred()) Expect(rss.Items).Should(HaveLen(0)) framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) @@ -208,9 +209,9 @@ func testDeleteDeployment(f *framework.Framework) { podLabels := map[string]string{"name": NginxImageName} replicas := int32(1) framework.Logf("Creating simple deployment %s", deploymentName) - d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType) + d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} - deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d) + deploy, err := c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) // Wait for it to be updated to revision 1 @@ -220,9 +221,9 @@ func testDeleteDeployment(f *framework.Framework) { err = framework.WaitForDeploymentComplete(c, deploy) Expect(err).NotTo(HaveOccurred()) - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) + newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) Expect(err).NotTo(HaveOccurred()) Expect(newRS).NotTo(Equal(nilRs)) stopDeployment(c, internalClient, f.ScalesGetter, ns, deploymentName) @@ -243,10 +244,10 @@ func testRollingUpdateDeployment(f *framework.Framework) { rsRevision := "3546343826724305832" annotations := make(map[string]string) annotations[deploymentutil.RevisionAnnotation] = rsRevision - rs := newExtensionsRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage) + rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage) rs.Annotations = annotations framework.Logf("Creating replica set %q (going to be adopted)", rs.Name) - _, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs) + _, err := c.AppsV1().ReplicaSets(ns).Create(rs) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) @@ -255,8 +256,8 @@ func testRollingUpdateDeployment(f *framework.Framework) { // Create a deployment to delete nginx pods and instead bring up redis pods. deploymentName := "test-rolling-update-deployment" framework.Logf("Creating deployment %q", deploymentName) - d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType) - deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d) + d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType) + deploy, err := c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) // Wait for it to be updated to revision 3546343826724305833. 
@@ -270,9 +271,9 @@ func testRollingUpdateDeployment(f *framework.Framework) { // There should be 1 old RS (nginx-controller, which is adopted) framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name) - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1()) + _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1()) Expect(err).NotTo(HaveOccurred()) Expect(len(allOldRSs)).Should(Equal(1)) } @@ -284,8 +285,8 @@ func testRecreateDeployment(f *framework.Framework) { // Create a deployment that brings up redis pods. deploymentName := "test-recreate-deployment" framework.Logf("Creating deployment %q", deploymentName) - d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType) - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d) + d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType) + deployment, err := c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) // Wait for it to be updated to revision 1 @@ -298,7 +299,7 @@ func testRecreateDeployment(f *framework.Framework) { // Update deployment to delete redis pods and bring up nginx pods. framework.Logf("Triggering a new rollout for deployment %q", deploymentName) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Name = NginxImageName update.Spec.Template.Spec.Containers[0].Image = NginxImage }) @@ -321,7 +322,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { rsName := "test-cleanup-controller" replicas := int32(1) revisionHistoryLimit := utilpointer.Int32Ptr(0) - _, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newExtensionsRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)) + _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. 
@@ -368,9 +369,9 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { } } }() - d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType) + d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType) d.Spec.RevisionHistoryLimit = revisionHistoryLimit - _, err = c.ExtensionsV1beta1().Deployments(ns).Create(d) + _, err = c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName)) @@ -392,7 +393,7 @@ func testRolloverDeployment(f *framework.Framework) { rsName := "test-rollover-controller" rsReplicas := int32(1) - _, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newExtensionsRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)) + _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)) Expect(err).NotTo(HaveOccurred()) // Verify that the required pods have come up. err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas) @@ -407,19 +408,19 @@ func testRolloverDeployment(f *framework.Framework) { deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave" deploymentReplicas := int32(1) deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent" - deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType + deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType framework.Logf("Creating deployment %q", deploymentName) newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType) - newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{ + newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{ MaxUnavailable: intOrStrP(0), MaxSurge: intOrStrP(1), } newDeployment.Spec.MinReadySeconds = int32(10) - _, err = c.ExtensionsV1beta1().Deployments(ns).Create(newDeployment) + _, err = c.AppsV1().Deployments(ns).Create(newDeployment) Expect(err).NotTo(HaveOccurred()) // Verify that the pods were scaled up and down as expected. - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) framework.Logf("Make sure deployment %q performs scaling operations", deploymentName) // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1 @@ -430,17 +431,17 @@ func testRolloverDeployment(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) framework.Logf("Ensure that both replica sets have 1 created replica") - oldRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) + oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) ensureReplicas(oldRS, int32(1)) - newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) + newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) Expect(err).NotTo(HaveOccurred()) ensureReplicas(newRS, int32(1)) // The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods. 
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName) updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage }) @@ -461,16 +462,16 @@ func testRolloverDeployment(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) framework.Logf("Ensure that both old replica sets have no replicas") - oldRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) + oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) ensureReplicas(oldRS, int32(0)) // Not really the new replica set anymore but we GET by name so that's fine. - newRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{}) + newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) ensureReplicas(newRS, int32(0)) } -func ensureReplicas(rs *extensions.ReplicaSet, replicas int32) { +func ensureReplicas(rs *apps.ReplicaSet, replicas int32) { Expect(*rs.Spec.Replicas).Should(Equal(replicas)) Expect(rs.Status.Replicas).Should(Equal(replicas)) } @@ -490,12 +491,12 @@ func testRollbackDeployment(f *framework.Framework) { deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName deploymentReplicas := int32(1) deploymentImage := NginxImage - deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType + deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType framework.Logf("Creating deployment %s", deploymentName) d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType) createAnnotation := map[string]string{"action": "create", "author": "node"} d.Annotations = createAnnotation - deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d) + deploy, err := c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) // Wait for it to be updated to revision 1 @@ -513,7 +514,7 @@ func testRollbackDeployment(f *framework.Framework) { updatedDeploymentImage := RedisImage updatedDeploymentImageName := RedisImageName updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"} - deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { + deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage update.Annotations = updateAnnotation @@ -616,7 +617,7 @@ func testRollbackDeployment(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) } -func randomScale(d *extensions.Deployment, i int) { +func randomScale(d *apps.Deployment, i int) { switch r := rand.Float32(); { case r < 0.3: framework.Logf("%02d: scaling up", i) @@ -641,12 +642,12 @@ func testIterativeDeployments(f *framework.Framework) { // Create a nginx deployment. 
deploymentName := "nginx" thirty := int32(30) - d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType) + d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) d.Spec.ProgressDeadlineSeconds = &thirty d.Spec.RevisionHistoryLimit = &two d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero framework.Logf("Creating deployment %q", deploymentName) - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d) + deployment, err := c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) iterations := 20 @@ -659,7 +660,7 @@ func testIterativeDeployments(f *framework.Framework) { case n < 0.2: // trigger a new deployment framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)} update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv) randomScale(update, i) @@ -669,16 +670,18 @@ func testIterativeDeployments(f *framework.Framework) { case n < 0.4: // rollback to the previous version framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { - rollbackTo := &extensions.RollbackConfig{Revision: 0} - update.Spec.RollbackTo = rollbackTo + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { + if update.Annotations == nil { + update.Annotations = make(map[string]string) + } + update.Annotations[apps.DeprecatedRollbackTo] = "0" }) Expect(err).NotTo(HaveOccurred()) case n < 0.6: // just scaling framework.Logf("%02d: scaling deployment %q", i, deployment.Name) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { randomScale(update, i) }) Expect(err).NotTo(HaveOccurred()) @@ -687,14 +690,14 @@ func testIterativeDeployments(f *framework.Framework) { // toggling the deployment if deployment.Spec.Paused { framework.Logf("%02d: pausing deployment %q", i, deployment.Name) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = true randomScale(update, i) }) Expect(err).NotTo(HaveOccurred()) } else { framework.Logf("%02d: resuming deployment %q", i, deployment.Name) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = false randomScale(update, i) }) @@ -728,10 +731,10 @@ func testIterativeDeployments(f *framework.Framework) { } // unpause the deployment if we end up pausing it - deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) + 
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) if deployment.Spec.Paused { - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Paused = false }) } @@ -743,7 +746,7 @@ func testIterativeDeployments(f *framework.Framework) { Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred()) framework.Logf("Checking deployment %q for a complete condition", deploymentName) - Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred()) + Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(HaveOccurred()) } func testDeploymentsControllerRef(f *framework.Framework) { @@ -754,8 +757,8 @@ func testDeploymentsControllerRef(f *framework.Framework) { framework.Logf("Creating Deployment %q", deploymentName) podLabels := map[string]string{"name": NginxImageName} replicas := int32(1) - d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType) - deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d) + d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) + deploy, err := c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) err = framework.WaitForDeploymentComplete(c, deploy) Expect(err).NotTo(HaveOccurred()) @@ -781,8 +784,8 @@ func testDeploymentsControllerRef(f *framework.Framework) { deploymentName = "test-adopt-deployment" framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName) - d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType) - deploy, err = c.ExtensionsV1beta1().Deployments(ns).Create(d) + d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) + deploy, err = c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) err = framework.WaitForDeploymentComplete(c, deploy) Expect(err).NotTo(HaveOccurred()) @@ -811,13 +814,13 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Create a nginx deployment. 
deploymentName := "nginx-deployment" - d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType) - d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment) + d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType) + d.Spec.Strategy.RollingUpdate = new(apps.RollingUpdateDeployment) d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3) d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) framework.Logf("Creating deployment %q", deploymentName) - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d) + deployment, err := c.AppsV1().Deployments(ns).Create(d) Expect(err).NotTo(HaveOccurred()) framework.Logf("Waiting for observed generation %d", deployment.Generation) @@ -831,13 +834,13 @@ func testProportionalScalingDeployment(f *framework.Framework) { framework.Logf("Waiting for deployment %q to complete", deployment.Name) Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred()) - firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) + firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) Expect(err).NotTo(HaveOccurred()) // Update the deployment with a non-existent image so that the new replica set // will be blocked to simulate a partial rollout. framework.Logf("Updating deployment %q with a non-existent image", deploymentName) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = "nginx:404" }) Expect(err).NotTo(HaveOccurred()) @@ -860,13 +863,13 @@ func testProportionalScalingDeployment(f *framework.Framework) { // The desired replicas wait makes sure that the RS controller has created expected number of pods. framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) - firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) + firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), firstRS) + err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS) Expect(err).NotTo(HaveOccurred()) // Checking state of second rollout's replicaset. - secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) + secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) Expect(err).NotTo(HaveOccurred()) maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false) @@ -883,9 +886,9 @@ func testProportionalScalingDeployment(f *framework.Framework) { // The desired replicas wait makes sure that the RS controller has created expected number of pods. 
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) - secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) + secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), secondRS) + err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS) Expect(err).NotTo(HaveOccurred()) // Check the deployment's minimum availability. @@ -897,15 +900,15 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Scale the deployment to 30 replicas. newReplicas = int32(30) framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas }) Expect(err).NotTo(HaveOccurred()) framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) - firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) + firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) + secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas. 
@@ -943,18 +946,18 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m } } -func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList { +func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *apps.ReplicaSetList { selector := labels.Set(label).AsSelector() options := metav1.ListOptions{LabelSelector: selector.String()} - rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options) + rsList, err := c.AppsV1().ReplicaSets(ns).List(options) Expect(err).NotTo(HaveOccurred()) Expect(len(rsList.Items)).To(BeNumerically(">", 0)) return rsList } -func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment) error { +func orphanDeploymentReplicaSets(c clientset.Interface, d *apps.Deployment) error { trueVar := true deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar} deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID)) - return c.ExtensionsV1beta1().Deployments(d.Namespace).Delete(d.Name, deleteOptions) + return c.AppsV1().Deployments(d.Namespace).Delete(d.Name, deleteOptions) } diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 53b1075173e..a8c23ea7401 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -22,7 +22,6 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -66,33 +65,6 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa } } -// TODO(#55714): Remove this when Deployment tests use apps/v1 ReplicaSet. -func newExtensionsRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet { - zero := int64(0) - return &extensions.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: rsName, - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: &replicas, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: rsPodLabels, - }, - Spec: v1.PodSpec{ - TerminationGracePeriodSeconds: &zero, - Containers: []v1.Container{ - { - Name: imageName, - Image: image, - }, - }, - }, - }, - }, - } -} - func newPodQuota(name, number string) *v1.ResourceQuota { return &v1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e/auth/BUILD b/test/e2e/auth/BUILD index f16056db11f..9e52a84464e 100644 --- a/test/e2e/auth/BUILD +++ b/test/e2e/auth/BUILD @@ -29,6 +29,7 @@ go_library( "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/batch/v1:go_default_library", "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 80a4bbc2889..ceafb65762c 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -23,8 +23,8 @@ import ( "strings" "time" + apps "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apiextensionclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apiextensions-apiserver/test/integration/testserver" @@ -222,29 
+222,29 @@ var _ = SIGDescribe("Advanced Audit", func() { { func() { podLabels := map[string]string{"name": "audit-deployment-pod"} - d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), extensions.RecreateDeploymentStrategyType) + d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType) - _, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Create(d) + _, err := f.ClientSet.AppsV1().Deployments(namespace).Create(d) framework.ExpectNoError(err, "failed to create audit-deployment") - _, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Get(d.Name, metav1.GetOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(namespace).Get(d.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to get audit-deployment") - deploymentChan, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Watch(watchOptions) + deploymentChan, err := f.ClientSet.AppsV1().Deployments(namespace).Watch(watchOptions) framework.ExpectNoError(err, "failed to create watch for deployments") for range deploymentChan.ResultChan() { } - _, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Update(d) + _, err = f.ClientSet.AppsV1().Deployments(namespace).Update(d) framework.ExpectNoError(err, "failed to update audit-deployment") - _, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch) + _, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch) framework.ExpectNoError(err, "failed to patch deployment") - _, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).List(metav1.ListOptions{}) + _, err = f.ClientSet.AppsV1().Deployments(namespace).List(metav1.ListOptions{}) framework.ExpectNoError(err, "failed to create list deployments") - err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{}) + err = f.ClientSet.AppsV1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{}) framework.ExpectNoError(err, "failed to delete deployments") }, []auditEvent{ diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index f0b7207558f..49970164564 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -141,9 +141,9 @@ go_library( "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/restmapper:go_default_library", diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go index bd9b50c1bb9..89695d28d6c 100644 --- a/test/e2e/framework/deployment_util.go +++ b/test/e2e/framework/deployment_util.go @@ -22,36 +22,36 @@ import ( . 
"github.com/onsi/ginkgo" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" scaleclient "k8s.io/client-go/scale" - extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" + appsinternal "k8s.io/kubernetes/pkg/apis/apps" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" testutils "k8s.io/kubernetes/test/utils" ) -func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) { +func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) { return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout) } // Waits for the deployment to clean up old rcs. func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error { - var oldRSs []*extensions.ReplicaSet - var d *extensions.Deployment + var oldRSs []*apps.ReplicaSet + var d *apps.Deployment pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) { - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) if err != nil { return false, err } d = deployment - _, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1()) + _, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1()) if err != nil { return false, err } @@ -64,7 +64,7 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string return pollErr } -func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) { +func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) { testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf) } @@ -72,7 +72,7 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration) } -func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error { +func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error { return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, pollLongTimeout) } @@ -83,16 +83,16 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout) } -func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment { +func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType apps.DeploymentStrategyType) *apps.Deployment { zero := int64(0) - return &extensions.Deployment{ + return &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: deploymentName, }, - Spec: 
extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ Replicas: &replicas, Selector: &metav1.LabelSelector{MatchLabels: podLabels}, - Strategy: extensions.DeploymentStrategy{ + Strategy: apps.DeploymentStrategy{ Type: strategyType, }, Template: v1.PodTemplateSpec{ @@ -116,13 +116,13 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s // Waits for the deployment to complete, and don't check if rolling update strategy is broken. // Rolling update strategy is used only during a rolling update, and can be violated in other situations, // such as shortly after a scaling event or the deployment is just created. -func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment) error { +func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error { return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout) } // Waits for the deployment to complete, and check rolling update strategy isn't broken at any times. // Rolling update strategy should not be broken during a rolling update. -func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment) error { +func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error { return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout) } @@ -139,12 +139,12 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName // WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with // old pods. -func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error { - if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType { +func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error { + if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType { return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type) } - w, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion})) + w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion})) if err != nil { return err } @@ -152,12 +152,12 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er status := d.Status condition := func(event watch.Event) (bool, error) { - d := event.Object.(*extensions.Deployment) + d := event.Object.(*apps.Deployment) status = d.Status if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas { - _, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.ExtensionsV1beta1()) - newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.ExtensionsV1beta1()) + _, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1()) + newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1()) if err == nil && nerr == nil { Logf("%+v", d) logReplicaSetsOfDeployment(d, allOldRSs, newRS) @@ -179,7 +179,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er } func ScaleDeployment(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { - return ScaleResource(clientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments")) + return ScaleResource(clientset, scalesGetter, ns, name, size, wait, 
appsinternal.Kind("Deployment"), appsinternal.Resource("deployments")) } func RunDeployment(config testutils.DeploymentConfig) error { @@ -189,13 +189,13 @@ func RunDeployment(config testutils.DeploymentConfig) error { return testutils.RunDeployment(config) } -func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) { +func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) { testutils.LogPodsOfDeployment(c, deployment, rsList, Logf) } -func WaitForDeploymentRevision(c clientset.Interface, d *extensions.Deployment, targetRevision string) error { +func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error { err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) { - deployment, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -213,9 +213,9 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image) } -func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) { +func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*apps.Deployment, error) { deploymentSpec := MakeDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command) - deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(deploymentSpec) + deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec) if err != nil { return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) } @@ -229,18 +229,18 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[ // MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVC's // name. 
A slice of BASH commands can be supplied as args to be run by the pod -func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment { +func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment { if len(command) == 0 { command = "while true; do sleep 1; done" } zero := int64(0) deploymentName := "deployment-" + string(uuid.NewUUID()) - deploymentSpec := &extensions.Deployment{ + deploymentSpec := &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: deploymentName, Namespace: namespace, }, - Spec: extensions.DeploymentSpec{ + Spec: apps.DeploymentSpec{ Replicas: &replicas, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -280,8 +280,8 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma } // GetPodsForDeployment gets pods for the given deployment -func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Deployment) (*v1.PodList, error) { - replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.ExtensionsV1beta1()) +func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) { + replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1()) if err != nil { return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err) } @@ -291,7 +291,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { return client.CoreV1().Pods(namespace).List(options) } - rsList := []*extensions.ReplicaSet{replicaSet} + rsList := []*apps.ReplicaSet{replicaSet} podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc) if err != nil { return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err) diff --git a/test/e2e/framework/rs_util.go b/test/e2e/framework/rs_util.go index d8ae052a7ed..f206dc89fca 100644 --- a/test/e2e/framework/rs_util.go +++ b/test/e2e/framework/rs_util.go @@ -23,16 +23,15 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + appsclient "k8s.io/client-go/kubernetes/typed/apps/v1" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" testutils "k8s.io/kubernetes/test/utils" ) -type updateRsFunc func(d *extensions.ReplicaSet) +type updateRsFunc func(d *apps.ReplicaSet) func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) { return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout) @@ -40,11 +39,11 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, // CheckNewRSAnnotations check if the new RS's annotation is as expected func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error { - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + 
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) if err != nil { return err } - newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) + newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) if err != nil { return err } @@ -60,7 +59,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp // WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready. func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error { err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) { - rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{}) + rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{}) if err != nil { return false, err } @@ -73,7 +72,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error { } // WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas. -func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) error { +func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *apps.ReplicaSet) error { desiredGeneration := replicaSet.Generation err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) { rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) @@ -89,10 +88,10 @@ func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGette } // WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum -func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error { +func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error { desiredGeneration := replicaSet.Generation err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) { - rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) + rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -105,10 +104,10 @@ func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *exte } // WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum -func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error { +func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error { desiredGeneration := replicaSet.Generation err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) { - rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) + rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 895e561cfde..040ca9b3ffe 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -21,8 +21,8 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolume By("Creating a deployment with the provisioned volume") deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") - defer c.ExtensionsV1beta1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) + defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) By("Expanding current pvc") newSize := resource.MustParse("6Gi") @@ -152,7 +152,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolume }) }) -func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *extensions.Deployment) (v1.Pod, error) { +func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps.Deployment) (v1.Pod, error) { var runningPod v1.Pod waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) { podList, err := framework.GetPodsForDeployment(client, deployment) diff --git a/test/e2e/storage/vsphere/BUILD b/test/e2e/storage/vsphere/BUILD index 54b1afeabc7..a910c256eb1 100644 --- a/test/e2e/storage/vsphere/BUILD +++ b/test/e2e/storage/vsphere/BUILD @@ -53,8 +53,8 @@ go_library( "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index e9788b3ab2a..4edd5e3708f 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -26,8 +26,8 @@ import ( "github.com/vmware/govmomi/object" vimtypes "github.com/vmware/govmomi/vim25/types" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -140,7 +140,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", }) // Wait until the pod failed over to a different node, or time out after 3 minutes -func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode string) (string, error) { +func waitForPodToFailover(client clientset.Interface, deployment *apps.Deployment, oldNode string) (string, error) { var ( err error newNode string @@ -175,7 +175,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *extensions.Dep } // getNodeForDeployment returns node name for the Deployment -func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (string, error) { +func getNodeForDeployment(client clientset.Interface, deployment *apps.Deployment) (string, error) { podList, err := framework.GetPodsForDeployment(client, deployment) 
if err != nil { return "", err diff --git a/test/e2e/upgrades/apps/deployments.go b/test/e2e/upgrades/apps/deployments.go index 9063f16a97c..d31e3258f5f 100644 --- a/test/e2e/upgrades/apps/deployments.go +++ b/test/e2e/upgrades/apps/deployments.go @@ -19,7 +19,7 @@ package upgrades import ( "fmt" - extensions "k8s.io/api/extensions/v1beta1" + apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" @@ -54,11 +54,11 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim) ns := f.Namespace.Name - deploymentClient := c.ExtensionsV1beta1().Deployments(ns) - rsClient := c.ExtensionsV1beta1().ReplicaSets(ns) + deploymentClient := c.AppsV1().Deployments(ns) + rsClient := c.AppsV1().ReplicaSets(ns) By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns)) - d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, extensions.RollingUpdateDeploymentStrategyType) + d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType) deployment, err := deploymentClient.Create(d) framework.ExpectNoError(err) @@ -81,7 +81,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) { // Trigger a new rollout so that we have some history. By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName)) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) { + deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Name = "updated-name" }) framework.ExpectNoError(err) @@ -121,8 +121,8 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{ c := f.ClientSet ns := f.Namespace.Name - deploymentClient := c.ExtensionsV1beta1().Deployments(ns) - rsClient := c.ExtensionsV1beta1().ReplicaSets(ns) + deploymentClient := c.AppsV1().Deployments(ns) + rsClient := c.AppsV1().ReplicaSets(ns) deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -157,7 +157,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{ // Verify the upgraded deployment is active by scaling up the deployment by 1 By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName)) - _, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *extensions.Deployment) { + _, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) { *deployment.Spec.Replicas = *deployment.Spec.Replicas + 1 }) framework.ExpectNoError(err) diff --git a/test/utils/deployment.go b/test/utils/deployment.go index 995186dc164..323b927d061 100644 --- a/test/utils/deployment.go +++ b/test/utils/deployment.go @@ -22,8 +22,8 @@ import ( "github.com/davecgh/go-spew/spew" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -34,7 +34,7 @@ import ( type LogfFn func(format string, args ...interface{}) -func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs 
[]*extensions.ReplicaSet, newRS *extensions.ReplicaSet, logf LogfFn) { +func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, logf LogfFn) { if newRS != nil { logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS)) } else { @@ -48,7 +48,7 @@ func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []* } } -func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, logf LogfFn) { +func LogPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet, logf LogfFn) { minReadySeconds := deployment.Spec.MinReadySeconds podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { return c.CoreV1().Pods(namespace).List(options) @@ -72,15 +72,15 @@ func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deploymen // If during a rolling update (rolling == true), returns an error if the deployment's // rolling update strategy (max unavailable or max surge) is broken at any times. // It's not seen as a rolling update if shortly after a scaling event or the deployment is just created. -func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *extensions.Deployment, rolling bool, logf LogfFn, pollInterval, pollTimeout time.Duration) error { +func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *apps.Deployment, rolling bool, logf LogfFn, pollInterval, pollTimeout time.Duration) error { var ( - deployment *extensions.Deployment + deployment *apps.Deployment reason string ) err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - deployment, err = c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -114,9 +114,9 @@ func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *extens return nil } -func checkRollingUpdateStatus(c clientset.Interface, deployment *extensions.Deployment, logf LogfFn) (string, error) { +func checkRollingUpdateStatus(c clientset.Interface, deployment *apps.Deployment, logf LogfFn) (string, error) { var reason string - oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1()) + oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1()) if err != nil { return "", err } @@ -128,7 +128,7 @@ func checkRollingUpdateStatus(c clientset.Interface, deployment *extensions.Depl allRSs := append(oldRSs, newRS) // The old/new ReplicaSets need to contain the pod-template-hash label for i := range allRSs { - if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { + if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, apps.DefaultDeploymentUniqueLabelKey) { reason = "all replica sets need to contain the pod-template-hash label" return reason, nil } @@ -153,7 +153,7 @@ func checkRollingUpdateStatus(c clientset.Interface, deployment *extensions.Depl // Waits for the deployment to complete, and check rolling update strategy isn't broken at any times. // Rolling update strategy should not be broken during a rolling update. 
-func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error { +func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error { rolling := true return waitForDeploymentCompleteMaybeCheckRolling(c, d, rolling, logf, pollInterval, pollTimeout) } @@ -161,7 +161,7 @@ func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensio // Waits for the deployment to complete, and don't check if rolling update strategy is broken. // Rolling update strategy is used only during a rolling update, and can be violated in other situations, // such as shortly after a scaling event or the deployment is just created. -func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error { +func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error { rolling := false return waitForDeploymentCompleteMaybeCheckRolling(c, d, rolling, logf, pollInterval, pollTimeout) } @@ -169,17 +169,17 @@ func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment, // WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image. // Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early. func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string, logf LogfFn, pollInterval, pollTimeout time.Duration) error { - var deployment *extensions.Deployment - var newRS *extensions.ReplicaSet + var deployment *apps.Deployment + var newRS *apps.ReplicaSet var reason string err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err = c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) if err != nil { return false, err } // The new ReplicaSet needs to be non-nil and contain the pod-template-hash label - newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) + newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) if err != nil { return false, err } @@ -205,26 +205,26 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName // CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected. 
func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error { - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("unable to get deployment %s during revision check: %v", deploymentName, err) } // Check revision of the new replica set of this deployment - newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) + newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) if err != nil { return fmt.Errorf("unable to get new replicaset of deployment %s during revision check: %v", deploymentName, err) } return checkRevisionAndImage(deployment, newRS, revision, image) } -func checkRevisionAndImage(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, revision, image string) error { +func checkRevisionAndImage(deployment *apps.Deployment, newRS *apps.ReplicaSet, revision, image string) error { // The new ReplicaSet needs to be non-nil and contain the pod-template-hash label if newRS == nil { return fmt.Errorf("new replicaset for deployment %q is yet to be created", deployment.Name) } - if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { - return fmt.Errorf("new replica set %q doesn't have %q label selector", newRS.Name, extensions.DefaultDeploymentUniqueLabelKey) + if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey) { + return fmt.Errorf("new replica set %q doesn't have %q label selector", newRS.Name, apps.DefaultDeploymentUniqueLabelKey) } // Check revision of this deployment, and of the new replica set of this deployment if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision { @@ -252,19 +252,19 @@ func containsImage(containers []v1.Container, imageName string) bool { return false } -type UpdateDeploymentFunc func(d *extensions.Deployment) +type UpdateDeploymentFunc func(d *apps.Deployment) -func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.Deployment, error) { - var deployment *extensions.Deployment +func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateDeploymentFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*apps.Deployment, error) { + var deployment *apps.Deployment var updateErr error pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - if deployment, err = c.ExtensionsV1beta1().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil { + if deployment, err = c.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. 
applyUpdate(deployment) - if deployment, err = c.ExtensionsV1beta1().Deployments(namespace).Update(deployment); err == nil { + if deployment, err = c.AppsV1().Deployments(namespace).Update(deployment); err == nil { logf("Updating deployment %s", name) return true, nil } @@ -278,20 +278,20 @@ func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, } func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error { - return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { - return c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + return deploymentutil.WaitForObservedDeployment(func() (*apps.Deployment, error) { + return c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) }, desiredGeneration, 2*time.Second, 1*time.Minute) } // WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback. func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string, pollInterval, pollTimeout time.Duration) error { err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) if err != nil { return false, err } // Rollback not set or is kicked off - if deployment.Spec.RollbackTo == nil { + if deployment.Annotations[apps.DeprecatedRollbackTo] == "" { return true, nil } return false, nil @@ -304,9 +304,9 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName // WaitForDeploymentUpdatedReplicasGTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error { - var deployment *extensions.Deployment + var deployment *apps.Deployment err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - d, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) if err != nil { return false, err } @@ -319,10 +319,10 @@ func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentNa return nil } -func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType, logf LogfFn, pollInterval, pollTimeout time.Duration) error { - var deployment *extensions.Deployment +func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType, logf LogfFn, pollInterval, pollTimeout time.Duration) error { + var deployment *apps.Deployment pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - d, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + d, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) if err != nil { return false, err } @@ -332,7 +332,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r }) if pollErr == wait.ErrWaitTimeout { pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason, latest deployment conditions: %+v", 
deployment.Name, deployment.Status.Conditions) - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1()) + _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1()) if err == nil { LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf) LogPodsOfDeployment(c, deployment, append(allOldRSs, newRS), logf) From 680bc4b4b5d266c7c0346a0bd23918fa355c3644 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Tue, 20 Mar 2018 10:20:43 -0700 Subject: [PATCH 106/307] test/integration: Use apps/v1 Deployment/ReplicaSet. This must be done at the same time as the controller update, since they share code. --- test/integration/deployment/BUILD | 3 +- .../integration/deployment/deployment_test.go | 169 ++++++++++-------- test/integration/deployment/util.go | 77 ++++---- test/utils/replicaset.go | 52 +----- 4 files changed, 143 insertions(+), 158 deletions(-) diff --git a/test/integration/deployment/BUILD b/test/integration/deployment/BUILD index fee88d65330..c4f0fed4e53 100644 --- a/test/integration/deployment/BUILD +++ b/test/integration/deployment/BUILD @@ -19,8 +19,8 @@ go_test( "//pkg/controller/deployment/util:go_default_library", "//pkg/util/pointer:go_default_library", "//test/integration/framework:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", @@ -41,6 +41,7 @@ go_library( "//pkg/util/metrics:go_default_library", "//test/integration/framework:go_default_library", "//test/utils:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/test/integration/deployment/deployment_test.go b/test/integration/deployment/deployment_test.go index 406259886bd..7f475660923 100644 --- a/test/integration/deployment/deployment_test.go +++ b/test/integration/deployment/deployment_test.go @@ -22,8 +22,8 @@ import ( "strings" "testing" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" @@ -47,7 +47,7 @@ func TestNewDeployment(t *testing.T) { tester.deployment.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -128,14 +128,14 @@ func TestDeploymentRollingUpdate(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)} tester.deployment.Spec.MinReadySeconds = 4 quarter := intstr.FromString("25%") - tester.deployment.Spec.Strategy.RollingUpdate = &v1beta1.RollingUpdateDeployment{ + tester.deployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{ MaxUnavailable: &quarter, MaxSurge: &quarter, } // Create a deployment. 
var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -152,7 +152,7 @@ func TestDeploymentRollingUpdate(t *testing.T) { if oriImage == image { t.Fatalf("bad test setup, deployment %s roll out with the same image", tester.deployment.Name) } - imageFn := func(update *v1beta1.Deployment) { + imageFn := func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = image } tester.deployment, err = tester.updateDeployment(imageFn) @@ -186,7 +186,7 @@ func TestDeploymentRollingUpdate(t *testing.T) { // 3. Roll over a deployment before the previous rolling update finishes. image = "dont-finish" - imageFn = func(update *v1beta1.Deployment) { + imageFn = func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = image } tester.deployment, err = tester.updateDeployment(imageFn) @@ -199,7 +199,7 @@ func TestDeploymentRollingUpdate(t *testing.T) { // We don't mark pods as ready so that rollout won't finish. // Before the rollout finishes, trigger another rollout. image = "rollover" - imageFn = func(update *v1beta1.Deployment) { + imageFn = func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = image } tester.deployment, err = tester.updateDeployment(imageFn) @@ -212,7 +212,7 @@ func TestDeploymentRollingUpdate(t *testing.T) { if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil { t.Fatal(err) } - _, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1()) + _, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.AppsV1()) if err != nil { t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err) } @@ -232,13 +232,18 @@ func TestDeploymentSelectorImmutability(t *testing.T) { defer framework.DeleteTestingNamespace(ns, s, t) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, int32(20))} - deploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + var err error + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { - t.Fatalf("failed to create extensions/v1beta1 deployment %s: %v", tester.deployment.Name, err) + t.Fatalf("failed to create apps/v1 deployment %s: %v", tester.deployment.Name, err) } // test to ensure extensions/v1beta1 selector is mutable newSelectorLabels := map[string]string{"name_extensions_v1beta1": "test_extensions_v1beta1"} + deploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("failed to get extensions/v1beta deployment %s: %v", name, err) + } deploymentExtensionsV1beta1.Spec.Selector.MatchLabels = newSelectorLabels deploymentExtensionsV1beta1.Spec.Template.Labels = newSelectorLabels updatedDeploymentExtensionsV1beta1, err := c.ExtensionsV1beta1().Deployments(ns.Name).Update(deploymentExtensionsV1beta1) @@ -283,6 +288,22 @@ func TestDeploymentSelectorImmutability(t *testing.T) { if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) { t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error()) } + + // test to ensure apps/v1 selector 
is immutable + deploymentAppsV1, err := c.AppsV1().Deployments(ns.Name).Get(updatedDeploymentAppsV1beta1.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("failed to get apps/v1 deployment %s: %v", updatedDeploymentAppsV1beta1.Name, err) + } + newSelectorLabels = map[string]string{"name_apps_v1": "test_apps_v1"} + deploymentAppsV1.Spec.Selector.MatchLabels = newSelectorLabels + deploymentAppsV1.Spec.Template.Labels = newSelectorLabels + _, err = c.AppsV1().Deployments(ns.Name).Update(deploymentAppsV1) + if err == nil { + t.Fatalf("failed to provide validation error when changing immutable selector when updating apps/v1 deployment %s", deploymentAppsV1.Name) + } + if !strings.Contains(err.Error(), expectedErrType) || !strings.Contains(err.Error(), expectedErrDetail) { + t.Errorf("error message does not match, expected type: %s, expected detail: %s, got: %s", expectedErrType, expectedErrDetail, err.Error()) + } } // Paused deployment should not start new rollout @@ -300,7 +321,7 @@ func TestPausedDeployment(t *testing.T) { tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -358,7 +379,7 @@ func TestPausedDeployment(t *testing.T) { // Update the deployment template newTGPS := int64(0) - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { update.Spec.Template.Spec.TerminationGracePeriodSeconds = &newTGPS }) if err != nil { @@ -375,7 +396,7 @@ func TestPausedDeployment(t *testing.T) { t.Fatal(err) } - _, allOldRs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1()) + _, allOldRs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.AppsV1()) if err != nil { t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err) } @@ -401,7 +422,7 @@ func TestScalePausedDeployment(t *testing.T) { tester.deployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -442,7 +463,7 @@ func TestScalePausedDeployment(t *testing.T) { // Scale the paused deployment. 
newReplicas := int32(10) - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas }) if err != nil { @@ -482,7 +503,7 @@ func TestDeploymentHashCollision(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)} var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -500,14 +521,14 @@ func TestDeploymentHashCollision(t *testing.T) { } // Mock a hash collision - newRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1()) + newRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1()) if err != nil { t.Fatalf("failed getting new replicaset of deployment %s: %v", tester.deployment.Name, err) } if newRS == nil { t.Fatalf("unable to find new replicaset of deployment %s", tester.deployment.Name) } - _, err = tester.updateReplicaSet(newRS.Name, func(update *v1beta1.ReplicaSet) { + _, err = tester.updateReplicaSet(newRS.Name, func(update *apps.ReplicaSet) { *update.Spec.Template.Spec.TerminationGracePeriodSeconds = int64(5) }) if err != nil { @@ -516,7 +537,7 @@ func TestDeploymentHashCollision(t *testing.T) { // Expect deployment collision counter to increment if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - d, err := c.ExtensionsV1beta1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{}) + d, err := c.AppsV1().Deployments(ns.Name).Get(tester.deployment.Name, metav1.GetOptions{}) if err != nil { return false, nil } @@ -546,7 +567,7 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) { rs.Annotations = make(map[string]string) rs.Annotations["make"] = "difference" rs.Spec.Template.Spec.Containers[0].Image = "different-image" - _, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Create(rs) + _, err := c.AppsV1().ReplicaSets(ns.Name).Create(rs) if err != nil { t.Fatalf("failed to create replicaset %s: %v", rsName, err) } @@ -554,9 +575,13 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) { replicas := int32(1) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)} oriImage := tester.deployment.Spec.Template.Spec.Containers[0].Image + // Set absolute rollout limits (defaults changed to percentages) + max := intstr.FromInt(1) + tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = &max + tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = &max // Create a deployment which have different template than the replica set created above. - if tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment); err != nil { + if tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment); err != nil { t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err) } @@ -595,7 +620,7 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) { // 2. Update the deployment to revision 2. 
updatedImage := "update" - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Name = updatedImage update.Spec.Template.Spec.Containers[0].Image = updatedImage }) @@ -644,10 +669,10 @@ func TestRollbackDeploymentRSNoRevision(t *testing.T) { } } -func checkRSHashLabels(rs *v1beta1.ReplicaSet) (string, error) { - hash := rs.Labels[v1beta1.DefaultDeploymentUniqueLabelKey] - selectorHash := rs.Spec.Selector.MatchLabels[v1beta1.DefaultDeploymentUniqueLabelKey] - templateLabelHash := rs.Spec.Template.Labels[v1beta1.DefaultDeploymentUniqueLabelKey] +func checkRSHashLabels(rs *apps.ReplicaSet) (string, error) { + hash := rs.Labels[apps.DefaultDeploymentUniqueLabelKey] + selectorHash := rs.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] + templateLabelHash := rs.Spec.Template.Labels[apps.DefaultDeploymentUniqueLabelKey] if hash != selectorHash || selectorHash != templateLabelHash { return "", fmt.Errorf("mismatching hash value found in replicaset %s: %#v", rs.Name, rs) @@ -665,7 +690,7 @@ func checkPodsHashLabel(pods *v1.PodList) (string, error) { } var hash string for _, pod := range pods.Items { - podHash := pod.Labels[v1beta1.DefaultDeploymentUniqueLabelKey] + podHash := pod.Labels[apps.DefaultDeploymentUniqueLabelKey] if len(podHash) == 0 { return "", fmt.Errorf("found pod %s missing pod-template-hash label: %#v", pod.Name, pods) } @@ -694,7 +719,7 @@ func TestFailedDeployment(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} tester.deployment.Spec.ProgressDeadlineSeconds = &three var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -712,7 +737,7 @@ func TestFailedDeployment(t *testing.T) { // Pods are not marked as Ready, therefore the deployment progress will eventually timeout after progressDeadlineSeconds has passed. // Wait for the deployment to have a progress timeout condition. - if err = tester.waitForDeploymentWithCondition(deploymentutil.TimedOutReason, v1beta1.DeploymentProgressing); err != nil { + if err = tester.waitForDeploymentWithCondition(deploymentutil.TimedOutReason, apps.DeploymentProgressing); err != nil { t.Fatal(err) } @@ -722,7 +747,7 @@ func TestFailedDeployment(t *testing.T) { } // Wait for the deployment to have a progress complete condition. 
- if err = tester.waitForDeploymentWithCondition(deploymentutil.NewRSAvailableReason, v1beta1.DeploymentProgressing); err != nil { + if err = tester.waitForDeploymentWithCondition(deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing); err != nil { t.Fatal(err) } } @@ -750,9 +775,9 @@ func TestOverlappingDeployments(t *testing.T) { // Create 2 deployments with overlapping selectors var err error - var rss []*v1beta1.ReplicaSet + var rss []*apps.ReplicaSet for _, tester := range testers { - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) dname := tester.deployment.Name if err != nil { t.Fatalf("failed to create deployment %q: %v", dname, err) @@ -784,7 +809,7 @@ func TestOverlappingDeployments(t *testing.T) { // Scale only the first deployment by 1 newReplicas := replicas + 1 - testers[0].deployment, err = testers[0].updateDeployment(func(update *v1beta1.Deployment) { + testers[0].deployment, err = testers[0].updateDeployment(func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas }) if err != nil { @@ -798,7 +823,7 @@ func TestOverlappingDeployments(t *testing.T) { // Verify replicaset of both deployments has updated number of replicas for i, tester := range testers { - rs, err := c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(rss[i].Name, metav1.GetOptions{}) + rs, err := c.AppsV1().ReplicaSets(ns.Name).Get(rss[i].Name, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get replicaset %q: %v", rss[i].Name, err) } @@ -828,7 +853,7 @@ func TestScaledRolloutDeployment(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)} tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3) tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2) - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %q: %v", name, err) } @@ -847,7 +872,7 @@ func TestScaledRolloutDeployment(t *testing.T) { // Update the deployment with another new image but do not mark the pods as ready to block new replicaset fakeImage2 := "fakeimage2" - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = fakeImage2 }) if err != nil { @@ -858,7 +883,7 @@ func TestScaledRolloutDeployment(t *testing.T) { } // Verify the deployment has minimum available replicas after 2nd rollout - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{}) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get deployment %q: %v", name, err) } @@ -868,7 +893,7 @@ func TestScaledRolloutDeployment(t *testing.T) { } // Wait for old replicaset of 1st rollout to have desired replicas - firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{}) + firstRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get replicaset %q: %v", firstRS.Name, err) } @@ -888,7 +913,7 @@ func TestScaledRolloutDeployment(t *testing.T) { // Scale up the deployment and update its image to another 
new image simultaneously (this time marks all pods as ready) newReplicas := int32(20) fakeImage3 := "fakeimage3" - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas update.Spec.Template.Spec.Containers[0].Image = fakeImage3 }) @@ -903,13 +928,13 @@ func TestScaledRolloutDeployment(t *testing.T) { } // Verify every replicaset has correct desiredReplicas annotation after 3rd rollout - thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1()) + thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1()) if err != nil { t.Fatalf("failed getting new revision 3 replicaset for deployment %q: %v", name, err) } - rss := []*v1beta1.ReplicaSet{firstRS, secondRS, thirdRS} + rss := []*apps.ReplicaSet{firstRS, secondRS, thirdRS} for _, curRS := range rss { - curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{}) + curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err) } @@ -924,7 +949,7 @@ func TestScaledRolloutDeployment(t *testing.T) { // Update the deployment with another new image but do not mark the pods as ready to block new replicaset fakeImage4 := "fakeimage4" - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { update.Spec.Template.Spec.Containers[0].Image = fakeImage4 }) if err != nil { @@ -935,7 +960,7 @@ func TestScaledRolloutDeployment(t *testing.T) { } // Verify the deployment has minimum available replicas after 4th rollout - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{}) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Get(name, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get deployment %q: %v", name, err) } @@ -945,7 +970,7 @@ func TestScaledRolloutDeployment(t *testing.T) { } // Wait for old replicaset of 3rd rollout to have desired replicas - thirdRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{}) + thirdRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get replicaset %q: %v", thirdRS.Name, err) } @@ -965,7 +990,7 @@ func TestScaledRolloutDeployment(t *testing.T) { // Scale down the deployment and update its image to another new image simultaneously (this time marks all pods as ready) newReplicas = int32(5) fakeImage5 := "fakeimage5" - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas update.Spec.Template.Spec.Containers[0].Image = fakeImage5 }) @@ -980,13 +1005,13 @@ func TestScaledRolloutDeployment(t *testing.T) { } // Verify every replicaset has correct desiredReplicas annotation after 5th rollout - fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1()) + fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1()) if err != nil { t.Fatalf("failed getting new revision 5 replicaset for deployment %q: %v", name, err) } - rss = []*v1beta1.ReplicaSet{thirdRS, fourthRS, fifthRS} + rss = []*apps.ReplicaSet{thirdRS, fourthRS, 
fifthRS} for _, curRS := range rss { - curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{}) + curRS, err = c.AppsV1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err) } @@ -1010,10 +1035,10 @@ func TestSpecReplicasChange(t *testing.T) { deploymentName := "deployment" replicas := int32(1) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} - tester.deployment.Spec.Strategy.Type = v1beta1.RecreateDeploymentStrategyType + tester.deployment.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType tester.deployment.Spec.Strategy.RollingUpdate = nil var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -1039,7 +1064,7 @@ func TestSpecReplicasChange(t *testing.T) { // Add a template annotation change to test deployment's status does update // without .spec.replicas change var oldGeneration int64 - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { oldGeneration = update.Generation update.Spec.RevisionHistoryLimit = pointer.Int32Ptr(4) }) @@ -1068,8 +1093,10 @@ func TestDeploymentAvailableCondition(t *testing.T) { tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} // Assign a high value to the deployment's minReadySeconds tester.deployment.Spec.MinReadySeconds = 3600 + // progressDeadlineSeconds must be greater than minReadySeconds + tester.deployment.Spec.ProgressDeadlineSeconds = pointer.Int32Ptr(7200) var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -1087,7 +1114,7 @@ func TestDeploymentAvailableCondition(t *testing.T) { } // Wait for the deployment to have MinimumReplicasUnavailable reason because the pods are not marked as ready - if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil { + if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, apps.DeploymentAvailable); err != nil { t.Fatal(err) } @@ -1107,7 +1134,7 @@ func TestDeploymentAvailableCondition(t *testing.T) { } // Wait for the deployment to still have MinimumReplicasUnavailable reason within minReadySeconds period - if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, v1beta1.DeploymentAvailable); err != nil { + if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasUnavailable, apps.DeploymentAvailable); err != nil { t.Fatal(err) } @@ -1117,7 +1144,7 @@ func TestDeploymentAvailableCondition(t *testing.T) { } // Update the deployment's minReadySeconds to a small value - tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) { + tester.deployment, err = tester.updateDeployment(func(update *apps.Deployment) { update.Spec.MinReadySeconds = 1 }) if err != nil { @@ -1130,7 +1157,7 @@ func TestDeploymentAvailableCondition(t 
*testing.T) { } // Wait for the deployment to have MinimumReplicasAvailable reason after minReadySeconds period - if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, v1beta1.DeploymentAvailable); err != nil { + if err = tester.waitForDeploymentWithCondition(deploymentutil.MinimumReplicasAvailable, apps.DeploymentAvailable); err != nil { t.Fatal(err) } @@ -1141,10 +1168,10 @@ func TestDeploymentAvailableCondition(t *testing.T) { } // Wait for deployment to automatically patch incorrect ControllerRef of RS -func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *v1beta1.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) { +func testRSControllerRefPatch(t *testing.T, tester *deploymentTester, rs *apps.ReplicaSet, ownerReference *metav1.OwnerReference, expectedOwnerReferenceNum int) { ns := rs.Namespace - rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns) - rs, err := tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) { + rsClient := tester.c.AppsV1().ReplicaSets(ns) + rs, err := tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) { update.OwnerReferences = []metav1.OwnerReference{*ownerReference} }) if err != nil { @@ -1186,7 +1213,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) { replicas := int32(1) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -1209,7 +1236,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) { } // Get replicaset of the deployment - rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1()) + rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1()) if err != nil { t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err) } @@ -1233,7 +1260,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) { func testScalingUsingScaleSubresource(t *testing.T, tester *deploymentTester, replicas int32) { ns := tester.deployment.Namespace deploymentName := tester.deployment.Name - deploymentClient := tester.c.ExtensionsV1beta1().Deployments(ns) + deploymentClient := tester.c.AppsV1().Deployments(ns) deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{}) if err != nil { t.Fatalf("Failed to obtain deployment %q: %v", deploymentName, err) @@ -1280,7 +1307,7 @@ func TestDeploymentScaleSubresource(t *testing.T) { replicas := int32(2) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to create deployment %q: %v", deploymentName, err) } @@ -1324,7 +1351,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { replicas := int32(1) tester := &deploymentTester{t: t, c: c, deployment: newDeployment(deploymentName, ns.Name, replicas)} var err error - tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment) + tester.deployment, err = c.AppsV1().Deployments(ns.Name).Create(tester.deployment) if err != nil { t.Fatalf("failed to 
create deployment %q: %v", deploymentName, err) } @@ -1349,7 +1376,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { // Orphaning: deployment should remove OwnerReference from a RS when the RS's labels change to not match its labels // Get replicaset of the deployment - rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1()) + rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1()) if err != nil { t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err) } @@ -1368,7 +1395,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { // Change the replicaset's labels to not match the deployment's labels labelMap := map[string]string{"new-name": "new-test"} - rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) { + rs, err = tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) { update.Labels = labelMap }) if err != nil { @@ -1376,7 +1403,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { } // Wait for the controllerRef of the replicaset to become nil - rsClient := tester.c.ExtensionsV1beta1().ReplicaSets(ns.Name) + rsClient := tester.c.AppsV1().ReplicaSets(ns.Name) if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { rs, err = rsClient.Get(rs.Name, metav1.GetOptions{}) if err != nil { @@ -1390,9 +1417,9 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { // Wait for the deployment to create a new replicaset // This will trigger collision avoidance due to deterministic nature of replicaset name // i.e., the new replicaset will have a name with different hash to preserve name uniqueness - var newRS *v1beta1.ReplicaSet + var newRS *apps.ReplicaSet if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1()) + newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1()) if err != nil { return false, fmt.Errorf("failed to get new replicaset of deployment %q after orphaning: %v", deploymentName, err) } @@ -1407,7 +1434,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) { // Adoption: deployment should add controllerRef to a RS when the RS's labels change to match its labels // Change the old replicaset's labels to match the deployment's labels - rs, err = tester.updateReplicaSet(rs.Name, func(update *v1beta1.ReplicaSet) { + rs, err = tester.updateReplicaSet(rs.Name, func(update *apps.ReplicaSet) { update.Labels = testLabels() }) if err != nil { diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go index 70240ea94a8..505e72ac82b 100644 --- a/test/integration/deployment/util.go +++ b/test/integration/deployment/util.go @@ -23,8 +23,9 @@ import ( "testing" "time" + apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/api/extensions/v1beta1" + extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" @@ -48,18 +49,18 @@ const ( fakeImage = "fakeimage" ) -var pauseFn = func(update *v1beta1.Deployment) { +var pauseFn = func(update *apps.Deployment) { update.Spec.Paused = true } -var resumeFn = func(update *v1beta1.Deployment) { +var resumeFn = func(update *apps.Deployment) { update.Spec.Paused = false } type deploymentTester struct { t *testing.T c clientset.Interface - deployment *v1beta1.Deployment + 
deployment *apps.Deployment } func testLabels() map[string]string { @@ -67,22 +68,22 @@ func testLabels() map[string]string { } // newDeployment returns a RollingUpdate Deployment with with a fake container image -func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment { - return &v1beta1.Deployment{ +func newDeployment(name, ns string, replicas int32) *apps.Deployment { + return &apps.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: "Deployment", - APIVersion: "extensions/v1beta1", + APIVersion: "apps/v1", }, ObjectMeta: metav1.ObjectMeta{ Namespace: ns, Name: name, }, - Spec: v1beta1.DeploymentSpec{ + Spec: apps.DeploymentSpec{ Replicas: &replicas, Selector: &metav1.LabelSelector{MatchLabels: testLabels()}, - Strategy: v1beta1.DeploymentStrategy{ - Type: v1beta1.RollingUpdateDeploymentStrategyType, - RollingUpdate: new(v1beta1.RollingUpdateDeployment), + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: new(apps.RollingUpdateDeployment), }, Template: v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -101,8 +102,8 @@ func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment { } } -func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet { - return &v1beta1.ReplicaSet{ +func newReplicaSet(name, ns string, replicas int32) *apps.ReplicaSet { + return &apps.ReplicaSet{ TypeMeta: metav1.TypeMeta{ Kind: "ReplicaSet", APIVersion: "extensions/v1beta1", @@ -110,8 +111,9 @@ func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet { ObjectMeta: metav1.ObjectMeta{ Namespace: ns, Name: name, + Labels: testLabels(), }, - Spec: v1beta1.ReplicaSetSpec{ + Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: testLabels(), }, @@ -133,11 +135,11 @@ func newReplicaSet(name, ns string, replicas int32) *v1beta1.ReplicaSet { } } -func newDeploymentRollback(name string, annotations map[string]string, revision int64) *v1beta1.DeploymentRollback { - return &v1beta1.DeploymentRollback{ +func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback { + return &extensions.DeploymentRollback{ Name: name, UpdatedAnnotations: annotations, - RollbackTo: v1beta1.RollbackConfig{Revision: revision}, + RollbackTo: extensions.RollbackConfig{Revision: revision}, } } @@ -156,8 +158,8 @@ func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.R metrics.UnregisterMetricAndUntrackRateLimiterUsage("deployment_controller") dc, err := deployment.NewDeploymentController( - informers.Extensions().V1beta1().Deployments(), - informers.Extensions().V1beta1().ReplicaSets(), + informers.Apps().V1().Deployments(), + informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")), ) @@ -256,7 +258,7 @@ func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) { } func (d *deploymentTester) deploymentComplete() (bool, error) { - latest, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{}) + latest, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -285,6 +287,8 @@ func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsRe // Manually mark updated Deployment pods as ready in a separate goroutine wg.Add(1) go d.markUpdatedPodsReady(&wg) + // Wait for goroutine to finish, for 
all return paths. + defer wg.Wait() // Wait for the Deployment status to complete while Deployment pods are becoming ready err := d.waitForDeploymentCompleteAndCheckRolling() @@ -292,9 +296,6 @@ func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsRe return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err) } - // Wait for goroutine to finish - wg.Wait() - return nil } @@ -319,7 +320,7 @@ func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error { return nil } -func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*v1beta1.Deployment, error) { +func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*apps.Deployment, error) { return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout) } @@ -330,12 +331,12 @@ func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) er return nil } -func (d *deploymentTester) getNewReplicaSet() (*v1beta1.ReplicaSet, error) { - deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{}) +func (d *deploymentTester) getNewReplicaSet() (*apps.ReplicaSet, error) { + deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err) } - rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.ExtensionsV1beta1()) + rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.AppsV1()) if err != nil { return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err) } @@ -353,7 +354,7 @@ func (d *deploymentTester) expectNoNewReplicaSet() error { return nil } -func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) { +func (d *deploymentTester) expectNewReplicaSet() (*apps.ReplicaSet, error) { rs, err := d.getNewReplicaSet() if err != nil { return nil, err @@ -364,12 +365,12 @@ func (d *deploymentTester) expectNewReplicaSet() (*v1beta1.ReplicaSet, error) { return rs, nil } -func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateExtensionsReplicaSetFunc) (*v1beta1.ReplicaSet, error) { - return testutil.UpdateExtensionsReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout) +func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) { + return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout) } -func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateExtensionsReplicaSetFunc) (*v1beta1.ReplicaSet, error) { - return testutil.UpdateExtensionsReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout) +func (d *deploymentTester) updateReplicaSetStatus(name string, applyStatusUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) { + return testutil.UpdateReplicaSetStatusWithRetries(d.c, d.deployment.Namespace, name, applyStatusUpdate, d.t.Logf, pollInterval, pollTimeout) } // waitForDeploymentRollbackCleared waits for deployment either started rolling back or doesn't need to rollback. 
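An illustrative, self-contained sketch of the defer-based wait adopted in the hunk above (hypothetical names, not taken from the test helpers): deferring wg.Wait() guarantees the pod-marking goroutine is awaited on every return path, including the early error return.

	package sketch

	import (
		"fmt"
		"sync"
	)

	// waitAndMarkReady starts a helper goroutine and always waits for it before returning.
	func waitAndMarkReady(markReady func(*sync.WaitGroup), waitComplete func() error) error {
		var wg sync.WaitGroup
		wg.Add(1)
		go markReady(&wg) // markReady is expected to call wg.Done() when finished
		defer wg.Wait()   // runs on both the error return and the success return below
		if err := waitComplete(); err != nil {
			return fmt.Errorf("failed to wait for completion: %v", err)
		}
		return nil
	}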
@@ -386,7 +387,7 @@ func (d *deploymentTester) waitForDeploymentUpdatedReplicasGTE(minUpdatedReplica return testutil.WaitForDeploymentUpdatedReplicasGTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout) } -func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType v1beta1.DeploymentConditionType) error { +func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType apps.DeploymentConditionType) error { return testutil.WaitForDeploymentWithCondition(d.c, d.deployment.Namespace, d.deployment.Name, reason, condType, d.t.Logf, pollInterval, pollTimeout) } @@ -417,13 +418,13 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) { return ownedPods, nil } -func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error { - return testutil.WaitExtensionsRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout) +func (d *deploymentTester) waitRSStable(replicaset *apps.ReplicaSet) error { + return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout) } func (d *deploymentTester) scaleDeployment(newReplicas int32) error { var err error - d.deployment, err = d.updateDeployment(func(update *v1beta1.Deployment) { + d.deployment, err = d.updateDeployment(func(update *apps.Deployment) { update.Spec.Replicas = &newReplicas }) if err != nil { @@ -447,7 +448,7 @@ func (d *deploymentTester) scaleDeployment(newReplicas int32) error { // waitForReadyReplicas waits for number of ready replicas to equal number of replicas. func (d *deploymentTester) waitForReadyReplicas() error { if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{}) + deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err) } @@ -485,7 +486,7 @@ func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error { // Verify all replicas fields of DeploymentStatus have desired count. // Immediately return an error when found a non-matching replicas field. func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error { - deployment, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{}) + deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err) } diff --git a/test/utils/replicaset.go b/test/utils/replicaset.go index bcd52a89264..838dd891f03 100644 --- a/test/utils/replicaset.go +++ b/test/utils/replicaset.go @@ -22,7 +22,6 @@ import ( "time" apps "k8s.io/api/apps/v1" - extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -53,33 +52,6 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, return rs, pollErr } -// TODO(#55714): Remove this after Deployment tests use apps/v1 ReplicaSet. -type UpdateExtensionsReplicaSetFunc func(d *extensions.ReplicaSet) - -// TODO(#55714): Remove this after Deployment tests use apps/v1 ReplicaSet. 
-func UpdateExtensionsReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateExtensionsReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) { - var rs *extensions.ReplicaSet - var updateErr error - pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - var err error - if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil { - return false, err - } - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(rs) - if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Update(rs); err == nil { - logf("Updating replica set %q", name) - return true, nil - } - updateErr = err - return false, nil - }) - if pollErr == wait.ErrWaitTimeout { - pollErr = fmt.Errorf("couldn't apply the provided updated to replicaset %q: %v", name, updateErr) - } - return rs, pollErr -} - // Verify .Status.Replicas is equal to .Spec.Replicas func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaSet, pollInterval, pollTimeout time.Duration) error { desiredGeneration := rs.Generation @@ -95,33 +67,17 @@ func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaS return nil } -// TODO(#55714): Remove after Deployment tests use apps/v1 ReplicaSet. -func WaitExtensionsRSStable(t *testing.T, clientSet clientset.Interface, rs *extensions.ReplicaSet, pollInterval, pollTimeout time.Duration) error { - desiredGeneration := rs.Generation - if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - newRS, err := clientSet.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - return newRS.Status.ObservedGeneration >= desiredGeneration && newRS.Status.Replicas == *rs.Spec.Replicas, nil - }); err != nil { - return fmt.Errorf("failed to verify .Status.Replicas is equal to .Spec.Replicas for replicaset %q: %v", rs.Name, err) - } - return nil -} - -// TODO(#55714): Remove after Deployment tests use apps/v1 ReplicaSet. -func UpdateExtensionsReplicaSetStatusWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateExtensionsReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*extensions.ReplicaSet, error) { - var rs *extensions.ReplicaSet +func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*apps.ReplicaSet, error) { + var rs *apps.ReplicaSet var updateErr error pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { var err error - if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil { + if rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{}); err != nil { return false, err } // Apply the update, then attempt to push it to the apiserver. applyUpdate(rs) - if rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).UpdateStatus(rs); err == nil { + if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(rs); err == nil { logf("Updating replica set %q", name) return true, nil } From 436db717518188cf4e58cbfb41ea1a202d016d73 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Fri, 11 May 2018 11:09:56 -0700 Subject: [PATCH 107/307] Set explicit labels/selector for apps/v1 Deployment/RS. 
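Unlike extensions/v1beta1, apps/v1 does not default spec.selector from the pod template labels and rejects a missing selector, so fixtures must carry an explicit, matching labels/selector pair. A minimal sketch of such a fixture, assuming the standard client-go API types (label values and image are hypothetical):

	package sketch

	import (
		appsv1 "k8s.io/api/apps/v1"
		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// newAppsV1Deployment returns an apps/v1 Deployment whose object labels,
	// selector, and pod template labels all agree.
	func newAppsV1Deployment(name string, replicas int32) *appsv1.Deployment {
		labels := map[string]string{"app": name} // hypothetical label set
		return &appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{Name: name, Labels: labels},
			Spec: appsv1.DeploymentSpec{
				Replicas: &replicas,
				Selector: &metav1.LabelSelector{MatchLabels: labels},
				Template: corev1.PodTemplateSpec{
					ObjectMeta: metav1.ObjectMeta{Labels: labels},
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{{Name: "c", Image: "example-image"}},
					},
				},
			},
		}
	}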
--- pkg/controller/deployment/sync.go | 1 + test/e2e/apimachinery/aggregator.go | 6 +++++- test/e2e/apimachinery/webhook.go | 6 +++++- test/e2e/apps/replica_set.go | 3 ++- test/e2e/framework/deployment_util.go | 3 ++- 5 files changed, 15 insertions(+), 4 deletions(-) diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go index a49938bba9b..9ddb6d453f6 100644 --- a/pkg/controller/deployment/sync.go +++ b/pkg/controller/deployment/sync.go @@ -195,6 +195,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old Name: d.Name + "-" + rand.SafeEncodeString(podTemplateSpecHash), Namespace: d.Namespace, OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)}, + Labels: newRSTemplate.Labels, }, Spec: apps.ReplicaSetSpec{ Replicas: new(int32), diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index bd3a674de0c..a1139655e41 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -173,10 +173,14 @@ func TestSampleAPIServer(f *framework.Framework, image string) { } d := &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, + Name: deploymentName, + Labels: podLabels, }, Spec: apps.DeploymentSpec{ Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: podLabels, + }, Strategy: apps.DeploymentStrategy{ Type: apps.RollingUpdateDeploymentStrategyType, }, diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 2e3582a1afe..f5647aa989e 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -265,10 +265,14 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert } d := &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, + Name: deploymentName, + Labels: podLabels, }, Spec: apps.DeploymentSpec{ Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: podLabels, + }, Strategy: apps.DeploymentStrategy{ Type: apps.RollingUpdateDeploymentStrategyType, }, diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index a8c23ea7401..354d2e94b74 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -40,7 +40,8 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa zero := int64(0) return &apps.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ - Name: rsName, + Name: rsName, + Labels: rsPodLabels, }, Spec: apps.ReplicaSetSpec{ Selector: &metav1.LabelSelector{ diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go index 89695d28d6c..62162432816 100644 --- a/test/e2e/framework/deployment_util.go +++ b/test/e2e/framework/deployment_util.go @@ -87,7 +87,8 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s zero := int64(0) return &apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: deploymentName, + Name: deploymentName, + Labels: podLabels, }, Spec: apps.DeploymentSpec{ Replicas: &replicas, From 046ae81e35cce52860d66d2cf9720fe1a2aaaef4 Mon Sep 17 00:00:00 2001 From: Anthony Yeh Date: Fri, 11 May 2018 12:17:24 -0700 Subject: [PATCH 108/307] e2e/auth: Expect apps/v1 Deployment calls in audit test. 
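Because the audited Deployment is now created and mutated through the apps/v1 API, the expected requestURI values move from the /apis/extensions/v1beta1/... prefix to /apis/apps/v1/.... A small sketch of how such an expected URI can be assembled (helper name is hypothetical):

	package sketch

	import "fmt"

	// deploymentsURI builds the request path that audit events are matched against.
	func deploymentsURI(namespace, name string) string {
		base := fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments", namespace)
		if name == "" {
			return base // collection URI, used for create/list/watch
		}
		return base + "/" + name // named-resource URI, used for get/update/patch/delete
	}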
--- test/e2e/auth/audit.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index ceafb65762c..4dd1c9e826c 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -251,7 +251,7 @@ var _ = SIGDescribe("Advanced Audit", func() { { v1beta1.LevelRequestResponse, v1beta1.StageResponseComplete, - fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments", namespace), + fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments", namespace), "create", 201, auditTestUser, @@ -263,7 +263,7 @@ var _ = SIGDescribe("Advanced Audit", func() { }, { v1beta1.LevelRequest, v1beta1.StageResponseComplete, - fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace), + fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace), "get", 200, auditTestUser, @@ -275,7 +275,7 @@ var _ = SIGDescribe("Advanced Audit", func() { }, { v1beta1.LevelRequest, v1beta1.StageResponseComplete, - fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments", namespace), + fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments", namespace), "list", 200, auditTestUser, @@ -287,7 +287,7 @@ var _ = SIGDescribe("Advanced Audit", func() { }, { v1beta1.LevelRequest, v1beta1.StageResponseStarted, - fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout), + fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout), "watch", 200, auditTestUser, @@ -299,7 +299,7 @@ var _ = SIGDescribe("Advanced Audit", func() { }, { v1beta1.LevelRequest, v1beta1.StageResponseComplete, - fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout), + fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout), "watch", 200, auditTestUser, @@ -311,7 +311,7 @@ var _ = SIGDescribe("Advanced Audit", func() { }, { v1beta1.LevelRequestResponse, v1beta1.StageResponseComplete, - fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace), + fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace), "update", 200, auditTestUser, @@ -323,7 +323,7 @@ var _ = SIGDescribe("Advanced Audit", func() { }, { v1beta1.LevelRequestResponse, v1beta1.StageResponseComplete, - fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace), + fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace), "patch", 200, auditTestUser, @@ -335,7 +335,7 @@ var _ = SIGDescribe("Advanced Audit", func() { }, { v1beta1.LevelRequestResponse, v1beta1.StageResponseComplete, - fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace), + fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace), "delete", 200, auditTestUser, From 10c48ae51060057230b5fda975a5d8dc3dec485a Mon Sep 17 00:00:00 2001 From: Mehdy Bohlool Date: Wed, 18 Apr 2018 11:21:29 -0700 Subject: [PATCH 109/307] CRD versioning - types change --- .../pkg/apis/apiextensions/types.go | 32 +++++++++++++++++ .../pkg/apis/apiextensions/v1beta1/types.go | 35 ++++++++++++++++++- 2 files changed, 66 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go 
b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index 0deb7cbd081..d74387ae770 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -25,6 +25,9 @@ type CustomResourceDefinitionSpec struct { // Group is the group this resource belongs in Group string // Version is the version this resource belongs in + // Should be always first item in Versions field if provided. + // Optional, but at least one of Version or Versions must be set. + // Deprecated: Please use `Versions`. Version string // Names are the names used to describe this custom resource Names CustomResourceDefinitionNames @@ -34,6 +37,27 @@ type CustomResourceDefinitionSpec struct { Validation *CustomResourceValidation // Subresources describes the subresources for CustomResources Subresources *CustomResourceSubresources + // Versions is the list of all supported versions for this resource. + // If Version field is provided, this field is optional. + // Validation: All versions must use the same validation schema for now. i.e., top + // level Validation field is applied to all of these versions. + // Order: The version name will be used to compute the order. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of + // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + Versions []CustomResourceDefinitionVersion +} + +type CustomResourceDefinitionVersion struct { + // Name is the version name, e.g. “v1”, “v2beta1”, etc. + Name string + // Served is a flag enabling/disabling this version from being served via REST APIs + Served bool + // Storage flags the version as storage version. There must be exactly one flagged + // as storage version. + Storage bool } // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition @@ -115,6 +139,14 @@ type CustomResourceDefinitionStatus struct { // AcceptedNames are the names that are actually being used to serve discovery // They may be different than the names in spec. AcceptedNames CustomResourceDefinitionNames + + // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so the migration controller can first finish a migration to another version (i.e. + // that no old objects are left in the storage), and then remove the rest of the + // versions from this list. + // None of the versions in this list can be removed from the spec.Versions field. 
+ StoredVersions []string } // CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index beeb3510531..97062fd2165 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -25,7 +25,11 @@ type CustomResourceDefinitionSpec struct { // Group is the group this resource belongs in Group string `json:"group" protobuf:"bytes,1,opt,name=group"` // Version is the version this resource belongs in - Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + // Should be always first item in Versions field if provided. + // Optional, but at least one of Version or Versions must be set. + // Deprecated: Please use `Versions`. + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` // Names are the names used to describe this custom resource Names CustomResourceDefinitionNames `json:"names" protobuf:"bytes,3,opt,name=names"` // Scope indicates whether this resource is cluster or namespace scoped. Default is namespaced @@ -36,6 +40,27 @@ type CustomResourceDefinitionSpec struct { // Subresources describes the subresources for CustomResources // +optional Subresources *CustomResourceSubresources `json:"subresources,omitempty" protobuf:"bytes,6,opt,name=subresources"` + // Versions is the list of all supported versions for this resource. + // If Version field is provided, this field is optional. + // Validation: All versions must use the same validation schema for now. i.e., top + // level Validation field is applied to all of these versions. + // Order: The version name will be used to compute the order. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of + // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` +} + +type CustomResourceDefinitionVersion struct { + // Name is the version name, e.g. “v1”, “v2beta1”, etc. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Served is a flag enabling/disabling this version from being served via REST APIs + Served bool `json:"served" protobuf:"varint,2,opt,name=served"` + // Storage flags the version as storage version. There must be exactly one + // flagged as storage version. + Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` } // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition @@ -117,6 +142,14 @@ type CustomResourceDefinitionStatus struct { // AcceptedNames are the names that are actually being used to serve discovery // They may be different than the names in spec. AcceptedNames CustomResourceDefinitionNames `json:"acceptedNames" protobuf:"bytes,2,opt,name=acceptedNames"` + + // StoredVersions are all versions of CustomResources that were ever persisted. 
Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so the migration controller can first finish a migration to another version (i.e. + // that no old objects are left in the storage), and then remove the rest of the + // versions from this list. + // None of the versions in this list can be removed from the spec.Versions field. + StoredVersions []string `json:"storedVersions" protobuf:"bytes,3,rep,name=storedVersions"` } // CustomResourceCleanupFinalizer is the name of the finalizer which will delete instances of From 8a39e5381cdf0299ac654d3a35323694c9cd66e0 Mon Sep 17 00:00:00 2001 From: Mehdy Bohlool Date: Mon, 7 May 2018 12:54:26 -0700 Subject: [PATCH 110/307] CRD versioning validation and defaulting --- .../pkg/apis/apiextensions/helpers.go | 31 ++ .../apis/apiextensions/v1beta1/defaults.go | 20 ++ .../apiextensions/validation/validation.go | 67 +++- .../validation/validation_test.go | 335 ++++++++++++++++-- 4 files changed, 424 insertions(+), 29 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go index 8dc7f72d660..92cad7d9b73 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go @@ -17,6 +17,7 @@ limitations under the License. package apiextensions import ( + "fmt" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -116,3 +117,33 @@ func CRDRemoveFinalizer(crd *CustomResourceDefinition, needle string) { } crd.Finalizers = newFinalizers } + +// HasServedCRDVersion returns true if `version` is in the list of CRD's versions and the Served flag is set. +func HasServedCRDVersion(crd *CustomResourceDefinition, version string) bool { + for _, v := range crd.Spec.Versions { + if v.Name == version { + return v.Served + } + } + return false +} + +// GetCRDStorageVersion returns the storage version for given CRD. 
+func GetCRDStorageVersion(crd *CustomResourceDefinition) (string, error) { + for _, v := range crd.Spec.Versions { + if v.Storage { + return v.Name, nil + } + } + // This should not happened if crd is valid + return "", fmt.Errorf("invalid CustomResourceDefinition, no storage version") +} + +func IsStoredVersion(crd *CustomResourceDefinition, version string) bool { + for _, v := range crd.Status.StoredVersions { + if version == v { + return true + } + } + return false +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go index edffaed55f0..1984e229778 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -31,6 +31,14 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error { func SetDefaults_CustomResourceDefinition(obj *CustomResourceDefinition) { SetDefaults_CustomResourceDefinitionSpec(&obj.Spec) + if len(obj.Status.StoredVersions) == 0 { + for _, v := range obj.Spec.Versions { + if v.Storage { + obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) + break + } + } + } } func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) { @@ -43,4 +51,16 @@ func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { obj.Names.ListKind = obj.Names.Kind + "List" } + // If there is no list of versions, create on using deprecated Version field. + if len(obj.Versions) == 0 && len(obj.Version) != 0 { + obj.Versions = []CustomResourceDefinitionVersion{{ + Name: obj.Version, + Storage: true, + Served: true, + }} + } + // For backward compatibility set the version field to the first item in versions list. + if len(obj.Version) == 0 && len(obj.Versions) != 0 { + obj.Version = obj.Versions[0].Name + } } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go index 390f75f4426..eb9acc79e8e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go @@ -45,6 +45,7 @@ func ValidateCustomResourceDefinition(obj *apiextensions.CustomResourceDefinitio allErrs := genericvalidation.ValidateObjectMeta(&obj.ObjectMeta, false, nameValidationFn, field.NewPath("metadata")) allErrs = append(allErrs, ValidateCustomResourceDefinitionSpec(&obj.Spec, field.NewPath("spec"))...) allErrs = append(allErrs, ValidateCustomResourceDefinitionStatus(&obj.Status, field.NewPath("status"))...) + allErrs = append(allErrs, ValidateCustomResourceDefinitionStoredVersions(obj.Status.StoredVersions, obj.Spec.Versions, field.NewPath("status").Child("storedVersions"))...) return allErrs } @@ -53,6 +54,34 @@ func ValidateCustomResourceDefinitionUpdate(obj, oldObj *apiextensions.CustomRes allErrs := genericvalidation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateCustomResourceDefinitionSpecUpdate(&obj.Spec, &oldObj.Spec, apiextensions.IsCRDConditionTrue(oldObj, apiextensions.Established), field.NewPath("spec"))...) 
allErrs = append(allErrs, ValidateCustomResourceDefinitionStatus(&obj.Status, field.NewPath("status"))...) + allErrs = append(allErrs, ValidateCustomResourceDefinitionStoredVersions(obj.Status.StoredVersions, obj.Spec.Versions, field.NewPath("status").Child("storedVersions"))...) + return allErrs +} + +// ValidateCustomResourceDefinitionStoredVersions statically validates +func ValidateCustomResourceDefinitionStoredVersions(storedVersions []string, versions []apiextensions.CustomResourceDefinitionVersion, fldPath *field.Path) field.ErrorList { + if len(storedVersions) == 0 { + return field.ErrorList{field.Invalid(fldPath, storedVersions, "must have at least one stored version")} + } + allErrs := field.ErrorList{} + storedVersionsMap := map[string]int{} + for i, v := range storedVersions { + storedVersionsMap[v] = i + } + for _, v := range versions { + _, ok := storedVersionsMap[v.Name] + if v.Storage && !ok { + allErrs = append(allErrs, field.Invalid(fldPath, v, "must have the storage version "+v.Name)) + } + if ok { + delete(storedVersionsMap, v.Name) + } + } + + for v, i := range storedVersionsMap { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), v, "must appear in spec.versions")) + } + return allErrs } @@ -75,12 +104,6 @@ func ValidateCustomResourceDefinitionSpec(spec *apiextensions.CustomResourceDefi allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), spec.Group, "should be a domain with at least one dot")) } - if len(spec.Version) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("version"), "")) - } else if errs := validationutil.IsDNS1035Label(spec.Version); len(errs) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("version"), spec.Version, strings.Join(errs, ","))) - } - switch spec.Scope { case "": allErrs = append(allErrs, field.Required(fldPath.Child("scope"), "")) @@ -89,6 +112,37 @@ func ValidateCustomResourceDefinitionSpec(spec *apiextensions.CustomResourceDefi allErrs = append(allErrs, field.NotSupported(fldPath.Child("scope"), spec.Scope, []string{string(apiextensions.ClusterScoped), string(apiextensions.NamespaceScoped)})) } + storageFlagCount := 0 + versionsMap := map[string]bool{} + uniqueNames := true + for i, version := range spec.Versions { + if version.Storage { + storageFlagCount++ + } + if versionsMap[version.Name] { + uniqueNames = false + } else { + versionsMap[version.Name] = true + } + if errs := validationutil.IsDNS1035Label(version.Name); len(errs) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("versions").Index(i).Child("name"), spec.Versions[i].Name, strings.Join(errs, ","))) + } + } + if !uniqueNames { + allErrs = append(allErrs, field.Invalid(fldPath.Child("versions"), spec.Versions, "must contain unique version names")) + } + if storageFlagCount != 1 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("versions"), spec.Versions, "must have exactly one version marked as storage version")) + } + if len(spec.Version) != 0 { + if errs := validationutil.IsDNS1035Label(spec.Version); len(errs) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("version"), spec.Version, strings.Join(errs, ","))) + } + if len(spec.Versions) >= 1 && spec.Versions[0].Name != spec.Version { + allErrs = append(allErrs, field.Invalid(fldPath.Child("version"), spec.Version, "must match the first version in spec.versions")) + } + } + // in addition to the basic name restrictions, some names are required for spec, but not for status if len(spec.Names.Plural) == 0 { allErrs = append(allErrs, 
field.Required(fldPath.Child("names", "plural"), "")) @@ -130,7 +184,6 @@ func ValidateCustomResourceDefinitionSpecUpdate(spec, oldSpec *apiextensions.Cus if established { // these effect the storage and cannot be changed therefore - allErrs = append(allErrs, genericvalidation.ValidateImmutableField(spec.Version, oldSpec.Version, fldPath.Child("version"))...) allErrs = append(allErrs, genericvalidation.ValidateImmutableField(spec.Scope, oldSpec.Scope, fldPath.Child("scope"))...) allErrs = append(allErrs, genericvalidation.ValidateImmutableField(spec.Names.Kind, oldSpec.Names.Kind, fldPath.Child("names", "kind"))...) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation_test.go index 8f71dd9b31d..6bc545c0fd8 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation_test.go @@ -19,10 +19,9 @@ package validation import ( "testing" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" ) type validationMatch struct { @@ -51,11 +50,150 @@ func (v validationMatch) matches(err *field.Error) bool { } func TestValidateCustomResourceDefinition(t *testing.T) { + singleVersionList := []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + } tests := []struct { name string resource *apiextensions.CustomResourceDefinition errors []validationMatch }{ + { + name: "no_storage_version", + resource: &apiextensions.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "plural.group.com"}, + Spec: apiextensions.CustomResourceDefinitionSpec{ + Group: "group.com", + Scope: apiextensions.ResourceScope("Cluster"), + Names: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "Plural", + ListKind: "PluralList", + }, + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: false, + }, + { + Name: "version2", + Served: true, + Storage: false, + }, + }, + }, + Status: apiextensions.CustomResourceDefinitionStatus{ + StoredVersions: []string{"version"}, + }, + }, + errors: []validationMatch{ + invalid("spec", "versions"), + }, + }, + { + name: "multiple_storage_version", + resource: &apiextensions.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "plural.group.com"}, + Spec: apiextensions.CustomResourceDefinitionSpec{ + Group: "group.com", + Scope: apiextensions.ResourceScope("Cluster"), + Names: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "Plural", + ListKind: "PluralList", + }, + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + { + Name: "version2", + Served: true, + Storage: true, + }, + }, + }, + Status: apiextensions.CustomResourceDefinitionStatus{ + StoredVersions: []string{"version"}, + }, + }, + errors: []validationMatch{ + invalid("spec", "versions"), + invalid("status", "storedVersions"), + }, + }, + { + name: "missing_storage_version_in_stored_versions", + resource: &apiextensions.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "plural.group.com"}, + 
Spec: apiextensions.CustomResourceDefinitionSpec{ + Group: "group.com", + Scope: apiextensions.ResourceScope("Cluster"), + Names: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "Plural", + ListKind: "PluralList", + }, + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: false, + }, + { + Name: "version2", + Served: true, + Storage: true, + }, + }, + }, + Status: apiextensions.CustomResourceDefinitionStatus{ + StoredVersions: []string{"version"}, + }, + }, + errors: []validationMatch{ + invalid("status", "storedVersions"), + }, + }, + { + name: "empty_stored_version", + resource: &apiextensions.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "plural.group.com"}, + Spec: apiextensions.CustomResourceDefinitionSpec{ + Group: "group.com", + Scope: apiextensions.ResourceScope("Cluster"), + Names: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "Plural", + ListKind: "PluralList", + }, + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + }, + }, + Status: apiextensions.CustomResourceDefinitionStatus{ + StoredVersions: []string{}, + }, + }, + errors: []validationMatch{ + invalid("status", "storedVersions"), + }, + }, { name: "mismatched name", resource: &apiextensions.CustomResourceDefinition{ @@ -68,8 +206,9 @@ func TestValidateCustomResourceDefinition(t *testing.T) { }, }, errors: []validationMatch{ + invalid("status", "storedVersions"), invalid("metadata", "name"), - required("spec", "version"), + invalid("spec", "versions"), required("spec", "scope"), required("spec", "names", "singular"), required("spec", "names", "kind"), @@ -82,9 +221,10 @@ func TestValidateCustomResourceDefinition(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "plural.group.com"}, }, errors: []validationMatch{ + invalid("status", "storedVersions"), invalid("metadata", "name"), + invalid("spec", "versions"), required("spec", "group"), - required("spec", "version"), required("spec", "scope"), required("spec", "names", "plural"), required("spec", "names", "singular"), @@ -117,9 +257,9 @@ func TestValidateCustomResourceDefinition(t *testing.T) { }, }, errors: []validationMatch{ + invalid("status", "storedVersions"), invalid("metadata", "name"), invalid("spec", "group"), - invalid("spec", "version"), unsupported("spec", "scope"), invalid("spec", "names", "plural"), invalid("spec", "names", "singular"), @@ -131,6 +271,8 @@ func TestValidateCustomResourceDefinition(t *testing.T) { invalid("status", "acceptedNames", "kind"), invalid("status", "acceptedNames", "listKind"), // invalid format invalid("status", "acceptedNames", "listKind"), // kind == listKind + invalid("spec", "versions"), + invalid("spec", "version"), }, }, { @@ -138,8 +280,9 @@ func TestValidateCustomResourceDefinition(t *testing.T) { resource: &apiextensions.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: "plural.group"}, Spec: apiextensions.CustomResourceDefinitionSpec{ - Group: "group.c(*&om", - Version: "version", + Group: "group.c(*&om", + Version: "version", + Versions: singleVersionList, Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -154,6 +297,7 @@ func TestValidateCustomResourceDefinition(t *testing.T) { Kind: "matching", ListKind: "matching", }, + StoredVersions: []string{"version"}, }, }, errors: []validationMatch{ @@ -169,9 +313,10 @@ func 
TestValidateCustomResourceDefinition(t *testing.T) { resource: &apiextensions.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: "plural.group.com"}, Spec: apiextensions.CustomResourceDefinitionSpec{ - Group: "group.com", - Version: "version", - Scope: apiextensions.NamespaceScoped, + Group: "group.com", + Version: "version", + Versions: singleVersionList, + Scope: apiextensions.NamespaceScoped, Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -187,6 +332,9 @@ func TestValidateCustomResourceDefinition(t *testing.T) { }, }, }, + Status: apiextensions.CustomResourceDefinitionStatus{ + StoredVersions: []string{"version"}, + }, }, errors: []validationMatch{ forbidden("spec", "validation", "openAPIV3Schema", "additionalProperties"), @@ -197,9 +345,10 @@ func TestValidateCustomResourceDefinition(t *testing.T) { resource: &apiextensions.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: "plural.group.com"}, Spec: apiextensions.CustomResourceDefinitionSpec{ - Group: "group.com", - Version: "version", - Scope: apiextensions.NamespaceScoped, + Group: "group.com", + Version: "version", + Versions: singleVersionList, + Scope: apiextensions.NamespaceScoped, Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -217,6 +366,9 @@ func TestValidateCustomResourceDefinition(t *testing.T) { }, }, }, + Status: apiextensions.CustomResourceDefinitionStatus{ + StoredVersions: []string{"version"}, + }, }, errors: []validationMatch{}, }, @@ -266,7 +418,14 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Spec: apiextensions.CustomResourceDefinitionSpec{ Group: "group.com", Version: "version", - Scope: apiextensions.ResourceScope("Cluster"), + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.ResourceScope("Cluster"), Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -291,7 +450,14 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Spec: apiextensions.CustomResourceDefinitionSpec{ Group: "group.com", Version: "version", - Scope: apiextensions.ResourceScope("Cluster"), + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.ResourceScope("Cluster"), Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -306,6 +472,7 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Kind: "kind", ListKind: "listkind", }, + StoredVersions: []string{"version"}, }, }, errors: []validationMatch{}, @@ -320,7 +487,14 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Spec: apiextensions.CustomResourceDefinitionSpec{ Group: "group.com", Version: "version", - Scope: apiextensions.ResourceScope("Cluster"), + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.ResourceScope("Cluster"), Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -348,7 +522,14 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Spec: apiextensions.CustomResourceDefinitionSpec{ Group: "group.com", Version: "version", - Scope: apiextensions.ResourceScope("Cluster"), + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: 
true, + }, + }, + Scope: apiextensions.ResourceScope("Cluster"), Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -363,10 +544,91 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Kind: "kind", ListKind: "listkind", }, + StoredVersions: []string{"version"}, }, }, errors: []validationMatch{}, }, + { + name: "version-deleted", + old: &apiextensions.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "plural.group.com", + ResourceVersion: "42", + }, + Spec: apiextensions.CustomResourceDefinitionSpec{ + Group: "group.com", + Version: "version", + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + { + Name: "version2", + Served: true, + Storage: false, + }, + }, + Scope: apiextensions.ResourceScope("Cluster"), + Names: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "kind", + ListKind: "listkind", + }, + }, + Status: apiextensions.CustomResourceDefinitionStatus{ + AcceptedNames: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "kind", + ListKind: "listkind", + }, + StoredVersions: []string{"version", "version2"}, + Conditions: []apiextensions.CustomResourceDefinitionCondition{ + {Type: apiextensions.Established, Status: apiextensions.ConditionTrue}, + }, + }, + }, + resource: &apiextensions.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "plural.group.com", + ResourceVersion: "42", + }, + Spec: apiextensions.CustomResourceDefinitionSpec{ + Group: "group.com", + Version: "version", + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.ResourceScope("Cluster"), + Names: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "kind", + ListKind: "listkind", + }, + }, + Status: apiextensions.CustomResourceDefinitionStatus{ + AcceptedNames: apiextensions.CustomResourceDefinitionNames{ + Plural: "plural", + Singular: "singular", + Kind: "kind", + ListKind: "listkind", + }, + StoredVersions: []string{"version", "version2"}, + }, + }, + errors: []validationMatch{ + invalid("status", "storedVersions[1]"), + }, + }, { name: "changes", old: &apiextensions.CustomResourceDefinition{ @@ -377,7 +639,14 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Spec: apiextensions.CustomResourceDefinitionSpec{ Group: "group.com", Version: "version", - Scope: apiextensions.ResourceScope("Cluster"), + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.ResourceScope("Cluster"), Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -405,7 +674,14 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Spec: apiextensions.CustomResourceDefinitionSpec{ Group: "abc.com", Version: "version2", - Scope: apiextensions.ResourceScope("Namespaced"), + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version2", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.ResourceScope("Namespaced"), Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural2", Singular: "singular2", @@ -420,6 +696,7 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Kind: "kind2", ListKind: "listkind2", }, + StoredVersions: 
[]string{"version2"}, }, }, errors: []validationMatch{ @@ -437,7 +714,14 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Spec: apiextensions.CustomResourceDefinitionSpec{ Group: "group.com", Version: "version", - Scope: apiextensions.ResourceScope("Cluster"), + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.ResourceScope("Cluster"), Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural", Singular: "singular", @@ -465,7 +749,14 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Spec: apiextensions.CustomResourceDefinitionSpec{ Group: "abc.com", Version: "version2", - Scope: apiextensions.ResourceScope("Namespaced"), + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "version2", + Served: true, + Storage: true, + }, + }, + Scope: apiextensions.ResourceScope("Namespaced"), Names: apiextensions.CustomResourceDefinitionNames{ Plural: "plural2", Singular: "singular2", @@ -480,11 +771,11 @@ func TestValidateCustomResourceDefinitionUpdate(t *testing.T) { Kind: "kind2", ListKind: "listkind2", }, + StoredVersions: []string{"version2"}, }, }, errors: []validationMatch{ immutable("spec", "group"), - immutable("spec", "version"), immutable("spec", "scope"), immutable("spec", "names", "kind"), immutable("spec", "names", "plural"), From 531041ce943b99e4acf61102be0b83ca45053258 Mon Sep 17 00:00:00 2001 From: Mehdy Bohlool Date: Mon, 7 May 2018 12:55:35 -0700 Subject: [PATCH 111/307] Do not bypass same version unstructed conversion if it is a list This give the converter a chance to convert list items to the requested version. --- .../serializer/versioning/versioning.go | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 48df6b5dd18..7716cc42178 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -19,6 +19,7 @@ package versioning import ( "io" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -170,17 +171,22 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error { case *runtime.Unknown: return c.encoder.Encode(obj, w) case runtime.Unstructured: - // avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl) - objGVK := obj.GetObjectKind().GroupVersionKind() - if len(objGVK.Version) == 0 { - return c.encoder.Encode(obj, w) - } - targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK}) - if !ok { - return runtime.NewNotRegisteredGVKErrForTarget(objGVK, c.encodeVersion) - } - if targetGVK == objGVK { - return c.encoder.Encode(obj, w) + // An unstructured list can contain objects of multiple group version kinds. don't short-circuit just + // because the top-level type matches our desired destination type. actually send the object to the converter + // to give it a chance to convert the list items if needed. 
+ if _, ok := obj.(*unstructured.UnstructuredList); !ok { + // avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl) + objGVK := obj.GetObjectKind().GroupVersionKind() + if len(objGVK.Version) == 0 { + return c.encoder.Encode(obj, w) + } + targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK}) + if !ok { + return runtime.NewNotRegisteredGVKErrForTarget(objGVK, c.encodeVersion) + } + if targetGVK == objGVK { + return c.encoder.Encode(obj, w) + } } } From 0f6d98a056650d4a3aeb3e69476fb53b5542bff3 Mon Sep 17 00:00:00 2001 From: Mehdy Bohlool Date: Mon, 14 May 2018 14:23:46 -0700 Subject: [PATCH 112/307] CRD versioning with no-op converter --- .../crdregistration_controller.go | 56 +-- .../crdregistration_controller_test.go | 23 +- .../pkg/apis/apiextensions/fuzzer/fuzzer.go | 22 ++ .../pkg/apiserver/conversion/converter.go | 67 ++++ .../pkg/apiserver/conversion/nop_converter.go | 100 +++++ .../customresource_discovery_controller.go | 58 ++- .../pkg/apiserver/customresource_handler.go | 342 ++++++++++-------- .../apiserver/customresource_handler_test.go | 13 +- .../customresourcedefinition/strategy.go | 18 + .../test/integration/registration_test.go | 36 ++ .../test/integration/testserver/resources.go | 56 +++ .../test/integration/versioning_test.go | 304 ++++++++++++++++ 12 files changed, 897 insertions(+), 198 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/nop_converter.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go diff --git a/pkg/master/controller/crdregistration/crdregistration_controller.go b/pkg/master/controller/crdregistration/crdregistration_controller.go index c3a87b418a4..49e3b1edc20 100644 --- a/pkg/master/controller/crdregistration/crdregistration_controller.go +++ b/pkg/master/controller/crdregistration/crdregistration_controller.go @@ -77,9 +77,11 @@ func NewAutoRegistrationController(crdinformer crdinformers.CustomResourceDefini cast := obj.(*apiextensions.CustomResourceDefinition) c.enqueueCRD(cast) }, - UpdateFunc: func(_, obj interface{}) { - cast := obj.(*apiextensions.CustomResourceDefinition) - c.enqueueCRD(cast) + UpdateFunc: func(oldObj, newObj interface{}) { + // Enqueue both old and new object to make sure we remove and add appropriate API services. + // The working queue will resolve any duplicates and only changes will stay in the queue. 
+ c.enqueueCRD(oldObj.(*apiextensions.CustomResourceDefinition)) + c.enqueueCRD(newObj.(*apiextensions.CustomResourceDefinition)) }, DeleteFunc: func(obj interface{}) { cast, ok := obj.(*apiextensions.CustomResourceDefinition) @@ -120,8 +122,10 @@ func (c *crdRegistrationController) Run(threadiness int, stopCh <-chan struct{}) utilruntime.HandleError(err) } else { for _, crd := range crds { - if err := c.syncHandler(schema.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Version}); err != nil { - utilruntime.HandleError(err) + for _, version := range crd.Spec.Versions { + if err := c.syncHandler(schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}); err != nil { + utilruntime.HandleError(err) + } } } } @@ -182,11 +186,12 @@ func (c *crdRegistrationController) processNextWorkItem() bool { } func (c *crdRegistrationController) enqueueCRD(crd *apiextensions.CustomResourceDefinition) { - c.queue.Add(schema.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Version}) + for _, version := range crd.Spec.Versions { + c.queue.Add(schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}) + } } func (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.GroupVersion) error { - found := false apiServiceName := groupVersion.Version + "." + groupVersion.Group // check all CRDs. There shouldn't that many, but if we have problems later we can index them @@ -195,26 +200,27 @@ func (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.Grou return err } for _, crd := range crds { - if crd.Spec.Version == groupVersion.Version && crd.Spec.Group == groupVersion.Group { - found = true - break + if crd.Spec.Group != groupVersion.Group { + continue + } + for _, version := range crd.Spec.Versions { + if version.Name != groupVersion.Version || !version.Served { + continue + } + + c.apiServiceRegistration.AddAPIServiceToSync(&apiregistration.APIService{ + ObjectMeta: metav1.ObjectMeta{Name: apiServiceName}, + Spec: apiregistration.APIServiceSpec{ + Group: groupVersion.Group, + Version: groupVersion.Version, + GroupPriorityMinimum: 1000, // CRDs should have relatively low priority + VersionPriority: 100, // CRDs will be sorted by kube-like versions like any other APIService with the same VersionPriority + }, + }) + return nil } } - if !found { - c.apiServiceRegistration.RemoveAPIServiceToSync(apiServiceName) - return nil - } - - c.apiServiceRegistration.AddAPIServiceToSync(&apiregistration.APIService{ - ObjectMeta: metav1.ObjectMeta{Name: apiServiceName}, - Spec: apiregistration.APIServiceSpec{ - Group: groupVersion.Group, - Version: groupVersion.Version, - GroupPriorityMinimum: 1000, // CRDs should have relatively low priority - VersionPriority: 100, // CRDs should have relatively low priority - }, - }) - + c.apiServiceRegistration.RemoveAPIServiceToSync(apiServiceName) return nil } diff --git a/pkg/master/controller/crdregistration/crdregistration_controller_test.go b/pkg/master/controller/crdregistration/crdregistration_controller_test.go index 1e2d8df5879..0a06c147e9c 100644 --- a/pkg/master/controller/crdregistration/crdregistration_controller_test.go +++ b/pkg/master/controller/crdregistration/crdregistration_controller_test.go @@ -42,8 +42,16 @@ func TestHandleVersionUpdate(t *testing.T) { startingCRDs: []*apiextensions.CustomResourceDefinition{ { Spec: apiextensions.CustomResourceDefinitionSpec{ - Group: "group.com", - Version: "v1", + Group: "group.com", + // Version field is deprecated and crd registration won't rely on it at all. 
+ // defaulting route will fill up Versions field if user only provided version field. + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "v1", + Served: true, + Storage: true, + }, + }, }, }, }, @@ -66,8 +74,14 @@ func TestHandleVersionUpdate(t *testing.T) { startingCRDs: []*apiextensions.CustomResourceDefinition{ { Spec: apiextensions.CustomResourceDefinitionSpec{ - Group: "group.com", - Version: "v1", + Group: "group.com", + Versions: []apiextensions.CustomResourceDefinitionVersion{ + { + Name: "v1", + Served: true, + Storage: true, + }, + }, }, }, }, @@ -98,7 +112,6 @@ func TestHandleVersionUpdate(t *testing.T) { t.Errorf("%s expected %v, got %v", test.name, test.expectedRemoved, registration.removed) } } - } type fakeAPIServiceRegistration struct { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go index f6b246cf93a..0fe919ab64e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go @@ -42,6 +42,28 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { obj.Names.ListKind = obj.Names.Kind + "List" } + if len(obj.Versions) == 0 && len(obj.Version) != 0 { + obj.Versions = []apiextensions.CustomResourceDefinitionVersion{ + { + Name: obj.Version, + Served: true, + Storage: true, + }, + } + } else if len(obj.Versions) != 0 { + obj.Version = obj.Versions[0].Name + } + }, + func(obj *apiextensions.CustomResourceDefinition, c fuzz.Continue) { + c.FuzzNoCustom(obj) + + if len(obj.Status.StoredVersions) == 0 { + for _, v := range obj.Spec.Versions { + if v.Storage && !apiextensions.IsStoredVersion(obj, v.Name) { + obj.Status.StoredVersions = append(obj.Status.StoredVersions, v.Name) + } + } + } }, func(obj *apiextensions.JSONSchemaProps, c fuzz.Continue) { // we cannot use c.FuzzNoCustom because of the interface{} fields. So let's loop with reflection. diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go new file mode 100644 index 00000000000..ae0776fae59 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go @@ -0,0 +1,67 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NewCRDConverter returns a new CRD converter based on the conversion settings in crd object. 
+func NewCRDConverter(crd *apiextensions.CustomResourceDefinition) (safe, unsafe runtime.ObjectConvertor) { + validVersions := map[schema.GroupVersion]bool{} + for _, version := range crd.Spec.Versions { + validVersions[schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}] = true + } + + // The only converter right now is nopConverter. More converters will be returned based on the + // CRD object when they introduced. + unsafe = &nopConverter{ + clusterScoped: crd.Spec.Scope == apiextensions.ClusterScoped, + validVersions: validVersions, + } + return &safeConverterWrapper{unsafe}, unsafe +} + +// safeConverterWrapper is a wrapper over an unsafe object converter that makes copy of the input and then delegate to the unsafe converter. +type safeConverterWrapper struct { + unsafe runtime.ObjectConvertor +} + +var _ runtime.ObjectConvertor = &nopConverter{} + +// ConvertFieldLabel delegate the call to the unsafe converter. +func (c *safeConverterWrapper) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return c.unsafe.ConvertFieldLabel(version, kind, label, value) +} + +// Convert makes a copy of in object and then delegate the call to the unsafe converter. +func (c *safeConverterWrapper) Convert(in, out, context interface{}) error { + inObject, ok := in.(runtime.Object) + if !ok { + return fmt.Errorf("input type %T in not valid for object conversion", in) + } + return c.unsafe.Convert(inObject.DeepCopyObject(), out, context) +} + +// ConvertToVersion makes a copy of in object and then delegate the call to the unsafe converter. +func (c *safeConverterWrapper) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + return c.unsafe.ConvertToVersion(in.DeepCopyObject(), target) +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/nop_converter.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/nop_converter.go new file mode 100644 index 00000000000..3a98f5c6c0f --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/nop_converter.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// nopConverter is a converter that only sets the apiVersion fields, but does not real conversion. It supports fields selectors. +type nopConverter struct { + clusterScoped bool + validVersions map[schema.GroupVersion]bool +} + +var _ runtime.ObjectConvertor = &nopConverter{} + +func (c *nopConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + // We currently only support metadata.namespace and metadata.name. 
+ switch { + case label == "metadata.name": + return label, value, nil + case !c.clusterScoped && label == "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } +} + +func (c *nopConverter) Convert(in, out, context interface{}) error { + unstructIn, ok := in.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("input type %T in not valid for unstructured conversion", in) + } + + unstructOut, ok := out.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("output type %T in not valid for unstructured conversion", out) + } + + outGVK := unstructOut.GroupVersionKind() + if !c.validVersions[outGVK.GroupVersion()] { + return fmt.Errorf("request to convert CRD from an invalid group/version: %s", outGVK.String()) + } + inGVK := unstructIn.GroupVersionKind() + if !c.validVersions[inGVK.GroupVersion()] { + return fmt.Errorf("request to convert CRD to an invalid group/version: %s", inGVK.String()) + } + + unstructOut.SetUnstructuredContent(unstructIn.UnstructuredContent()) + _, err := c.ConvertToVersion(unstructOut, outGVK.GroupVersion()) + if err != nil { + return err + } + return nil +} + +func (c *nopConverter) convertToVersion(in runtime.Object, target runtime.GroupVersioner) error { + kind := in.GetObjectKind().GroupVersionKind() + gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) + if !ok { + // TODO: should this be a typed error? + return fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) + } + if !c.validVersions[gvk.GroupVersion()] { + return fmt.Errorf("request to convert CRD to an invalid group/version: %s", gvk.String()) + } + in.GetObjectKind().SetGroupVersionKind(gvk) + return nil +} + +// ConvertToVersion converts in object to the given gvk in place and returns the same `in` object. 
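The safe/unsafe converter pair above follows a copy-then-delegate pattern: the unsafe converter mutates its input in place, and the safe wrapper deep-copies first so the caller's object is never modified. A rough, self-contained sketch, with a hypothetical deepCopyable interface standing in for runtime.Object:

package main

import "fmt"

// deepCopyable is a hypothetical stand-in for runtime.Object.
type deepCopyable interface {
	DeepCopy() deepCopyable
	SetVersion(v string)
	Version() string
}

type obj struct{ version string }

func (o *obj) DeepCopy() deepCopyable { c := *o; return &c }
func (o *obj) SetVersion(v string)    { o.version = v }
func (o *obj) Version() string        { return o.version }

// unsafeConvert rewrites the version in place, like the nop converter does.
func unsafeConvert(in deepCopyable, target string) deepCopyable {
	in.SetVersion(target)
	return in
}

// safeConvert copies first and converts the copy, like the safe wrapper does.
func safeConvert(in deepCopyable, target string) deepCopyable {
	return unsafeConvert(in.DeepCopy(), target)
}

func main() {
	o := &obj{version: "v1beta1"}
	out := safeConvert(o, "v1beta2")
	fmt.Println(o.Version(), out.Version()) // v1beta1 v1beta2, the caller's object is untouched
}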
+func (c *nopConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + var err error + // Run the converter on the list items instead of list itself + if list, ok := in.(*unstructured.UnstructuredList); ok { + err = list.EachListItem(func(item runtime.Object) error { + return c.convertToVersion(item, target) + }) + } + err = c.convertToVersion(in, target) + return in, err +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go index 976d54754a1..e3b3d0a4413 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go @@ -18,6 +18,7 @@ package apiserver import ( "fmt" + "sort" "time" "github.com/golang/glog" @@ -28,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/version" "k8s.io/apiserver/pkg/endpoints/discovery" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -75,6 +77,7 @@ func (c *DiscoveryController) sync(version schema.GroupVersion) error { apiVersionsForDiscovery := []metav1.GroupVersionForDiscovery{} apiResourcesForDiscovery := []metav1.APIResource{} + versionsForDiscoveryMap := map[metav1.GroupVersion]bool{} crds, err := c.crdLister.List(labels.Everything()) if err != nil { @@ -90,13 +93,29 @@ func (c *DiscoveryController) sync(version schema.GroupVersion) error { if crd.Spec.Group != version.Group { continue } - foundGroup = true - apiVersionsForDiscovery = append(apiVersionsForDiscovery, metav1.GroupVersionForDiscovery{ - GroupVersion: crd.Spec.Group + "/" + crd.Spec.Version, - Version: crd.Spec.Version, - }) - if crd.Spec.Version != version.Version { + foundThisVersion := false + for _, v := range crd.Spec.Versions { + if !v.Served { + continue + } + // If there is any Served version, that means the group should show up in discovery + foundGroup = true + + gv := metav1.GroupVersion{Group: crd.Spec.Group, Version: v.Name} + if !versionsForDiscoveryMap[gv] { + versionsForDiscoveryMap[gv] = true + apiVersionsForDiscovery = append(apiVersionsForDiscovery, metav1.GroupVersionForDiscovery{ + GroupVersion: crd.Spec.Group + "/" + v.Name, + Version: v.Name, + }) + } + if v.Name == version.Version { + foundThisVersion = true + } + } + + if !foundThisVersion { continue } foundVersion = true @@ -144,10 +163,13 @@ func (c *DiscoveryController) sync(version schema.GroupVersion) error { return nil } + sortGroupDiscoveryByKubeAwareVersion(apiVersionsForDiscovery) + apiGroup := metav1.APIGroup{ Name: version.Group, Versions: apiVersionsForDiscovery, - // the preferred versions for a group is arbitrary since there cannot be duplicate resources + // the preferred versions for a group is the first item in + // apiVersionsForDiscovery after it put in the right ordered PreferredVersion: apiVersionsForDiscovery[0], } c.groupHandler.setDiscovery(version.Group, discovery.NewAPIGroupHandler(Codecs, apiGroup)) @@ -163,6 +185,12 @@ func (c *DiscoveryController) sync(version schema.GroupVersion) error { return nil } +func sortGroupDiscoveryByKubeAwareVersion(gd []metav1.GroupVersionForDiscovery) { + sort.Slice(gd, func(i, j int) bool { + return version.CompareKubeAwareVersionStrings(gd[i].Version, gd[j].Version) > 0 + 
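A small illustration of the kube-aware ordering used for discovery here, assuming only that CompareKubeAwareVersionStrings from k8s.io/apimachinery/pkg/version behaves as imported above: GA versions sort before beta, beta before alpha, larger major/minor numbers first, and names that are not kube-like sort last.

package main

import (
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	names := []string{"foo1", "v1", "v11alpha2", "v2", "v1beta1", "v10beta3"}
	// Highest-priority version first, so the first element becomes the preferred version.
	sort.Slice(names, func(i, j int) bool {
		return version.CompareKubeAwareVersionStrings(names[i], names[j]) > 0
	})
	fmt.Println(names) // [v2 v1 v10beta3 v1beta1 v11alpha2 foo1]
}

This matches the example ordering documented for the Versions field in the API comments later in this series.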
}) +} + func (c *DiscoveryController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() @@ -207,7 +235,9 @@ func (c *DiscoveryController) processNextWorkItem() bool { } func (c *DiscoveryController) enqueue(obj *apiextensions.CustomResourceDefinition) { - c.queue.Add(schema.GroupVersion{Group: obj.Spec.Group, Version: obj.Spec.Version}) + for _, v := range obj.Spec.Versions { + c.queue.Add(schema.GroupVersion{Group: obj.Spec.Group, Version: v.Name}) + } } func (c *DiscoveryController) addCustomResourceDefinition(obj interface{}) { @@ -216,10 +246,14 @@ func (c *DiscoveryController) addCustomResourceDefinition(obj interface{}) { c.enqueue(castObj) } -func (c *DiscoveryController) updateCustomResourceDefinition(obj, _ interface{}) { - castObj := obj.(*apiextensions.CustomResourceDefinition) - glog.V(4).Infof("Updating customresourcedefinition %s", castObj.Name) - c.enqueue(castObj) +func (c *DiscoveryController) updateCustomResourceDefinition(oldObj, newObj interface{}) { + castNewObj := newObj.(*apiextensions.CustomResourceDefinition) + castOldObj := oldObj.(*apiextensions.CustomResourceDefinition) + glog.V(4).Infof("Updating customresourcedefinition %s", castOldObj.Name) + // Enqueue both old and new object to make sure we remove and add appropriate Versions. + // The working queue will resolve any duplicates and only changes will stay in the queue. + c.enqueue(castNewObj) + c.enqueue(castOldObj) } func (c *DiscoveryController) deleteCustomResourceDefinition(obj interface{}) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index cc24f1974f4..83706c4b2a6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -58,6 +58,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + "k8s.io/apiextensions-apiserver/pkg/apiserver/conversion" apiservervalidation "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" informers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion" listers "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion" @@ -94,11 +95,20 @@ type crdInfo struct { spec *apiextensions.CustomResourceDefinitionSpec acceptedNames *apiextensions.CustomResourceDefinitionNames - storage customresource.CustomResourceStorage + // Storage per version + storages map[string]customresource.CustomResourceStorage - requestScope handlers.RequestScope - scaleRequestScope handlers.RequestScope - statusRequestScope handlers.RequestScope + // Request scope per version + requestScopes map[string]handlers.RequestScope + + // Scale scope per version + scaleRequestScopes map[string]handlers.RequestScope + + // Status scope per version + statusRequestScopes map[string]handlers.RequestScope + + // storageVersion is the CRD version used when storing the object in etcd. 
+ storageVersion string } // crdStorageMap goes from customresourcedefinition to its storage @@ -120,7 +130,6 @@ func NewCustomResourceDefinitionHandler( restOptionsGetter: restOptionsGetter, admission: admission, } - crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: ret.updateCustomResourceDefinition, DeleteFunc: func(obj interface{}) { @@ -168,7 +177,7 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - if crd.Spec.Version != requestInfo.APIVersion { + if !apiextensions.HasServedCRDVersion(crd, requestInfo.APIVersion) { r.delegate.ServeHTTP(w, req) return } @@ -214,8 +223,8 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } func (r *crdHandler) serveResource(w http.ResponseWriter, req *http.Request, requestInfo *apirequest.RequestInfo, crdInfo *crdInfo, terminating bool, supportedTypes []string) http.HandlerFunc { - requestScope := crdInfo.requestScope - storage := crdInfo.storage.CustomResource + requestScope := crdInfo.requestScopes[requestInfo.APIVersion] + storage := crdInfo.storages[requestInfo.APIVersion].CustomResource minRequestTimeout := 1 * time.Minute switch requestInfo.Verb { @@ -250,8 +259,8 @@ func (r *crdHandler) serveResource(w http.ResponseWriter, req *http.Request, req } func (r *crdHandler) serveStatus(w http.ResponseWriter, req *http.Request, requestInfo *apirequest.RequestInfo, crdInfo *crdInfo, terminating bool, supportedTypes []string) http.HandlerFunc { - requestScope := crdInfo.statusRequestScope - storage := crdInfo.storage.Status + requestScope := crdInfo.statusRequestScopes[requestInfo.APIVersion] + storage := crdInfo.storages[requestInfo.APIVersion].Status switch requestInfo.Verb { case "get": @@ -267,8 +276,8 @@ func (r *crdHandler) serveStatus(w http.ResponseWriter, req *http.Request, reque } func (r *crdHandler) serveScale(w http.ResponseWriter, req *http.Request, requestInfo *apirequest.RequestInfo, crdInfo *crdInfo, terminating bool, supportedTypes []string) http.HandlerFunc { - requestScope := crdInfo.scaleRequestScope - storage := crdInfo.storage.Scale + requestScope := crdInfo.scaleRequestScopes[requestInfo.APIVersion] + storage := crdInfo.storages[requestInfo.APIVersion].Scale switch requestInfo.Verb { case "get": @@ -306,8 +315,10 @@ func (r *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) // as it is used without locking elsewhere. storageMap2 := storageMap.clone() if oldInfo, ok := storageMap2[types.UID(oldCRD.UID)]; ok { - // destroy only the main storage. Those for the subresources share cacher and etcd clients. - oldInfo.storage.CustomResource.DestroyFunc() + for _, storage := range oldInfo.storages { + // destroy only the main storage. Those for the subresources share cacher and etcd clients. + storage.CustomResource.DestroyFunc() + } delete(storageMap2, types.UID(oldCRD.UID)) } @@ -338,9 +349,11 @@ func (r *crdHandler) removeDeadStorage() { } } if !found { - glog.V(4).Infof("Removing dead CRD storage for %v", s.requestScope.Resource) - // destroy only the main storage. Those for the subresources share cacher and etcd clients. - s.storage.CustomResource.DestroyFunc() + for version, storage := range s.storages { + glog.V(4).Infof("Removing dead CRD storage for %v", s.requestScopes[version].Resource) + // destroy only the main storage. Those for the subresources share cacher and etcd clients. 
+ storage.CustomResource.DestroyFunc() + } delete(storageMap2, uid) } } @@ -354,7 +367,7 @@ func (r *crdHandler) GetCustomResourceListerCollectionDeleter(crd *apiextensions if err != nil { return nil, err } - return info.storage.CustomResource, nil + return info.storages[info.storageVersion].CustomResource, nil } func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResourceDefinition) (*crdInfo, error) { @@ -371,140 +384,158 @@ func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResource return ret, nil } - // In addition to Unstructured objects (Custom Resources), we also may sometimes need to - // decode unversioned Options objects, so we delegate to parameterScheme for such types. - parameterScheme := runtime.NewScheme() - parameterScheme.AddUnversionedTypes(schema.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Version}, - &metav1.ListOptions{}, - &metav1.ExportOptions{}, - &metav1.GetOptions{}, - &metav1.DeleteOptions{}, - ) - parameterCodec := runtime.NewParameterCodec(parameterScheme) - - kind := schema.GroupVersionKind{Group: crd.Spec.Group, Version: crd.Spec.Version, Kind: crd.Status.AcceptedNames.Kind} - typer := UnstructuredObjectTyper{ - Delegate: parameterScheme, - UnstructuredTyper: discovery.NewUnstructuredObjectTyper(), - } - creator := unstructuredCreator{} - - validator, _, err := apiservervalidation.NewSchemaValidator(crd.Spec.Validation) + storageVersion, err := apiextensions.GetCRDStorageVersion(crd) if err != nil { return nil, err } - var statusSpec *apiextensions.CustomResourceSubresourceStatus - var statusValidator *validate.SchemaValidator - if utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CustomResourceSubresources) && crd.Spec.Subresources != nil && crd.Spec.Subresources.Status != nil { - statusSpec = crd.Spec.Subresources.Status + // Scope/Storages per version. + requestScopes := map[string]handlers.RequestScope{} + storages := map[string]customresource.CustomResourceStorage{} + statusScopes := map[string]handlers.RequestScope{} + scaleScopes := map[string]handlers.RequestScope{} - // for the status subresource, validate only against the status schema - if crd.Spec.Validation != nil && crd.Spec.Validation.OpenAPIV3Schema != nil && crd.Spec.Validation.OpenAPIV3Schema.Properties != nil { - if statusSchema, ok := crd.Spec.Validation.OpenAPIV3Schema.Properties["status"]; ok { - openapiSchema := &spec.Schema{} - if err := apiservervalidation.ConvertJSONSchemaProps(&statusSchema, openapiSchema); err != nil { - return nil, err + for _, v := range crd.Spec.Versions { + safeConverter, unsafeConverter := conversion.NewCRDConverter(crd) + // In addition to Unstructured objects (Custom Resources), we also may sometimes need to + // decode unversioned Options objects, so we delegate to parameterScheme for such types. 
+ parameterScheme := runtime.NewScheme() + parameterScheme.AddUnversionedTypes(schema.GroupVersion{Group: crd.Spec.Group, Version: v.Name}, + &metav1.ListOptions{}, + &metav1.ExportOptions{}, + &metav1.GetOptions{}, + &metav1.DeleteOptions{}, + ) + parameterCodec := runtime.NewParameterCodec(parameterScheme) + + kind := schema.GroupVersionKind{Group: crd.Spec.Group, Version: v.Name, Kind: crd.Status.AcceptedNames.Kind} + typer := newUnstructuredObjectTyper(parameterScheme) + creator := unstructuredCreator{} + + validator, _, err := apiservervalidation.NewSchemaValidator(crd.Spec.Validation) + if err != nil { + return nil, err + } + + var statusSpec *apiextensions.CustomResourceSubresourceStatus + var statusValidator *validate.SchemaValidator + if utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CustomResourceSubresources) && crd.Spec.Subresources != nil && crd.Spec.Subresources.Status != nil { + statusSpec = crd.Spec.Subresources.Status + + // for the status subresource, validate only against the status schema + if crd.Spec.Validation != nil && crd.Spec.Validation.OpenAPIV3Schema != nil && crd.Spec.Validation.OpenAPIV3Schema.Properties != nil { + if statusSchema, ok := crd.Spec.Validation.OpenAPIV3Schema.Properties["status"]; ok { + openapiSchema := &spec.Schema{} + if err := apiservervalidation.ConvertJSONSchemaProps(&statusSchema, openapiSchema); err != nil { + return nil, err + } + statusValidator = validate.NewSchemaValidator(openapiSchema, nil, "", strfmt.Default) } - statusValidator = validate.NewSchemaValidator(openapiSchema, nil, "", strfmt.Default) } } - } - var scaleSpec *apiextensions.CustomResourceSubresourceScale - if utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CustomResourceSubresources) && crd.Spec.Subresources != nil && crd.Spec.Subresources.Scale != nil { - scaleSpec = crd.Spec.Subresources.Scale - } + var scaleSpec *apiextensions.CustomResourceSubresourceScale + if utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CustomResourceSubresources) && crd.Spec.Subresources != nil && crd.Spec.Subresources.Scale != nil { + scaleSpec = crd.Spec.Subresources.Scale + } - // TODO: identify how to pass printer specification from the CRD - table, err := tableconvertor.New(nil) - if err != nil { - glog.V(2).Infof("The CRD for %v has an invalid printer specification, falling back to default printing: %v", kind, err) - } + // TODO: identify how to pass printer specification from the CRD + table, err := tableconvertor.New(nil) + if err != nil { + glog.V(2).Infof("The CRD for %v has an invalid printer specification, falling back to default printing: %v", kind, err) + } - customResourceStorage := customresource.NewStorage( - schema.GroupResource{Group: crd.Spec.Group, Resource: crd.Status.AcceptedNames.Plural}, - schema.GroupVersionKind{Group: crd.Spec.Group, Version: crd.Spec.Version, Kind: crd.Status.AcceptedNames.ListKind}, - customresource.NewStrategy( - typer, - crd.Spec.Scope == apiextensions.NamespaceScoped, - kind, - validator, - statusValidator, - statusSpec, - scaleSpec, - ), - r.restOptionsGetter, - crd.Status.AcceptedNames.Categories, - table, - ) + storages[v.Name] = customresource.NewStorage( + schema.GroupResource{Group: crd.Spec.Group, Resource: crd.Status.AcceptedNames.Plural}, + schema.GroupVersionKind{Group: crd.Spec.Group, Version: v.Name, Kind: crd.Status.AcceptedNames.ListKind}, + customresource.NewStrategy( + typer, + crd.Spec.Scope == apiextensions.NamespaceScoped, + kind, + validator, + statusValidator, + statusSpec, + 
scaleSpec, + ), + crdConversionRESTOptionsGetter{ + RESTOptionsGetter: r.restOptionsGetter, + converter: safeConverter, + decoderVersion: schema.GroupVersion{Group: crd.Spec.Group, Version: v.Name}, + encoderVersion: schema.GroupVersion{Group: crd.Spec.Group, Version: storageVersion}, + }, + crd.Status.AcceptedNames.Categories, + table, + ) - selfLinkPrefix := "" - switch crd.Spec.Scope { - case apiextensions.ClusterScoped: - selfLinkPrefix = "/" + path.Join("apis", crd.Spec.Group, crd.Spec.Version) + "/" + crd.Status.AcceptedNames.Plural + "/" - case apiextensions.NamespaceScoped: - selfLinkPrefix = "/" + path.Join("apis", crd.Spec.Group, crd.Spec.Version, "namespaces") + "/" - } + selfLinkPrefix := "" + switch crd.Spec.Scope { + case apiextensions.ClusterScoped: + selfLinkPrefix = "/" + path.Join("apis", crd.Spec.Group, v.Name) + "/" + crd.Status.AcceptedNames.Plural + "/" + case apiextensions.NamespaceScoped: + selfLinkPrefix = "/" + path.Join("apis", crd.Spec.Group, v.Name, "namespaces") + "/" + } - clusterScoped := crd.Spec.Scope == apiextensions.ClusterScoped + clusterScoped := crd.Spec.Scope == apiextensions.ClusterScoped - requestScope := handlers.RequestScope{ - Namer: handlers.ContextBasedNaming{ + requestScopes[v.Name] = handlers.RequestScope{ + Namer: handlers.ContextBasedNaming{ + SelfLinker: meta.NewAccessor(), + ClusterScoped: clusterScoped, + SelfLinkPathPrefix: selfLinkPrefix, + }, + Serializer: unstructuredNegotiatedSerializer{typer: typer, creator: creator, converter: safeConverter}, + ParameterCodec: parameterCodec, + + Creater: creator, + Convertor: safeConverter, + Defaulter: unstructuredDefaulter{parameterScheme}, + Typer: typer, + UnsafeConvertor: unsafeConverter, + + Resource: schema.GroupVersionResource{Group: crd.Spec.Group, Version: v.Name, Resource: crd.Status.AcceptedNames.Plural}, + Kind: kind, + + MetaGroupVersion: metav1.SchemeGroupVersion, + + TableConvertor: storages[v.Name].CustomResource, + } + + // override scaleSpec subresource values + // shallow copy + scaleScope := requestScopes[v.Name] + scaleConverter := scale.NewScaleConverter() + scaleScope.Subresource = "scale" + scaleScope.Serializer = serializer.NewCodecFactory(scaleConverter.Scheme()) + scaleScope.Kind = autoscalingv1.SchemeGroupVersion.WithKind("Scale") + scaleScope.Namer = handlers.ContextBasedNaming{ SelfLinker: meta.NewAccessor(), ClusterScoped: clusterScoped, SelfLinkPathPrefix: selfLinkPrefix, - }, + SelfLinkPathSuffix: "/scale", + } + scaleScopes[v.Name] = scaleScope - Serializer: unstructuredNegotiatedSerializer{typer: typer, creator: creator}, - ParameterCodec: parameterCodec, - - Creater: creator, - Convertor: crdObjectConverter{ - UnstructuredObjectConverter: unstructured.UnstructuredObjectConverter{}, - clusterScoped: clusterScoped, - }, - Defaulter: unstructuredDefaulter{parameterScheme}, - Typer: typer, - UnsafeConvertor: unstructured.UnstructuredObjectConverter{}, - - Resource: schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Status.AcceptedNames.Plural}, - Kind: kind, - - MetaGroupVersion: metav1.SchemeGroupVersion, - - TableConvertor: customResourceStorage.CustomResource, + // override status subresource values + // shallow copy + statusScope := requestScopes[v.Name] + statusScope.Subresource = "status" + statusScope.Namer = handlers.ContextBasedNaming{ + SelfLinker: meta.NewAccessor(), + ClusterScoped: clusterScoped, + SelfLinkPathPrefix: selfLinkPrefix, + SelfLinkPathSuffix: "/status", + } + statusScopes[v.Name] = statusScope } 
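The loop above builds all serving state once per version and keys it by version name; at serve time the handler picks the entry matching the request's APIVersion and delegates when the version is not served. A minimal sketch of that dispatch pattern, with a hypothetical requestScope stand-in instead of handlers.RequestScope:

package main

import "fmt"

// requestScope is a tiny stand-in for handlers.RequestScope.
type requestScope struct{ selfLinkPrefix string }

func main() {
	group, served := "mygroup.example.com", []string{"v1beta1", "v1beta2"}

	// Serving state is built once per version and keyed by version name.
	scopes := map[string]requestScope{}
	for _, v := range served {
		scopes[v] = requestScope{selfLinkPrefix: "/apis/" + group + "/" + v + "/namespaces/"}
	}

	// At serve time the scope is looked up with the version from the request path.
	requested := "v1beta2"
	if scope, ok := scopes[requested]; ok {
		fmt.Println(scope.selfLinkPrefix)
	} else {
		fmt.Println("version not served, delegate to the next handler")
	}
}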
ret := &crdInfo{ - spec: &crd.Spec, - acceptedNames: &crd.Status.AcceptedNames, - - storage: customResourceStorage, - requestScope: requestScope, - scaleRequestScope: requestScope, // shallow copy - statusRequestScope: requestScope, // shallow copy - } - - // override scaleSpec subresource values - scaleConverter := scale.NewScaleConverter() - ret.scaleRequestScope.Subresource = "scale" - ret.scaleRequestScope.Serializer = serializer.NewCodecFactory(scaleConverter.Scheme()) - ret.scaleRequestScope.Kind = autoscalingv1.SchemeGroupVersion.WithKind("Scale") - ret.scaleRequestScope.Namer = handlers.ContextBasedNaming{ - SelfLinker: meta.NewAccessor(), - ClusterScoped: clusterScoped, - SelfLinkPathPrefix: selfLinkPrefix, - SelfLinkPathSuffix: "/scale", - } - - // override status subresource values - ret.statusRequestScope.Subresource = "status" - ret.statusRequestScope.Namer = handlers.ContextBasedNaming{ - SelfLinker: meta.NewAccessor(), - ClusterScoped: clusterScoped, - SelfLinkPathPrefix: selfLinkPrefix, - SelfLinkPathSuffix: "/status", + spec: &crd.Spec, + acceptedNames: &crd.Status.AcceptedNames, + storages: storages, + requestScopes: requestScopes, + scaleRequestScopes: scaleScopes, + statusRequestScopes: statusScopes, + storageVersion: storageVersion, } // Copy because we cannot write to storageMap without a race @@ -517,27 +548,10 @@ func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResource return ret, nil } -// crdObjectConverter is a converter that supports field selectors for CRDs. -type crdObjectConverter struct { - unstructured.UnstructuredObjectConverter - clusterScoped bool -} - -func (c crdObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - // We currently only support metadata.namespace and metadata.name. - switch { - case label == "metadata.name": - return label, value, nil - case !c.clusterScoped && label == "metadata.namespace": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } -} - type unstructuredNegotiatedSerializer struct { - typer runtime.ObjectTyper - creator runtime.ObjectCreater + typer runtime.ObjectTyper + creator runtime.ObjectCreater + converter runtime.ObjectConvertor } func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { @@ -562,7 +576,7 @@ func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.Serial } func (s unstructuredNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { - return versioning.NewDefaultingCodecForScheme(Scheme, encoder, nil, gv, nil) + return versioning.NewCodec(encoder, nil, s.converter, Scheme, Scheme, Scheme, gv, nil) } func (s unstructuredNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { @@ -574,6 +588,13 @@ type UnstructuredObjectTyper struct { UnstructuredTyper runtime.ObjectTyper } +func newUnstructuredObjectTyper(Delegate runtime.ObjectTyper) UnstructuredObjectTyper { + return UnstructuredObjectTyper{ + Delegate: Delegate, + UnstructuredTyper: discovery.NewUnstructuredObjectTyper(), + } +} + func (t UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { // Delegate for things other than Unstructured. 
if _, ok := obj.(runtime.Unstructured); !ok { @@ -640,3 +661,20 @@ func (in crdStorageMap) clone() crdStorageMap { } return out } + +// crdConversionRESTOptionsGetter overrides the codec with one using the +// provided custom converter and custom encoder and decoder version. +type crdConversionRESTOptionsGetter struct { + generic.RESTOptionsGetter + converter runtime.ObjectConvertor + encoderVersion schema.GroupVersion + decoderVersion schema.GroupVersion +} + +func (t crdConversionRESTOptionsGetter) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) { + ret, err := t.RESTOptionsGetter.GetRESTOptions(resource) + if err == nil { + ret.StorageConfig.Codec = versioning.NewCodec(ret.StorageConfig.Codec, ret.StorageConfig.Codec, t.converter, &unstructuredCreator{}, discovery.NewUnstructuredObjectTyper(), &unstructuredDefaulter{delegate: Scheme}, t.encoderVersion, t.decoderVersion) + } + return ret, err +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler_test.go index 81c3ac7050b..c275e93b12c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler_test.go @@ -19,7 +19,8 @@ package apiserver import ( "testing" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + conversion "k8s.io/apiextensions-apiserver/pkg/apiserver/conversion" ) func TestConvertFieldLabel(t *testing.T) { @@ -64,10 +65,14 @@ func TestConvertFieldLabel(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - c := crdObjectConverter{ - UnstructuredObjectConverter: unstructured.UnstructuredObjectConverter{}, - clusterScoped: test.clusterScoped, + crd := apiextensions.CustomResourceDefinition{} + + if test.clusterScoped { + crd.Spec.Scope = apiextensions.ClusterScoped + } else { + crd.Spec.Scope = apiextensions.NamespaceScoped } + _, c := conversion.NewCRDConverter(&crd) label, value, err := c.ConvertFieldLabel("", "", test.label, "value") if e, a := test.expectError, err != nil; e != a { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/strategy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/strategy.go index 839833e636f..a0bebb7a7b0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/strategy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition/strategy.go @@ -62,6 +62,15 @@ func (strategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { if !utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CustomResourceSubresources) { crd.Spec.Subresources = nil } + + for _, v := range crd.Spec.Versions { + if v.Storage { + if !apiextensions.IsStoredVersion(crd, v.Name) { + crd.Status.StoredVersions = append(crd.Status.StoredVersions, v.Name) + } + break + } + } } // PrepareForUpdate clears fields that are not allowed to be set by end users on update. 
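The stored-version bookkeeping added to PrepareForCreate (and, in the next hunk, PrepareForUpdate) can be sketched as follows, with stand-in types rather than the real CRD spec and status: the version flagged as storage is appended to status.storedVersions if it is not recorded yet, so etcd always has a record of every version objects may have been persisted in.

package main

import "fmt"

// crdVersion is a stand-in for the version entries in a CRD spec.
type crdVersion struct {
	Name    string
	Storage bool
}

// recordStorageVersion appends the flagged storage version to the stored list
// if it is not recorded yet, mirroring the bookkeeping in the strategy above.
func recordStorageVersion(versions []crdVersion, stored []string) []string {
	for _, v := range versions {
		if !v.Storage {
			continue
		}
		for _, s := range stored {
			if s == v.Name {
				return stored // already recorded
			}
		}
		return append(stored, v.Name)
	}
	return stored
}

func main() {
	versions := []crdVersion{{Name: "v1beta1"}, {Name: "v1beta2", Storage: true}}
	fmt.Println(recordStorageVersion(versions, []string{"v1beta1"})) // [v1beta1 v1beta2]
}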
@@ -90,6 +99,15 @@ func (strategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { newCRD.Spec.Subresources = nil oldCRD.Spec.Subresources = nil } + + for _, v := range newCRD.Spec.Versions { + if v.Storage { + if !apiextensions.IsStoredVersion(newCRD, v.Name) { + newCRD.Status.StoredVersions = append(newCRD.Status.StoredVersions, v.Name) + } + break + } + } } // Validate validates a new CustomResourceDefinition. diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go index 7d0ab684fe9..9cdb3393f05 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go @@ -68,6 +68,42 @@ func instantiateCustomResource(t *testing.T, instanceToCreate *unstructured.Unst return createdInstance, nil } +func instantiateVersionedCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition, version string) (*unstructured.Unstructured, error) { + createdInstance, err := client.Create(instanceToCreate) + if err != nil { + t.Logf("%#v", createdInstance) + return nil, err + } + createdObjectMeta, err := meta.Accessor(createdInstance) + if err != nil { + t.Fatal(err) + } + // it should have a UUID + if len(createdObjectMeta.GetUID()) == 0 { + t.Errorf("missing uuid: %#v", createdInstance) + } + createdTypeMeta, err := meta.TypeAccessor(createdInstance) + if err != nil { + t.Fatal(err) + } + if e, a := definition.Spec.Group+"/"+version, createdTypeMeta.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := definition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + return createdInstance, nil +} + +func NewNamespacedCustomResourceVersionedClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition, version string) dynamic.ResourceInterface { + gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version, Resource: crd.Spec.Names.Plural} + + if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped { + return client.Resource(gvr).Namespace(ns) + } + return client.Resource(gvr) +} + func NewNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) dynamic.ResourceInterface { gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural} diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go index 2502b9a9006..2876bdc6170 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go @@ -100,6 +100,62 @@ func NewNoxuInstance(namespace, name string) *unstructured.Unstructured { } } +func NewMultipleVersionNoxuCRD(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "noxus.mygroup.example.com"}, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: "mygroup.example.com", + Version: "v1beta1", + Names: 
apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: "noxus", + Singular: "nonenglishnoxu", + Kind: "WishIHadChosenNoxu", + ShortNames: []string{"foo", "bar", "abc", "def"}, + ListKind: "NoxuItemList", + Categories: []string{"all"}, + }, + Scope: scope, + Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Served: true, + Storage: false, + }, + { + Name: "v1beta2", + Served: true, + Storage: true, + }, + { + Name: "v0", + Served: false, + Storage: false, + }, + }, + }, + } +} + +func NewVersionedNoxuInstance(namespace, name, version string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "mygroup.example.com/" + version, + "kind": "WishIHadChosenNoxu", + "metadata": map[string]interface{}{ + "namespace": namespace, + "name": name, + }, + "content": map[string]interface{}{ + "key": "value", + }, + "num": map[string]interface{}{ + "num1": noxuInstanceNum, + "num2": 1000000, + }, + }, + } +} + func NewNoxu2CustomResourceDefinition(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { return &apiextensionsv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: "noxus2.mygroup.example.com"}, diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go new file mode 100644 index 00000000000..a4dbb0d8ce5 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go @@ -0,0 +1,304 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "reflect" + "testing" + "time" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/test/integration/testserver" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" +) + +func TestVersionedNamspacedScopedCRD(t *testing.T) { + stopCh, apiExtensionClient, dynamicClient, err := testserver.StartDefaultServerWithClients() + if err != nil { + t.Fatal(err) + } + defer close(stopCh) + + noxuDefinition := testserver.NewMultipleVersionNoxuCRD(apiextensionsv1beta1.NamespaceScoped) + err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + if err != nil { + t.Fatal(err) + } + + ns := "not-the-default" + testSimpleVersionedCRUD(t, ns, noxuDefinition, dynamicClient) +} + +func TestVersionedClusterScopedCRD(t *testing.T) { + stopCh, apiExtensionClient, dynamicClient, err := testserver.StartDefaultServerWithClients() + if err != nil { + t.Fatal(err) + } + defer close(stopCh) + + noxuDefinition := testserver.NewMultipleVersionNoxuCRD(apiextensionsv1beta1.ClusterScoped) + err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + if err != nil { + t.Fatal(err) + } + + ns := "" + testSimpleVersionedCRUD(t, ns, noxuDefinition, dynamicClient) +} + +func TestStoragedVersionInNamespacedCRDStatus(t *testing.T) { + noxuDefinition := testserver.NewMultipleVersionNoxuCRD(apiextensionsv1beta1.NamespaceScoped) + ns := "not-the-default" + testStoragedVersionInCRDStatus(t, ns, noxuDefinition) +} + +func TestStoragedVersionInClusterScopedCRDStatus(t *testing.T) { + noxuDefinition := testserver.NewMultipleVersionNoxuCRD(apiextensionsv1beta1.ClusterScoped) + ns := "" + testStoragedVersionInCRDStatus(t, ns, noxuDefinition) +} + +func testStoragedVersionInCRDStatus(t *testing.T, ns string, noxuDefinition *apiextensionsv1beta1.CustomResourceDefinition) { + versionsV1Beta1Storage := []apiextensionsv1beta1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Served: true, + Storage: true, + }, + { + Name: "v1beta2", + Served: true, + Storage: false, + }, + } + versionsV1Beta2Storage := []apiextensionsv1beta1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Served: true, + Storage: false, + }, + { + Name: "v1beta2", + Served: true, + Storage: true, + }, + } + stopCh, apiExtensionClient, dynamicClient, err := testserver.StartDefaultServerWithClients() + if err != nil { + t.Fatal(err) + } + defer close(stopCh) + + noxuDefinition.Spec.Versions = versionsV1Beta1Storage + err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + if err != nil { + t.Fatal(err) + } + + // The storage version list should be initilized to storage version + crd, err := testserver.GetCustomResourceDefinition(noxuDefinition, apiExtensionClient) + if err != nil { + t.Fatal(err) + } + if e, a := []string{"v1beta1"}, crd.Status.StoredVersions; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + + // Changing CRD storage version should be reflected immediately + crd.Spec.Versions = versionsV1Beta2Storage + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + if err != nil { + t.Fatal(err) + } + crd, err = testserver.GetCustomResourceDefinition(noxuDefinition, apiExtensionClient) + if err != nil { + t.Fatal(err) + } + if e, a := []string{"v1beta1", 
"v1beta2"}, crd.Status.StoredVersions; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + + err = testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient) + if err != nil { + t.Fatal(err) + } +} + +func testSimpleVersionedCRUD(t *testing.T, ns string, noxuDefinition *apiextensionsv1beta1.CustomResourceDefinition, dynamicClient dynamic.Interface) { + noxuResourceClients := map[string]dynamic.ResourceInterface{} + noxuWatchs := map[string]watch.Interface{} + disbaledVersions := map[string]bool{} + for _, v := range noxuDefinition.Spec.Versions { + disbaledVersions[v.Name] = !v.Served + } + for _, v := range noxuDefinition.Spec.Versions { + noxuResourceClients[v.Name] = NewNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name) + + noxuWatch, err := noxuResourceClients[v.Name].Watch(metav1.ListOptions{}) + if disbaledVersions[v.Name] { + if err == nil { + t.Errorf("expected the watch creation fail for disabled version %s", v.Name) + } + } else { + if err != nil { + t.Fatal(err) + } + noxuWatchs[v.Name] = noxuWatch + } + } + defer func() { + for _, w := range noxuWatchs { + w.Stop() + } + }() + + for version, noxuResourceClient := range noxuResourceClients { + createdNoxuInstance, err := instantiateVersionedCustomResource(t, testserver.NewVersionedNoxuInstance(ns, "foo", version), noxuResourceClient, noxuDefinition, version) + if disbaledVersions[version] { + if err == nil { + t.Errorf("expected the CR creation fail for disabled version %s", version) + } + continue + } + if err != nil { + t.Fatalf("unable to create noxu Instance:%v", err) + } + if e, a := noxuDefinition.Spec.Group+"/"+version, createdNoxuInstance.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + for watchVersion, noxuWatch := range noxuWatchs { + select { + case watchEvent := <-noxuWatch.ResultChan(): + if e, a := watch.Added, watchEvent.Type; e != a { + t.Errorf("expected %v, got %v", e, a) + break + } + createdObjectMeta, err := meta.Accessor(watchEvent.Object) + if err != nil { + t.Fatal(err) + } + // it should have a UUID + if len(createdObjectMeta.GetUID()) == 0 { + t.Errorf("missing uuid: %#v", watchEvent.Object) + } + if e, a := ns, createdObjectMeta.GetNamespace(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + createdTypeMeta, err := meta.TypeAccessor(watchEvent.Object) + if err != nil { + t.Fatal(err) + } + if e, a := noxuDefinition.Spec.Group+"/"+watchVersion, createdTypeMeta.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := noxuDefinition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + case <-time.After(5 * time.Second): + t.Errorf("missing watch event") + } + } + + // Check get for all versions + for version2, noxuResourceClient2 := range noxuResourceClients { + // Get test + gottenNoxuInstance, err := noxuResourceClient2.Get("foo", metav1.GetOptions{}) + + if disbaledVersions[version2] { + if err == nil { + t.Errorf("expected the get operation fail for disabled version %s", version2) + } + } else { + if err != nil { + t.Fatal(err) + } + + if e, a := version2, gottenNoxuInstance.GroupVersionKind().Version; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + } + + // List test + listWithItem, err := noxuResourceClient2.List(metav1.ListOptions{}) + if disbaledVersions[version2] { + if err == nil { + t.Errorf("expected the list operation fail for disabled version %s", version2) + } + } else { + if err != nil { + t.Fatal(err) + 
} + if e, a := 1, len(listWithItem.Items); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := version2, listWithItem.GroupVersionKind().Version; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := version2, listWithItem.Items[0].GroupVersionKind().Version; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + } + } + + // Delete test + if err := noxuResourceClient.Delete("foo", metav1.NewDeleteOptions(0)); err != nil { + t.Fatal(err) + } + + listWithoutItem, err := noxuResourceClient.List(metav1.ListOptions{}) + if err != nil { + t.Fatal(err) + } + if e, a := 0, len(listWithoutItem.Items); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + for _, noxuWatch := range noxuWatchs { + select { + case watchEvent := <-noxuWatch.ResultChan(): + if e, a := watch.Deleted, watchEvent.Type; e != a { + t.Errorf("expected %v, got %v", e, a) + break + } + deletedObjectMeta, err := meta.Accessor(watchEvent.Object) + if err != nil { + t.Fatal(err) + } + // it should have a UUID + createdObjectMeta, err := meta.Accessor(createdNoxuInstance) + if err != nil { + t.Fatal(err) + } + if e, a := createdObjectMeta.GetUID(), deletedObjectMeta.GetUID(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + case <-time.After(5 * time.Second): + t.Errorf("missing watch event") + } + } + + // Delete test + if err := noxuResourceClient.DeleteCollection(metav1.NewDeleteOptions(0), metav1.ListOptions{}); err != nil { + t.Fatal(err) + } + + } +} From c25514a1ee513e1a7ad1c3c572ab9dd32a395d28 Mon Sep 17 00:00:00 2001 From: Mehdy Bohlool Date: Mon, 14 May 2018 14:24:03 -0700 Subject: [PATCH 113/307] Generated files --- api/openapi-spec/swagger.json | 41 +- staging/src/k8s.io/api/Godeps/Godeps.json | 4 + .../apiextensions/v1beta1/generated.pb.go | 580 +++++++++++++----- .../apiextensions/v1beta1/generated.proto | 36 ++ .../v1beta1/zz_generated.conversion.go | 30 + .../v1beta1/zz_generated.deepcopy.go | 26 + .../apiextensions/zz_generated.deepcopy.go | 26 + .../pkg/apiserver/BUILD | 7 +- .../pkg/apiserver/conversion/BUILD | 31 + .../test/integration/BUILD | 1 + .../pkg/runtime/serializer/versioning/BUILD | 1 + staging/src/k8s.io/metrics/Godeps/Godeps.json | 4 + 12 files changed, 640 insertions(+), 147 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/BUILD diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b8fa0e77484..cb18bc0b792 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -84943,7 +84943,6 @@ "description": "CustomResourceDefinitionSpec describes how a user wants their resource to appear", "required": [ "group", - "version", "names", "scope" ], @@ -84969,8 +84968,15 @@ "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceValidation" }, "version": { - "description": "Version is the version this resource belongs in", + "description": "Version is the version this resource belongs in Should be always first item in Versions field if provided. Optional, but at least one of Version or Versions must be set. Deprecated: Please use `Versions`.", "type": "string" + }, + "versions": { + "description": "Versions is the list of all supported versions for this resource. If Version field is provided, this field is optional. Validation: All versions must use the same validation schema for now. i.e., top level Validation field is applied to all of these versions. 
Order: The version name will be used to compute the order. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha, and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionVersion" + } } } }, @@ -84978,7 +84984,8 @@ "description": "CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition", "required": [ "conditions", - "acceptedNames" + "acceptedNames", + "storedVersions" ], "properties": { "acceptedNames": { @@ -84991,6 +84998,34 @@ "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition" } + }, + "storedVersions": { + "description": "StoredVersions are all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so the migration controller can first finish a migration to another version (i.e. that no old objects are left in the storage), and then remove the rest of the versions from this list. None of the versions in this list can be removed from the spec.Versions field.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionVersion": { + "required": [ + "name", + "served", + "storage" + ], + "properties": { + "name": { + "description": "Name is the version name, e.g. “v1”, “v2beta1”, etc.", + "type": "string" + }, + "served": { + "description": "Served is a flag enabling/disabling this version from being served via REST APIs", + "type": "boolean" + }, + "storage": { + "description": "Storage flags the version as storage version. There must be exactly one flagged as storage version.", + "type": "boolean" } } }, diff --git a/staging/src/k8s.io/api/Godeps/Godeps.json b/staging/src/k8s.io/api/Godeps/Godeps.json index bb2cd68b30d..da0412bc1a1 100644 --- a/staging/src/k8s.io/api/Godeps/Godeps.json +++ b/staging/src/k8s.io/api/Godeps/Godeps.json @@ -118,6 +118,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index a6268abb784..b5a8c0e0002 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -31,6 +31,7 @@ limitations under the License. 
CustomResourceDefinitionNames CustomResourceDefinitionSpec CustomResourceDefinitionStatus + CustomResourceDefinitionVersion CustomResourceSubresourceScale CustomResourceSubresourceStatus CustomResourceSubresources @@ -102,54 +103,60 @@ func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } +func (*CustomResourceDefinitionVersion) ProtoMessage() {} +func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{6} +} + func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } func (*CustomResourceSubresourceScale) ProtoMessage() {} func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptorGenerated, []int{7} } func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } func (*CustomResourceSubresourceStatus) ProtoMessage() {} func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptorGenerated, []int{8} } func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } func (*CustomResourceSubresources) ProtoMessage() {} func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptorGenerated, []int{9} } func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } func (*CustomResourceValidation) ProtoMessage() {} func (*CustomResourceValidation) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptorGenerated, []int{10} } func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } func (*ExternalDocumentation) ProtoMessage() {} -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *JSON) Reset() { *m = JSON{} } func (*JSON) ProtoMessage() {} -func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } func (*JSONSchemaProps) ProtoMessage() {} -func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } func (*JSONSchemaPropsOrArray) ProtoMessage() {} -func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } func (*JSONSchemaPropsOrBool) ProtoMessage() {} -func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} func (*JSONSchemaPropsOrStringArray) 
Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptorGenerated, []int{16} } func init() { @@ -159,6 +166,7 @@ func init() { proto.RegisterType((*CustomResourceDefinitionNames)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionNames") proto.RegisterType((*CustomResourceDefinitionSpec)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionSpec") proto.RegisterType((*CustomResourceDefinitionStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionStatus") + proto.RegisterType((*CustomResourceDefinitionVersion)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionVersion") proto.RegisterType((*CustomResourceSubresourceScale)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceScale") proto.RegisterType((*CustomResourceSubresourceStatus)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresourceStatus") proto.RegisterType((*CustomResourceSubresources)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceSubresources") @@ -411,6 +419,18 @@ func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { } i += n8 } + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -449,6 +469,59 @@ func (m *CustomResourceDefinitionStatus) MarshalTo(dAtA []byte) (int, error) { return 0, err } i += n9 + if len(m.StoredVersions) > 0 { + for _, s := range m.StoredVersions { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *CustomResourceDefinitionVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceDefinitionVersion) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + if m.Served { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x18 + i++ + if m.Storage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1271,6 +1344,12 @@ func (m *CustomResourceDefinitionSpec) Size() (n int) { l = m.Subresources.Size() n += 1 + l + sovGenerated(uint64(l)) } + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1285,6 +1364,22 @@ func (m *CustomResourceDefinitionStatus) Size() (n int) { } l = m.AcceptedNames.Size() n += 1 + l + sovGenerated(uint64(l)) + if len(m.StoredVersions) > 0 { + for _, s := range m.StoredVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CustomResourceDefinitionVersion) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 return n } @@ -1619,6 +1714,7 @@ func (this *CustomResourceDefinitionSpec) String() string { `Scope:` + fmt.Sprintf("%v", this.Scope) + `,`, `Validation:` + 
strings.Replace(fmt.Sprintf("%v", this.Validation), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1630,6 +1726,19 @@ func (this *CustomResourceDefinitionStatus) String() string { s := strings.Join([]string{`&CustomResourceDefinitionStatus{`, `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CustomResourceDefinitionCondition", "CustomResourceDefinitionCondition", 1), `&`, ``, 1) + `,`, `AcceptedNames:` + strings.Replace(strings.Replace(this.AcceptedNames.String(), "CustomResourceDefinitionNames", "CustomResourceDefinitionNames", 1), `&`, ``, 1) + `,`, + `StoredVersions:` + fmt.Sprintf("%v", this.StoredVersions) + `,`, + `}`, + }, "") + return s +} +func (this *CustomResourceDefinitionVersion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceDefinitionVersion{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Served:` + fmt.Sprintf("%v", this.Served) + `,`, + `Storage:` + fmt.Sprintf("%v", this.Storage) + `,`, `}`, }, "") return s @@ -2706,6 +2815,37 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, CustomResourceDefinitionVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2817,6 +2957,154 @@ func (m *CustomResourceDefinitionStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoredVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoredVersions = append(m.StoredVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomResourceDefinitionVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceDefinitionVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Served", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Served = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Storage = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5241,137 +5529,143 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2102 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0xcb, 0x6f, 0x63, 0x49, - 0xd5, 0x4f, 0xd9, 0x79, 0x56, 0x92, 0x49, 0x52, 0xdd, 0xe9, 0xef, 0x76, 0xbe, 0x6e, 0x3b, 0xf1, - 0x30, 0xa3, 0x00, 0xd3, 0x36, 0x3d, 0x0f, 0x66, 0x40, 0x62, 0x11, 0x27, 0x01, 0xf5, 0x90, 0x74, - 0xa2, 0x72, 0x77, 0x23, 0x98, 0x67, 0xe5, 0xba, 0xec, 0x54, 0xe7, 0xbe, 0xfa, 0x56, 0x5d, 0x77, - 0x22, 0x01, 0xe2, 0xa1, 0x11, 0x12, 0x12, 0x0f, 0x41, 0x6f, 0x90, 0xd8, 0x80, 0xc4, 0x06, 0x21, - 0x58, 0xc0, 0x92, 0x25, 0x8b, 0x5e, 0x8e, 0xc4, 0x66, 0x56, 0x16, 0x6d, 0xfe, 0x05, 0x24, 0xa4, - 0xac, 0x50, 0x3d, 0xee, 0xcb, 0x8e, 0x67, 0x5a, 0x1a, 0x7b, 0x7a, 0xe7, 0x7b, 0x5e, 0xbf, 0x5f, - 0x9d, 0x3a, 0x75, 0xea, 0x94, 0x61, 0xeb, 0xe4, 0x0d, 0x5e, 0x65, 0x7e, 0xed, 0x24, 0x3a, 0xa2, - 0xa1, 0x47, 0x05, 0xe5, 0xb5, 0x0e, 0xf5, 0x9a, 0x7e, 0x58, 0x33, 0x0a, 0x12, 0x30, 0x7a, 0x2a, - 0xa8, 0xc7, 0x99, 0xef, 0xf1, 0x1b, 0x24, 0x60, 0x9c, 0x86, 0x1d, 0x1a, 0xd6, 0x82, 0x93, 0xb6, - 0xd4, 0xf1, 0xbc, 0x41, 0xad, 0x73, 0xf3, 0x88, 0x0a, 0x72, 0xb3, 0xd6, 0xa6, 0x1e, 0x0d, 0x89, - 0xa0, 0xcd, 0x6a, 0x10, 0xfa, 0xc2, 0x47, 0x5f, 0xd3, 0xe1, 0xaa, 0x39, 0xeb, 0xf7, 0x92, 0x70, - 0xd5, 0xe0, 0xa4, 0x2d, 0x75, 0x3c, 0x6f, 0x50, 0x35, 0xe1, 0xd6, 0x6e, 0xb4, 0x99, 0x38, 0x8e, - 0x8e, 0xaa, 0xb6, 0xef, 0xd6, 0xda, 0x7e, 0xdb, 0xaf, 
0xa9, 0xa8, 0x47, 0x51, 0x4b, 0x7d, 0xa9, - 0x0f, 0xf5, 0x4b, 0xa3, 0xad, 0xbd, 0x9a, 0x92, 0x77, 0x89, 0x7d, 0xcc, 0x3c, 0x1a, 0x9e, 0xa5, - 0x8c, 0x5d, 0x2a, 0x48, 0xad, 0x33, 0xc0, 0x71, 0xad, 0x36, 0xcc, 0x2b, 0x8c, 0x3c, 0xc1, 0x5c, - 0x3a, 0xe0, 0xf0, 0xe5, 0x4f, 0x72, 0xe0, 0xf6, 0x31, 0x75, 0xc9, 0x80, 0xdf, 0x2b, 0xc3, 0xfc, - 0x22, 0xc1, 0x9c, 0x1a, 0xf3, 0x04, 0x17, 0x61, 0xbf, 0x53, 0xe5, 0xc7, 0x45, 0x68, 0x6d, 0x47, - 0x5c, 0xf8, 0x2e, 0xa6, 0xdc, 0x8f, 0x42, 0x9b, 0xee, 0xd0, 0x16, 0xf3, 0x98, 0x60, 0xbe, 0x87, - 0xde, 0x87, 0xb3, 0x72, 0x55, 0x4d, 0x22, 0x88, 0x05, 0xd6, 0xc1, 0xe6, 0xfc, 0xcb, 0x5f, 0xaa, - 0xa6, 0x19, 0x4f, 0x40, 0xd2, 0x34, 0x4b, 0xeb, 0x6a, 0xe7, 0x66, 0xf5, 0xe0, 0xe8, 0x3e, 0xb5, - 0xc5, 0x3e, 0x15, 0xa4, 0x8e, 0x1e, 0x77, 0xcb, 0x13, 0xbd, 0x6e, 0x19, 0xa6, 0x32, 0x9c, 0x44, - 0x45, 0xdf, 0x83, 0x93, 0x3c, 0xa0, 0xb6, 0x55, 0x50, 0xd1, 0xdf, 0xaa, 0x7e, 0xaa, 0xfd, 0xac, - 0x0e, 0x5b, 0x48, 0x23, 0xa0, 0x76, 0x7d, 0xc1, 0x10, 0x99, 0x94, 0x5f, 0x58, 0xc1, 0xa2, 0x0f, - 0x00, 0x9c, 0xe6, 0x82, 0x88, 0x88, 0x5b, 0x45, 0xc5, 0xe0, 0x9d, 0x71, 0x31, 0x50, 0x20, 0xf5, - 0xe7, 0x0c, 0x87, 0x69, 0xfd, 0x8d, 0x0d, 0x78, 0xe5, 0x3f, 0x05, 0xb8, 0x31, 0xcc, 0x75, 0xdb, - 0xf7, 0x9a, 0x7a, 0x3b, 0x6e, 0xc1, 0x49, 0x71, 0x16, 0x50, 0xb5, 0x15, 0x73, 0xf5, 0xd7, 0xe2, - 0xf5, 0xdc, 0x39, 0x0b, 0xe8, 0x79, 0xb7, 0xfc, 0xc2, 0x27, 0x06, 0x90, 0x86, 0x58, 0x85, 0x40, - 0x5f, 0x49, 0xd6, 0x5d, 0x50, 0xc1, 0x36, 0xf2, 0xc4, 0xce, 0xbb, 0xe5, 0xa5, 0xc4, 0x2d, 0xcf, - 0x15, 0x75, 0x20, 0x72, 0x08, 0x17, 0x77, 0x42, 0xe2, 0x71, 0x1d, 0x96, 0xb9, 0xd4, 0xa4, 0xef, - 0x0b, 0x4f, 0x57, 0x1e, 0xd2, 0xa3, 0xbe, 0x66, 0x20, 0xd1, 0xde, 0x40, 0x34, 0x7c, 0x01, 0x02, - 0x7a, 0x11, 0x4e, 0x87, 0x94, 0x70, 0xdf, 0xb3, 0x26, 0x15, 0xe5, 0x24, 0x97, 0x58, 0x49, 0xb1, - 0xd1, 0xa2, 0xcf, 0xc3, 0x19, 0x97, 0x72, 0x4e, 0xda, 0xd4, 0x9a, 0x52, 0x86, 0x4b, 0xc6, 0x70, - 0x66, 0x5f, 0x8b, 0x71, 0xac, 0xaf, 0x9c, 0x03, 0x78, 0x6d, 0x58, 0xd6, 0xf6, 0x18, 0x17, 0xe8, - 0xed, 0x81, 0x03, 0x50, 0x7d, 0xba, 0x15, 0x4a, 0x6f, 0x55, 0xfe, 0xcb, 0x06, 0x7c, 0x36, 0x96, - 0x64, 0x8a, 0xff, 0xbb, 0x70, 0x8a, 0x09, 0xea, 0xca, 0x3d, 0x28, 0x6e, 0xce, 0xbf, 0xfc, 0xad, - 0x31, 0xd5, 0x5e, 0x7d, 0xd1, 0x70, 0x98, 0xba, 0x25, 0xd1, 0xb0, 0x06, 0xad, 0xfc, 0xa1, 0x00, - 0xaf, 0x0f, 0x73, 0xb9, 0x4d, 0x5c, 0xca, 0x65, 0xc6, 0x03, 0x27, 0x0a, 0x89, 0x63, 0x2a, 0x2e, - 0xc9, 0xf8, 0xa1, 0x92, 0x62, 0xa3, 0x45, 0x2f, 0xc1, 0x59, 0xce, 0xbc, 0x76, 0xe4, 0x90, 0xd0, - 0x94, 0x53, 0xb2, 0xea, 0x86, 0x91, 0xe3, 0xc4, 0x02, 0x55, 0x21, 0xe4, 0xc7, 0x7e, 0x28, 0x14, - 0x86, 0x55, 0x5c, 0x2f, 0xca, 0xc8, 0xb2, 0x41, 0x34, 0x12, 0x29, 0xce, 0x58, 0xa0, 0x75, 0x38, - 0x79, 0xc2, 0xbc, 0xa6, 0xd9, 0xf5, 0xe4, 0x14, 0x7f, 0x93, 0x79, 0x4d, 0xac, 0x34, 0x12, 0xdf, - 0x61, 0x5c, 0x48, 0x89, 0xd9, 0xf2, 0x5c, 0xd6, 0x95, 0x65, 0x62, 0x21, 0xf1, 0x6d, 0x22, 0x68, - 0xdb, 0x0f, 0x19, 0xe5, 0xd6, 0x74, 0x8a, 0xbf, 0x9d, 0x48, 0x71, 0xc6, 0xa2, 0xf2, 0x8f, 0xc9, - 0xe1, 0x45, 0x22, 0x5b, 0x09, 0x7a, 0x1e, 0x4e, 0xb5, 0x43, 0x3f, 0x0a, 0x4c, 0x96, 0x92, 0x6c, - 0x7f, 0x43, 0x0a, 0xb1, 0xd6, 0xc9, 0xaa, 0xec, 0xd0, 0x50, 0x6e, 0x98, 0x49, 0x51, 0x52, 0x95, - 0xf7, 0xb4, 0x18, 0xc7, 0x7a, 0xf4, 0x43, 0x00, 0xa7, 0x3c, 0x93, 0x1c, 0x59, 0x72, 0x6f, 0x8f, - 0xa9, 0x2e, 0x54, 0x7a, 0x53, 0xba, 0x3a, 0xf3, 0x1a, 0x19, 0xbd, 0x0a, 0xa7, 0xb8, 0xed, 0x07, - 0xd4, 0x64, 0xbd, 0x14, 0x1b, 0x35, 0xa4, 0xf0, 0xbc, 0x5b, 0x5e, 0x8c, 0xc3, 0x29, 0x01, 0xd6, - 0xc6, 0xe8, 0x27, 0x00, 0xc2, 0x0e, 0x71, 0x58, 0x93, 0xc8, 0xf8, 0x6a, 0x2f, 
0x46, 0x5d, 0xd6, - 0xf7, 0x92, 0xf0, 0x7a, 0xd3, 0xd2, 0x6f, 0x9c, 0x81, 0x46, 0xbf, 0x00, 0x70, 0x81, 0x47, 0x47, - 0xa1, 0xf1, 0x92, 0xfb, 0x2c, 0xb9, 0x7c, 0x7b, 0xa4, 0x5c, 0x1a, 0x19, 0x80, 0xfa, 0x72, 0xaf, - 0x5b, 0x5e, 0xc8, 0x4a, 0x70, 0x8e, 0x40, 0xe5, 0x9f, 0x05, 0x58, 0xfa, 0xf8, 0xdb, 0x01, 0x3d, - 0x02, 0x10, 0xda, 0x71, 0xd7, 0xe5, 0x16, 0x50, 0x5d, 0xe1, 0xfd, 0x31, 0xed, 0x7e, 0xd2, 0xde, - 0xd3, 0x1b, 0x3a, 0x11, 0xc9, 0x03, 0x90, 0xfc, 0x46, 0xbf, 0x01, 0x70, 0x91, 0xd8, 0x36, 0x0d, - 0x04, 0x6d, 0xea, 0x43, 0x5b, 0xf8, 0x0c, 0xea, 0x72, 0xd5, 0xb0, 0x5a, 0xdc, 0xca, 0x42, 0xe3, - 0x3c, 0x93, 0xca, 0x7f, 0x41, 0x7f, 0x56, 0x33, 0x5b, 0xd0, 0xb0, 0x89, 0x43, 0xd1, 0x0e, 0x5c, - 0x96, 0x77, 0x3d, 0xa6, 0x81, 0xc3, 0x6c, 0xc2, 0x0f, 0x89, 0x38, 0x36, 0x27, 0xd5, 0x32, 0x10, - 0xcb, 0x8d, 0x3e, 0x3d, 0x1e, 0xf0, 0x40, 0x6f, 0x42, 0xa4, 0xef, 0xbf, 0x5c, 0x1c, 0x7d, 0x94, - 0x93, 0x9b, 0xac, 0x31, 0x60, 0x81, 0x2f, 0xf0, 0x42, 0xdb, 0x70, 0xc5, 0x21, 0x47, 0xd4, 0x69, - 0x50, 0x87, 0xda, 0xc2, 0x0f, 0x55, 0xa8, 0xa2, 0x0a, 0xb5, 0xda, 0xeb, 0x96, 0x57, 0xf6, 0xfa, - 0x95, 0x78, 0xd0, 0xbe, 0xb2, 0x01, 0xcb, 0xc3, 0x17, 0xae, 0xa7, 0x8a, 0xdf, 0x15, 0xe0, 0xda, - 0xf0, 0x8a, 0x45, 0x3f, 0x4a, 0x87, 0x1f, 0x7d, 0xb7, 0xbd, 0x3b, 0xae, 0xd3, 0x61, 0xa6, 0x1f, - 0x38, 0x38, 0xf9, 0xa0, 0xef, 0xcb, 0x46, 0x43, 0x1c, 0x6a, 0x6a, 0xea, 0x9d, 0xb1, 0x51, 0x90, - 0x20, 0xf5, 0x39, 0xdd, 0xc3, 0x88, 0xa3, 0x5a, 0x16, 0x71, 0x68, 0xe5, 0x8f, 0xa0, 0x7f, 0xfe, - 0x4d, 0x3b, 0x0a, 0xfa, 0x19, 0x80, 0x4b, 0x7e, 0x40, 0xbd, 0xad, 0xc3, 0x5b, 0xf7, 0x5e, 0x69, - 0xa8, 0xa9, 0xdb, 0xa4, 0xea, 0xf6, 0xa7, 0xe4, 0xf9, 0x66, 0xe3, 0xe0, 0xb6, 0x0e, 0x78, 0x18, - 0xfa, 0x01, 0xaf, 0x5f, 0xea, 0x75, 0xcb, 0x4b, 0x07, 0x79, 0x28, 0xdc, 0x8f, 0x5d, 0x71, 0xe1, - 0xea, 0xee, 0xa9, 0xa0, 0xa1, 0x47, 0x9c, 0x1d, 0xdf, 0x8e, 0x5c, 0xea, 0x09, 0x4d, 0xf4, 0x35, - 0x38, 0xdf, 0xa4, 0xdc, 0x0e, 0x59, 0xa0, 0x1a, 0xaf, 0x2e, 0xef, 0x4b, 0xa6, 0x2c, 0xe7, 0x77, - 0x52, 0x15, 0xce, 0xda, 0xa1, 0xeb, 0xb0, 0x18, 0x85, 0x8e, 0xa9, 0xe2, 0x79, 0x63, 0x5e, 0xbc, - 0x8b, 0xf7, 0xb0, 0x94, 0x57, 0x36, 0xe0, 0xa4, 0xe4, 0x89, 0xae, 0xc2, 0x62, 0x48, 0x1e, 0xaa, - 0xa8, 0x0b, 0xf5, 0x19, 0x69, 0x82, 0xc9, 0x43, 0x2c, 0x65, 0x95, 0x3f, 0x5d, 0x83, 0x4b, 0x7d, - 0x6b, 0x41, 0x6b, 0xb0, 0xc0, 0x9a, 0x86, 0x03, 0x34, 0x41, 0x0b, 0xb7, 0x76, 0x70, 0x81, 0x35, - 0xd1, 0xeb, 0x70, 0x5a, 0xbf, 0x5e, 0x0c, 0x68, 0x39, 0x99, 0x3b, 0x95, 0x54, 0xde, 0x2c, 0x69, - 0x38, 0x49, 0xc4, 0x98, 0x2b, 0x0e, 0xb4, 0x65, 0x4e, 0x89, 0xe6, 0x40, 0x5b, 0x58, 0xca, 0xfa, - 0x17, 0x3f, 0xf9, 0x94, 0x8b, 0x5f, 0x37, 0xd3, 0xf4, 0x54, 0x7e, 0xae, 0xc8, 0x0c, 0xc9, 0x2f, - 0xc2, 0xe9, 0x96, 0x1f, 0xba, 0x44, 0xa8, 0xdb, 0x23, 0x33, 0xff, 0x7c, 0x5d, 0x49, 0xb1, 0xd1, - 0xca, 0x01, 0x40, 0x30, 0xe1, 0x50, 0x6b, 0x26, 0x3f, 0x00, 0xdc, 0x91, 0x42, 0xac, 0x75, 0xe8, - 0x3e, 0x9c, 0x69, 0xd2, 0x16, 0x89, 0x1c, 0x61, 0xcd, 0xaa, 0x12, 0xda, 0x1e, 0x41, 0x09, 0xd5, - 0xe7, 0xe5, 0x04, 0xb1, 0xa3, 0xe3, 0xe2, 0x18, 0x00, 0xbd, 0x00, 0x67, 0x5c, 0x72, 0xca, 0xdc, - 0xc8, 0xb5, 0xe6, 0xd6, 0xc1, 0x26, 0xd0, 0x66, 0xfb, 0x5a, 0x84, 0x63, 0x9d, 0xec, 0x8c, 0xf4, - 0xd4, 0x76, 0x22, 0xce, 0x3a, 0xd4, 0x28, 0x2d, 0xb8, 0x0e, 0x36, 0x67, 0xd3, 0xce, 0xb8, 0xdb, - 0xa7, 0xc7, 0x03, 0x1e, 0x0a, 0x8c, 0x79, 0xca, 0x79, 0x3e, 0x03, 0xa6, 0x45, 0x38, 0xd6, 0xe5, - 0xc1, 0x8c, 0xfd, 0xc2, 0x30, 0x30, 0xe3, 0x3c, 0xe0, 0x81, 0xbe, 0x08, 0xe7, 0x5c, 0x72, 0xba, - 0x47, 0xbd, 0xb6, 0x38, 0xb6, 0x16, 0xd7, 0xc1, 0x66, 0xb1, 0xbe, 0xd8, 0xeb, 0x96, 0xe7, 0xf6, - 0x63, 
0x21, 0x4e, 0xf5, 0xca, 0x98, 0x79, 0xc6, 0xf8, 0xb9, 0x8c, 0x71, 0x2c, 0xc4, 0xa9, 0x5e, - 0x0e, 0x68, 0x01, 0x11, 0xf2, 0x70, 0x59, 0x4b, 0xf9, 0x01, 0xed, 0x50, 0x8b, 0x71, 0xac, 0x47, - 0x9b, 0x70, 0xd6, 0x25, 0xa7, 0x6a, 0x98, 0xb6, 0x96, 0x55, 0xd8, 0x05, 0x39, 0x6b, 0xee, 0x1b, - 0x19, 0x4e, 0xb4, 0xca, 0x92, 0x79, 0xda, 0x72, 0x25, 0x63, 0x69, 0x64, 0x38, 0xd1, 0xca, 0x22, - 0x8e, 0x3c, 0xf6, 0x20, 0xa2, 0xda, 0x18, 0xa9, 0xcc, 0x24, 0x45, 0x7c, 0x37, 0x55, 0xe1, 0xac, - 0x9d, 0x1c, 0x66, 0xdd, 0xc8, 0x11, 0x2c, 0x70, 0xe8, 0x41, 0xcb, 0xba, 0xa4, 0xf2, 0xaf, 0xe6, - 0xa2, 0xfd, 0x44, 0x8a, 0x33, 0x16, 0x88, 0xc2, 0x49, 0xea, 0x45, 0xae, 0x75, 0x59, 0xcd, 0x16, - 0x23, 0x29, 0xc1, 0xe4, 0xe4, 0xec, 0x7a, 0x91, 0x8b, 0x55, 0x78, 0xf4, 0x3a, 0x5c, 0x74, 0xc9, - 0xa9, 0x6c, 0x07, 0x34, 0x14, 0x72, 0xcc, 0x5e, 0x55, 0x8b, 0x5f, 0x91, 0xf7, 0xf9, 0x7e, 0x56, - 0x81, 0xf3, 0x76, 0xca, 0x91, 0x79, 0x19, 0xc7, 0x2b, 0x19, 0xc7, 0xac, 0x02, 0xe7, 0xed, 0x64, - 0xa6, 0x43, 0xfa, 0x20, 0x62, 0x21, 0x6d, 0x5a, 0xff, 0xa7, 0x66, 0x7a, 0x95, 0x69, 0x6c, 0x64, - 0x38, 0xd1, 0xa2, 0x4e, 0xfc, 0xea, 0xb2, 0xd4, 0x31, 0xbc, 0x3b, 0xda, 0x4e, 0x7e, 0x10, 0x6e, - 0x85, 0x21, 0x39, 0xd3, 0x37, 0x4d, 0xf6, 0xbd, 0x85, 0x38, 0x9c, 0x22, 0x8e, 0x73, 0xd0, 0xb2, - 0xae, 0xaa, 0xdc, 0x8f, 0xfa, 0x06, 0x49, 0xba, 0xce, 0x96, 0x04, 0xc1, 0x1a, 0x4b, 0x82, 0xfa, - 0x9e, 0x2c, 0x8d, 0xb5, 0xf1, 0x82, 0x1e, 0x48, 0x10, 0xac, 0xb1, 0xd4, 0x4a, 0xbd, 0xb3, 0x83, - 0x96, 0xf5, 0xff, 0x63, 0x5e, 0xa9, 0x04, 0xc1, 0x1a, 0x0b, 0x31, 0x58, 0xf4, 0x7c, 0x61, 0x5d, - 0x1b, 0xcb, 0xf5, 0xac, 0x2e, 0x9c, 0xdb, 0xbe, 0xc0, 0x12, 0x03, 0xfd, 0x0a, 0x40, 0x18, 0xa4, - 0x25, 0x7a, 0x5d, 0xad, 0xf2, 0xdd, 0xd1, 0x42, 0x56, 0xd3, 0xda, 0xde, 0xf5, 0x44, 0x78, 0x96, - 0x4e, 0xe9, 0x99, 0x33, 0x90, 0x61, 0x81, 0x7e, 0x0f, 0xe0, 0x65, 0xd2, 0xd4, 0x33, 0x3b, 0x71, - 0x32, 0x27, 0xa8, 0xa4, 0x32, 0x72, 0x67, 0xd4, 0x65, 0x5e, 0xf7, 0x7d, 0xa7, 0x6e, 0xf5, 0xba, - 0xe5, 0xcb, 0x5b, 0x17, 0xa0, 0xe2, 0x0b, 0xb9, 0xa0, 0x3f, 0x03, 0xb8, 0x62, 0xba, 0x68, 0x86, - 0x61, 0x59, 0x25, 0x90, 0x8e, 0x3a, 0x81, 0xfd, 0x38, 0x3a, 0x8f, 0x57, 0x4d, 0x1e, 0x57, 0x06, - 0xf4, 0x78, 0x90, 0x1a, 0xfa, 0x1b, 0x80, 0x0b, 0x4d, 0x1a, 0x50, 0xaf, 0x49, 0x3d, 0x5b, 0x72, - 0x5d, 0x1f, 0xc9, 0xa3, 0xac, 0x9f, 0xeb, 0x4e, 0x06, 0x42, 0xd3, 0xac, 0x1a, 0x9a, 0x0b, 0x59, - 0xd5, 0x79, 0xb7, 0x7c, 0x25, 0x75, 0xcd, 0x6a, 0x70, 0x8e, 0x25, 0xfa, 0x35, 0x80, 0x4b, 0xe9, - 0x06, 0xe8, 0x2b, 0x65, 0x63, 0x8c, 0x75, 0xa0, 0xc6, 0xd7, 0xad, 0x3c, 0x20, 0xee, 0x67, 0x80, - 0xfe, 0x02, 0xe4, 0xa4, 0x16, 0x3f, 0xf3, 0xb8, 0x55, 0x51, 0xb9, 0x7c, 0x6f, 0xe4, 0xb9, 0x4c, - 0x10, 0x74, 0x2a, 0x5f, 0x4a, 0x47, 0xc1, 0x44, 0x73, 0xde, 0x2d, 0xaf, 0x66, 0x33, 0x99, 0x28, - 0x70, 0x96, 0x21, 0xfa, 0x29, 0x80, 0x0b, 0x34, 0x9d, 0xb8, 0xb9, 0xf5, 0xfc, 0x48, 0x92, 0x78, - 0xe1, 0x10, 0xaf, 0xff, 0x41, 0xc8, 0xa8, 0x38, 0xce, 0x61, 0xcb, 0x09, 0x92, 0x9e, 0x12, 0x37, - 0x70, 0xa8, 0xf5, 0xb9, 0x11, 0x4f, 0x90, 0xbb, 0x3a, 0x2e, 0x8e, 0x01, 0xd6, 0xe4, 0xcb, 0xa7, - 0xef, 0xe4, 0xa0, 0x65, 0x58, 0x3c, 0xa1, 0x67, 0x7a, 0xb0, 0xc7, 0xf2, 0x27, 0x6a, 0xc2, 0xa9, - 0x0e, 0x71, 0xa2, 0xf8, 0xf1, 0x36, 0xe2, 0xae, 0x8b, 0x75, 0xf0, 0xaf, 0x16, 0xde, 0x00, 0x6b, - 0x8f, 0x00, 0xbc, 0x72, 0xf1, 0x81, 0x7e, 0xa6, 0xb4, 0x7e, 0x0b, 0xe0, 0xca, 0xc0, 0xd9, 0xbd, - 0x80, 0xd1, 0x83, 0x3c, 0xa3, 0xb7, 0x46, 0x7d, 0x08, 0x1b, 0x22, 0x64, 0x5e, 0x5b, 0x4d, 0x1e, - 0x59, 0x7a, 0x3f, 0x07, 0x70, 0xb9, 0xff, 0x38, 0x3c, 0xcb, 0x7c, 0x55, 0x1e, 0x15, 0xe0, 0x95, - 0x8b, 0x07, 0x26, 0x14, 0x26, 
0x2f, 0xc3, 0xf1, 0xbc, 0xb0, 0x61, 0xfa, 0xca, 0x4c, 0x1e, 0x95, - 0x1f, 0x00, 0x38, 0x7f, 0x3f, 0xb1, 0x8b, 0xff, 0x87, 0x1f, 0xf9, 0xdb, 0x3e, 0xee, 0x3f, 0xa9, - 0x82, 0xe3, 0x2c, 0x6e, 0xe5, 0xaf, 0x00, 0xae, 0x5e, 0xd8, 0x58, 0xe5, 0x13, 0x94, 0x38, 0x8e, - 0xff, 0x50, 0xff, 0x45, 0x33, 0x9b, 0x3e, 0x41, 0xb7, 0x94, 0x14, 0x1b, 0x6d, 0x26, 0x7b, 0x85, - 0xcf, 0x2a, 0x7b, 0x95, 0xbf, 0x03, 0x78, 0xed, 0xe3, 0x2a, 0xf1, 0x99, 0x6c, 0xe9, 0x26, 0x9c, - 0x35, 0x43, 0xd1, 0x99, 0xda, 0x4e, 0xf3, 0x0e, 0x30, 0x4d, 0xe3, 0x0c, 0x27, 0xda, 0xfa, 0x8d, - 0xc7, 0x4f, 0x4a, 0x13, 0x1f, 0x3e, 0x29, 0x4d, 0x7c, 0xf4, 0xa4, 0x34, 0xf1, 0x83, 0x5e, 0x09, - 0x3c, 0xee, 0x95, 0xc0, 0x87, 0xbd, 0x12, 0xf8, 0xa8, 0x57, 0x02, 0xff, 0xea, 0x95, 0xc0, 0x2f, - 0xff, 0x5d, 0x9a, 0xf8, 0xce, 0x8c, 0x01, 0xff, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6f, - 0x04, 0x49, 0xd3, 0x1e, 0x00, 0x00, + // 2200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0x1c, 0x49, + 0x15, 0x77, 0xcf, 0x78, 0xfc, 0x51, 0xb6, 0x63, 0xbb, 0x12, 0x87, 0x8e, 0x49, 0x66, 0xec, 0x59, + 0x76, 0x65, 0x60, 0x33, 0x43, 0xf6, 0x83, 0x5d, 0x56, 0xe2, 0xe0, 0xb1, 0x0d, 0xca, 0x62, 0xc7, + 0x56, 0x4d, 0x12, 0x04, 0xfb, 0x59, 0xee, 0xae, 0x19, 0x77, 0xdc, 0x5f, 0xe9, 0xaa, 0x9e, 0xd8, + 0x12, 0x20, 0x3e, 0xb4, 0x42, 0x42, 0xc0, 0x22, 0x88, 0x90, 0x90, 0xb8, 0x80, 0xc4, 0x05, 0x21, + 0x38, 0xc0, 0x91, 0x3f, 0x20, 0xc7, 0x95, 0xb8, 0xec, 0x69, 0x44, 0x86, 0x7f, 0x01, 0x09, 0xc9, + 0x27, 0x54, 0x1f, 0x5d, 0xdd, 0x3d, 0xe3, 0xd9, 0x44, 0xda, 0x99, 0xcd, 0xcd, 0xfd, 0xde, 0xab, + 0xf7, 0xfb, 0xd5, 0xab, 0x57, 0xaf, 0xde, 0x1b, 0x83, 0xd6, 0xf1, 0xeb, 0xb4, 0xe6, 0x04, 0xf5, + 0xe3, 0xf8, 0x90, 0x44, 0x3e, 0x61, 0x84, 0xd6, 0x3b, 0xc4, 0xb7, 0x83, 0xa8, 0xae, 0x14, 0x38, + 0x74, 0xc8, 0x09, 0x23, 0x3e, 0x75, 0x02, 0x9f, 0x5e, 0xc7, 0xa1, 0x43, 0x49, 0xd4, 0x21, 0x51, + 0x3d, 0x3c, 0x6e, 0x73, 0x1d, 0xcd, 0x1b, 0xd4, 0x3b, 0x37, 0x0e, 0x09, 0xc3, 0x37, 0xea, 0x6d, + 0xe2, 0x93, 0x08, 0x33, 0x62, 0xd7, 0xc2, 0x28, 0x60, 0x01, 0xfc, 0xba, 0x74, 0x57, 0xcb, 0x59, + 0xbf, 0xa7, 0xdd, 0xd5, 0xc2, 0xe3, 0x36, 0xd7, 0xd1, 0xbc, 0x41, 0x4d, 0xb9, 0x5b, 0xbd, 0xde, + 0x76, 0xd8, 0x51, 0x7c, 0x58, 0xb3, 0x02, 0xaf, 0xde, 0x0e, 0xda, 0x41, 0x5d, 0x78, 0x3d, 0x8c, + 0x5b, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x24, 0xda, 0xea, 0x2b, 0x29, 0x79, 0x0f, 0x5b, 0x47, 0x8e, + 0x4f, 0xa2, 0xd3, 0x94, 0xb1, 0x47, 0x18, 0xae, 0x77, 0x06, 0x38, 0xae, 0xd6, 0x87, 0xad, 0x8a, + 0x62, 0x9f, 0x39, 0x1e, 0x19, 0x58, 0xf0, 0xd5, 0x27, 0x2d, 0xa0, 0xd6, 0x11, 0xf1, 0xf0, 0xc0, + 0xba, 0x97, 0x87, 0xad, 0x8b, 0x99, 0xe3, 0xd6, 0x1d, 0x9f, 0x51, 0x16, 0xf5, 0x2f, 0xaa, 0xfe, + 0xa4, 0x08, 0xcc, 0xad, 0x98, 0xb2, 0xc0, 0x43, 0x84, 0x06, 0x71, 0x64, 0x91, 0x6d, 0xd2, 0x72, + 0x7c, 0x87, 0x39, 0x81, 0x0f, 0xdf, 0x07, 0x33, 0x7c, 0x57, 0x36, 0x66, 0xd8, 0x34, 0xd6, 0x8c, + 0x8d, 0xb9, 0x97, 0xbe, 0x52, 0x4b, 0x23, 0xae, 0x41, 0xd2, 0x30, 0x73, 0xeb, 0x5a, 0xe7, 0x46, + 0x6d, 0xff, 0xf0, 0x1e, 0xb1, 0xd8, 0x1e, 0x61, 0xb8, 0x01, 0x1f, 0x75, 0x2b, 0x13, 0xbd, 0x6e, + 0x05, 0xa4, 0x32, 0xa4, 0xbd, 0xc2, 0xef, 0x83, 0x49, 0x1a, 0x12, 0xcb, 0x2c, 0x08, 0xef, 0x6f, + 0xd5, 0x3e, 0xd5, 0x79, 0xd6, 0x86, 0x6d, 0xa4, 0x19, 0x12, 0xab, 0x31, 0xaf, 0x88, 0x4c, 0xf2, + 0x2f, 0x24, 0x60, 0xe1, 0x07, 0x06, 0x98, 0xa2, 0x0c, 0xb3, 0x98, 0x9a, 0x45, 0xc1, 0xe0, 0x9d, + 0x71, 0x31, 0x10, 0x20, 0x8d, 0x0b, 0x8a, 0xc3, 0x94, 0xfc, 0x46, 0x0a, 0xbc, 0xfa, 0xdf, 0x02, + 0x58, 0x1f, 0xb6, 0x74, 0x2b, 0xf0, 0x6d, 0x79, 0x1c, 0x37, 0xc1, 
0x24, 0x3b, 0x0d, 0x89, 0x38, + 0x8a, 0xd9, 0xc6, 0xab, 0xc9, 0x7e, 0x6e, 0x9f, 0x86, 0xe4, 0xac, 0x5b, 0x79, 0xfe, 0x89, 0x0e, + 0xb8, 0x21, 0x12, 0x2e, 0xe0, 0xd7, 0xf4, 0xbe, 0x0b, 0xc2, 0xd9, 0x7a, 0x9e, 0xd8, 0x59, 0xb7, + 0xb2, 0xa8, 0x97, 0xe5, 0xb9, 0xc2, 0x0e, 0x80, 0x2e, 0xa6, 0xec, 0x76, 0x84, 0x7d, 0x2a, 0xdd, + 0x3a, 0x1e, 0x51, 0xe1, 0xfb, 0xd2, 0xd3, 0xa5, 0x07, 0x5f, 0xd1, 0x58, 0x55, 0x90, 0x70, 0x77, + 0xc0, 0x1b, 0x3a, 0x07, 0x01, 0xbe, 0x00, 0xa6, 0x22, 0x82, 0x69, 0xe0, 0x9b, 0x93, 0x82, 0xb2, + 0x8e, 0x25, 0x12, 0x52, 0xa4, 0xb4, 0xf0, 0x8b, 0x60, 0xda, 0x23, 0x94, 0xe2, 0x36, 0x31, 0x4b, + 0xc2, 0x70, 0x51, 0x19, 0x4e, 0xef, 0x49, 0x31, 0x4a, 0xf4, 0xd5, 0x33, 0x03, 0x5c, 0x1d, 0x16, + 0xb5, 0x5d, 0x87, 0x32, 0xf8, 0xf6, 0xc0, 0x05, 0xa8, 0x3d, 0xdd, 0x0e, 0xf9, 0x6a, 0x91, 0xfe, + 0x4b, 0x0a, 0x7c, 0x26, 0x91, 0x64, 0x92, 0xff, 0x7b, 0xa0, 0xe4, 0x30, 0xe2, 0xf1, 0x33, 0x28, + 0x6e, 0xcc, 0xbd, 0xf4, 0xed, 0x31, 0xe5, 0x5e, 0x63, 0x41, 0x71, 0x28, 0xdd, 0xe4, 0x68, 0x48, + 0x82, 0x56, 0xff, 0x54, 0x00, 0xd7, 0x86, 0x2d, 0xb9, 0x85, 0x3d, 0x42, 0x79, 0xc4, 0x43, 0x37, + 0x8e, 0xb0, 0xab, 0x32, 0x4e, 0x47, 0xfc, 0x40, 0x48, 0x91, 0xd2, 0xc2, 0x17, 0xc1, 0x0c, 0x75, + 0xfc, 0x76, 0xec, 0xe2, 0x48, 0xa5, 0x93, 0xde, 0x75, 0x53, 0xc9, 0x91, 0xb6, 0x80, 0x35, 0x00, + 0xe8, 0x51, 0x10, 0x31, 0x81, 0x61, 0x16, 0xd7, 0x8a, 0xdc, 0x33, 0x2f, 0x10, 0x4d, 0x2d, 0x45, + 0x19, 0x0b, 0xb8, 0x06, 0x26, 0x8f, 0x1d, 0xdf, 0x56, 0xa7, 0xae, 0x6f, 0xf1, 0xb7, 0x1c, 0xdf, + 0x46, 0x42, 0xc3, 0xf1, 0x5d, 0x87, 0x32, 0x2e, 0x51, 0x47, 0x9e, 0x8b, 0xba, 0xb0, 0xd4, 0x16, + 0x1c, 0xdf, 0xc2, 0x8c, 0xb4, 0x83, 0xc8, 0x21, 0xd4, 0x9c, 0x4a, 0xf1, 0xb7, 0xb4, 0x14, 0x65, + 0x2c, 0xaa, 0xff, 0x2a, 0x0d, 0x4f, 0x12, 0x5e, 0x4a, 0xe0, 0x73, 0xa0, 0xd4, 0x8e, 0x82, 0x38, + 0x54, 0x51, 0xd2, 0xd1, 0xfe, 0x26, 0x17, 0x22, 0xa9, 0xe3, 0x59, 0xd9, 0x21, 0x11, 0x3f, 0x30, + 0x15, 0x22, 0x9d, 0x95, 0x77, 0xa5, 0x18, 0x25, 0x7a, 0xf8, 0x23, 0x03, 0x94, 0x7c, 0x15, 0x1c, + 0x9e, 0x72, 0x6f, 0x8f, 0x29, 0x2f, 0x44, 0x78, 0x53, 0xba, 0x32, 0xf2, 0x12, 0x19, 0xbe, 0x02, + 0x4a, 0xd4, 0x0a, 0x42, 0xa2, 0xa2, 0x5e, 0x4e, 0x8c, 0x9a, 0x5c, 0x78, 0xd6, 0xad, 0x2c, 0x24, + 0xee, 0x84, 0x00, 0x49, 0x63, 0xf8, 0x53, 0x03, 0x80, 0x0e, 0x76, 0x1d, 0x1b, 0x73, 0xff, 0xe2, + 0x2c, 0x46, 0x9d, 0xd6, 0x77, 0xb5, 0x7b, 0x79, 0x68, 0xe9, 0x37, 0xca, 0x40, 0xc3, 0x0f, 0x0d, + 0x30, 0x4f, 0xe3, 0xc3, 0x48, 0xad, 0xe2, 0xe7, 0xcc, 0xb9, 0x7c, 0x67, 0xa4, 0x5c, 0x9a, 0x19, + 0x80, 0xc6, 0x52, 0xaf, 0x5b, 0x99, 0xcf, 0x4a, 0x50, 0x8e, 0x00, 0xfc, 0xb9, 0x01, 0x66, 0xd4, + 0x09, 0x53, 0x73, 0x5a, 0x5c, 0xf8, 0x77, 0xc7, 0x74, 0xb0, 0x2a, 0xa3, 0xd2, 0x5b, 0xa0, 0x04, + 0x14, 0x69, 0x06, 0xd5, 0x0f, 0x8b, 0xa0, 0xfc, 0xc9, 0x8f, 0x15, 0x7c, 0x68, 0x00, 0x60, 0x25, + 0x8f, 0x00, 0x35, 0x0d, 0xc1, 0xf9, 0xfd, 0x31, 0x71, 0xd6, 0xaf, 0x4d, 0xda, 0x30, 0x68, 0x11, + 0xbf, 0x8f, 0xfa, 0x6f, 0xf8, 0x3b, 0x03, 0x2c, 0x60, 0xcb, 0x22, 0x21, 0x23, 0xb6, 0xac, 0x21, + 0x85, 0xcf, 0xe0, 0x9a, 0xac, 0x28, 0x56, 0x0b, 0x9b, 0x59, 0x68, 0x94, 0x67, 0x02, 0xdf, 0x00, + 0x17, 0x28, 0x0b, 0x22, 0x62, 0x27, 0x11, 0x57, 0xf5, 0x0d, 0xf6, 0xba, 0x95, 0x0b, 0xcd, 0x9c, + 0x06, 0xf5, 0x59, 0x56, 0x7f, 0x6b, 0x80, 0xca, 0x13, 0x4e, 0x94, 0xd7, 0x42, 0x7e, 0x3f, 0x55, + 0xa5, 0xd1, 0xb5, 0x90, 0x83, 0x23, 0xa1, 0xe1, 0x35, 0x5b, 0x6c, 0xd7, 0x16, 0x51, 0x99, 0xc9, + 0x74, 0x1c, 0x42, 0x8a, 0x94, 0x96, 0xd7, 0x23, 0x8e, 0xcf, 0x5f, 0xc9, 0xa2, 0x30, 0xd4, 0xf5, + 0xa8, 0x29, 0xc5, 0x28, 0xd1, 0x57, 0xff, 0x67, 0xf4, 0xa7, 0x4a, 0x26, 0xcd, 0x9b, 0x16, 
0x76, + 0x09, 0xdc, 0x06, 0x4b, 0xbc, 0x9f, 0x42, 0x24, 0x74, 0x1d, 0x0b, 0xd3, 0x03, 0xcc, 0x8e, 0x14, + 0x47, 0x53, 0xb9, 0x5d, 0x6a, 0xf6, 0xe9, 0xd1, 0xc0, 0x0a, 0xf8, 0x26, 0x80, 0xb2, 0xc7, 0xc8, + 0xf9, 0x91, 0xe5, 0x52, 0x77, 0x0b, 0xcd, 0x01, 0x0b, 0x74, 0xce, 0x2a, 0xb8, 0x05, 0x96, 0x5d, + 0x7c, 0x48, 0xdc, 0x26, 0x71, 0x89, 0xc5, 0x82, 0x48, 0xb8, 0x2a, 0x0a, 0x57, 0x2b, 0xbd, 0x6e, + 0x65, 0x79, 0xb7, 0x5f, 0x89, 0x06, 0xed, 0xab, 0xeb, 0xfd, 0x27, 0x92, 0xdd, 0xb8, 0xec, 0xdc, + 0xfe, 0x50, 0x00, 0xab, 0xc3, 0xab, 0x02, 0xfc, 0x71, 0xda, 0x60, 0xca, 0xfe, 0xe1, 0xdd, 0x71, + 0x55, 0x20, 0xd5, 0x61, 0x82, 0xc1, 0xee, 0x12, 0xfe, 0x80, 0x17, 0x73, 0xec, 0x12, 0x75, 0x51, + 0xde, 0x19, 0x1b, 0x05, 0x0e, 0xd2, 0x98, 0x95, 0xef, 0x04, 0x76, 0xc5, 0xb3, 0x80, 0x5d, 0x52, + 0xfd, 0xb3, 0xd1, 0x3f, 0x63, 0xa4, 0x55, 0x1b, 0xfe, 0xc2, 0x00, 0x8b, 0x41, 0x48, 0xfc, 0xcd, + 0x83, 0x9b, 0x77, 0x5f, 0x6e, 0x8a, 0xc9, 0x46, 0x85, 0xea, 0xd6, 0xa7, 0xe4, 0xf9, 0x66, 0x73, + 0xff, 0x96, 0x74, 0x78, 0x10, 0x05, 0x21, 0x6d, 0x5c, 0xec, 0x75, 0x2b, 0x8b, 0xfb, 0x79, 0x28, + 0xd4, 0x8f, 0x5d, 0xf5, 0xc0, 0xca, 0xce, 0x09, 0x23, 0x91, 0x8f, 0xdd, 0xed, 0xc0, 0x8a, 0x3d, + 0xe2, 0x33, 0x49, 0xf4, 0x55, 0x30, 0x67, 0x13, 0x6a, 0x45, 0x4e, 0x28, 0x1e, 0x37, 0x99, 0xde, + 0x17, 0x55, 0x5a, 0xce, 0x6d, 0xa7, 0x2a, 0x94, 0xb5, 0x83, 0xd7, 0x40, 0x31, 0x8e, 0x5c, 0x95, + 0xc5, 0x73, 0xca, 0xbc, 0x78, 0x07, 0xed, 0x22, 0x2e, 0xaf, 0xae, 0x83, 0x49, 0xce, 0x13, 0x5e, + 0x01, 0xc5, 0x08, 0x3f, 0x10, 0x5e, 0xe7, 0x1b, 0xd3, 0xdc, 0x04, 0xe1, 0x07, 0x88, 0xcb, 0xaa, + 0x7f, 0xb9, 0x0a, 0x16, 0xfb, 0xf6, 0x02, 0x57, 0x41, 0xc1, 0xb1, 0x15, 0x07, 0xa0, 0x9c, 0x16, + 0x6e, 0x6e, 0xa3, 0x82, 0x63, 0xc3, 0xd7, 0xc0, 0x94, 0x9c, 0x10, 0x15, 0x68, 0x45, 0x97, 0x00, + 0x21, 0xe5, 0xaf, 0x77, 0xea, 0x8e, 0x13, 0x51, 0xe6, 0x82, 0x03, 0x69, 0xa9, 0x5b, 0x22, 0x39, + 0x90, 0x16, 0xe2, 0xb2, 0xfe, 0xcd, 0x4f, 0x3e, 0xe5, 0xe6, 0xd7, 0xd4, 0xc4, 0x52, 0xca, 0xd7, + 0xab, 0xcc, 0x20, 0xf2, 0x02, 0x98, 0x6a, 0x05, 0x91, 0x87, 0x99, 0x78, 0xa1, 0x33, 0x3d, 0xe6, + 0x37, 0x84, 0x14, 0x29, 0x2d, 0x6f, 0xb2, 0x98, 0xc3, 0x5c, 0x62, 0x4e, 0xe7, 0x9b, 0xac, 0xdb, + 0x5c, 0x88, 0xa4, 0x0e, 0xde, 0x03, 0xd3, 0x36, 0x69, 0xe1, 0xd8, 0x65, 0xe6, 0x8c, 0x48, 0xa1, + 0xad, 0x11, 0xa4, 0x50, 0x63, 0x8e, 0x57, 0xc5, 0x6d, 0xe9, 0x17, 0x25, 0x00, 0xf0, 0x79, 0x30, + 0xed, 0xe1, 0x13, 0xc7, 0x8b, 0x3d, 0x73, 0x76, 0xcd, 0xd8, 0x30, 0xa4, 0xd9, 0x9e, 0x14, 0xa1, + 0x44, 0xc7, 0x2b, 0x23, 0x39, 0xb1, 0xdc, 0x98, 0x3a, 0x1d, 0xa2, 0x94, 0x26, 0x10, 0x05, 0x57, + 0x57, 0xc6, 0x9d, 0x3e, 0x3d, 0x1a, 0x58, 0x21, 0xc0, 0x1c, 0x5f, 0x2c, 0x9e, 0xcb, 0x80, 0x49, + 0x11, 0x4a, 0x74, 0x79, 0x30, 0x65, 0x3f, 0x3f, 0x0c, 0x4c, 0x2d, 0x1e, 0x58, 0x01, 0xbf, 0x0c, + 0x66, 0x3d, 0x7c, 0xb2, 0x4b, 0xfc, 0x36, 0x3b, 0x32, 0x17, 0xd6, 0x8c, 0x8d, 0x62, 0x63, 0xa1, + 0xd7, 0xad, 0xcc, 0xee, 0x25, 0x42, 0x94, 0xea, 0x85, 0xb1, 0xe3, 0x2b, 0xe3, 0x0b, 0x19, 0xe3, + 0x44, 0x88, 0x52, 0x3d, 0x7f, 0x74, 0x42, 0xcc, 0xf8, 0xe5, 0x32, 0x17, 0xf3, 0x4d, 0xf0, 0x81, + 0x14, 0xa3, 0x44, 0x0f, 0x37, 0xc0, 0x8c, 0x87, 0x4f, 0xc4, 0xc0, 0x62, 0x2e, 0x09, 0xb7, 0xf3, + 0xbc, 0x93, 0xd9, 0x53, 0x32, 0xa4, 0xb5, 0xc2, 0xd2, 0xf1, 0xa5, 0xe5, 0x72, 0xc6, 0x52, 0xc9, + 0x90, 0xd6, 0xf2, 0x24, 0x8e, 0x7d, 0xe7, 0x7e, 0x4c, 0xa4, 0x31, 0x14, 0x91, 0xd1, 0x49, 0x7c, + 0x27, 0x55, 0xa1, 0xac, 0x1d, 0x1f, 0x18, 0xbc, 0xd8, 0x65, 0x4e, 0xe8, 0x92, 0xfd, 0x96, 0x79, + 0x51, 0xc4, 0x5f, 0xf4, 0x9e, 0x7b, 0x5a, 0x8a, 0x32, 0x16, 0x90, 0x80, 0x49, 0xe2, 0xc7, 0x9e, + 0x79, 0x49, 0x34, 
0x4c, 0x23, 0x49, 0x41, 0x7d, 0x73, 0x76, 0xfc, 0xd8, 0x43, 0xc2, 0x3d, 0x7c, + 0x0d, 0x2c, 0x78, 0xf8, 0x84, 0x97, 0x03, 0x12, 0x31, 0x3e, 0xca, 0xac, 0x88, 0xcd, 0x2f, 0xf3, + 0x26, 0x65, 0x2f, 0xab, 0x40, 0x79, 0x3b, 0xb1, 0xd0, 0xf1, 0x33, 0x0b, 0x2f, 0x67, 0x16, 0x66, + 0x15, 0x28, 0x6f, 0xc7, 0x23, 0x1d, 0x91, 0xfb, 0xb1, 0x13, 0x11, 0xdb, 0xfc, 0x9c, 0xe8, 0x6b, + 0x44, 0xa4, 0x91, 0x92, 0x21, 0xad, 0x85, 0x9d, 0x64, 0xb2, 0x35, 0xc5, 0x35, 0xbc, 0x33, 0xda, + 0x4a, 0xbe, 0x1f, 0x6d, 0x46, 0x11, 0x3e, 0x95, 0x2f, 0x4d, 0x76, 0xa6, 0x85, 0x14, 0x94, 0xb0, + 0xeb, 0xee, 0xb7, 0xcc, 0x2b, 0x22, 0xf6, 0xa3, 0x7e, 0x41, 0x74, 0xd5, 0xd9, 0xe4, 0x20, 0x48, + 0x62, 0x71, 0xd0, 0xc0, 0xe7, 0xa9, 0xb1, 0x3a, 0x5e, 0xd0, 0x7d, 0x0e, 0x82, 0x24, 0x96, 0xd8, + 0xa9, 0x7f, 0xba, 0xdf, 0x32, 0x3f, 0x3f, 0xe6, 0x9d, 0x72, 0x10, 0x24, 0xb1, 0xa0, 0x03, 0x8a, + 0x7e, 0xc0, 0xcc, 0xab, 0x63, 0x79, 0x9e, 0xc5, 0x83, 0x73, 0x2b, 0x60, 0x88, 0x63, 0xc0, 0x5f, + 0x1b, 0x00, 0x84, 0x69, 0x8a, 0x5e, 0x1b, 0xc9, 0xc0, 0xd4, 0x07, 0x59, 0x4b, 0x73, 0x7b, 0xc7, + 0x67, 0xd1, 0x69, 0x3a, 0x7a, 0x64, 0xee, 0x40, 0x86, 0x05, 0xfc, 0xa3, 0x01, 0x2e, 0x61, 0x5b, + 0x0e, 0x22, 0xd8, 0xcd, 0xdc, 0xa0, 0xb2, 0x88, 0xc8, 0xed, 0x51, 0xa7, 0x79, 0x23, 0x08, 0xdc, + 0x86, 0xd9, 0xeb, 0x56, 0x2e, 0x6d, 0x9e, 0x83, 0x8a, 0xce, 0xe5, 0x02, 0xff, 0x6a, 0x80, 0x65, + 0x55, 0x45, 0x33, 0x0c, 0x2b, 0x22, 0x80, 0x64, 0xd4, 0x01, 0xec, 0xc7, 0x91, 0x71, 0xbc, 0xa2, + 0xe2, 0xb8, 0x3c, 0xa0, 0x47, 0x83, 0xd4, 0xe0, 0x3f, 0x0c, 0x30, 0x6f, 0x93, 0x90, 0xf8, 0x36, + 0xf1, 0x2d, 0xce, 0x75, 0x6d, 0x24, 0x93, 0x66, 0x3f, 0xd7, 0xed, 0x0c, 0x84, 0xa4, 0x59, 0x53, + 0x34, 0xe7, 0xb3, 0xaa, 0xb3, 0x6e, 0xe5, 0x72, 0xba, 0x34, 0xab, 0x41, 0x39, 0x96, 0xf0, 0x37, + 0x06, 0x58, 0x4c, 0x0f, 0x40, 0x3e, 0x29, 0xeb, 0x63, 0xcc, 0x03, 0xd1, 0xbe, 0x6e, 0xe6, 0x01, + 0x51, 0x3f, 0x03, 0xf8, 0x37, 0x83, 0x77, 0x6a, 0xc9, 0xdc, 0x48, 0xcd, 0xaa, 0x88, 0xe5, 0x7b, + 0x23, 0x8f, 0xa5, 0x46, 0x90, 0xa1, 0x7c, 0x31, 0x6d, 0x05, 0xb5, 0xe6, 0xac, 0x5b, 0x59, 0xc9, + 0x46, 0x52, 0x2b, 0x50, 0x96, 0x21, 0xfc, 0x99, 0x01, 0xe6, 0x49, 0xda, 0x71, 0x53, 0xf3, 0xb9, + 0x91, 0x04, 0xf1, 0xdc, 0x26, 0x5e, 0xfe, 0x4a, 0x93, 0x51, 0x51, 0x94, 0xc3, 0xe6, 0x1d, 0x24, + 0x39, 0xc1, 0x5e, 0xe8, 0x12, 0xf3, 0x0b, 0x23, 0xee, 0x20, 0x77, 0xa4, 0x5f, 0x94, 0x00, 0xac, + 0xf2, 0xc9, 0xa7, 0xef, 0xe6, 0xc0, 0x25, 0x50, 0x3c, 0x26, 0xa7, 0xb2, 0xb1, 0x47, 0xfc, 0x4f, + 0x68, 0x83, 0x52, 0x07, 0xbb, 0x71, 0x32, 0xbc, 0x8d, 0xb8, 0xea, 0x22, 0xe9, 0xfc, 0x8d, 0xc2, + 0xeb, 0xc6, 0xea, 0x43, 0x03, 0x5c, 0x3e, 0xff, 0x42, 0x3f, 0x53, 0x5a, 0xbf, 0x37, 0xc0, 0xf2, + 0xc0, 0xdd, 0x3d, 0x87, 0xd1, 0xfd, 0x3c, 0xa3, 0xb7, 0x46, 0x7d, 0x09, 0x9b, 0x2c, 0x72, 0xfc, + 0xb6, 0xe8, 0x3c, 0xb2, 0xf4, 0x7e, 0x69, 0x80, 0xa5, 0xfe, 0xeb, 0xf0, 0x2c, 0xe3, 0x55, 0x7d, + 0x58, 0x00, 0x97, 0xcf, 0x6f, 0x98, 0x60, 0xa4, 0x27, 0xc3, 0xf1, 0x4c, 0xd8, 0x20, 0x9d, 0x32, + 0xf5, 0x50, 0xf9, 0x81, 0x01, 0xe6, 0xee, 0x69, 0xbb, 0xe4, 0x7f, 0x1d, 0x23, 0x9f, 0xed, 0x93, + 0xfa, 0x93, 0x2a, 0x28, 0xca, 0xe2, 0x56, 0xff, 0x6e, 0x80, 0x95, 0x73, 0x0b, 0x2b, 0x1f, 0x41, + 0xb1, 0xeb, 0x06, 0x0f, 0xe4, 0x4f, 0x34, 0x99, 0x9f, 0xcc, 0x36, 0x85, 0x14, 0x29, 0x6d, 0x26, + 0x7a, 0x85, 0xcf, 0x2a, 0x7a, 0xd5, 0x7f, 0x1a, 0xe0, 0xea, 0x27, 0x65, 0xe2, 0x33, 0x39, 0xd2, + 0x0d, 0x30, 0xa3, 0x9a, 0xa2, 0x53, 0x71, 0x9c, 0x6a, 0x0e, 0x50, 0x45, 0xe3, 0x14, 0x69, 0x6d, + 0xe3, 0xfa, 0xa3, 0xc7, 0xe5, 0x89, 0x8f, 0x1e, 0x97, 0x27, 0x3e, 0x7e, 0x5c, 0x9e, 0xf8, 0x61, + 0xaf, 0x6c, 0x3c, 0xea, 0x95, 0x8d, 0x8f, 
0x7a, 0x65, 0xe3, 0xe3, 0x5e, 0xd9, 0xf8, 0x77, 0xaf, + 0x6c, 0xfc, 0xea, 0x3f, 0xe5, 0x89, 0xef, 0x4e, 0x2b, 0xf0, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, + 0x8c, 0x3a, 0x89, 0x32, 0x37, 0x20, 0x00, 0x00, } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 53ff8f194b7..6e7f06a7f12 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -100,6 +100,10 @@ message CustomResourceDefinitionSpec { optional string group = 1; // Version is the version this resource belongs in + // Should be always first item in Versions field if provided. + // Optional, but at least one of Version or Versions must be set. + // Deprecated: Please use `Versions`. + // +optional optional string version = 2; // Names are the names used to describe this custom resource @@ -115,6 +119,18 @@ message CustomResourceDefinitionSpec { // Subresources describes the subresources for CustomResources // +optional optional CustomResourceSubresources subresources = 6; + + // Versions is the list of all supported versions for this resource. + // If Version field is provided, this field is optional. + // Validation: All versions must use the same validation schema for now. i.e., top + // level Validation field is applied to all of these versions. + // Order: The version name will be used to compute the order. + // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered + // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), + // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first + // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of + // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + repeated CustomResourceDefinitionVersion versions = 7; } // CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition @@ -125,6 +141,26 @@ message CustomResourceDefinitionStatus { // AcceptedNames are the names that are actually being used to serve discovery // They may be different than the names in spec. optional CustomResourceDefinitionNames acceptedNames = 2; + + // StoredVersions are all versions of CustomResources that were ever persisted. Tracking these + // versions allows a migration path for stored versions in etcd. The field is mutable + // so the migration controller can first finish a migration to another version (i.e. + // that no old objects are left in the storage), and then remove the rest of the + // versions from this list. + // None of the versions in this list can be removed from the spec.Versions field. + repeated string storedVersions = 3; +} + +message CustomResourceDefinitionVersion { + // Name is the version name, e.g. “v1”, “v2beta1”, etc. + optional string name = 1; + + // Served is a flag enabling/disabling this version from being served via REST APIs + optional bool served = 2; + + // Storage flags the version as storage version. There must be exactly one + // flagged as storage version. + optional bool storage = 3; } // CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. 
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index feee95feac1..db2340e7333 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -48,6 +48,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomResourceDefinitionSpec, Convert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomResourceDefinitionStatus, Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus, + Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion, + Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion, Convert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale, Convert_apiextensions_CustomResourceSubresourceScale_To_v1beta1_CustomResourceSubresourceScale, Convert_v1beta1_CustomResourceSubresourceStatus_To_apiextensions_CustomResourceSubresourceStatus, @@ -220,6 +222,7 @@ func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomRes out.Validation = nil } out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.Versions = *(*[]apiextensions.CustomResourceDefinitionVersion)(unsafe.Pointer(&in.Versions)) return nil } @@ -245,6 +248,7 @@ func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomRes out.Validation = nil } out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) + out.Versions = *(*[]CustomResourceDefinitionVersion)(unsafe.Pointer(&in.Versions)) return nil } @@ -258,6 +262,7 @@ func autoConvert_v1beta1_CustomResourceDefinitionStatus_To_apiextensions_CustomR if err := Convert_v1beta1_CustomResourceDefinitionNames_To_apiextensions_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { return err } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) return nil } @@ -271,6 +276,7 @@ func autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomR if err := Convert_apiextensions_CustomResourceDefinitionNames_To_v1beta1_CustomResourceDefinitionNames(&in.AcceptedNames, &out.AcceptedNames, s); err != nil { return err } + out.StoredVersions = *(*[]string)(unsafe.Pointer(&in.StoredVersions)) return nil } @@ -279,6 +285,30 @@ func Convert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResou return autoConvert_apiextensions_CustomResourceDefinitionStatus_To_v1beta1_CustomResourceDefinitionStatus(in, out, s) } +func autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + return nil +} + +// Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion is an autogenerated conversion function. 
+func Convert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in *CustomResourceDefinitionVersion, out *apiextensions.CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceDefinitionVersion_To_apiextensions_CustomResourceDefinitionVersion(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + out.Name = in.Name + out.Served = in.Served + out.Storage = in.Storage + return nil +} + +// Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion is an autogenerated conversion function. +func Convert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in *apiextensions.CustomResourceDefinitionVersion, out *CustomResourceDefinitionVersion, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceDefinitionVersion_To_v1beta1_CustomResourceDefinitionVersion(in, out, s) +} + func autoConvert_v1beta1_CustomResourceSubresourceScale_To_apiextensions_CustomResourceSubresourceScale(in *CustomResourceSubresourceScale, out *apiextensions.CustomResourceSubresourceScale, s conversion.Scope) error { out.SpecReplicasPath = in.SpecReplicasPath out.StatusReplicasPath = in.StatusReplicasPath diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index 4092f6dab39..c990055d03a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -150,6 +150,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti (*in).DeepCopyInto(*out) } } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + copy(*out, *in) + } return } @@ -174,6 +179,11 @@ func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefini } } in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) + if in.StoredVersions != nil { + in, out := &in.StoredVersions, &out.StoredVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -187,6 +197,22 @@ func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionSt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. +func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionVersion) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { *out = *in diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index 6b906d2cf31..9284bbc81c4 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -150,6 +150,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti (*in).DeepCopyInto(*out) } } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]CustomResourceDefinitionVersion, len(*in)) + copy(*out, *in) + } return } @@ -174,6 +179,11 @@ func (in *CustomResourceDefinitionStatus) DeepCopyInto(out *CustomResourceDefini } } in.AcceptedNames.DeepCopyInto(&out.AcceptedNames) + if in.StoredVersions != nil { + in, out := &in.StoredVersions, &out.StoredVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -187,6 +197,22 @@ func (in *CustomResourceDefinitionStatus) DeepCopy() *CustomResourceDefinitionSt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceDefinitionVersion) DeepCopyInto(out *CustomResourceDefinitionVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionVersion. +func (in *CustomResourceDefinitionVersion) DeepCopy() *CustomResourceDefinitionVersion { + if in == nil { + return nil + } + out := new(CustomResourceDefinitionVersion) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CustomResourceSubresourceScale) DeepCopyInto(out *CustomResourceSubresourceScale) { *out = *in diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index d4f9a30c849..4d5e2a73ec0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -24,6 +24,7 @@ go_library( "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset:go_default_library", @@ -84,6 +85,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/validation:all-srcs", ], tags = ["automanaged"], @@ -93,5 +95,8 @@ go_test( name = "go_default_test", srcs = ["customresource_handler_test.go"], embed = [":go_default_library"], - deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library"], + deps = [ + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion:go_default_library", + ], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/BUILD new file mode 100644 index 00000000000..6eff75181f6 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "converter.go", + "nop_converter.go", + ], + importpath = "k8s.io/apiextensions-apiserver/pkg/apiserver/conversion", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD index 9a71069758a..5a37caa82ec 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD @@ -13,6 +13,7 @@ go_test( "registration_test.go", "subresources_test.go", "validation_test.go", + "versioning_test.go", "yaml_test.go", ], tags = ["integration"], diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD 
b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD
index 32e6863c97f..95aa6d961d0 100644
--- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD
+++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD
@@ -22,6 +22,7 @@ go_library(
     srcs = ["versioning.go"],
     importpath = "k8s.io/apimachinery/pkg/runtime/serializer/versioning",
     deps = [
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
     ],
diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json
index a6a0272e673..9415da5b540 100644
--- a/staging/src/k8s.io/metrics/Godeps/Godeps.json
+++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json
@@ -278,6 +278,10 @@
             "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1",
             "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
         },
+        {
+            "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
+            "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+        },
         {
             "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1beta1",
             "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"

From fd3432ef058d35301bee6cb23a12fd37b4125704 Mon Sep 17 00:00:00 2001
From: Michael Taufen
Date: Thu, 10 May 2018 15:04:23 -0700
Subject: [PATCH 114/307] add dynamic config metrics

This PR exports config-related metrics from the Kubelet.

The Gauges for active, assigned, and last-known-good config can be used
to identify config versions and produce aggregate counts across several
nodes. The error-reporting Gauge can be used to determine whether a node
is experiencing a config-related error, and to produce an aggregate
count of nodes in an error state.
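As a concrete illustration of that aggregation use case, a monitoring tool could scrape a kubelet's /metrics endpoint and inspect the error gauge. The sketch below is illustrative only: it assumes the kubelet subsystem prefix is "kubelet" (so the full metric name would be kubelet_node_config_error), assumes the metrics endpoint is reachable at the given URL, and invents the helper name checkNodeConfigError.

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

// checkNodeConfigError scrapes a kubelet metrics endpoint and reports whether
// the node is currently in a config-related error state (gauge value 1).
func checkNodeConfigError(metricsURL string) (bool, error) {
	resp, err := http.Get(metricsURL)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	// Parse the Prometheus text exposition format into metric families.
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(resp.Body)
	if err != nil {
		return false, err
	}

	mf, ok := families["kubelet_node_config_error"]
	if !ok || len(mf.GetMetric()) == 0 {
		return false, fmt.Errorf("config error metric not exposed; is DynamicKubeletConfig enabled?")
	}
	return mf.GetMetric()[0].GetGauge().GetValue() == 1, nil
}

func main() {
	// The URL is a placeholder; point it at the kubelet's metrics endpoint.
	inError, err := checkNodeConfigError("http://localhost:10255/metrics")
	if err != nil {
		fmt.Println("scrape failed:", err)
		return
	}
	fmt.Println("node reports config error:", inError)
}

Summing this boolean across all nodes in a cluster yields the aggregate count of nodes in an error state that the commit message refers to.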
--- pkg/kubelet/kubeletconfig/status/BUILD | 1 + pkg/kubelet/kubeletconfig/status/status.go | 19 +++ pkg/kubelet/metrics/BUILD | 3 + pkg/kubelet/metrics/metrics.go | 140 +++++++++++++++++++ test/e2e_node/BUILD | 3 +- test/e2e_node/dynamic_kubelet_config_test.go | 117 +++++++++++++++- test/e2e_node/util.go | 19 +++ 7 files changed, 298 insertions(+), 4 deletions(-) diff --git a/pkg/kubelet/kubeletconfig/status/BUILD b/pkg/kubelet/kubeletconfig/status/BUILD index 666b3b42bef..fe925bfc905 100644 --- a/pkg/kubelet/kubeletconfig/status/BUILD +++ b/pkg/kubelet/kubeletconfig/status/BUILD @@ -11,6 +11,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status", deps = [ "//pkg/kubelet/kubeletconfig/util/log:go_default_library", + "//pkg/kubelet/metrics:go_default_library", "//pkg/util/node:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/kubelet/kubeletconfig/status/status.go b/pkg/kubelet/kubeletconfig/status/status.go index ae8cdec069c..750a504cf3e 100644 --- a/pkg/kubelet/kubeletconfig/status/status.go +++ b/pkg/kubelet/kubeletconfig/status/status.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" utillog "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log" + "k8s.io/kubernetes/pkg/kubelet/metrics" nodeutil "k8s.io/kubernetes/pkg/util/node" ) @@ -176,6 +177,24 @@ func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) { status.Error = s.errorOverride } + // update metrics based on the status we will sync + metrics.SetConfigError(len(status.Error) > 0) + err = metrics.SetAssignedConfig(status.Assigned) + if err != nil { + err = fmt.Errorf("failed to update Assigned config metric, error: %v", err) + return + } + err = metrics.SetActiveConfig(status.Active) + if err != nil { + err = fmt.Errorf("failed to update Active config metric, error: %v", err) + return + } + err = metrics.SetLastKnownGoodConfig(status.LastKnownGood) + if err != nil { + err = fmt.Errorf("failed to update LastKnownGood config metric, error: %v", err) + return + } + // apply the status to a copy of the node so we don't modify the object in the informer's store newNode := oldNode.DeepCopy() newNode.Status.Config = status diff --git a/pkg/kubelet/metrics/BUILD b/pkg/kubelet/metrics/BUILD index 2f8d8839738..8e6041502bc 100644 --- a/pkg/kubelet/metrics/BUILD +++ b/pkg/kubelet/metrics/BUILD @@ -10,9 +10,12 @@ go_library( srcs = ["metrics.go"], importpath = "k8s.io/kubernetes/pkg/kubelet/metrics", deps = [ + "//pkg/features:go_default_library", "//pkg/kubelet/container:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/pkg/kubelet/metrics/metrics.go b/pkg/kubelet/metrics/metrics.go index 4e9470f5a52..058253258e4 100644 --- a/pkg/kubelet/metrics/metrics.go +++ b/pkg/kubelet/metrics/metrics.go @@ -17,11 +17,15 @@ limitations under the License. 
package metrics import ( + "fmt" "sync" "time" "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + corev1 "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -47,6 +51,17 @@ const ( // Metrics keys of device plugin operations DevicePluginRegistrationCountKey = "device_plugin_registration_count" DevicePluginAllocationLatencyKey = "device_plugin_alloc_latency_microseconds" + + // Metric keys for node config + AssignedConfigKey = "node_config_assigned" + ActiveConfigKey = "node_config_active" + LastKnownGoodConfigKey = "node_config_last_known_good" + ConfigErrorKey = "node_config_error" + ConfigSourceLabelKey = "node_config_source" + ConfigSourceLabelValueLocal = "local" + ConfigUIDLabelKey = "node_config_uid" + ConfigResourceVersionLabelKey = "node_config_resource_version" + KubeletConfigKeyLabelKey = "node_config_kubelet_key" ) var ( @@ -150,6 +165,40 @@ var ( }, []string{"resource_name"}, ) + + // Metrics for node config + + AssignedConfig = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: AssignedConfigKey, + Help: "The node's understanding of intended config. The count is always 1.", + }, + []string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey}, + ) + ActiveConfig = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: ActiveConfigKey, + Help: "The config source the node is actively using. The count is always 1.", + }, + []string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey}, + ) + LastKnownGoodConfig = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: LastKnownGoodConfigKey, + Help: "The config source the node will fall back to when it encounters certain errors. 
The count is always 1.", + }, + []string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey}, + ) + ConfigError = prometheus.NewGauge( + prometheus.GaugeOpts{ + Subsystem: KubeletSubsystem, + Name: ConfigErrorKey, + Help: "This metric is true (1) if the node is experiencing a configuration-related error, false (0) otherwise.", + }, + ) ) var registerMetrics sync.Once @@ -172,6 +221,12 @@ func Register(containerCache kubecontainer.RuntimeCache, collectors ...prometheu prometheus.MustRegister(EvictionStatsAge) prometheus.MustRegister(DevicePluginRegistrationCount) prometheus.MustRegister(DevicePluginAllocationLatency) + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + prometheus.MustRegister(AssignedConfig) + prometheus.MustRegister(ActiveConfig) + prometheus.MustRegister(LastKnownGoodConfig) + prometheus.MustRegister(ConfigError) + } for _, collector := range collectors { prometheus.MustRegister(collector) } @@ -232,3 +287,88 @@ func (pc *podAndContainerCollector) Collect(ch chan<- prometheus.Metric) { prometheus.GaugeValue, float64(runningContainers)) } + +const configMapAPIPathFmt = "/api/v1/namespaces/%s/configmaps/%s" + +func configLabels(source *corev1.NodeConfigSource) (map[string]string, error) { + if source == nil { + return map[string]string{ + // prometheus requires all of the labels that can be set on the metric + ConfigSourceLabelKey: "local", + ConfigUIDLabelKey: "", + ConfigResourceVersionLabelKey: "", + KubeletConfigKeyLabelKey: "", + }, nil + } + if source.ConfigMap != nil { + return map[string]string{ + ConfigSourceLabelKey: fmt.Sprintf(configMapAPIPathFmt, source.ConfigMap.Namespace, source.ConfigMap.Name), + ConfigUIDLabelKey: string(source.ConfigMap.UID), + ConfigResourceVersionLabelKey: source.ConfigMap.ResourceVersion, + KubeletConfigKeyLabelKey: source.ConfigMap.KubeletConfigKey, + }, nil + } + return nil, fmt.Errorf("unrecognized config source type, all source subfields were nil") +} + +// track labels across metric updates, so we can delete old label sets and prevent leaks +var assignedConfigLabels map[string]string = map[string]string{} + +func SetAssignedConfig(source *corev1.NodeConfigSource) error { + // compute the timeseries labels from the source + labels, err := configLabels(source) + if err != nil { + return err + } + // clean up the old timeseries (WithLabelValues creates a new one for each distinct label set) + AssignedConfig.Delete(assignedConfigLabels) + // record the new timeseries + assignedConfigLabels = labels + // expose the new timeseries with a constant count of 1 + AssignedConfig.With(assignedConfigLabels).Set(1) + return nil +} + +// track labels across metric updates, so we can delete old label sets and prevent leaks +var activeConfigLabels map[string]string = map[string]string{} + +func SetActiveConfig(source *corev1.NodeConfigSource) error { + // compute the timeseries labels from the source + labels, err := configLabels(source) + if err != nil { + return err + } + // clean up the old timeseries (WithLabelValues creates a new one for each distinct label set) + ActiveConfig.Delete(activeConfigLabels) + // record the new timeseries + activeConfigLabels = labels + // expose the new timeseries with a constant count of 1 + ActiveConfig.With(activeConfigLabels).Set(1) + return nil +} + +// track labels across metric updates, so we can delete old label sets and prevent leaks +var lastKnownGoodConfigLabels map[string]string = map[string]string{} + +func 
SetLastKnownGoodConfig(source *corev1.NodeConfigSource) error { + // compute the timeseries labels from the source + labels, err := configLabels(source) + if err != nil { + return err + } + // clean up the old timeseries (WithLabelValues creates a new one for each distinct label set) + LastKnownGoodConfig.Delete(lastKnownGoodConfigLabels) + // record the new timeseries + lastKnownGoodConfigLabels = labels + // expose the new timeseries with a constant count of 1 + LastKnownGoodConfig.With(lastKnownGoodConfigLabels).Set(1) + return nil +} + +func SetConfigError(err bool) { + if err { + ConfigError.Set(1) + } else { + ConfigError.Set(0) + } +} diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 9113fa745d9..2520218911e 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -137,6 +137,7 @@ go_test( "//pkg/kubelet/types:go_default_library", "//pkg/security/apparmor:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/metrics:go_default_library", "//test/e2e_node/services:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/blang/semver:go_default_library", @@ -147,6 +148,7 @@ go_test( "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/onsi/gomega/gstruct:go_default_library", "//vendor/github.com/onsi/gomega/types:go_default_library", + "//vendor/github.com/prometheus/common/model:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -164,7 +166,6 @@ go_test( ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//test/e2e/common:go_default_library", - "//test/e2e/framework/metrics:go_default_library", "//test/e2e_node/system:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/kardianos/osext:go_default_library", diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index 3d709b99d10..e29cd65a440 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -18,6 +18,7 @@ package e2e_node import ( "fmt" + "reflect" "strings" "time" @@ -27,12 +28,17 @@ import ( apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" controller "k8s.io/kubernetes/pkg/kubelet/kubeletconfig" "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status" + "k8s.io/kubernetes/pkg/kubelet/metrics" + frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics" "k8s.io/kubernetes/test/e2e/framework" + "github.com/prometheus/common/model" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) @@ -45,8 +51,6 @@ type expectNodeConfigStatus struct { // If true, expect Status.Config.Active == Status.Config.LastKnownGood, // otherwise expect Status.Config.Active == Status.Config.Assigned. lkgActive bool - // If true, skip checking Status.Config.LastKnownGood == this.lastKnownGood in the status. 
- skipLkg bool } type nodeConfigTestCase struct { @@ -809,6 +813,8 @@ func (tc *nodeConfigTestCase) run(f *framework.Framework, fn func(f *framework.F tc.checkNodeConfigSource(f) // check status tc.checkConfigStatus(f) + // check that the Kubelet's config-related metrics are correct + tc.checkConfigMetrics(f) // check expectConfig if tc.expectConfig != nil { tc.checkConfig(f) @@ -929,7 +935,7 @@ func expectConfigStatus(tc *nodeConfigTestCase, actual *apiv1.NodeConfigStatus) errs = append(errs, spew.Sprintf("expected Assigned %#v but got %#v", expectAssigned, actual.Assigned)) } // check LastKnownGood matches tc.expectConfigStatus.lastKnownGood - if !tc.expectConfigStatus.skipLkg && !apiequality.Semantic.DeepEqual(tc.expectConfigStatus.lastKnownGood, actual.LastKnownGood) { + if !apiequality.Semantic.DeepEqual(tc.expectConfigStatus.lastKnownGood, actual.LastKnownGood) { errs = append(errs, spew.Sprintf("expected LastKnownGood %#v but got %#v", tc.expectConfigStatus.lastKnownGood, actual.LastKnownGood)) } // check Active matches Assigned or LastKnownGood, depending on tc.expectConfigStatus.lkgActive @@ -1016,6 +1022,111 @@ func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) { }, timeout, interval).Should(BeNil()) } +// checkConfigMetrics makes sure the Kubelet's config related metrics are as we expect, given the test case +func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) { + const ( + timeout = time.Minute + interval = time.Second + assignedConfigKey = metrics.KubeletSubsystem + "_" + metrics.AssignedConfigKey + activeConfigKey = metrics.KubeletSubsystem + "_" + metrics.ActiveConfigKey + lastKnownGoodConfigKey = metrics.KubeletSubsystem + "_" + metrics.LastKnownGoodConfigKey + configErrorKey = metrics.KubeletSubsystem + "_" + metrics.ConfigErrorKey + ) + // local config helper + mkLocalSample := func(name model.LabelValue) *model.Sample { + return &model.Sample{ + Metric: model.Metric(map[model.LabelName]model.LabelValue{ + model.MetricNameLabel: name, + metrics.ConfigSourceLabelKey: metrics.ConfigSourceLabelValueLocal, + metrics.ConfigUIDLabelKey: "", + metrics.ConfigResourceVersionLabelKey: "", + metrics.KubeletConfigKeyLabelKey: "", + }), + Value: 1, + } + } + // remote config helper + mkRemoteSample := func(name model.LabelValue, source *apiv1.NodeConfigSource) *model.Sample { + return &model.Sample{ + Metric: model.Metric(map[model.LabelName]model.LabelValue{ + model.MetricNameLabel: name, + metrics.ConfigSourceLabelKey: model.LabelValue(fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", source.ConfigMap.Namespace, source.ConfigMap.Name)), + metrics.ConfigUIDLabelKey: model.LabelValue(source.ConfigMap.UID), + metrics.ConfigResourceVersionLabelKey: model.LabelValue(source.ConfigMap.ResourceVersion), + metrics.KubeletConfigKeyLabelKey: model.LabelValue(source.ConfigMap.KubeletConfigKey), + }), + Value: 1, + } + } + // error helper + mkErrorSample := func(expectError bool) *model.Sample { + v := model.SampleValue(0) + if expectError { + v = model.SampleValue(1) + } + return &model.Sample{ + Metric: model.Metric(map[model.LabelName]model.LabelValue{model.MetricNameLabel: configErrorKey}), + Value: v, + } + } + // construct expected metrics + // assigned + assignedSamples := model.Samples{mkLocalSample(assignedConfigKey)} + assignedSource := tc.configSource.DeepCopy() + if assignedSource != nil && assignedSource.ConfigMap != nil { + assignedSource.ConfigMap.UID = tc.configMap.UID + assignedSource.ConfigMap.ResourceVersion = tc.configMap.ResourceVersion + 
assignedSamples = model.Samples{mkRemoteSample(assignedConfigKey, assignedSource)} + } + // last-known-good + lastKnownGoodSamples := model.Samples{mkLocalSample(lastKnownGoodConfigKey)} + lastKnownGoodSource := tc.expectConfigStatus.lastKnownGood + if lastKnownGoodSource != nil && lastKnownGoodSource.ConfigMap != nil { + lastKnownGoodSamples = model.Samples{mkRemoteSample(lastKnownGoodConfigKey, lastKnownGoodSource)} + } + // active + activeSamples := model.Samples{mkLocalSample(activeConfigKey)} + activeSource := assignedSource + if tc.expectConfigStatus.lkgActive { + activeSource = lastKnownGoodSource + } + if activeSource != nil && activeSource.ConfigMap != nil { + activeSamples = model.Samples{mkRemoteSample(activeConfigKey, activeSource)} + } + // error + errorSamples := model.Samples{mkErrorSample(len(tc.expectConfigStatus.err) > 0)} + // expected metrics + expect := frameworkmetrics.KubeletMetrics(map[string]model.Samples{ + assignedConfigKey: assignedSamples, + activeConfigKey: activeSamples, + lastKnownGoodConfigKey: lastKnownGoodSamples, + configErrorKey: errorSamples, + }) + // wait for expected metrics to appear + Eventually(func() error { + actual, err := getKubeletMetrics(sets.NewString( + assignedConfigKey, + activeConfigKey, + lastKnownGoodConfigKey, + configErrorKey, + )) + if err != nil { + return err + } + // clear timestamps from actual, so DeepEqual is time-invariant + for _, samples := range actual { + for _, sample := range samples { + sample.Timestamp = 0 + } + } + // compare to expected + if !reflect.DeepEqual(expect, actual) { + return fmt.Errorf("checkConfigMetrics: case: %s: expect metrics %s but got %s", tc.desc, spew.Sprintf("%#v", expect), spew.Sprintf("%#v", actual)) + } + return nil + }, timeout, interval).Should(BeNil()) +} + // constructs the expected SelfLink for a config map func configMapAPIPath(cm *apiv1.ConfigMap) string { return fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", cm.Namespace, cm.Name) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index cec7974bc4c..0cd2dca5540 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -45,6 +45,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/remote" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/metrics" + frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -334,6 +335,24 @@ func logKubeletMetrics(metricKeys ...string) { } } +// returns config related metrics from the local kubelet, filtered to the filterMetricNames passed in +func getKubeletMetrics(filterMetricNames sets.String) (frameworkmetrics.KubeletMetrics, error) { + // grab Kubelet metrics + ms, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName + ":10255") + if err != nil { + return nil, err + } + + filtered := metrics.NewKubeletMetrics() + for name := range ms { + if !filterMetricNames.Has(name) { + continue + } + filtered[name] = ms[name] + } + return filtered, nil +} + // runCommand runs the cmd and returns the combined stdout and stderr, or an // error if the command failed. 
func runCommand(cmd ...string) (string, error) { From d4ff4ca87fc7720dd06b22f878fa63f8ab00f4fb Mon Sep 17 00:00:00 2001 From: Sandeep Rajan Date: Fri, 27 Apr 2018 17:04:12 -0400 Subject: [PATCH 115/307] extend configmap tests to include CoreDNS --- test/e2e/network/dns_common.go | 46 ++++++++++++++++-- test/e2e/network/dns_configmap.go | 80 ++++++++++++++++++++++++++----- 2 files changed, 110 insertions(+), 16 deletions(-) diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 2e6bd200c7a..cc5011774b4 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -56,9 +56,8 @@ type dnsTestCommon struct { func newDnsTestCommon() dnsTestCommon { return dnsTestCommon{ - f: framework.NewDefaultFramework("dns-config-map"), - ns: "kube-system", - name: "kube-dns", + f: framework.NewDefaultFramework("dns-config-map"), + ns: "kube-system", } } @@ -73,6 +72,12 @@ func (t *dnsTestCommon) init() { t.dnsPod = &pods.Items[0] framework.Logf("Using DNS pod: %v", t.dnsPod.Name) + + if strings.Contains(t.dnsPod.Name, "coredns") { + t.name = "coredns" + } else { + t.name = "kube-dns" + } } func (t *dnsTestCommon) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) { @@ -103,6 +108,8 @@ func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string) func (t *dnsTestCommon) runDig(dnsName, target string) []string { cmd := []string{"/usr/bin/dig", "+short"} switch target { + case "coredns": + cmd = append(cmd, "@"+t.dnsPod.Status.PodIP) case "kube-dns": cmd = append(cmd, "@"+t.dnsPod.Status.PodIP, "-p", "10053") case "dnsmasq": @@ -162,6 +169,24 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { } } +func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string { + if t.name == "coredns" { + pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + return pcm.Data + } + return nil +} + +func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) { + if t.name == "coredns" { + t.setConfigMap(&v1.ConfigMap{Data: configMapData}) + t.deleteCoreDNSPods() + } else { + t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) + } +} + func (t *dnsTestCommon) deleteConfigMap() { By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) t.cm = nil @@ -235,6 +260,21 @@ func (t *dnsTestCommon) deleteUtilPod() { } } +// deleteCoreDNSPods manually deletes the CoreDNS pods to apply the changes to the ConfigMap. 
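+// They are recreated by the CoreDNS Deployment (or equivalent controller), and the replacement pods read the updated Corefile at startup.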
+func (t *dnsTestCommon) deleteCoreDNSPods() { + + label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) + options := metav1.ListOptions{LabelSelector: label.String()} + + pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options) + podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem) + + for _, pod := range pods.Items { + err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) + Expect(err).NotTo(HaveOccurred()) + } +} + func generateDNSServerPod(aRecords map[string]string) *v1.Pod { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index 3e5fb020b20..332b84d590f 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -151,6 +151,8 @@ func (t *dnsNameserverTest) run() { t.createUtilPod() defer t.deleteUtilPod() + originalConfigMapData := t.fetchDNSConfigMapData() + defer t.restoreDNSConfigMap(originalConfigMapData) t.createDNSServer(map[string]string{ "abc.acme.local": "1.1.1.1", @@ -159,10 +161,28 @@ func (t *dnsNameserverTest) run() { }) defer t.deleteDNSServerPod() - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ - "stubDomains": fmt.Sprintf(`{"acme.local":["%v"]}`, t.dnsServerPod.Status.PodIP), - "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), - }}) + if t.name == "coredns" { + t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + "Corefile": fmt.Sprintf(`.:53 { + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + proxy . %v + } + acme.local:53 { + proxy . %v + }`, t.dnsServerPod.Status.PodIP, t.dnsServerPod.Status.PodIP), + }}) + + t.deleteCoreDNSPods() + } else { + t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + "stubDomains": fmt.Sprintf(`{"acme.local":["%v"]}`, t.dnsServerPod.Status.PodIP), + "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), + }}) + } t.checkDNSRecordFrom( "abc.acme.local", @@ -180,7 +200,7 @@ func (t *dnsNameserverTest) run() { "dnsmasq", moreForeverTestTimeout) - t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) + t.restoreDNSConfigMap(originalConfigMapData) // Wait for the deleted ConfigMap to take effect, otherwise the // configuration can bleed into other tests. t.checkDNSRecordFrom( @@ -199,6 +219,8 @@ func (t *dnsPtrFwdTest) run() { t.createUtilPod() defer t.deleteUtilPod() + originalConfigMapData := t.fetchDNSConfigMapData() + defer t.restoreDNSConfigMap(originalConfigMapData) t.createDNSServerWithPtrRecord() defer t.deleteDNSServerPod() @@ -210,9 +232,24 @@ func (t *dnsPtrFwdTest) run() { "dnsmasq", moreForeverTestTimeout) - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ - "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), - }}) + if t.name == "coredns" { + t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + "Corefile": fmt.Sprintf(`.:53 { + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + proxy . 
%v + }`, t.dnsServerPod.Status.PodIP), + }}) + + t.deleteCoreDNSPods() + } else { + t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), + }}) + } t.checkDNSRecordFrom( "123.2.0.192.in-addr.arpa", @@ -220,7 +257,7 @@ func (t *dnsPtrFwdTest) run() { "dnsmasq", moreForeverTestTimeout) - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{}}) + t.restoreDNSConfigMap(originalConfigMapData) t.checkDNSRecordFrom( "123.2.0.192.in-addr.arpa", func(actual []string) bool { return len(actual) == 0 }, @@ -237,6 +274,8 @@ func (t *dnsExternalNameTest) run() { t.createUtilPod() defer t.deleteUtilPod() + originalConfigMapData := t.fetchDNSConfigMapData() + defer t.restoreDNSConfigMap(originalConfigMapData) fooHostname := "foo.example.com" t.createDNSServer(map[string]string{ @@ -270,9 +309,24 @@ func (t *dnsExternalNameTest) run() { "dnsmasq", moreForeverTestTimeout) - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ - "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), - }}) + if t.name == "coredns" { + t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + "Corefile": fmt.Sprintf(`.:53 { + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + proxy . %v + }`, t.dnsServerPod.Status.PodIP), + }}) + + t.deleteCoreDNSPods() + } else { + t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), + }}) + } t.checkDNSRecordFrom( fmt.Sprintf("%s.%s.svc.cluster.local", serviceNameLocal, f.Namespace.Name), @@ -282,7 +336,7 @@ func (t *dnsExternalNameTest) run() { "dnsmasq", moreForeverTestTimeout) - t.setConfigMap(&v1.ConfigMap{Data: map[string]string{}}) + t.restoreDNSConfigMap(originalConfigMapData) } var _ = SIGDescribe("DNS configMap nameserver", func() { From 2590e127f93f594c8eeb1cefb0ac6c3cf3c14a2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 23 May 2018 00:19:11 +0300 Subject: [PATCH 116/307] kubelet: Move RotateCertificates to the KubeletConfiguration struct --- cmd/kubelet/app/options/options.go | 4 +--- pkg/kubelet/apis/kubeletconfig/helpers_test.go | 1 + pkg/kubelet/apis/kubeletconfig/types.go | 5 +++++ pkg/kubelet/apis/kubeletconfig/v1beta1/types.go | 8 ++++++++ pkg/kubelet/apis/kubeletconfig/validation/validation.go | 3 +++ 5 files changed, 18 insertions(+), 3 deletions(-) diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index ad9a91a169b..2964754eb30 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -54,7 +54,6 @@ const defaultRootDir = "/var/lib/kubelet" type KubeletFlags struct { KubeConfig string BootstrapKubeconfig string - RotateCertificates bool // Insert a probability of random errors during calls to the master. ChaosChance float64 @@ -232,7 +231,6 @@ func NewKubeletFlags() *KubeletFlags { RegisterSchedulable: true, ExperimentalKernelMemcgNotification: false, RemoteRuntimeEndpoint: remoteRuntimeEndpoint, - RotateCertificates: false, // TODO(#54161:v1.11.0): Remove --enable-custom-metrics flag, it is deprecated. EnableCustomMetrics: false, NodeLabels: make(map[string]string), @@ -352,7 +350,6 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { "If the file specified by --kubeconfig does not exist, the bootstrap kubeconfig is used to request a client certificate from the API server. 
"+ "On success, a kubeconfig file referencing the generated client certificate and key is written to the path specified by --kubeconfig. "+ "The client certificate and key file will be stored in the directory pointed by --cert-dir.") - fs.BoolVar(&f.RotateCertificates, "rotate-certificates", f.RotateCertificates, " Auto rotate the kubelet client certificates by requesting new certificates from the kube-apiserver when the certificate expiration approaches.") fs.BoolVar(&f.ReallyCrashForTesting, "really-crash-for-testing", f.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.") fs.Float64Var(&f.ChaosChance, "chaos-chance", f.ChaosChance, "If > 0.0, introduce random client errors and latency. Intended for testing.") @@ -497,6 +494,7 @@ func AddKubeletConfigFlags(mainfs *pflag.FlagSet, c *kubeletconfig.KubeletConfig fs.StringVar(&c.TLSMinVersion, "tls-min-version", c.TLSMinVersion, "Minimum TLS version supported. "+ "Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants.") + fs.BoolVar(&c.RotateCertificates, "rotate-certificates", c.RotateCertificates, " Auto rotate the kubelet client certificates by requesting new certificates from the kube-apiserver when the certificate expiration approaches.") fs.Int32Var(&c.RegistryPullQPS, "registry-qps", c.RegistryPullQPS, "If > 0, limit registry pull QPS to this value. If 0, unlimited.") fs.Int32Var(&c.RegistryBurst, "registry-burst", c.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0") diff --git a/pkg/kubelet/apis/kubeletconfig/helpers_test.go b/pkg/kubelet/apis/kubeletconfig/helpers_test.go index a8775171c4c..929ac004223 100644 --- a/pkg/kubelet/apis/kubeletconfig/helpers_test.go +++ b/pkg/kubelet/apis/kubeletconfig/helpers_test.go @@ -188,6 +188,7 @@ var ( "KubeReserved[*]", "KubeletCgroups", "MakeIPTablesUtilChains", + "RotateCertificates", "ServerTLSBootstrap", "StaticPodURL", "StaticPodURLHeader[*][*]", diff --git a/pkg/kubelet/apis/kubeletconfig/types.go b/pkg/kubelet/apis/kubeletconfig/types.go index e0337e5aaf5..7a2c75055b0 100644 --- a/pkg/kubelet/apis/kubeletconfig/types.go +++ b/pkg/kubelet/apis/kubeletconfig/types.go @@ -82,6 +82,11 @@ type KubeletConfiguration struct { // TLSMinVersion is the minimum TLS version supported. // Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). TLSMinVersion string + // rotateCertificates enables client certificate rotation. The Kubelet will request a + // new certificate from the certificates.k8s.io API. This requires an approver to approve the + // certificate signing requests. The RotateKubeletClientCertificate feature + // must be enabled. + RotateCertificates bool // serverTLSBootstrap enables server certificate bootstrap. Instead of self // signing a serving certificate, the Kubelet will request a certificate from // the certificates.k8s.io API. This requires an approver to approve the diff --git a/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go b/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go index 984b61b3047..00b3a837e9b 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go +++ b/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go @@ -108,12 +108,20 @@ type KubeletConfiguration struct { // Default: "" // +optional TLSMinVersion string `json:"tlsMinVersion,omitempty"` + // rotateCertificates enables client certificate rotation. 
The Kubelet will request a + // new certificate from the certificates.k8s.io API. This requires an approver to approve the + // certificate signing requests. The RotateKubeletClientCertificate feature + // must be enabled. + // Default: false + // +optional + RotateCertificates bool `json:"rotateCertificates,omitempty"` // serverTLSBootstrap enables server certificate bootstrap. Instead of self // signing a serving certificate, the Kubelet will request a certificate from // the certificates.k8s.io API. This requires an approver to approve the // certificate signing requests. The RotateKubeletServerCertificate feature // must be enabled. // Default: false + // +optional ServerTLSBootstrap bool `json:"serverTLSBootstrap,omitempty"` // authentication specifies how requests to the Kubelet's server are authenticated // Defaults: diff --git a/pkg/kubelet/apis/kubeletconfig/validation/validation.go b/pkg/kubelet/apis/kubeletconfig/validation/validation.go index ab3bc4e14b4..aa0192183bb 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/validation.go +++ b/pkg/kubelet/apis/kubeletconfig/validation/validation.go @@ -93,6 +93,9 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error if kc.RegistryPullQPS < 0 { allErrors = append(allErrors, fmt.Errorf("invalid configuration: RegistryPullQPS (--registry-qps) %v must not be a negative number", kc.RegistryPullQPS)) } + if kc.RotateCertificates && !localFeatureGate.Enabled(features.RotateKubeletClientCertificate) { + allErrors = append(allErrors, fmt.Errorf("invalid configuration: RotateCertificates %v requires feature gate RotateKubeletClientCertificate", kc.RotateCertificates)) + } if kc.ServerTLSBootstrap && !localFeatureGate.Enabled(features.RotateKubeletServerCertificate) { allErrors = append(allErrors, fmt.Errorf("invalid configuration: ServerTLSBootstrap %v requires feature gate RotateKubeletServerCertificate", kc.ServerTLSBootstrap)) } From 57e74f99289b3a364bc94027232a746c7af29f7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 23 May 2018 00:19:21 +0300 Subject: [PATCH 117/307] autogenerated --- .../apis/kubeletconfig/v1beta1/zz_generated.conversion.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/kubelet/apis/kubeletconfig/v1beta1/zz_generated.conversion.go b/pkg/kubelet/apis/kubeletconfig/v1beta1/zz_generated.conversion.go index 67234da275c..5765982b744 100644 --- a/pkg/kubelet/apis/kubeletconfig/v1beta1/zz_generated.conversion.go +++ b/pkg/kubelet/apis/kubeletconfig/v1beta1/zz_generated.conversion.go @@ -156,6 +156,7 @@ func autoConvert_v1beta1_KubeletConfiguration_To_kubeletconfig_KubeletConfigurat out.TLSPrivateKeyFile = in.TLSPrivateKeyFile out.TLSCipherSuites = *(*[]string)(unsafe.Pointer(&in.TLSCipherSuites)) out.TLSMinVersion = in.TLSMinVersion + out.RotateCertificates = in.RotateCertificates out.ServerTLSBootstrap = in.ServerTLSBootstrap if err := Convert_v1beta1_KubeletAuthentication_To_kubeletconfig_KubeletAuthentication(&in.Authentication, &out.Authentication, s); err != nil { return err @@ -279,6 +280,7 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1beta1_KubeletConfigurat out.TLSPrivateKeyFile = in.TLSPrivateKeyFile out.TLSCipherSuites = *(*[]string)(unsafe.Pointer(&in.TLSCipherSuites)) out.TLSMinVersion = in.TLSMinVersion + out.RotateCertificates = in.RotateCertificates out.ServerTLSBootstrap = in.ServerTLSBootstrap if err := Convert_kubeletconfig_KubeletAuthentication_To_v1beta1_KubeletAuthentication(&in.Authentication, &out.Authentication, 
s); err != nil { return err From 18a8184dcecbefb824f971fac50acaba4202adaf Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Tue, 15 May 2018 16:01:12 -0700 Subject: [PATCH 118/307] Add warnings about cache invalidation. Part of https://github.com/kubernetes/kubernetes/pull/63040 is the assumption that scheduler cache updates must happen before equivalence cache updates for any given informer event. The reason for this is that the equivalence cache implementation checks the main cache for staleness while holding the equiv. cache write lock. case 1: If an informer invalidates an equiv. cache entry before the staleness check, then we know that the main cache update completed. case 2: If an informer blocks trying to grab the equiv. cache lock, then invalidation will occur right after the potentially stale update is written. This patch adds a note to places where we invalidate the equivalence cache so that hopefully nobody violates this invariant. --- pkg/scheduler/factory/factory.go | 15 +++++++++++++++ pkg/scheduler/scheduler.go | 3 +++ 2 files changed, 18 insertions(+) diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index 0c7dee77134..fb5cc22fcbe 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -634,6 +634,9 @@ func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) { return } + // NOTE: Because the scheduler uses snapshots of schedulerCache and the live + // version of equivalencePodCache, updates must be written to schedulerCache + // before invalidating equivalencePodCache. if err := c.schedulerCache.UpdatePod(oldPod, newPod); err != nil { glog.Errorf("scheduler cache UpdatePod failed: %v", err) } @@ -720,6 +723,9 @@ func (c *configFactory) deletePodFromCache(obj interface{}) { glog.Errorf("cannot convert to *v1.Pod: %v", t) return } + // NOTE: Because the scheduler uses snapshots of schedulerCache and the live + // version of equivalencePodCache, updates must be written to schedulerCache + // before invalidating equivalencePodCache. if err := c.schedulerCache.RemovePod(pod); err != nil { glog.Errorf("scheduler cache RemovePod failed: %v", err) } @@ -776,6 +782,9 @@ func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) { return } + // NOTE: Because the scheduler uses snapshots of schedulerCache and the live + // version of equivalencePodCache, updates must be written to schedulerCache + // before invalidating equivalencePodCache. if err := c.schedulerCache.UpdateNode(oldNode, newNode); err != nil { glog.Errorf("scheduler cache UpdateNode failed: %v", err) } @@ -869,6 +878,9 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) { glog.Errorf("cannot convert to *v1.Node: %v", t) return } + // NOTE: Because the scheduler uses snapshots of schedulerCache and the live + // version of equivalencePodCache, updates must be written to schedulerCache + // before invalidating equivalencePodCache. if err := c.schedulerCache.RemoveNode(node); err != nil { glog.Errorf("scheduler cache RemoveNode failed: %v", err) } @@ -1297,6 +1309,9 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue _, err := c.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} + // NOTE: Because the scheduler uses snapshots of schedulerCache and the live + // version of equivalencePodCache, updates must be written to schedulerCache + // before invalidating equivalencePodCache. 
c.schedulerCache.RemoveNode(&node) // invalidate cached predicate for the node if c.enableEquivalenceClassCache { diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index a3109599911..98beabe5c57 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -373,6 +373,9 @@ func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { // If the binding fails, scheduler will release resources allocated to assumed pod // immediately. assumed.Spec.NodeName = host + // NOTE: Because the scheduler uses snapshots of SchedulerCache and the live + // version of Ecache, updates must be written to SchedulerCache before + // invalidating Ecache. if err := sched.config.SchedulerCache.AssumePod(assumed); err != nil { glog.Errorf("scheduler cache AssumePod failed: %v", err) From b3a31b28afa3db09846a849a6c0fa34f1c4671fe Mon Sep 17 00:00:00 2001 From: CJ Cullen Date: Tue, 22 May 2018 16:44:46 -0700 Subject: [PATCH 119/307] re-reorder authorizers (RBAC before Webhook). --- cluster/gce/gci/configure-helper.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 356f103b5da..0935fc37f2c 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1683,7 +1683,7 @@ function start-kube-apiserver { local webhook_config_mount="" local webhook_config_volume="" if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then - authorization_mode="Webhook,${authorization_mode}" + authorization_mode="${authorization_mode},Webhook" params+=" --authorization-webhook-config-file=/etc/gcp_authz.config" webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false}," webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}}," From 0868db5bf17223bb551db1ac0720a339fef1c536 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Tue, 22 May 2018 17:20:49 -0700 Subject: [PATCH 120/307] fix the e2e node helpers that let tests reconfigure Kubelet The dynamic config tests were updated with the validation change, but the tests that try to use dynamic config via this helper were not. 
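For reference, after this change the helper builds a spec-level config source along these lines (a minimal sketch assuming the apiv1 types used in the diff below; UID and ResourceVersion are deliberately left unset, since the validation change referenced above is understood to forbid them in Node.Spec, with the Kubelet reporting the resolved values in Node.Status.Config instead):

	source := &apiv1.NodeConfigSource{
		ConfigMap: &apiv1.ConfigMapNodeConfigSource{
			Namespace:        "kube-system",
			Name:             cm.Name, // the ConfigMap the test helper just created
			KubeletConfigKey: "kubelet",
			// UID and ResourceVersion intentionally unset; see the removal below.
		},
	}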
--- test/e2e_node/util.go | 1 - 1 file changed, 1 deletion(-) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index cec7974bc4c..a0607deea92 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -172,7 +172,6 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube ConfigMap: &apiv1.ConfigMapNodeConfigSource{ Namespace: "kube-system", Name: cm.Name, - UID: cm.UID, KubeletConfigKey: "kubelet", }, } From 0188e8c2b5213a3480aa45bff9d9ae7e644571f2 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Tue, 22 May 2018 17:33:18 -0700 Subject: [PATCH 121/307] add colon separators to improve readability of test names --- test/e2e_node/dynamic_kubelet_config_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/test/e2e_node/dynamic_kubelet_config_test.go b/test/e2e_node/dynamic_kubelet_config_test.go index 3d709b99d10..186c5ab4140 100644 --- a/test/e2e_node/dynamic_kubelet_config_test.go +++ b/test/e2e_node/dynamic_kubelet_config_test.go @@ -296,7 +296,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disrupt }) }) - Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap", func() { + Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap:", func() { It(itDescription, func() { var err error // we base the "lkg" configmap off of the configuration from before the test @@ -360,7 +360,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disrupt }) }) - Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap.KubeletConfigKey", func() { + Context("update Node.Spec.ConfigSource: recover to last-known-good ConfigMap.KubeletConfigKey:", func() { It(itDescription, func() { const badConfigKey = "bad" var err error @@ -415,7 +415,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disrupt }) // exposes resource leaks across config changes - Context("update Node.Spec.ConfigSource: 100 update stress test", func() { + Context("update Node.Spec.ConfigSource: 100 update stress test:", func() { It(itDescription, func() { var err error @@ -472,7 +472,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disrupt // roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion // followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations // change the ResourceVersion of the ConfigMap. - Context("update ConfigMap in-place: state transitions", func() { + Context("update ConfigMap in-place: state transitions:", func() { It(itDescription, func() { var err error // we base the "correct" configmap off of the configuration from before the test @@ -552,7 +552,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disrupt // roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion // followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations // change the ResourceVersion of the ConfigMap. 
- Context("update ConfigMap in-place: recover to last-known-good version", func() { + Context("update ConfigMap in-place: recover to last-known-good version:", func() { It(itDescription, func() { var err error // we base the "lkg" configmap off of the configuration from before the test @@ -631,7 +631,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disrupt // roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion // followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations // change the ResourceVersion of the ConfigMap. - Context("delete and recreate ConfigMap: state transitions", func() { + Context("delete and recreate ConfigMap: state transitions:", func() { It(itDescription, func() { var err error // we base the "correct" configmap off of the configuration from before the test @@ -711,7 +711,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig] [Serial] [Disrupt // roll out a new Node.Spec.ConfigSource that references the new ConfigMap. In-place ConfigMap updates, including deletion // followed by re-creation, will cause all observing Kubelets to immediately restart for new config, because these operations // change the ResourceVersion of the ConfigMap. - Context("delete and recreate ConfigMap: error while ConfigMap is absent", func() { + Context("delete and recreate ConfigMap: error while ConfigMap is absent:", func() { It(itDescription, func() { var err error // we base the "correct" configmap off of the configuration from before the test From 43551e82081a1fa364879bd49e67095a3fc0926b Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Tue, 22 May 2018 19:29:00 -0400 Subject: [PATCH 122/307] Correctly identify types served in the kube-apiserver openapi doc --- api/openapi-spec/swagger.json | 84 +++++++++++++++++-- cmd/kube-apiserver/app/BUILD | 1 + cmd/kube-apiserver/app/server.go | 5 +- hack/make-rules/test-cmd-util.sh | 3 + pkg/master/BUILD | 1 + pkg/master/master_openapi_test.go | 3 +- .../pkg/endpoints/openapi/openapi.go | 20 ++++- staging/src/k8s.io/apiserver/pkg/server/BUILD | 1 + .../src/k8s.io/apiserver/pkg/server/config.go | 3 +- .../pkg/server/genericapiserver_test.go | 3 +- test/integration/framework/BUILD | 1 + test/integration/framework/master_utils.go | 3 +- 12 files changed, 112 insertions(+), 16 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b8fa0e77484..0ae179a1f28 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -84843,7 +84843,14 @@ "description": "Status indicates the actual state of the CustomResourceDefinition", "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionStatus" } - } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiextensions.k8s.io", + "kind": "CustomResourceDefinition", + "version": "v1beta1" + } + ] }, "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition": { "description": "CustomResourceDefinitionCondition contains details for the current condition of this pod.", @@ -84898,7 +84905,14 @@ "metadata": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" } - } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiextensions.k8s.io", + "kind": "CustomResourceDefinitionList", + "version": "v1beta1" + } + ] }, 
"io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionNames": { "description": "CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition", @@ -85523,6 +85537,21 @@ "kind": "DeleteOptions", "version": "v1beta1" }, + { + "group": "apiextensions.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, { "group": "apps", "kind": "DeleteOptions", @@ -86061,6 +86090,21 @@ "kind": "WatchEvent", "version": "v1beta1" }, + { + "group": "apiextensions.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, { "group": "apps", "kind": "WatchEvent", @@ -86280,7 +86324,14 @@ "description": "Status contains derived information about an API server", "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus" } - } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiregistration.k8s.io", + "kind": "APIService", + "version": "v1" + } + ] }, "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceCondition": { "required": [ @@ -86333,7 +86384,14 @@ "metadata": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" } - } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiregistration.k8s.io", + "kind": "APIServiceList", + "version": "v1" + } + ] }, "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceSpec": { "description": "APIServiceSpec contains information for locating and communicating with a server. Only https is supported, though you are able to disable certificate verification.", @@ -86425,7 +86483,14 @@ "description": "Status contains derived information about an API server", "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceStatus" } - } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiregistration.k8s.io", + "kind": "APIService", + "version": "v1beta1" + } + ] }, "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceCondition": { "required": [ @@ -86478,7 +86543,14 @@ "metadata": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" } - } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiregistration.k8s.io", + "kind": "APIServiceList", + "version": "v1beta1" + } + ] }, "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1beta1.APIServiceSpec": { "description": "APIServiceSpec contains information for locating and communicating with a server. 
Only https is supported, though you are able to disable certificate verification.", diff --git a/cmd/kube-apiserver/app/BUILD b/cmd/kube-apiserver/app/BUILD index 9ea3abbf64e..690d0f0193a 100644 --- a/cmd/kube-apiserver/app/BUILD +++ b/cmd/kube-apiserver/app/BUILD @@ -70,6 +70,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 6b29c564d17..86724e41a6a 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -35,6 +35,7 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + extensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -46,6 +47,7 @@ import ( webhookinit "k8s.io/apiserver/pkg/admission/plugin/webhook/initializer" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authorization/authorizer" + openapinamer "k8s.io/apiserver/pkg/endpoints/openapi" "k8s.io/apiserver/pkg/server" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/filters" @@ -61,6 +63,7 @@ import ( "k8s.io/client-go/restmapper" certutil "k8s.io/client-go/util/cert" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" + aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme" openapi "k8s.io/kube-openapi/pkg/common" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -432,7 +435,7 @@ func BuildGenericConfig( return } - genericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, legacyscheme.Scheme) + genericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme, extensionsapiserver.Scheme, aggregatorscheme.Scheme)) genericConfig.OpenAPIConfig.PostProcessSpec = postProcessOpenAPISpecForBackwardCompatibility genericConfig.OpenAPIConfig.Info.Title = "Kubernetes" genericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig() diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index bba8d4656cc..01bbcfe99c0 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -1453,6 +1453,7 @@ run_kubectl_old_print_tests() { "spec": { "group": "company.com", "version": "v1", + "scope": "Namespaced", "names": { "plural": "foos", "kind": "Foo" @@ -1712,6 +1713,7 @@ run_crd_tests() { "spec": { "group": "company.com", "version": "v1", + "scope": "Namespaced", "names": { "plural": "foos", "kind": "Foo" @@ -1733,6 +1735,7 @@ __EOF__ "spec": { "group": "company.com", "version": "v1", + "scope": "Namespaced", "names": { "plural": "bars", "kind": "Bar" diff --git a/pkg/master/BUILD b/pkg/master/BUILD index dbbd0abcc88..8a4fb904282 100644 --- a/pkg/master/BUILD +++ b/pkg/master/BUILD @@ -165,6 +165,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", diff --git a/pkg/master/master_openapi_test.go b/pkg/master/master_openapi_test.go index e54417769ea..b8053e89af5 100644 --- a/pkg/master/master_openapi_test.go +++ b/pkg/master/master_openapi_test.go @@ -27,6 +27,7 @@ import ( "net/http/httptest" "testing" + openapinamer "k8s.io/apiserver/pkg/endpoints/openapi" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/kubernetes/pkg/api/legacyscheme" openapigen "k8s.io/kubernetes/pkg/generated/openapi" @@ -44,7 +45,7 @@ func TestValidOpenAPISpec(t *testing.T) { defer etcdserver.Terminate(t) config.GenericConfig.EnableIndex = true - config.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapigen.GetOpenAPIDefinitions, legacyscheme.Scheme) + config.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapigen.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme)) config.GenericConfig.OpenAPIConfig.Info = &spec.Info{ InfoProps: spec.InfoProps{ Title: "Kubernetes", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go b/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go index 7b8f3589e02..e512f29b38c 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go @@ -143,12 +143,24 @@ func typeName(t reflect.Type) string { } // NewDefinitionNamer constructs a new DefinitionNamer to be used to customize OpenAPI spec. 
-func NewDefinitionNamer(s *runtime.Scheme) DefinitionNamer { - ret := DefinitionNamer{ +func NewDefinitionNamer(schemes ...*runtime.Scheme) *DefinitionNamer { + ret := &DefinitionNamer{ typeGroupVersionKinds: map[string]groupVersionKinds{}, } - for gvk, rtype := range s.AllKnownTypes() { - ret.typeGroupVersionKinds[typeName(rtype)] = append(ret.typeGroupVersionKinds[typeName(rtype)], gvkConvert(gvk)) + for _, s := range schemes { + for gvk, rtype := range s.AllKnownTypes() { + newGVK := gvkConvert(gvk) + exists := false + for _, existingGVK := range ret.typeGroupVersionKinds[typeName(rtype)] { + if newGVK == existingGVK { + exists = true + break + } + } + if !exists { + ret.typeGroupVersionKinds[typeName(rtype)] = append(ret.typeGroupVersionKinds[typeName(rtype)], newGVK) + } + } } for _, gvk := range ret.typeGroupVersionKinds { sort.Sort(gvk) diff --git a/staging/src/k8s.io/apiserver/pkg/server/BUILD b/staging/src/k8s.io/apiserver/pkg/server/BUILD index 96dbb82a22b..dcefdb4b746 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/BUILD @@ -27,6 +27,7 @@ go_test( "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/discovery:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/filters:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index eb1c27d141f..75a2b5dcc56 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -278,8 +278,7 @@ func NewRecommendedConfig(codecs serializer.CodecFactory) *RecommendedConfig { } } -func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, scheme *runtime.Scheme) *openapicommon.Config { - defNamer := apiopenapi.NewDefinitionNamer(scheme) +func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.Config { return &openapicommon.Config{ ProtocolList: []string{"https"}, IgnorePrefixes: []string{"/swaggerapi"}, diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go index 4b6e7b7ddad..fbe7235c2c2 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go @@ -44,6 +44,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/discovery" genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" + openapinamer "k8s.io/apiserver/pkg/endpoints/openapi" "k8s.io/apiserver/pkg/registry/rest" genericfilters "k8s.io/apiserver/pkg/server/filters" "k8s.io/client-go/informers" @@ -95,7 +96,7 @@ func setUp(t *testing.T) (Config, *assert.Assertions) { t.Fatal("unable to create fake client set") } - config.OpenAPIConfig = DefaultOpenAPIConfig(testGetOpenAPIDefinitions, runtime.NewScheme()) + config.OpenAPIConfig = DefaultOpenAPIConfig(testGetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(runtime.NewScheme())) config.OpenAPIConfig.Info.Version = "unversioned" config.SwaggerConfig = DefaultSwaggerConfig() sharedInformers := 
informers.NewSharedInformerFactory(clientset, config.LoopbackClientConfig.Timeout) diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD index 3cc28e13ca9..94573f4db96 100644 --- a/test/integration/framework/BUILD +++ b/test/integration/framework/BUILD @@ -53,6 +53,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library", "//vendor/k8s.io/apiserver/pkg/authorization/union:go_default_library", + "//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 387d3bb4d47..1f6caeb6cb8 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -44,6 +44,7 @@ import ( "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" authorizerunion "k8s.io/apiserver/pkg/authorization/union" + openapinamer "k8s.io/apiserver/pkg/endpoints/openapi" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/options" serverstorage "k8s.io/apiserver/pkg/server/storage" @@ -124,7 +125,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv if masterConfig == nil { masterConfig = NewMasterConfig() - masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, legacyscheme.Scheme) + masterConfig.GenericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(legacyscheme.Scheme)) masterConfig.GenericConfig.OpenAPIConfig.Info = &spec.Info{ InfoProps: spec.InfoProps{ Title: "Kubernetes", From df0f108a023d6d8ec9fbfe869c6c573eaa42a1ee Mon Sep 17 00:00:00 2001 From: Yecheng Fu Date: Thu, 10 May 2018 15:06:38 +0800 Subject: [PATCH 123/307] Fixes fsGroup check in local volume in containerized kubelet. Except this, it also fixes fsGroup check when volume source is a normal directory whether kubelet is running on the host or in a container. --- pkg/util/mount/mount_linux.go | 109 +++++++- pkg/util/mount/mount_linux_test.go | 265 ++++++++++++++++++- pkg/util/mount/nsenter_mount.go | 2 +- test/e2e/storage/persistent_volumes-local.go | 34 --- 4 files changed, 353 insertions(+), 57 deletions(-) diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index dbb864cd479..9059503e819 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -42,6 +42,8 @@ const ( maxListTries = 3 // Number of fields per line in /proc/mounts as per the fstab man page. expectedNumFieldsPerLine = 6 + // At least number of fields per line in /proc//mountinfo. 
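+ // (A mountinfo line has ten mandatory fields: mount ID, parent ID, major:minor, root, mount point, mount options, the "-" separator, filesystem type, mount source, and super options; optional fields can add more.)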
+ expectedAtLeastNumFieldsPerMountInfo = 10 // Location of the mount file to use procMountsPath = "/proc/mounts" // Location of the mountinfo file @@ -598,7 +600,7 @@ func isShared(mount string, mountInfoPath string) (bool, error) { } // parse optional parameters - for _, opt := range info.optional { + for _, opt := range info.optionalFields { if strings.HasPrefix(opt, "shared:") { return true, nil } @@ -606,14 +608,27 @@ func isShared(mount string, mountInfoPath string) (bool, error) { return false, nil } +// This represents a single line in /proc//mountinfo. type mountInfo struct { - // Path of the mount point + // Unique ID for the mount (maybe reused after umount). + id int + // The ID of the parent mount (or of self for the root of this mount namespace's mount tree). + parentID int + // The value of `st_dev` for files on this filesystem. + majorMinor string + // The pathname of the directory in the filesystem which forms the root of this mount. + root string + // Mount source, filesystem-specific information. e.g. device, tmpfs name. + source string + // Mount point, the pathname of the mount point. mountPoint string - // list of "optional parameters", mount propagation is one of them - optional []string - // mount options + // Optional fieds, zero or more fields of the form "tag[:value]". + optionalFields []string + // The filesystem type in the form "type[.subtype]". + fsType string + // Per-mount options. mountOptions []string - // super options: per-superblock options. + // Per-superblock options. superOptions []string } @@ -633,20 +648,38 @@ func parseMountInfo(filename string) ([]mountInfo, error) { } // See `man proc` for authoritative description of format of the file. fields := strings.Fields(line) - if len(fields) < 10 { - return nil, fmt.Errorf("wrong number of fields in (expected %d, got %d): %s", 10, len(fields), line) + if len(fields) < expectedAtLeastNumFieldsPerMountInfo { + return nil, fmt.Errorf("wrong number of fields in (expected at least %d, got %d): %s", expectedAtLeastNumFieldsPerMountInfo, len(fields), line) + } + id, err := strconv.Atoi(fields[0]) + if err != nil { + return nil, err + } + parentID, err := strconv.Atoi(fields[1]) + if err != nil { + return nil, err } info := mountInfo{ + id: id, + parentID: parentID, + majorMinor: fields[2], + root: fields[3], mountPoint: fields[4], mountOptions: strings.Split(fields[5], ","), - optional: []string{}, } // All fields until "-" are "optional fields". - for i := 6; i < len(fields) && fields[i] != "-"; i++ { - info.optional = append(info.optional, fields[i]) + i := 6 + for ; i < len(fields) && fields[i] != "-"; i++ { + info.optionalFields = append(info.optionalFields, fields[i]) } - superOpts := fields[len(fields)-1] - info.superOptions = strings.Split(superOpts, ",") + // Parse the rest 3 fields. 
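+ // That is, the remaining three fields after the "-" separator: filesystem type, mount source, and per-superblock (super) options.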
+ i += 1 + if len(fields)-i < 3 { + return nil, fmt.Errorf("expect 3 fields in %s, got %d", line, len(fields)-i) + } + info.fsType = fields[i] + info.source = fields[i+1] + info.superOptions = strings.Split(fields[i+2], ",") infos = append(infos, info) } return infos, nil @@ -967,7 +1000,7 @@ func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { if err != nil { return nil, err } - return getMountRefsByDev(mounter, realpath) + return searchMountPoints(realpath, procMountInfoPath) } func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) { @@ -1216,3 +1249,51 @@ func doSafeOpen(pathname string, base string) (int, error) { return finalFD, nil } + +// searchMountPoints finds all mount references to the source, returns a list of +// mountpoints. +// This function assumes source cannot be device. +// Some filesystems may share a source name, e.g. tmpfs. And for bind mounting, +// it's possible to mount a non-root path of a filesystem, so we need to use +// root path and major:minor to represent mount source uniquely. +// This implementation is shared between Linux and NsEnterMounter +func searchMountPoints(hostSource, mountInfoPath string) ([]string, error) { + mis, err := parseMountInfo(mountInfoPath) + if err != nil { + return nil, err + } + + mountID := 0 + rootPath := "" + majorMinor := "" + + // Finding the underlying root path and major:minor if possible. + // We need search in backward order because it's possible for later mounts + // to overlap earlier mounts. + for i := len(mis) - 1; i >= 0; i-- { + if hostSource == mis[i].mountPoint || pathWithinBase(hostSource, mis[i].mountPoint) { + // If it's a mount point or path under a mount point. + mountID = mis[i].id + rootPath = filepath.Join(mis[i].root, strings.TrimPrefix(hostSource, mis[i].mountPoint)) + majorMinor = mis[i].majorMinor + break + } + } + + if rootPath == "" || majorMinor == "" { + return nil, fmt.Errorf("failed to get root path and major:minor for %s", hostSource) + } + + var refs []string + for i := range mis { + if mis[i].id == mountID { + // Ignore mount entry for mount source itself. 
+ continue + } + if mis[i].root == rootPath && mis[i].majorMinor == majorMinor { + refs = append(refs, mis[i].mountPoint) + } + } + + return refs, nil +} diff --git a/pkg/util/mount/mount_linux_test.go b/pkg/util/mount/mount_linux_test.go index 6028d608e97..b9a1049a4f9 100644 --- a/pkg/util/mount/mount_linux_test.go +++ b/pkg/util/mount/mount_linux_test.go @@ -1383,6 +1383,19 @@ func TestParseMountInfo(t *testing.T) { 187 23 0:58 / /var/lib/kubelet/pods/1fc5ea21-eff4-11e7-ac80-0e858b8eaf40/volumes/kubernetes.io~nfs/nfs2 rw,relatime shared:96 - nfs4 172.18.4.223:/srv/nfs2 rw,vers=4.0,rsize=524288,wsize=524288,namlen=255,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=172.18.4.223,local_lock=none,addr=172.18.4.223 188 24 0:58 / /var/lib/kubelet/pods/43219158-e5e1-11e7-a392-0e858b8eaf40/volume-subpaths/nfs1/subpath1/0 rw,relatime shared:89 - nfs4 172.18.4.223:/srv/nfs/foo rw,vers=4.0,rsize=524288,wsize=524288,namlen=255,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=172.18.4.223,local_lock=none,addr=172.18.4.223 347 60 0:71 / /var/lib/kubelet/pods/13195d46-f9fa-11e7-bbf1-5254007a695a/volumes/kubernetes.io~nfs/vol2 rw,relatime shared:170 - nfs 172.17.0.3:/exports/2 rw,vers=3,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=172.17.0.3,mountvers=3,mountport=20048,mountproto=udp,local_lock=none,addr=172.17.0.3 +222 24 253:0 /tmp/src /mnt/dst rw,relatime shared:1 - ext4 /dev/mapper/vagrant--vg-root rw,errors=remount-ro,data=ordered +28 18 0:24 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:9 - tmpfs tmpfs ro,mode=755 +29 28 0:25 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd +31 28 0:27 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,cpuset +32 28 0:28 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,cpu,cpuacct +33 28 0:29 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer +34 28 0:30 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,net_prio +35 28 0:31 / /sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,pids +36 28 0:32 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,devices +37 28 0:33 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb +38 28 0:34 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:20 - cgroup cgroup rw,blkio +39 28 0:35 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:21 - cgroup cgroup rw,memory +40 28 0:36 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:22 - cgroup cgroup rw,perf_event ` tempDir, filename, err := writeFile(info) if err != nil { @@ -1392,17 +1405,103 @@ func TestParseMountInfo(t *testing.T) { tests := []struct { name string - mountPoint string + id int expectedInfo mountInfo }{ { "simple bind mount", - "/var/lib/kubelet", + 189, mountInfo{ - mountPoint: "/var/lib/kubelet", - optional: []string{"shared:30"}, - mountOptions: []string{"rw", "relatime"}, - superOptions: []string{"rw", "commit=30", "data=ordered"}, + id: 189, + parentID: 80, + majorMinor: "8:1", + root: "/var/lib/kubelet", + source: "/dev/sda1", + mountPoint: "/var/lib/kubelet", + optionalFields: []string{"shared:30"}, + fsType: "ext4", + mountOptions: []string{"rw", "relatime"}, + superOptions: 
[]string{"rw", "commit=30", "data=ordered"}, + }, + }, + { + "bind mount a directory", + 222, + mountInfo{ + id: 222, + parentID: 24, + majorMinor: "253:0", + root: "/tmp/src", + source: "/dev/mapper/vagrant--vg-root", + mountPoint: "/mnt/dst", + optionalFields: []string{"shared:1"}, + fsType: "ext4", + mountOptions: []string{"rw", "relatime"}, + superOptions: []string{"rw", "errors=remount-ro", "data=ordered"}, + }, + }, + { + "more than one optional fields", + 224, + mountInfo{ + id: 224, + parentID: 62, + majorMinor: "253:0", + root: "/var/lib/docker/devicemapper/test/shared", + source: "/dev/mapper/ssd-root", + mountPoint: "/var/lib/docker/devicemapper/test/shared", + optionalFields: []string{"master:1", "shared:44"}, + fsType: "ext4", + mountOptions: []string{"rw", "relatime"}, + superOptions: []string{"rw", "seclabel", "data=ordered"}, + }, + }, + { + "cgroup-mountpoint", + 28, + mountInfo{ + id: 28, + parentID: 18, + majorMinor: "0:24", + root: "/", + source: "tmpfs", + mountPoint: "/sys/fs/cgroup", + optionalFields: []string{"shared:9"}, + fsType: "tmpfs", + mountOptions: []string{"ro", "nosuid", "nodev", "noexec"}, + superOptions: []string{"ro", "mode=755"}, + }, + }, + { + "cgroup-subsystem-systemd-mountpoint", + 29, + mountInfo{ + id: 29, + parentID: 28, + majorMinor: "0:25", + root: "/", + source: "cgroup", + mountPoint: "/sys/fs/cgroup/systemd", + optionalFields: []string{"shared:10"}, + fsType: "cgroup", + mountOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + superOptions: []string{"rw", "xattr", "release_agent=/lib/systemd/systemd-cgroups-agent", "name=systemd"}, + }, + }, + { + "cgroup-subsystem-cpuset-mountpoint", + 31, + mountInfo{ + id: 31, + parentID: 28, + majorMinor: "0:27", + root: "/", + source: "cgroup", + mountPoint: "/sys/fs/cgroup/cpuset", + optionalFields: []string{"shared:13"}, + fsType: "cgroup", + mountOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + superOptions: []string{"rw", "cpuset"}, }, }, } @@ -1415,7 +1514,7 @@ func TestParseMountInfo(t *testing.T) { for _, test := range tests { found := false for _, info := range infos { - if info.mountPoint == test.mountPoint { + if info.id == test.id { found = true if !reflect.DeepEqual(info, test.expectedInfo) { t.Errorf("Test case %q:\n expected: %+v\n got: %+v", test.name, test.expectedInfo, info) @@ -1424,7 +1523,7 @@ func TestParseMountInfo(t *testing.T) { } } if !found { - t.Errorf("Test case %q: mountPoint %s not found", test.name, test.mountPoint) + t.Errorf("Test case %q: mountPoint %d not found", test.name, test.id) } } } @@ -1917,3 +2016,153 @@ func isOperationNotPermittedError(err error) bool { } return false } + +func TestSearchMountPoints(t *testing.T) { + base := ` +19 25 0:18 / /sys rw,nosuid,nodev,noexec,relatime shared:7 - sysfs sysfs rw +20 25 0:4 / /proc rw,nosuid,nodev,noexec,relatime shared:12 - proc proc rw +21 25 0:6 / /dev rw,nosuid,relatime shared:2 - devtmpfs udev rw,size=4058156k,nr_inodes=1014539,mode=755 +22 21 0:14 / /dev/pts rw,nosuid,noexec,relatime shared:3 - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +23 25 0:19 / /run rw,nosuid,noexec,relatime shared:5 - tmpfs tmpfs rw,size=815692k,mode=755 +25 0 252:0 / / rw,relatime shared:1 - ext4 /dev/mapper/ubuntu--vg-root rw,errors=remount-ro,data=ordered +26 19 0:12 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:8 - securityfs securityfs rw +27 21 0:21 / /dev/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw +28 23 0:22 / /run/lock rw,nosuid,nodev,noexec,relatime shared:6 - tmpfs tmpfs 
rw,size=5120k +29 19 0:23 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:9 - tmpfs tmpfs ro,mode=755 +30 29 0:24 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd +31 19 0:25 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:11 - pstore pstore rw +32 29 0:26 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,devices +33 29 0:27 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,freezer +34 29 0:28 / /sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,pids +35 29 0:29 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,blkio +36 29 0:30 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,memory +37 29 0:31 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event +38 29 0:32 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb +39 29 0:33 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:20 - cgroup cgroup rw,cpu,cpuacct +40 29 0:34 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:21 - cgroup cgroup rw,cpuset +41 29 0:35 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:22 - cgroup cgroup rw,net_cls,net_prio +58 25 7:1 / /mnt/disks/blkvol1 rw,relatime shared:38 - ext4 /dev/loop1 rw,data=ordere +` + + testcases := []struct { + name string + source string + mountInfos string + expectedRefs []string + expectedErr error + }{ + { + "dir", + "/mnt/disks/vol1", + base, + nil, + nil, + }, + { + "dir-used", + "/mnt/disks/vol1", + base + ` +56 25 252:0 /mnt/disks/vol1 /var/lib/kubelet/pods/1890aef5-5a60-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test rw,relatime shared:1 - ext4 /dev/mapper/ubuntu--vg-root rw,errors=remount-ro,data=ordered +57 25 0:45 / /mnt/disks/vol rw,relatime shared:36 - tmpfs tmpfs rw +`, + []string{"/var/lib/kubelet/pods/1890aef5-5a60-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test"}, + nil, + }, + { + "tmpfs-vol", + "/mnt/disks/vol1", + base + `120 25 0:76 / /mnt/disks/vol1 rw,relatime shared:41 - tmpfs vol1 rw,size=10000k +`, + nil, + nil, + }, + { + "tmpfs-vol-used-by-two-pods", + "/mnt/disks/vol1", + base + `120 25 0:76 / /mnt/disks/vol1 rw,relatime shared:41 - tmpfs vol1 rw,size=10000k +196 25 0:76 / /var/lib/kubelet/pods/ade3ac21-5a5b-11e8-8559-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-8f263585 rw,relatime shared:41 - tmpfs vol1 rw,size=10000k +228 25 0:76 / /var/lib/kubelet/pods/ac60532d-5a5b-11e8-8559-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-8f263585 rw,relatime shared:41 - tmpfs vol1 rw,size=10000k +`, + []string{ + "/var/lib/kubelet/pods/ade3ac21-5a5b-11e8-8559-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-8f263585", + "/var/lib/kubelet/pods/ac60532d-5a5b-11e8-8559-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-8f263585", + }, + nil, + }, + { + "tmpfs-subdir-used-indirectly-via-bindmount-dir-by-one-pod", + "/mnt/vol1/foo", + base + `177 25 0:46 / /mnt/data rw,relatime shared:37 - tmpfs data rw +190 25 0:46 /vol1 /mnt/vol1 rw,relatime shared:37 - tmpfs data rw +191 25 0:46 /vol2 /mnt/vol2 rw,relatime shared:37 - tmpfs data rw +62 25 0:46 /vol1/foo /var/lib/kubelet/pods/e25f2f01-5b06-11e8-8694-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test rw,relatime 
shared:37 - tmpfs data rw +`, + []string{"/var/lib/kubelet/pods/e25f2f01-5b06-11e8-8694-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test"}, + nil, + }, + { + "dir-bindmounted", + "/mnt/disks/vol2", + base + `342 25 252:0 /mnt/disks/vol2 /mnt/disks/vol2 rw,relatime shared:1 - ext4 /dev/mapper/ubuntu--vg-root rw,errors=remount-ro,data=ordered +`, + nil, + nil, + }, + { + "dir-bindmounted-used-by-one-pod", + "/mnt/disks/vol2", + base + `342 25 252:0 /mnt/disks/vol2 /mnt/disks/vol2 rw,relatime shared:1 - ext4 /dev/mapper/ubuntu--vg-root rw,errors=remount-ro,data=ordered +77 25 252:0 /mnt/disks/vol2 /var/lib/kubelet/pods/f30dc360-5a5d-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-1fb30a1c rw,relatime shared:1 - ext4 /dev/mapper/ubuntu--vg-root rw,errors=remount-ro,data=ordered +`, + []string{"/var/lib/kubelet/pods/f30dc360-5a5d-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-1fb30a1c"}, + nil, + }, + { + "blockfs", + "/mnt/disks/blkvol1", + base + `58 25 7:1 / /mnt/disks/blkvol1 rw,relatime shared:38 - ext4 /dev/loop1 rw,data=ordered +`, + nil, + nil, + }, + { + "blockfs-used-by-one-pod", + "/mnt/disks/blkvol1", + base + `58 25 7:1 / /mnt/disks/blkvol1 rw,relatime shared:38 - ext4 /dev/loop1 rw,data=ordered +62 25 7:1 / /var/lib/kubelet/pods/f19fe4e2-5a63-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test rw,relatime shared:38 - ext4 /dev/loop1 rw,data=ordered +`, + []string{"/var/lib/kubelet/pods/f19fe4e2-5a63-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test"}, + nil, + }, + { + "blockfs-used-by-two-pods", + "/mnt/disks/blkvol1", + base + `58 25 7:1 / /mnt/disks/blkvol1 rw,relatime shared:38 - ext4 /dev/loop1 rw,data=ordered +62 25 7:1 / /var/lib/kubelet/pods/f19fe4e2-5a63-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test rw,relatime shared:38 - ext4 /dev/loop1 rw,data=ordered +95 25 7:1 / /var/lib/kubelet/pods/4854a48b-5a64-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test rw,relatime shared:38 - ext4 /dev/loop1 rw,data=ordered +`, + []string{"/var/lib/kubelet/pods/f19fe4e2-5a63-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test", + "/var/lib/kubelet/pods/4854a48b-5a64-11e8-962f-000c29bb0377/volumes/kubernetes.io~local-volume/local-pv-test"}, + nil, + }, + } + tmpFile, err := ioutil.TempFile("", "test-get-filetype") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + for _, v := range testcases { + tmpFile.Truncate(0) + tmpFile.Seek(0, 0) + tmpFile.WriteString(v.mountInfos) + tmpFile.Sync() + refs, err := searchMountPoints(v.source, tmpFile.Name()) + if !reflect.DeepEqual(refs, v.expectedRefs) { + t.Errorf("test %q: expected Refs: %#v, got %#v", v.name, v.expectedRefs, refs) + } + if !reflect.DeepEqual(err, v.expectedErr) { + t.Errorf("test %q: expected err: %v, got %v", v.name, v.expectedErr, err) + } + } +} diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index 4c48d673254..9b0464c329a 100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -333,7 +333,7 @@ func (mounter *NsenterMounter) GetMountRefs(pathname string) ([]string, error) { if err != nil { return nil, err } - return getMountRefsByDev(mounter, hostpath) + return searchMountPoints(hostpath, procMountInfoPath) } func (mounter *NsenterMounter) GetFSGroup(pathname string) (int64, error) { diff --git a/test/e2e/storage/persistent_volumes-local.go 
b/test/e2e/storage/persistent_volumes-local.go index 1d8121024f4..4406d35899d 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -290,14 +290,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) It("should set fsGroup for one pod", func() { - skipTypes := sets.NewString( - string(DirectoryBindMountedLocalVolumeType), - string(DirectoryLinkBindMountedLocalVolumeType), - ) - if skipTypes.Has(string(testVolType)) { - // TODO(cofyc): Test it when bug is fixed. - framework.Skipf("Skipped when volume type is %v", testVolType) - } By("Checking fsGroup is set") pod := createPodWithFsGroupTest(config, testVol, 1234, 1234) By("Deleting pod") @@ -305,14 +297,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) It("should set same fsGroup for two pods simultaneously", func() { - skipTypes := sets.NewString( - string(DirectoryBindMountedLocalVolumeType), - string(DirectoryLinkBindMountedLocalVolumeType), - ) - if skipTypes.Has(string(testVolType)) { - // TODO(cofyc): Test it when bug is fixed. - framework.Skipf("Skipped when volume type is %v", testVolType) - } fsGroup := int64(1234) By("Create first pod and check fsGroup is set") pod1 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) @@ -325,14 +309,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) It("should set different fsGroup for second pod if first pod is deleted", func() { - skipTypes := sets.NewString( - string(DirectoryBindMountedLocalVolumeType), - string(DirectoryLinkBindMountedLocalVolumeType), - ) - if skipTypes.Has(string(testVolType)) { - // TODO(cofyc): Test it when bug is fixed. - framework.Skipf("Skipped when volume type is %v", testVolType) - } fsGroup1, fsGroup2 := int64(1234), int64(4321) By("Create first pod and check fsGroup is set") pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1) @@ -346,16 +322,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) It("should not set different fsGroups for two pods simultaneously", func() { - skipTypes := sets.NewString( - string(DirectoryLocalVolumeType), - string(DirectoryLinkLocalVolumeType), - string(DirectoryBindMountedLocalVolumeType), - string(DirectoryLinkBindMountedLocalVolumeType), - ) - if skipTypes.Has(string(testVolType)) { - // TODO(cofyc): Test it when bug is fixed. 
- framework.Skipf("Skipped when volume type is %v", testVolType) - } fsGroup1, fsGroup2 := int64(1234), int64(4321) By("Create first pod and check fsGroup is set") pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1) From 40abe94a402a460ce9fdf18039f13e1ea603806d Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 23 Apr 2018 13:16:46 +0800 Subject: [PATCH 124/307] Validate cgroups-per-qos for windows --- .../apis/kubeletconfig/validation/BUILD | 39 ++++++++++++++++- .../kubeletconfig/validation/validation.go | 4 ++ .../validation/validation_others.go | 28 +++++++++++++ .../validation/validation_windows.go | 42 +++++++++++++++++++ 4 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 pkg/kubelet/apis/kubeletconfig/validation/validation_others.go create mode 100644 pkg/kubelet/apis/kubeletconfig/validation/validation_windows.go diff --git a/pkg/kubelet/apis/kubeletconfig/validation/BUILD b/pkg/kubelet/apis/kubeletconfig/validation/BUILD index 7fd457643ea..071a4b16f2e 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/BUILD +++ b/pkg/kubelet/apis/kubeletconfig/validation/BUILD @@ -8,7 +8,44 @@ load( go_library( name = "go_default_library", - srcs = ["validation.go"], + srcs = [ + "validation.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "validation_others.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "validation_windows.go", + ], + "//conditions:default": [], + }), importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation", deps = [ "//pkg/features:go_default_library", diff --git a/pkg/kubelet/apis/kubeletconfig/validation/validation.go b/pkg/kubelet/apis/kubeletconfig/validation/validation.go index ab3bc4e14b4..624abdba4e4 100644 --- a/pkg/kubelet/apis/kubeletconfig/validation/validation.go +++ b/pkg/kubelet/apis/kubeletconfig/validation/validation.go @@ -118,5 +118,9 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error allErrors = append(allErrors, fmt.Errorf("invalid configuration: option %q specified for HairpinMode (--hairpin-mode). Valid options are %q, %q or %q", kc.HairpinMode, kubeletconfig.HairpinNone, kubeletconfig.HairpinVeth, kubeletconfig.PromiscuousBridge)) } + + if err := validateKubeletOSConfiguration(kc); err != nil { + allErrors = append(allErrors, err) + } return utilerrors.NewAggregate(allErrors) } diff --git a/pkg/kubelet/apis/kubeletconfig/validation/validation_others.go b/pkg/kubelet/apis/kubeletconfig/validation/validation_others.go new file mode 100644 index 00000000000..4cad825825e --- /dev/null +++ b/pkg/kubelet/apis/kubeletconfig/validation/validation_others.go @@ -0,0 +1,28 @@ +// +build !windows + +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" +) + +// validateKubeletOSConfiguration validates os specific kubelet configuration and returns an error if it is invalid. +func validateKubeletOSConfiguration(kc *kubeletconfig.KubeletConfiguration) error { + return nil +} diff --git a/pkg/kubelet/apis/kubeletconfig/validation/validation_windows.go b/pkg/kubelet/apis/kubeletconfig/validation/validation_windows.go new file mode 100644 index 00000000000..d1a8ec1dd6f --- /dev/null +++ b/pkg/kubelet/apis/kubeletconfig/validation/validation_windows.go @@ -0,0 +1,42 @@ +// +build windows + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" +) + +// validateKubeletOSConfiguration validates os specific kubelet configuration and returns an error if it is invalid. 
+func validateKubeletOSConfiguration(kc *kubeletconfig.KubeletConfiguration) error { + message := "invalid configuration: %v (%v) %v is not supported on Windows" + allErrors := []error{} + + if kc.CgroupsPerQOS { + allErrors = append(allErrors, fmt.Errorf(message, "CgroupsPerQOS", "--cgroups-per-qos", kc.CgroupsPerQOS)) + } + + if len(kc.EnforceNodeAllocatable) > 0 { + allErrors = append(allErrors, fmt.Errorf(message, "EnforceNodeAllocatable", "--enforce-node-allocatable", kc.EnforceNodeAllocatable)) + } + + return utilerrors.NewAggregate(allErrors) +} From 956bbfd1a6764e6bb17e756ffb5065c5b385fb95 Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Fri, 18 May 2018 16:50:22 +0800 Subject: [PATCH 125/307] should not ignore err when convert controllermanagerconfiguration api --- .../app/controllermanager.go | 6 +++++- .../app/options/options.go | 17 +++++++++++------ .../app/options/options_test.go | 4 ++-- .../app/controllermanager.go | 6 +++++- .../app/options/options.go | 18 ++++++++++++------ .../app/options/options_test.go | 2 +- 6 files changed, 36 insertions(+), 17 deletions(-) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 988bf5edf36..01c4bfe2ba0 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -54,7 +54,11 @@ const ( // NewCloudControllerManagerCommand creates a *cobra.Command object with default parameters func NewCloudControllerManagerCommand() *cobra.Command { - s := options.NewCloudControllerManagerOptions() + s, err := options.NewCloudControllerManagerOptions() + if err != nil { + glog.Fatalf("unable to initialize command options: %v", err) + } + cmd := &cobra.Command{ Use: "cloud-controller-manager", Long: `The Cloud controller manager is a daemon that embeds diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index 433d9935164..5c213555891 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -67,8 +67,11 @@ type CloudControllerManagerOptions struct { } // NewCloudControllerManagerOptions creates a new ExternalCMServer with a default config. -func NewCloudControllerManagerOptions() *CloudControllerManagerOptions { - componentConfig := NewDefaultComponentConfig(ports.InsecureCloudControllerManagerPort) +func NewCloudControllerManagerOptions() (*CloudControllerManagerOptions, error) { + componentConfig, err := NewDefaultComponentConfig(ports.InsecureCloudControllerManagerPort) + if err != nil { + return nil, err + } s := CloudControllerManagerOptions{ CloudProvider: &cmoptions.CloudProviderOptions{}, @@ -96,11 +99,11 @@ func NewCloudControllerManagerOptions() *CloudControllerManagerOptions { // TODO: enable HTTPS by default s.SecureServing.BindPort = 0 - return &s + return &s, nil } // NewDefaultComponentConfig returns cloud-controller manager configuration object. 
-func NewDefaultComponentConfig(insecurePort int32) componentconfig.CloudControllerManagerConfiguration { +func NewDefaultComponentConfig(insecurePort int32) (componentconfig.CloudControllerManagerConfiguration, error) { scheme := runtime.NewScheme() componentconfigv1alpha1.AddToScheme(scheme) componentconfig.AddToScheme(scheme) @@ -109,9 +112,11 @@ func NewDefaultComponentConfig(insecurePort int32) componentconfig.CloudControll scheme.Default(&versioned) internal := componentconfig.CloudControllerManagerConfiguration{} - scheme.Convert(&versioned, &internal, nil) + if err := scheme.Convert(&versioned, &internal, nil); err != nil { + return internal, err + } internal.KubeCloudShared.Port = insecurePort - return internal + return internal, nil } // AddFlags adds flags for a specific ExternalCMServer to the specified FlagSet diff --git a/cmd/cloud-controller-manager/app/options/options_test.go b/cmd/cloud-controller-manager/app/options/options_test.go index fe2f62f0b35..9df6e0f2163 100644 --- a/cmd/cloud-controller-manager/app/options/options_test.go +++ b/cmd/cloud-controller-manager/app/options/options_test.go @@ -32,7 +32,7 @@ import ( ) func TestDefaultFlags(t *testing.T) { - s := NewCloudControllerManagerOptions() + s, _ := NewCloudControllerManagerOptions() expected := &CloudControllerManagerOptions{ CloudProvider: &cmoptions.CloudProviderOptions{ @@ -95,7 +95,7 @@ func TestDefaultFlags(t *testing.T) { func TestAddFlags(t *testing.T) { f := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError) - s := NewCloudControllerManagerOptions() + s, _ := NewCloudControllerManagerOptions() s.AddFlags(f) args := []string{ diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index b03ba75667e..321d6e41ba9 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -70,7 +70,11 @@ const ( // NewControllerManagerCommand creates a *cobra.Command object with default parameters func NewControllerManagerCommand() *cobra.Command { - s := options.NewKubeControllerManagerOptions() + s, err := options.NewKubeControllerManagerOptions() + if err != nil { + glog.Fatalf("unable to initialize command options: %v", err) + } + cmd := &cobra.Command{ Use: "kube-controller-manager", Long: `The Kubernetes controller manager is a daemon that embeds diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index 2841dba49e4..2b26024e5e8 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -90,8 +90,12 @@ type KubeControllerManagerOptions struct { } // NewKubeControllerManagerOptions creates a new KubeControllerManagerOptions with a default config. 
-func NewKubeControllerManagerOptions() *KubeControllerManagerOptions { - componentConfig := NewDefaultComponentConfig(ports.InsecureKubeControllerManagerPort) +func NewKubeControllerManagerOptions() (*KubeControllerManagerOptions, error) { + componentConfig, err := NewDefaultComponentConfig(ports.InsecureKubeControllerManagerPort) + if err != nil { + return nil, err + } + s := KubeControllerManagerOptions{ CloudProvider: &cmoptions.CloudProviderOptions{}, Debugging: &cmoptions.DebuggingOptions{}, @@ -193,11 +197,11 @@ func NewKubeControllerManagerOptions() *KubeControllerManagerOptions { s.GarbageCollectorController.GCIgnoredResources = gcIgnoredResources - return &s + return &s, nil } // NewDefaultComponentConfig returns kube-controller manager configuration object. -func NewDefaultComponentConfig(insecurePort int32) componentconfig.KubeControllerManagerConfiguration { +func NewDefaultComponentConfig(insecurePort int32) (componentconfig.KubeControllerManagerConfiguration, error) { scheme := runtime.NewScheme() componentconfigv1alpha1.AddToScheme(scheme) componentconfig.AddToScheme(scheme) @@ -206,9 +210,11 @@ func NewDefaultComponentConfig(insecurePort int32) componentconfig.KubeControlle scheme.Default(&versioned) internal := componentconfig.KubeControllerManagerConfiguration{} - scheme.Convert(&versioned, &internal, nil) + if err := scheme.Convert(&versioned, &internal, nil); err != nil { + return internal, err + } internal.KubeCloudShared.Port = insecurePort - return internal + return internal, nil } // AddFlags adds flags for a specific KubeControllerManagerOptions to the specified FlagSet diff --git a/cmd/kube-controller-manager/app/options/options_test.go b/cmd/kube-controller-manager/app/options/options_test.go index 0e69b91b857..d32848ed8e3 100644 --- a/cmd/kube-controller-manager/app/options/options_test.go +++ b/cmd/kube-controller-manager/app/options/options_test.go @@ -34,7 +34,7 @@ import ( func TestAddFlags(t *testing.T) { f := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError) - s := NewKubeControllerManagerOptions() + s, _ := NewKubeControllerManagerOptions() s.AddFlags(f, []string{""}, []string{""}) args := []string{ From a3578c864ed7db5a73daa88839eed11d86af209b Mon Sep 17 00:00:00 2001 From: wojtekt Date: Tue, 22 May 2018 15:32:25 +0200 Subject: [PATCH 126/307] Fix incorrectly set resource version in List --- .../apiserver/pkg/storage/etcd/etcd_helper.go | 5 +- .../pkg/storage/etcd/etcd_helper_test.go | 60 +++++++++++++++++++ .../apiserver/pkg/storage/etcd3/store.go | 28 ++++----- .../apiserver/pkg/storage/etcd3/store_test.go | 3 + .../pkg/storage/tests/cacher_test.go | 56 +++++++++++++++++ 5 files changed, 137 insertions(+), 15 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go index 3a34841f919..2fe2bbada2f 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go @@ -347,7 +347,10 @@ func (h *etcdHelper) GetToList(ctx context.Context, key string, resourceVersion metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime) if err != nil { if etcdutil.IsEtcdNotFound(err) { - return nil + if etcdErr, ok := err.(etcd.Error); ok { + return h.versioner.UpdateList(listObj, etcdErr.Index, "") + } + return fmt.Errorf("unexpected error from storage: %#v", err) } return toStorageErr(err, key, 0) } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper_test.go 
b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper_test.go index b0c8fa6c19e..277ad9ac7b2 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcd_helper_test.go @@ -329,6 +329,66 @@ func TestGet(t *testing.T) { } } +func TestGetToList(t *testing.T) { + _, codecs := testScheme(t) + codec := apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion) + server := etcdtesting.NewEtcdTestClientServer(t) + defer server.Terminate(t) + key := "/some/key" + helper := newEtcdHelper(server.Client, codec, etcdtest.PathPrefix()) + + storedObj := &example.Pod{} + if err := helper.Create(context.TODO(), key, &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}, storedObj, 0); err != nil { + t.Errorf("Unexpected error %#v", err) + } + + tests := []struct { + key string + pred storage.SelectionPredicate + expectedOut []*example.Pod + }{{ // test GetToList on existing key + key: key, + pred: storage.Everything, + expectedOut: []*example.Pod{storedObj}, + }, { // test GetToList on non-existing key + key: "/non-existing", + pred: storage.Everything, + expectedOut: nil, + }, { // test GetToList with matching pod name + key: "/non-existing", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("metadata.name!=" + storedObj.Name), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, bool, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, pod.Initializers != nil, nil + }, + }, + expectedOut: nil, + }} + + for i, tt := range tests { + out := &example.PodList{} + err := helper.GetToList(context.TODO(), tt.key, "", tt.pred, out) + if err != nil { + t.Fatalf("GetToList failed: %v", err) + } + if len(out.ResourceVersion) == 0 { + t.Errorf("#%d: unset resourceVersion", i) + } + if len(out.Items) != len(tt.expectedOut) { + t.Errorf("#%d: length of list want=%d, get=%d", i, len(tt.expectedOut), len(out.Items)) + continue + } + for j, wantPod := range tt.expectedOut { + getPod := &out.Items[j] + if !reflect.DeepEqual(wantPod, getPod) { + t.Errorf("#%d: pod want=%#v, get=%#v", i, wantPod, getPod) + } + } + } +} + func TestGetNotFoundErr(t *testing.T) { _, codecs := testScheme(t) codec := apitesting.TestCodec(codecs, schema.GroupVersion{Version: "v1"}) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go index 17384346a0b..fabb083e0ce 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -388,26 +388,26 @@ func (s *store) GetToList(ctx context.Context, key string, resourceVersion strin if err != nil { return err } - key = path.Join(s.pathPrefix, key) - - getResp, err := s.client.KV.Get(ctx, key, s.getOps...) - if err != nil { - return err - } - if len(getResp.Kvs) == 0 { - return nil - } - data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key)) - if err != nil { - return storage.NewInternalError(err.Error()) - } v, err := conversion.EnforcePtr(listPtr) if err != nil || v.Kind() != reflect.Slice { panic("need ptr to slice") } - if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner); err != nil { + + key = path.Join(s.pathPrefix, key) + getResp, err := s.client.KV.Get(ctx, key, s.getOps...) 
+ if err != nil { return err } + + if len(getResp.Kvs) > 0 { + data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key)) + if err != nil { + return storage.NewInternalError(err.Error()) + } + if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner); err != nil { + return err + } + } // update version with cluster level revision return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "") } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go index 10496fb7a0c..b3e34be3989 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store_test.go @@ -326,6 +326,9 @@ func TestGetToList(t *testing.T) { if err != nil { t.Fatalf("GetToList failed: %v", err) } + if len(out.ResourceVersion) == 0 { + t.Errorf("#%d: unset resourceVersion", i) + } if len(out.Items) != len(tt.expectedOut) { t.Errorf("#%d: length of list want=%d, get=%d", i, len(tt.expectedOut), len(out.Items)) continue diff --git a/staging/src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go b/staging/src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go index bd3034f08aa..e6e7eec6492 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/tests/cacher_test.go @@ -166,6 +166,62 @@ func TestGet(t *testing.T) { } } +func TestGetToList(t *testing.T) { + server, etcdStorage := newEtcdTestStorage(t, etcdtest.PathPrefix()) + defer server.Terminate(t) + cacher, _ := newTestCacher(etcdStorage, 10) + defer cacher.Stop() + + storedObj := updatePod(t, etcdStorage, makeTestPod("foo"), nil) + key := "pods/" + storedObj.Namespace + "/" + storedObj.Name + + tests := []struct { + key string + pred storage.SelectionPredicate + expectedOut []*example.Pod + }{{ // test GetToList on existing key + key: key, + pred: storage.Everything, + expectedOut: []*example.Pod{storedObj}, + }, { // test GetToList on non-existing key + key: "/non-existing", + pred: storage.Everything, + expectedOut: nil, + }, { // test GetToList with matching pod name + key: "/non-existing", + pred: storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.ParseSelectorOrDie("metadata.name!=" + storedObj.Name), + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, bool, error) { + pod := obj.(*example.Pod) + return nil, fields.Set{"metadata.name": pod.Name}, pod.Initializers != nil, nil + }, + }, + expectedOut: nil, + }} + + for i, tt := range tests { + out := &example.PodList{} + err := cacher.GetToList(context.TODO(), tt.key, "", tt.pred, out) + if err != nil { + t.Fatalf("GetToList failed: %v", err) + } + if len(out.ResourceVersion) == 0 { + t.Errorf("#%d: unset resourceVersion", i) + } + if len(out.Items) != len(tt.expectedOut) { + t.Errorf("#%d: length of list want=%d, get=%d", i, len(tt.expectedOut), len(out.Items)) + continue + } + for j, wantPod := range tt.expectedOut { + getPod := &out.Items[j] + if !reflect.DeepEqual(wantPod, getPod) { + t.Errorf("#%d: pod want=%#v, get=%#v", i, wantPod, getPod) + } + } + } +} + func TestList(t *testing.T) { server, etcdStorage := newEtcdTestStorage(t, etcdtest.PathPrefix()) defer server.Terminate(t) From 3b5afd8809c93c31d1787347160f947f68f1fe03 Mon Sep 17 00:00:00 2001 From: Nail Islamov Date: Wed, 14 Feb 2018 15:32:57 +1100 Subject: [PATCH 127/307] Use DeleteOptions.PropagationPolicy instead of 
OrphanDependents (deprecated) in kubectl --- hack/make-rules/test-cmd-util.sh | 4 ++-- pkg/kubectl/cmd/delete.go | 8 +++---- pkg/kubectl/cmd/delete_test.go | 24 +++++++++---------- pkg/kubectl/delete.go | 40 +++++++++++++++++++------------- pkg/kubectl/rolling_updater.go | 4 ++-- 5 files changed, 44 insertions(+), 36 deletions(-) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index 428268c8ca9..3929e20ee57 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -1838,7 +1838,7 @@ run_non_native_resource_tests() { kubectl "${kube_flags[@]}" delete resources myobj --cascade=true # Make sure it's gone - kube::test::get_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::wait_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" '' # Test that we can create a new resource of type Foo kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}" @@ -1919,7 +1919,7 @@ run_non_native_resource_tests() { kubectl "${kube_flags[@]}" delete foos test --cascade=true # Make sure it's gone - kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::wait_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" '' # Test that we can create a new resource of type Bar kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}" diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 30b1872e3af..043b6bc3a47 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -302,12 +302,12 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { found++ // if we're here, it means that cascade=false (not the default), so we should orphan as requested - orphan := true options := &metav1.DeleteOptions{} if o.GracePeriod >= 0 { options = metav1.NewDeleteOptions(int64(o.GracePeriod)) } - options.OrphanDependents = &orphan + policy := metav1.DeletePropagationOrphan + options.PropagationPolicy = &policy return o.deleteResource(info, options) }) if err != nil { @@ -350,8 +350,8 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { } func (o *DeleteOptions) cascadingDeleteResource(info *resource.Info) error { - falseVar := false - return o.deleteResource(info, &metav1.DeleteOptions{OrphanDependents: &falseVar}) + policy := metav1.DeletePropagationForeground + return o.deleteResource(info, &metav1.DeleteOptions{PropagationPolicy: &policy}) } func (o *DeleteOptions) deleteResource(info *resource.Info, deleteOptions *metav1.DeleteOptions) error { diff --git a/pkg/kubectl/cmd/delete_test.go b/pkg/kubectl/cmd/delete_test.go index 38ce65a22c7..55a4d915377 100644 --- a/pkg/kubectl/cmd/delete_test.go +++ b/pkg/kubectl/cmd/delete_test.go @@ -104,17 +104,17 @@ func TestDeleteObjectByTuple(t *testing.T) { } } -func hasExpectedOrphanDependents(body io.ReadCloser, expectedOrphanDependents *bool) bool { - if body == nil || expectedOrphanDependents == nil { - return body == nil && expectedOrphanDependents == nil +func hasExpectedPropagationPolicy(body io.ReadCloser, policy *metav1.DeletionPropagation) bool { + if body == nil || policy == nil { + return body == nil && policy == nil } var parsedBody metav1.DeleteOptions rawBody, _ := ioutil.ReadAll(body) json.Unmarshal(rawBody, &parsedBody) - if parsedBody.OrphanDependents == nil { + if parsedBody.PropagationPolicy == nil { return false } - return *expectedOrphanDependents == *parsedBody.OrphanDependents + return *policy == *parsedBody.PropagationPolicy } // 
Tests that DeleteOptions.OrphanDependents is appropriately set while deleting objects. @@ -127,13 +127,13 @@ func TestOrphanDependentsInDeleteObject(t *testing.T) { codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) - var expectedOrphanDependents *bool + var policy *metav1.DeletionPropagation tf.UnstructuredClient = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { switch p, m, b := req.URL.Path, req.Method, req.Body; { - case p == "/namespaces/test/secrets/mysecret" && m == "DELETE" && hasExpectedOrphanDependents(b, expectedOrphanDependents): + case p == "/namespaces/test/secrets/mysecret" && m == "DELETE" && hasExpectedPropagationPolicy(b, policy): return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &rc.Items[0])}, nil default: @@ -143,9 +143,9 @@ func TestOrphanDependentsInDeleteObject(t *testing.T) { } tf.Namespace = "test" - // DeleteOptions.OrphanDependents should be false, when cascade is true (default). - falseVar := false - expectedOrphanDependents = &falseVar + // DeleteOptions.PropagationPolicy should be Foreground, when cascade is true (default). + foregroundPolicy := metav1.DeletePropagationForeground + policy = &foregroundPolicy streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) cmd.Flags().Set("namespace", "test") @@ -156,8 +156,8 @@ func TestOrphanDependentsInDeleteObject(t *testing.T) { } // Test that delete options should be set to orphan when cascade is false. - trueVar := true - expectedOrphanDependents = &trueVar + orphanPolicy := metav1.DeletePropagationOrphan + policy = &orphanPolicy streams, _, buf, _ = genericclioptions.NewTestIOStreams() cmd = NewCmdDelete(tf, streams) cmd.Flags().Set("namespace", "test") diff --git a/pkg/kubectl/delete.go b/pkg/kubectl/delete.go index 0d1f8a46ea6..adeb829a14a 100644 --- a/pkg/kubectl/delete.go +++ b/pkg/kubectl/delete.go @@ -211,8 +211,10 @@ func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout return err } } - falseVar := false - deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar} + // Using a background deletion policy because the replication controller + // has already been scaled down. + policy := metav1.DeletePropagationBackground + deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} return rc.Delete(name, deleteOptions) } @@ -282,8 +284,10 @@ func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Durati } } - falseVar := false - deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar} + // Using a background deletion policy because the replica set has already + // been scaled down. + policy := metav1.DeletePropagationBackground + deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} return rsc.Delete(name, deleteOptions) } @@ -319,8 +323,10 @@ func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duratio return err } - falseVar := false - deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar} + // Using a background deletion policy because the daemon set has already + // been scaled down. 
+ policy := metav1.DeletePropagationBackground + deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} return reaper.client.DaemonSets(namespace).Delete(name, deleteOptions) } @@ -347,8 +353,10 @@ func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Durat // TODO: Cleanup volumes? We don't want to accidentally delete volumes from // stop, so just leave this up to the statefulset. - falseVar := false - deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar} + // Using a background deletion policy because the stateful set has already + // been scaled down. + policy := metav1.DeletePropagationBackground + deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} return statefulsets.Delete(name, deleteOptions) } @@ -394,8 +402,8 @@ func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gra return utilerrors.NewAggregate(errList) } // once we have all the pods removed we can safely remove the job itself. - falseVar := false - deleteOptions := &metav1.DeleteOptions{OrphanDependents: &falseVar} + policy := metav1.DeletePropagationBackground + deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} return jobs.Delete(name, deleteOptions) } @@ -415,9 +423,9 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati return err } if deployment.Initializers != nil { - var falseVar = false - nonOrphanOption := metav1.DeleteOptions{OrphanDependents: &falseVar} - return deployments.Delete(name, &nonOrphanOption) + policy := metav1.DeletePropagationBackground + deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} + return deployments.Delete(name, deleteOptions) } // Use observedGeneration to determine if the deployment controller noticed the pause. @@ -459,9 +467,9 @@ func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Durati // Delete deployment at the end. // Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry. - var falseVar = false - nonOrphanOption := metav1.DeleteOptions{OrphanDependents: &falseVar} - return deployments.Delete(name, &nonOrphanOption) + policy := metav1.DeletePropagationBackground + deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} + return deployments.Delete(name, deleteOptions) } type updateDeploymentFunc func(d *extensions.Deployment) diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go index 084dce74d49..d0a88598652 100644 --- a/pkg/kubectl/rolling_updater.go +++ b/pkg/kubectl/rolling_updater.go @@ -549,8 +549,8 @@ func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationContro rc.Name = newName rc.ResourceVersion = "" // First delete the oldName RC and orphan its pods. 
- trueVar := true - err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{OrphanDependents: &trueVar}) + policy := metav1.DeletePropagationOrphan + err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &metav1.DeleteOptions{PropagationPolicy: &policy}) if err != nil && !errors.IsNotFound(err) { return err } From 54d8466c71a07047304b733c877e042bc48aa3fb Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 23 May 2018 10:59:38 +0800 Subject: [PATCH 128/307] add missing flag for kubeadm config images pull command --- cmd/kubeadm/app/cmd/config.go | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index 481a9b6c748..a06cc315d37 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -274,7 +274,7 @@ func uploadConfiguration(client clientset.Interface, cfgPath string, defaultcfg return uploadconfig.UploadConfiguration(internalcfg, client) } -// NewCmdConfigImages returns the "config images" command +// NewCmdConfigImages returns the "kubeadm config images" command func NewCmdConfigImages(out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "images", @@ -286,12 +286,13 @@ func NewCmdConfigImages(out io.Writer) *cobra.Command { return cmd } -// NewCmdConfigImagesPull returns the `config images pull` command +// NewCmdConfigImagesPull returns the `kubeadm config images pull` command func NewCmdConfigImagesPull() *cobra.Command { cfg := &kubeadmapiv1alpha2.MasterConfiguration{} kubeadmscheme.Scheme.Default(cfg) var cfgPath, featureGatesString string var err error + cmd := &cobra.Command{ Use: "pull", Short: "Pull images used by kubeadm.", @@ -306,7 +307,9 @@ func NewCmdConfigImagesPull() *cobra.Command { kubeadmutil.CheckErr(imagesPull.PullAll()) }, } - AddImagesCommonConfigFlags(cmd.PersistentFlags(), cfg, &featureGatesString) + AddImagesCommonConfigFlags(cmd.PersistentFlags(), cfg, &cfgPath, &featureGatesString) + AddImagesPullFlags(cmd.PersistentFlags(), cfg) + return cmd } @@ -316,7 +319,7 @@ type ImagesPull struct { images []string } -// NewImagesPull initializes and returns the `config images pull` command +// NewImagesPull initializes and returns the `kubeadm config images pull` command func NewImagesPull(puller images.Puller, images []string) *ImagesPull { return &ImagesPull{ puller: puller, @@ -353,8 +356,7 @@ func NewCmdConfigImagesList(out io.Writer) *cobra.Command { kubeadmutil.CheckErr(imagesList.Run(out)) }, } - AddImagesCommonConfigFlags(cmd.PersistentFlags(), cfg, &featureGatesString) - AddImagesListFlags(cmd.PersistentFlags(), &cfgPath) + AddImagesCommonConfigFlags(cmd.PersistentFlags(), cfg, &cfgPath, &featureGatesString) return cmd } @@ -387,21 +389,17 @@ func (i *ImagesList) Run(out io.Writer) error { } // AddImagesCommonConfigFlags adds the flags that configure kubeadm (and affect the images kubeadm will use) -func AddImagesCommonConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.MasterConfiguration, featureGatesString *string) { +func AddImagesCommonConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.MasterConfiguration, cfgPath *string, featureGatesString *string) { flagSet.StringVar( &cfg.KubernetesVersion, "kubernetes-version", cfg.KubernetesVersion, `Choose a specific Kubernetes version for the control plane.`, ) flagSet.StringVar(featureGatesString, "feature-gates", *featureGatesString, "A set of key=value pairs that describe feature gates for various features. 
"+ "Options are:\n"+strings.Join(features.KnownFeatures(&features.InitFeatureGates), "\n")) -} - -// AddImagesListFlags adds the flag that defines the location of the config file -func AddImagesListFlags(flagSet *flag.FlagSet, cfgPath *string) { flagSet.StringVar(cfgPath, "config", *cfgPath, "Path to kubeadm config file.") } -// AddImagesPullFlags adds flags related to the `config images pull` command -func AddImagesPullFlags(flagSet *flag.FlagSet, criSocketPath *string) { - flagSet.StringVar(criSocketPath, "cri-socket-path", *criSocketPath, "Path to the CRI socket.") +// AddImagesPullFlags adds flags related to the `kubeadm config images pull` command +func AddImagesPullFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.MasterConfiguration) { + flagSet.StringVar(&cfg.CRISocket, "cri-socket-path", cfg.CRISocket, "Path to the CRI socket.") } From be7f087ade7db66700736a0a072acb1bad6fab3f Mon Sep 17 00:00:00 2001 From: stewart-yu Date: Wed, 23 May 2018 14:44:46 +0800 Subject: [PATCH 129/307] do some code clean for cloud-controller manager --- cmd/cloud-controller-manager/app/options/options.go | 12 +++++++++--- .../app/options/options_test.go | 6 ++---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index 433d9935164..c29b0ee2f37 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -45,6 +45,11 @@ import ( "github.com/spf13/pflag" ) +const ( + // CloudControllerManagerUserAgent is the userAgent name when starting cloud-controller managers. + CloudControllerManagerUserAgent = "cloud-controller-manager" +) + // CloudControllerManagerOptions is the main context object for the controller manager. type CloudControllerManagerOptions struct { CloudProvider *cmoptions.CloudProviderOptions @@ -165,7 +170,7 @@ func (o *CloudControllerManagerOptions) ApplyTo(c *cloudcontrollerconfig.Config, } // sync back to component config - // TODO: find more elegant way than synching back the values. + // TODO: find more elegant way than syncing back the values. 
c.ComponentConfig.KubeCloudShared.Port = int32(o.InsecureServing.BindPort) c.ComponentConfig.KubeCloudShared.Address = o.InsecureServing.BindAddress.String() @@ -213,13 +218,13 @@ func (o *CloudControllerManagerOptions) Validate() error { } // Config return a cloud controller manager config objective -func (o CloudControllerManagerOptions) Config() (*cloudcontrollerconfig.Config, error) { +func (o *CloudControllerManagerOptions) Config() (*cloudcontrollerconfig.Config, error) { if err := o.Validate(); err != nil { return nil, err } c := &cloudcontrollerconfig.Config{} - if err := o.ApplyTo(c, "cloud-controller-manager"); err != nil { + if err := o.ApplyTo(c, CloudControllerManagerUserAgent); err != nil { return nil, err } @@ -230,5 +235,6 @@ func createRecorder(kubeClient kubernetes.Interface, userAgent string) record.Ev eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) + // TODO: remove dependence on the legacyscheme return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent}) } diff --git a/cmd/cloud-controller-manager/app/options/options_test.go b/cmd/cloud-controller-manager/app/options/options_test.go index fe2f62f0b35..363731e6598 100644 --- a/cmd/cloud-controller-manager/app/options/options_test.go +++ b/cmd/cloud-controller-manager/app/options/options_test.go @@ -111,7 +111,6 @@ func TestAddFlags(t *testing.T) { "--contention-profiling=true", "--controller-start-interval=2m", "--http2-max-streams-per-connection=47", - "--min-resync-period=5m", "--kube-api-burst=100", "--kube-api-content-type=application/vnd.kubernetes.protobuf", "--kube-api-qps=50.0", @@ -122,13 +121,12 @@ func TestAddFlags(t *testing.T) { "--leader-elect-resource-lock=configmap", "--leader-elect-retry-period=5s", "--master=192.168.4.20", - "--min-resync-period=8h", + "--min-resync-period=100m", + "--node-status-update-frequency=10m", "--port=10000", "--profiling=false", - "--node-status-update-frequency=10m", "--route-reconciliation-period=30s", "--secure-port=10001", - "--min-resync-period=100m", "--use-service-account-credentials=false", } f.Parse(args) From 5f5e3a59c01258bb6fe7ef1deddbbe0643153bba Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Mon, 21 May 2018 14:31:33 -0400 Subject: [PATCH 130/307] move more CanBeExposed from factory_client_access --- pkg/kubectl/cmd/expose.go | 5 +- pkg/kubectl/cmd/util/BUILD | 1 - pkg/kubectl/cmd/util/factory.go | 2 - pkg/kubectl/cmd/util/factory_client_access.go | 12 ----- pkg/kubectl/cmd/util/factory_test.go | 28 ----------- pkg/kubectl/polymorphichelpers/BUILD | 2 + .../polymorphichelpers/canbeexposed.go | 38 ++++++++++++++ .../polymorphichelpers/canbeexposed_test.go | 50 +++++++++++++++++++ pkg/kubectl/polymorphichelpers/interface.go | 6 +++ 9 files changed, 98 insertions(+), 46 deletions(-) create mode 100644 pkg/kubectl/polymorphichelpers/canbeexposed.go create mode 100644 pkg/kubectl/polymorphichelpers/canbeexposed_test.go diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index e4e7e7490dd..0f00a5c679e 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/dynamic" 
"k8s.io/kubernetes/pkg/api/legacyscheme" @@ -92,7 +91,7 @@ type ExposeServiceOptions struct { EnforceNamespace bool Generators func(string) map[string]kubectl.Generator - CanBeExposed func(kind schema.GroupKind) error + CanBeExposed polymorphichelpers.CanBeExposedFunc ClientForMapping func(*meta.RESTMapping) (resource.RESTClient, error) MapBasedSelectorForObject func(runtime.Object) (string, error) PortsForObject polymorphichelpers.PortsForObjectFunc @@ -191,7 +190,7 @@ func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) e o.Generators = f.Generators o.Builder = f.NewBuilder() - o.CanBeExposed = f.CanBeExposed + o.CanBeExposed = polymorphichelpers.CanBeExposedFn o.ClientForMapping = f.ClientForMapping o.MapBasedSelectorForObject = f.MapBasedSelectorForObject o.PortsForObject = polymorphichelpers.PortsForObjectFn diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 41cba673fa6..4b83c6451f6 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -16,7 +16,6 @@ go_library( visibility = ["//build/visible_to:pkg_kubectl_cmd_util_CONSUMERS"], deps = [ "//pkg/api/legacyscheme:go_default_library", - "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index bc549c455e1..d13bd99dc82 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -107,8 +107,6 @@ type ClientAccessFactory interface { DefaultNamespace() (string, bool, error) // Generators returns the generators for the provided command Generators(cmdName string) map[string]kubectl.Generator - // Check whether the kind of resources could be exposed - CanBeExposed(kind schema.GroupKind) error } // ObjectMappingFactory holds the second level of factory methods. These functions depend upon ClientAccessFactory methods. diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index 1cffea783f3..e357da65c77 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -47,7 +47,6 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/apps" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" @@ -459,17 +458,6 @@ func (f *ring0Factory) Generators(cmdName string) map[string]kubectl.Generator { return DefaultGenerators(cmdName) } -func (f *ring0Factory) CanBeExposed(kind schema.GroupKind) error { - switch kind { - case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), - extensions.Kind("Deployment"), apps.Kind("Deployment"), extensions.Kind("ReplicaSet"), apps.Kind("ReplicaSet"): - // nothing to do here - default: - return fmt.Errorf("cannot expose a %s", kind) - } - return nil -} - // this method exists to help us find the points still relying on internal types. 
func InternalVersionDecoder() runtime.Decoder { return legacyscheme.Codecs.UniversalDecoder() diff --git a/pkg/kubectl/cmd/util/factory_test.go b/pkg/kubectl/cmd/util/factory_test.go index 360091f46c7..910a8b2da67 100644 --- a/pkg/kubectl/cmd/util/factory_test.go +++ b/pkg/kubectl/cmd/util/factory_test.go @@ -21,7 +21,6 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl" @@ -66,33 +65,6 @@ func TestProtocolsForObject(t *testing.T) { } } -func TestCanBeExposed(t *testing.T) { - factory := NewFactory(genericclioptions.NewTestConfigFlags()) - tests := []struct { - kind schema.GroupKind - expectErr bool - }{ - { - kind: api.Kind("ReplicationController"), - expectErr: false, - }, - { - kind: api.Kind("Node"), - expectErr: true, - }, - } - - for _, test := range tests { - err := factory.CanBeExposed(test.kind) - if test.expectErr && err == nil { - t.Error("unexpected non-error") - } - if !test.expectErr && err != nil { - t.Errorf("unexpected error: %v", err) - } - } -} - func TestMakePortsString(t *testing.T) { tests := []struct { ports []api.ServicePort diff --git a/pkg/kubectl/polymorphichelpers/BUILD b/pkg/kubectl/polymorphichelpers/BUILD index 9848dc98c24..747a974c5ef 100644 --- a/pkg/kubectl/polymorphichelpers/BUILD +++ b/pkg/kubectl/polymorphichelpers/BUILD @@ -5,6 +5,7 @@ go_library( srcs = [ "attachablepodforobject.go", "canbeautoscaled.go", + "canbeexposed.go", "helpers.go", "historyviewer.go", "interface.go", @@ -48,6 +49,7 @@ go_library( go_test( name = "go_default_test", srcs = [ + "canbeexposed_test.go", "helpers_test.go", "logsforobject_test.go", "portsforobject_test.go", diff --git a/pkg/kubectl/polymorphichelpers/canbeexposed.go b/pkg/kubectl/polymorphichelpers/canbeexposed.go new file mode 100644 index 00000000000..af4463fe999 --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/canbeexposed.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/apps" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +// Check whether the kind of resources could be exposed +func canBeExposed(kind schema.GroupKind) error { + switch kind { + case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), + extensions.Kind("Deployment"), apps.Kind("Deployment"), extensions.Kind("ReplicaSet"), apps.Kind("ReplicaSet"): + // nothing to do here + default: + return fmt.Errorf("cannot expose a %s", kind) + } + return nil +} diff --git a/pkg/kubectl/polymorphichelpers/canbeexposed_test.go b/pkg/kubectl/polymorphichelpers/canbeexposed_test.go new file mode 100644 index 00000000000..b9e8d492dc2 --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/canbeexposed_test.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + api "k8s.io/kubernetes/pkg/apis/core" +) + +func TestCanBeExposed(t *testing.T) { + tests := []struct { + kind schema.GroupKind + expectErr bool + }{ + { + kind: api.Kind("ReplicationController"), + expectErr: false, + }, + { + kind: api.Kind("Node"), + expectErr: true, + }, + } + + for _, test := range tests { + err := canBeExposed(test.kind) + if test.expectErr && err == nil { + t.Error("unexpected non-error") + } + if !test.expectErr && err != nil { + t.Errorf("unexpected error: %v", err) + } + } +} diff --git a/pkg/kubectl/polymorphichelpers/interface.go b/pkg/kubectl/polymorphichelpers/interface.go index bf0c230ce27..0ff35cd5b68 100644 --- a/pkg/kubectl/polymorphichelpers/interface.go +++ b/pkg/kubectl/polymorphichelpers/interface.go @@ -71,3 +71,9 @@ type CanBeAutoscaledFunc func(kind schema.GroupKind) error // CanBeAutoscaledFn gives a way to easily override the function for unit testing if needed var CanBeAutoscaledFn CanBeAutoscaledFunc = canBeAutoscaled + +// CanBeExposedFunc is a function type that can tell you whether a given GroupKind is capable of being exposed +type CanBeExposedFunc func(kind schema.GroupKind) error + +// CanBeExposedFn gives a way to easily override the function for unit testing if needed +var CanBeExposedFn CanBeExposedFunc = canBeExposed From fb0794976ae30ad1446177dfa34031da24e3e525 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Mon, 21 May 2018 17:56:56 -0400 Subject: [PATCH 131/307] move Pauser and Resumer from the factory --- pkg/kubectl/cmd/rollout/rollout_pause.go | 7 +-- pkg/kubectl/cmd/rollout/rollout_resume.go | 7 +-- pkg/kubectl/cmd/set/helper.go | 12 ++--- pkg/kubectl/cmd/set/set_env.go | 54 ++++++++++++++++--- pkg/kubectl/cmd/set/set_image.go | 8 +-- pkg/kubectl/cmd/set/set_resources.go | 8 +-- pkg/kubectl/cmd/set/set_selector.go | 2 +- pkg/kubectl/cmd/set/set_serviceaccount.go | 8 +-- pkg/kubectl/cmd/set/set_subject.go | 6 +-- pkg/kubectl/cmd/util/factory.go | 9 ---- pkg/kubectl/cmd/util/factory_client_access.go | 27 ---------- pkg/kubectl/polymorphichelpers/BUILD | 4 ++ pkg/kubectl/polymorphichelpers/interface.go | 16 ++++++ .../polymorphichelpers/objectpauser.go | 46 ++++++++++++++++ .../polymorphichelpers/objectresumer.go | 38 +++++++++++++ 15 files changed, 182 insertions(+), 70 deletions(-) create mode 100644 pkg/kubectl/polymorphichelpers/objectpauser.go create mode 100644 pkg/kubectl/polymorphichelpers/objectresumer.go diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index 65888248cd9..9460e70397c 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" 
"k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" ) @@ -41,7 +42,7 @@ type PauseConfig struct { PrintFlags *genericclioptions.PrintFlags ToPrinter func(string) (printers.ResourcePrinter, error) - Pauser func(info *resource.Info) ([]byte, error) + Pauser polymorphichelpers.ObjectPauserFunc Infos []*resource.Info genericclioptions.IOStreams @@ -101,7 +102,7 @@ func (o *PauseConfig) CompletePause(f cmdutil.Factory, cmd *cobra.Command, args return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) } - o.Pauser = f.Pauser + o.Pauser = polymorphichelpers.ObjectPauserFn cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { @@ -136,7 +137,7 @@ func (o *PauseConfig) CompletePause(f cmdutil.Factory, cmd *cobra.Command, args func (o PauseConfig) RunPause() error { allErrs := []error{} - for _, patch := range set.CalculatePatches(o.Infos, cmdutil.InternalVersionJSONEncoder(), o.Pauser) { + for _, patch := range set.CalculatePatches(o.Infos, cmdutil.InternalVersionJSONEncoder(), set.PatchFn(o.Pauser)) { info := patch.Info if patch.Err != nil { resourceString := info.Mapping.Resource.Resource diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index 16dde958d83..3208bc3c2d7 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" ) @@ -41,7 +42,7 @@ type ResumeConfig struct { PrintFlags *genericclioptions.PrintFlags ToPrinter func(string) (printers.ResourcePrinter, error) - Resumer func(object *resource.Info) ([]byte, error) + Resumer polymorphichelpers.ObjectResumerFunc Infos []*resource.Info genericclioptions.IOStreams @@ -99,7 +100,7 @@ func (o *ResumeConfig) CompleteResume(f cmdutil.Factory, cmd *cobra.Command, arg return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) } - o.Resumer = f.Resumer + o.Resumer = polymorphichelpers.ObjectResumerFn cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { @@ -140,7 +141,7 @@ func (o *ResumeConfig) CompleteResume(f cmdutil.Factory, cmd *cobra.Command, arg func (o ResumeConfig) RunResume() error { allErrs := []error{} - for _, patch := range set.CalculatePatches(o.Infos, cmdutil.InternalVersionJSONEncoder(), o.Resumer) { + for _, patch := range set.CalculatePatches(o.Infos, cmdutil.InternalVersionJSONEncoder(), set.PatchFn(o.Resumer)) { info := patch.Info if patch.Err != nil { diff --git a/pkg/kubectl/cmd/set/helper.go b/pkg/kubectl/cmd/set/helper.go index 4d267c28da2..42b1bb8a9e6 100644 --- a/pkg/kubectl/cmd/set/helper.go +++ b/pkg/kubectl/cmd/set/helper.go @@ -118,16 +118,16 @@ type Patch struct { Patch []byte } -// patchFn is a function type that accepts an info object and returns a byte slice. -// Implementations of patchFn should update the object and return it encoded. -type patchFn func(*resource.Info) ([]byte, error) +// PatchFn is a function type that accepts an info object and returns a byte slice. +// Implementations of PatchFn should update the object and return it encoded. +type PatchFn func(runtime.Object) ([]byte, error) // CalculatePatch calls the mutation function on the provided info object, and generates a strategic merge patch for // the changes in the object. 
Encoder must be able to encode the info into the appropriate destination type. // This function returns whether the mutation function made any change in the original object. -func CalculatePatch(patch *Patch, encoder runtime.Encoder, mutateFn patchFn) bool { +func CalculatePatch(patch *Patch, encoder runtime.Encoder, mutateFn PatchFn) bool { patch.Before, patch.Err = runtime.Encode(encoder, patch.Info.Object) - patch.After, patch.Err = mutateFn(patch.Info) + patch.After, patch.Err = mutateFn(patch.Info.Object) if patch.Err != nil { return true } @@ -141,7 +141,7 @@ func CalculatePatch(patch *Patch, encoder runtime.Encoder, mutateFn patchFn) boo // CalculatePatches calculates patches on each provided info object. If the provided mutateFn // makes no change in an object, the object is not included in the final list of patches. -func CalculatePatches(infos []*resource.Info, encoder runtime.Encoder, mutateFn patchFn) []*Patch { +func CalculatePatches(infos []*resource.Info, encoder runtime.Encoder, mutateFn PatchFn) []*Patch { var patches []*Patch for _, info := range infos { patch := &Patch{Info: info} diff --git a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index 87a83c7a10d..51d01e51294 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -26,6 +26,7 @@ import ( "github.com/spf13/cobra" "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -323,12 +324,53 @@ func (o *EnvOptions) RunEnv() error { if err != nil { return err } - patches := CalculatePatches(infos, scheme.DefaultJSONEncoder(), func(info *resource.Info) ([]byte, error) { - _, err := o.updatePodSpecForObject(info.Object, func(spec *v1.PodSpec) error { + patches := CalculatePatches(infos, scheme.DefaultJSONEncoder(), func(obj runtime.Object) ([]byte, error) { + _, err := o.updatePodSpecForObject(obj, func(spec *v1.PodSpec) error { resolutionErrorsEncountered := false containers, _ := selectContainers(spec.Containers, o.ContainerSelector) + objName, err := meta.NewAccessor().Name(obj) + if err != nil { + return err + } + + gvks, _, err := scheme.Scheme.ObjectKinds(obj) + if err != nil { + return err + } + objKind := obj.GetObjectKind().GroupVersionKind().Kind + if len(objKind) == 0 { + for _, gvk := range gvks { + if len(gvk.Kind) == 0 { + continue + } + if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal { + continue + } + + objKind = gvk.Kind + break + } + } + if len(containers) == 0 { - fmt.Fprintf(o.ErrOut, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, o.ContainerSelector) + if gvks, _, err := scheme.Scheme.ObjectKinds(obj); err == nil { + objKind := obj.GetObjectKind().GroupVersionKind().Kind + if len(objKind) == 0 { + for _, gvk := range gvks { + if len(gvk.Kind) == 0 { + continue + } + if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal { + continue + } + + objKind = gvk.Kind + break + } + } + + fmt.Fprintf(o.ErrOut, "warning: %s/%s does not have any containers matching %q\n", objKind, objName, o.ContainerSelector) + } return nil } for _, c := range containers { @@ -343,7 +385,7 @@ func (o *EnvOptions) RunEnv() error { resolveErrors := map[string][]string{} store := envutil.NewResourceStore() - fmt.Fprintf(o.Out, "# %s %s, container %s\n", info.Mapping.Resource, info.Name, c.Name) + fmt.Fprintf(o.Out, "# %s %s, container %s\n", objKind, objName, c.Name) for _, env := 
range c.Env { // Print the simple value if env.ValueFrom == nil { @@ -357,7 +399,7 @@ func (o *EnvOptions) RunEnv() error { continue } - value, err := envutil.GetEnvVarRefValue(o.clientset, o.namespace, store, env.ValueFrom, info.Object, c) + value, err := envutil.GetEnvVarRefValue(o.clientset, o.namespace, store, env.ValueFrom, obj, c) // Print the resolved value if err == nil { fmt.Fprintf(o.Out, "%s=%s\n", env.Name, value) @@ -390,7 +432,7 @@ func (o *EnvOptions) RunEnv() error { }) if err == nil { - return runtime.Encode(scheme.DefaultJSONEncoder(), info.Object) + return runtime.Encode(scheme.DefaultJSONEncoder(), obj) } return nil, err }) diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 540e40bebc9..0e8251b944d 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -210,9 +210,9 @@ func (o *SetImageOptions) Validate() error { func (o *SetImageOptions) Run() error { allErrs := []error{} - patches := CalculatePatches(o.Infos, scheme.DefaultJSONEncoder(), func(info *resource.Info) ([]byte, error) { + patches := CalculatePatches(o.Infos, scheme.DefaultJSONEncoder(), func(obj runtime.Object) ([]byte, error) { transformed := false - _, err := o.UpdatePodSpecForObject(info.Object, func(spec *v1.PodSpec) error { + _, err := o.UpdatePodSpecForObject(obj, func(spec *v1.PodSpec) error { for name, image := range o.ContainerImages { var ( containerFound bool @@ -255,11 +255,11 @@ func (o *SetImageOptions) Run() error { return nil, nil } // record this change (for rollout history) - if err := o.Recorder.Record(info.Object); err != nil { + if err := o.Recorder.Record(obj); err != nil { glog.V(4).Infof("error recording current command: %v", err) } - return runtime.Encode(scheme.DefaultJSONEncoder(), info.Object) + return runtime.Encode(scheme.DefaultJSONEncoder(), obj) }) for _, patch := range patches { diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 1089b3f61f5..aa6b33a4fee 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -222,9 +222,9 @@ func (o *SetResourcesOptions) Validate() error { func (o *SetResourcesOptions) Run() error { allErrs := []error{} - patches := CalculatePatches(o.Infos, scheme.DefaultJSONEncoder(), func(info *resource.Info) ([]byte, error) { + patches := CalculatePatches(o.Infos, scheme.DefaultJSONEncoder(), func(obj runtime.Object) ([]byte, error) { transformed := false - _, err := o.UpdatePodSpecForObject(info.Object, func(spec *v1.PodSpec) error { + _, err := o.UpdatePodSpecForObject(obj, func(spec *v1.PodSpec) error { containers, _ := selectContainers(spec.Containers, o.ContainerSelector) if len(containers) != 0 { for i := range containers { @@ -255,11 +255,11 @@ func (o *SetResourcesOptions) Run() error { return nil, nil } // record this change (for rollout history) - if err := o.Recorder.Record(info.Object); err != nil { + if err := o.Recorder.Record(obj); err != nil { glog.V(4).Infof("error recording current command: %v", err) } - return runtime.Encode(scheme.DefaultJSONEncoder(), info.Object) + return runtime.Encode(scheme.DefaultJSONEncoder(), obj) }) for _, patch := range patches { diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index ce8cca2a16f..0a6d6200270 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -200,7 +200,7 @@ func (o *SetSelectorOptions) RunSelector() error { return r.Visit(func(info *resource.Info, err error) 
error { patch := &Patch{Info: info} - CalculatePatch(patch, scheme.DefaultJSONEncoder(), func(info *resource.Info) ([]byte, error) { + CalculatePatch(patch, scheme.DefaultJSONEncoder(), func(obj runtime.Object) ([]byte, error) { selectErr := updateSelectorForObject(info.Object, *o.selector) if selectErr != nil { return nil, selectErr diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index e33e12ac3b7..ddf015ef10c 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -173,8 +173,8 @@ func (o *SetServiceAccountOptions) Complete(f cmdutil.Factory, cmd *cobra.Comman // Run creates and applies the patch either locally or calling apiserver. func (o *SetServiceAccountOptions) Run() error { patchErrs := []error{} - patchFn := func(info *resource.Info) ([]byte, error) { - _, err := o.updatePodSpecForObject(info.Object, func(podSpec *v1.PodSpec) error { + patchFn := func(obj runtime.Object) ([]byte, error) { + _, err := o.updatePodSpecForObject(obj, func(podSpec *v1.PodSpec) error { podSpec.ServiceAccountName = o.serviceAccountName return nil }) @@ -182,11 +182,11 @@ func (o *SetServiceAccountOptions) Run() error { return nil, err } // record this change (for rollout history) - if err := o.Recorder.Record(info.Object); err != nil { + if err := o.Recorder.Record(obj); err != nil { glog.V(4).Infof("error recording current command: %v", err) } - return runtime.Encode(scheme.DefaultJSONEncoder(), info.Object) + return runtime.Encode(scheme.DefaultJSONEncoder(), obj) } patches := CalculatePatches(o.infos, scheme.DefaultJSONEncoder(), patchFn) diff --git a/pkg/kubectl/cmd/set/set_subject.go b/pkg/kubectl/cmd/set/set_subject.go index 10fd4031e10..9a0cc736d5c 100644 --- a/pkg/kubectl/cmd/set/set_subject.go +++ b/pkg/kubectl/cmd/set/set_subject.go @@ -194,7 +194,7 @@ func (o *SubjectOptions) Validate() error { } func (o *SubjectOptions) Run(fn updateSubjects) error { - patches := CalculatePatches(o.Infos, scheme.DefaultJSONEncoder(), func(info *resource.Info) ([]byte, error) { + patches := CalculatePatches(o.Infos, scheme.DefaultJSONEncoder(), func(obj runtime.Object) ([]byte, error) { subjects := []rbacv1.Subject{} for _, user := range sets.NewString(o.Users...).List() { subject := rbacv1.Subject{ @@ -227,10 +227,10 @@ func (o *SubjectOptions) Run(fn updateSubjects) error { subjects = append(subjects, subject) } - transformed, err := updateSubjectForObject(info.Object, subjects, fn) + transformed, err := updateSubjectForObject(obj, subjects, fn) if transformed && err == nil { // TODO: switch UpdatePodSpecForObject to work on v1.PodSpec - return runtime.Encode(scheme.DefaultJSONEncoder(), info.Object) + return runtime.Encode(scheme.DefaultJSONEncoder(), obj) } return nil, err }) diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index d13bd99dc82..2ca941f9cab 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -92,15 +92,6 @@ type ClientAccessFactory interface { // SuggestedPodTemplateResources returns a list of resource types that declare a pod template SuggestedPodTemplateResources() []schema.GroupResource - // Pauser marks the object in the info as paused. Currently supported only for Deployments. - // Returns the patched object in bytes and any error that occurred during the encoding or - // in case the object is already paused. - Pauser(info *resource.Info) ([]byte, error) - // Resumer resumes a paused object inside the info. 
Currently supported only for Deployments. - // Returns the patched object in bytes and any error that occurred during the encoding or - // in case the object is already resumed. - Resumer(info *resource.Info) ([]byte, error) - // Returns the default namespace to use in cases where no // other namespace is specified and whether the namespace was // overridden. diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index e357da65c77..4aacd4dce95 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -19,7 +19,6 @@ limitations under the License. package util import ( - "errors" "fmt" "io" "os" @@ -218,32 +217,6 @@ func (f *ring0Factory) SuggestedPodTemplateResources() []schema.GroupResource { } } -func (f *ring0Factory) Pauser(info *resource.Info) ([]byte, error) { - switch obj := info.Object.(type) { - case *extensions.Deployment: - if obj.Spec.Paused { - return nil, errors.New("is already paused") - } - obj.Spec.Paused = true - return runtime.Encode(InternalVersionJSONEncoder(), info.Object) - default: - return nil, fmt.Errorf("pausing is not supported") - } -} - -func (f *ring0Factory) Resumer(info *resource.Info) ([]byte, error) { - switch obj := info.Object.(type) { - case *extensions.Deployment: - if !obj.Spec.Paused { - return nil, errors.New("is not paused") - } - obj.Spec.Paused = false - return runtime.Encode(InternalVersionJSONEncoder(), info.Object) - default: - return nil, fmt.Errorf("resuming is not supported") - } -} - func (f *ring0Factory) DefaultNamespace() (string, bool, error) { return f.clientGetter.ToRawKubeConfigLoader().Namespace() } diff --git a/pkg/kubectl/polymorphichelpers/BUILD b/pkg/kubectl/polymorphichelpers/BUILD index 747a974c5ef..2097c87d7c5 100644 --- a/pkg/kubectl/polymorphichelpers/BUILD +++ b/pkg/kubectl/polymorphichelpers/BUILD @@ -10,6 +10,8 @@ go_library( "historyviewer.go", "interface.go", "logsforobject.go", + "objectpauser.go", + "objectresumer.go", "portsforobject.go", "statusviewer.go", "updatepodspec.go", @@ -17,6 +19,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers", visibility = ["//visibility:public"], deps = [ + "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", @@ -37,6 +40,7 @@ go_library( "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/pkg/kubectl/polymorphichelpers/interface.go b/pkg/kubectl/polymorphichelpers/interface.go index 0ff35cd5b68..02329a4b126 100644 --- a/pkg/kubectl/polymorphichelpers/interface.go +++ b/pkg/kubectl/polymorphichelpers/interface.go @@ -77,3 +77,19 @@ type CanBeExposedFunc func(kind schema.GroupKind) error // CanBeExposedFn gives a way to easily override the function for unit testing if needed var CanBeExposedFn CanBeExposedFunc = canBeExposed + +// ObjectPauserFunc is a function type that marks the object in a given info as paused. 
+type ObjectPauserFunc func(runtime.Object) ([]byte, error) + +// ObjectPauserFn gives a way to easily override the function for unit testing if needed. +// Returns the patched object in bytes and any error that occurred during the encoding or +// in case the object is already paused. +var ObjectPauserFn ObjectPauserFunc = defaultObjectPauser + +// ObjectResumerFunc is a function type that marks the object in a given info as resumed. +type ObjectResumerFunc func(runtime.Object) ([]byte, error) + +// ObjectResumerFn gives a way to easily override the function for unit testing if needed. +// Returns the patched object in bytes and any error that occurred during the encoding or +// in case the object is already resumed. +var ObjectResumerFn ObjectResumerFunc = defaultObjectResumer diff --git a/pkg/kubectl/polymorphichelpers/objectpauser.go b/pkg/kubectl/polymorphichelpers/objectpauser.go new file mode 100644 index 00000000000..f6fbde55893 --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/objectpauser.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +// Currently only supports Deployments. +func defaultObjectPauser(obj runtime.Object) ([]byte, error) { + switch obj := obj.(type) { + case *extensions.Deployment: + if obj.Spec.Paused { + return nil, errors.New("is already paused") + } + obj.Spec.Paused = true + return runtime.Encode(internalVersionJSONEncoder(), obj) + default: + return nil, fmt.Errorf("pausing is not supported") + } +} + +func internalVersionJSONEncoder() runtime.Encoder { + encoder := legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...) + return unstructured.JSONFallbackEncoder{Encoder: encoder} +} diff --git a/pkg/kubectl/polymorphichelpers/objectresumer.go b/pkg/kubectl/polymorphichelpers/objectresumer.go new file mode 100644 index 00000000000..84d8dff91dc --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/objectresumer.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func defaultObjectResumer(obj runtime.Object) ([]byte, error) { + switch obj := obj.(type) { + case *extensions.Deployment: + if !obj.Spec.Paused { + return nil, errors.New("is not paused") + } + obj.Spec.Paused = false + return runtime.Encode(internalVersionJSONEncoder(), obj) + default: + return nil, fmt.Errorf("resuming is not supported") + } +} From b6db623799bc17d6d7b161ba5dcc6f82572b0ad6 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Tue, 22 May 2018 17:06:49 -0400 Subject: [PATCH 132/307] move rollbacker from the factory --- pkg/kubectl/cmd/rollout/rollout_undo.go | 3 +- pkg/kubectl/cmd/util/factory.go | 3 -- .../cmd/util/factory_object_mapping.go | 9 ----- pkg/kubectl/polymorphichelpers/BUILD | 1 + pkg/kubectl/polymorphichelpers/interface.go | 6 +++ pkg/kubectl/polymorphichelpers/rollbacker.go | 38 +++++++++++++++++++ 6 files changed, 47 insertions(+), 13 deletions(-) create mode 100644 pkg/kubectl/polymorphichelpers/rollbacker.go diff --git a/pkg/kubectl/cmd/rollout/rollout_undo.go b/pkg/kubectl/cmd/rollout/rollout_undo.go index 6d5f4e28b09..46a66a0dd7b 100644 --- a/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -20,6 +20,7 @@ import ( "io" "github.com/spf13/cobra" + "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/api/legacyscheme" @@ -139,7 +140,7 @@ func (o *UndoOptions) CompleteUndo(f cmdutil.Factory, cmd *cobra.Command, out io if err != nil { return err } - rollbacker, err := f.Rollbacker(info.ResourceMapping()) + rollbacker, err := polymorphichelpers.RollbackerFn(f, info.ResourceMapping()) if err != nil { return err } diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 2ca941f9cab..dfb486436d7 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -111,9 +111,6 @@ type ObjectMappingFactory interface { // Returns a Describer for displaying the specified RESTMapping type or an error. Describer(mapping *meta.RESTMapping) (printers.Describer, error) - // Returns a Rollbacker for changing the rollback version of the specified RESTMapping type or an error - Rollbacker(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) - // Returns a schema that can validate objects stored on disk. 
Validator(validate bool) (validation.Schema, error) // OpenAPISchema returns the schema openapi schema definition diff --git a/pkg/kubectl/cmd/util/factory_object_mapping.go b/pkg/kubectl/cmd/util/factory_object_mapping.go index 496f6d10e6d..aa7105a1bd0 100644 --- a/pkg/kubectl/cmd/util/factory_object_mapping.go +++ b/pkg/kubectl/cmd/util/factory_object_mapping.go @@ -26,7 +26,6 @@ import ( "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" openapivalidation "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" @@ -132,14 +131,6 @@ func genericDescriber(clientAccessFactory ClientAccessFactory, mapping *meta.RES return printersinternal.GenericDescriberFor(mapping, dynamicClient, eventsClient), nil } -func (f *ring1Factory) Rollbacker(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { - external, err := f.clientAccessFactory.KubernetesClientSet() - if err != nil { - return nil, err - } - return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), external) -} - func (f *ring1Factory) Validator(validate bool) (validation.Schema, error) { if !validate { return validation.NullSchema{}, nil diff --git a/pkg/kubectl/polymorphichelpers/BUILD b/pkg/kubectl/polymorphichelpers/BUILD index 2097c87d7c5..b50030a3c86 100644 --- a/pkg/kubectl/polymorphichelpers/BUILD +++ b/pkg/kubectl/polymorphichelpers/BUILD @@ -13,6 +13,7 @@ go_library( "objectpauser.go", "objectresumer.go", "portsforobject.go", + "rollbacker.go", "statusviewer.go", "updatepodspec.go", ], diff --git a/pkg/kubectl/polymorphichelpers/interface.go b/pkg/kubectl/polymorphichelpers/interface.go index 02329a4b126..e2ecab3c20f 100644 --- a/pkg/kubectl/polymorphichelpers/interface.go +++ b/pkg/kubectl/polymorphichelpers/interface.go @@ -93,3 +93,9 @@ type ObjectResumerFunc func(runtime.Object) ([]byte, error) // Returns the patched object in bytes and any error that occurred during the encoding or // in case the object is already resumed. var ObjectResumerFn ObjectResumerFunc = defaultObjectResumer + +// RollbackerFunc gives a way to change the rollback version of the specified RESTMapping type +type RollbackerFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (kubectl.Rollbacker, error) + +// RollbackerFn gives a way to easily override the function for unit testing if needed +var RollbackerFn RollbackerFunc = rollbacker diff --git a/pkg/kubectl/polymorphichelpers/rollbacker.go b/pkg/kubectl/polymorphichelpers/rollbacker.go new file mode 100644 index 00000000000..f57f475c6b7 --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/rollbacker.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package polymorphichelpers
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/pkg/kubectl"
+	"k8s.io/kubernetes/pkg/kubectl/genericclioptions"
+)
+
+// Returns a Rollbacker for changing the rollback version of the specified RESTMapping type or an error
+func rollbacker(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
+	clientConfig, err := restClientGetter.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+	external, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), external)
+}

From 9d6e7254a19bbff622c8d646b6a5306cc09fcb98 Mon Sep 17 00:00:00 2001
From: "Dr. Stefan Schimanski"
Date: Wed, 23 May 2018 17:23:59 +0200
Subject: [PATCH 133/307] apiextensions: extract orthogonal behaviour from nopConverter

This is preparation for adding more CR converters.
---
 .../pkg/apiserver/conversion/converter.go     | 54 ++++++++++++++++++-
 .../pkg/apiserver/conversion/nop_converter.go | 37 +++----------
 2 files changed, 60 insertions(+), 31 deletions(-)

diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go
index ae0776fae59..69f5340a9ff 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 
 	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
@@ -33,13 +34,62 @@ func NewCRDConverter(crd *apiextensions.CustomResourceDefinition) (safe, unsafe
 	// The only converter right now is nopConverter. More converters will be returned based on the
 	// CRD object when they introduced.
-	unsafe = &nopConverter{
+	unsafe = &crdConverter{
 		clusterScoped: crd.Spec.Scope == apiextensions.ClusterScoped,
-		validVersions: validVersions,
+		delegate: &nopConverter{
+			validVersions: validVersions,
+		},
 	}
 	return &safeConverterWrapper{unsafe}, unsafe
 }
 
+var _ runtime.ObjectConvertor = &crdConverter{}
+
+// crdConverter extends the delegate with generic CRD conversion behaviour. The delegate will implement the
+// user defined conversion strategy given in the CustomResourceDefinition.
+type crdConverter struct {
+	delegate      runtime.ObjectConvertor
+	clusterScoped bool
+}
+
+func (c *crdConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
+	// We currently only support metadata.namespace and metadata.name.
+	switch {
+	case label == "metadata.name":
+		return label, value, nil
+	case !c.clusterScoped && label == "metadata.namespace":
+		return label, value, nil
+	default:
+		return "", "", fmt.Errorf("field label not supported: %s", label)
+	}
+}
+
+func (c *crdConverter) Convert(in, out, context interface{}) error {
+	return c.delegate.Convert(in, out, context)
+}
+
+// ConvertToVersion converts in object to the given gvk in place and returns the same `in` object.
+func (c *crdConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) {
+	// Run the converter on the list items instead of list itself
+	if list, ok := in.(*unstructured.UnstructuredList); ok {
+		for i := range list.Items {
+			obj, err := c.delegate.ConvertToVersion(&list.Items[i], target)
+			if err != nil {
+				return nil, err
+			}
+
+			u, ok := obj.(*unstructured.Unstructured)
+			if !ok {
+				return nil, fmt.Errorf("output type %T in not valid for unstructured conversion", obj)
+			}
+			list.Items[i] = *u
+		}
+		return list, nil
+	}
+
+	return c.delegate.ConvertToVersion(in, target)
+}
+
 // safeConverterWrapper is a wrapper over an unsafe object converter that makes copy of the input and then delegate to the unsafe converter.
 type safeConverterWrapper struct {
 	unsafe runtime.ObjectConvertor
 }
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/nop_converter.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/nop_converter.go
index 3a98f5c6c0f..716930bfbe1 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/nop_converter.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/nop_converter.go
@@ -17,6 +17,7 @@ limitations under the License.
 package conversion
 
 import (
+	"errors"
 	"fmt"
 
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -24,24 +25,15 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
 
-// nopConverter is a converter that only sets the apiVersion fields, but does not real conversion. It supports fields selectors.
+// nopConverter is a converter that only sets the apiVersion fields, but does not real conversion.
 type nopConverter struct {
-	clusterScoped bool
 	validVersions map[schema.GroupVersion]bool
 }
 
 var _ runtime.ObjectConvertor = &nopConverter{}
 
-func (c *nopConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
-	// We currently only support metadata.namespace and metadata.name.
-	switch {
-	case label == "metadata.name":
-		return label, value, nil
-	case !c.clusterScoped && label == "metadata.namespace":
-		return label, value, nil
-	default:
-		return "", "", fmt.Errorf("field label not supported: %s", label)
-	}
+func (nopConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) {
+	return "", "", errors.New("unstructured cannot convert field labels")
 }
 
 func (c *nopConverter) Convert(in, out, context interface{}) error {
@@ -72,29 +64,16 @@ func (c *nopConverter) Convert(in, out, context interface{}) error {
 	return nil
 }
 
-func (c *nopConverter) convertToVersion(in runtime.Object, target runtime.GroupVersioner) error {
+func (c *nopConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) {
 	kind := in.GetObjectKind().GroupVersionKind()
 	gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind})
 	if !ok {
 		// TODO: should this be a typed error?
- return fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) + return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) } if !c.validVersions[gvk.GroupVersion()] { - return fmt.Errorf("request to convert CRD to an invalid group/version: %s", gvk.String()) + return nil, fmt.Errorf("request to convert CRD to an invalid group/version: %s", gvk.String()) } in.GetObjectKind().SetGroupVersionKind(gvk) - return nil -} - -// ConvertToVersion converts in object to the given gvk in place and returns the same `in` object. -func (c *nopConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { - var err error - // Run the converter on the list items instead of list itself - if list, ok := in.(*unstructured.UnstructuredList); ok { - err = list.EachListItem(func(item runtime.Object) error { - return c.convertToVersion(item, target) - }) - } - err = c.convertToVersion(in, target) - return in, err + return in, nil } From bf48d39f390ba616c4ba55a124664d354f9d1228 Mon Sep 17 00:00:00 2001 From: hui luo Date: Tue, 22 May 2018 08:22:03 -0700 Subject: [PATCH 134/307] add test: verify kubelet.config.Restore only happen once --- pkg/kubelet/config/config_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go index f41542a9a6c..9ebf5a66044 100644 --- a/pkg/kubelet/config/config_test.go +++ b/pkg/kubelet/config/config_test.go @@ -451,4 +451,10 @@ func TestPodRestore(t *testing.T) { t.Fatalf("Restore returned error: %v", err) } expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.RESTORE, kubetypes.ApiserverSource, pod)) + + // Verify Restore only happen once + if err := config.Restore(tmpDir, channel); err != nil { + t.Fatalf("The second restore returned error: %v", err) + } + expectNoPodUpdate(t, ch) } From 54970ea1cc9871fdd0965d8217b2ddf79096d2c7 Mon Sep 17 00:00:00 2001 From: Mehdy Bohlool Date: Wed, 23 May 2018 11:07:35 -0700 Subject: [PATCH 135/307] Add clarification for GA in Version Priority sorting --- .../apiextensions-apiserver/pkg/apis/apiextensions/types.go | 5 +++-- .../pkg/apis/apiextensions/v1beta1/types.go | 5 +++-- .../k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go | 5 +++-- .../kube-aggregator/pkg/apis/apiregistration/v1/types.go | 5 +++-- .../pkg/apis/apiregistration/v1beta1/types.go | 5 +++-- 5 files changed, 15 insertions(+), 10 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index d74387ae770..debe74a5b0c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -45,8 +45,9 @@ type CustomResourceDefinitionSpec struct { // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of - // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. 
+ // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. Versions []CustomResourceDefinitionVersion } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index 97062fd2165..9d8d1cd80d1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -48,8 +48,9 @@ type CustomResourceDefinitionSpec struct { // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of - // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go index 36bbd6243ca..3f042211606 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go @@ -71,8 +71,9 @@ type APIServiceSpec struct { // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of - // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major + // version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. VersionPriority int32 } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go index 0f746657d8a..ffaec409cb2 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go @@ -71,8 +71,9 @@ type APIServiceSpec struct { // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. 
"Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of - // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major + // version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. VersionPriority int32 `json:"versionPriority" protobuf:"varint,8,opt,name=versionPriority"` // leaving this here so everyone remembers why proto index 6 is skipped diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go index 26e408446d9..0d4ba49effe 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go @@ -71,8 +71,9 @@ type APIServiceSpec struct { // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of - // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major + // version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. VersionPriority int32 `json:"versionPriority" protobuf:"varint,8,opt,name=versionPriority"` // leaving this here so everyone remembers why proto index 6 is skipped From e73475311ce50649a7fff9698ce24890e705a731 Mon Sep 17 00:00:00 2001 From: Mehdy Bohlool Date: Wed, 23 May 2018 11:12:50 -0700 Subject: [PATCH 136/307] Update generated files --- api/openapi-spec/swagger.json | 6 +++--- .../pkg/apis/apiextensions/v1beta1/generated.proto | 5 +++-- .../pkg/apis/apiregistration/v1/generated.proto | 5 +++-- .../pkg/apis/apiregistration/v1beta1/generated.proto | 5 +++-- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 2d5e4fd43fb..75b05b807a5 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -84972,7 +84972,7 @@ "type": "string" }, "versions": { - "description": "Versions is the list of all supported versions for this resource. If Version field is provided, this field is optional. Validation: All versions must use the same validation schema for now. i.e., top level Validation field is applied to all of these versions. Order: The version name will be used to compute the order. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. 
\"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha, and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", + "description": "Versions is the list of all supported versions for this resource. If Version field is provided, this field is optional. Validation: All versions must use the same validation schema for now. i.e., top level Validation field is applied to all of these versions. Order: The version name will be used to compute the order. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", "type": "array", "items": { "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionVersion" @@ -86405,7 +86405,7 @@ "type": "string" }, "versionPriority": { - "description": "VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha, and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", + "description": "VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. 
An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", "type": "integer", "format": "int32" } @@ -86550,7 +86550,7 @@ "type": "string" }, "versionPriority": { - "description": "VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha, and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", + "description": "VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", "type": "integer", "format": "int32" } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 6e7f06a7f12..0d494317838 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -128,8 +128,9 @@ message CustomResourceDefinitionSpec { // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of - // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing + // major version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. 
repeated CustomResourceDefinitionVersion versions = 7; } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto index e983874efd2..c699fb4ada3 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto @@ -107,8 +107,9 @@ message APIServiceSpec { // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of - // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major + // version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. optional int32 versionPriority = 8; } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto index 77fba8effb0..88a746296da 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -107,8 +107,9 @@ message APIServiceSpec { // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version), // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first - // by GA > beta > alpha, and then by comparing major version, then minor version. An example sorted list of - // versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. + // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major + // version, then minor version. An example sorted list of versions: + // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. 
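// Editor's note (not part of these patches): the ordering rules documented above are easy to get
// wrong, so here is a minimal, self-contained Go sketch of a comparison that yields the example
// order v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. It is an
// illustration under simplified assumptions and does not reproduce the actual helpers used by
// Kubernetes for kube-aware version sorting.
package main

import (
	"fmt"
	"regexp"
	"sort"
	"strconv"
)

// kubeVersionRegex matches "kube-like" versions: v<major>[alpha|beta<minor>].
// This pattern is illustrative only.
var kubeVersionRegex = regexp.MustCompile(`^v(\d+)(?:(alpha|beta)(\d+))?$`)

// parse returns (isKubeLike, stability, major, minor), where stability is
// 2 for GA (no suffix), 1 for beta and 0 for alpha, so larger sorts first.
func parse(v string) (bool, int, int, int) {
	m := kubeVersionRegex.FindStringSubmatch(v)
	if m == nil {
		return false, 0, 0, 0
	}
	major, _ := strconv.Atoi(m[1])
	stability, minor := 2, 0
	switch m[2] {
	case "beta":
		stability = 1
	case "alpha":
		stability = 0
	}
	if m[3] != "" {
		minor, _ = strconv.Atoi(m[3])
	}
	return true, stability, major, minor
}

// less reports whether a sorts before b: kube-like above non kube-like, then
// GA > beta > alpha, then higher major, then higher minor; everything else
// falls back to plain lexicographic ordering.
func less(a, b string) bool {
	aKube, aStab, aMaj, aMin := parse(a)
	bKube, bStab, bMaj, bMin := parse(b)
	switch {
	case aKube != bKube:
		return aKube // kube-like versions sort first
	case !aKube:
		return a < b // both non kube-like: lexicographic
	case aStab != bStab:
		return aStab > bStab // GA before beta before alpha
	case aMaj != bMaj:
		return aMaj > bMaj // higher major version first
	default:
		return aMin > bMin // higher minor version first
	}
}

func main() {
	versions := []string{"foo10", "v11alpha2", "v3beta1", "v1", "foo1", "v10beta3", "v10", "v12alpha1", "v11beta2", "v2"}
	sort.Slice(versions, func(i, j int) bool { return less(versions[i], versions[j]) })
	// Prints the example order from the API comment:
	// [v10 v2 v1 v11beta2 v10beta3 v3beta1 v12alpha1 v11alpha2 foo1 foo10]
	fmt.Println(versions)
}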
optional int32 versionPriority = 8; } From 099e60b1db2cf4c9ca59187c4085c00d6b78549b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 23 May 2018 21:13:32 +0300 Subject: [PATCH 137/307] kubeadm: Refactor the .Etcd substruct in the v1alpha2 API --- cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go | 18 +++-- cmd/kubeadm/app/apis/kubeadm/types.go | 61 ++++++++++++----- .../app/apis/kubeadm/v1alpha1/conversion.go | 38 +++++++++++ .../app/apis/kubeadm/v1alpha2/defaults.go | 17 +++-- .../app/apis/kubeadm/v1alpha2/types.go | 49 ++++++++++---- .../app/apis/kubeadm/validation/validation.go | 66 ++++++++++++++++++- .../kubeadm/validation/validation_test.go | 10 +++ cmd/kubeadm/app/cmd/config_test.go | 4 +- cmd/kubeadm/app/cmd/init.go | 11 +++- cmd/kubeadm/app/cmd/upgrade/common_test.go | 31 +++++---- cmd/kubeadm/app/cmd/upgrade/plan.go | 10 +-- cmd/kubeadm/app/images/images.go | 4 +- cmd/kubeadm/app/phases/certs/certs.go | 3 +- cmd/kubeadm/app/phases/certs/certs_test.go | 23 +++++-- cmd/kubeadm/app/phases/certs/doc.go | 4 +- .../app/phases/certs/pkiutil/pki_helpers.go | 8 ++- .../phases/certs/pkiutil/pki_helpers_test.go | 24 ++++--- .../app/phases/controlplane/manifests.go | 25 ++----- .../app/phases/controlplane/manifests_test.go | 65 +++++------------- .../app/phases/controlplane/volumes.go | 6 +- .../app/phases/controlplane/volumes_test.go | 12 ++-- cmd/kubeadm/app/phases/etcd/local.go | 10 +-- cmd/kubeadm/app/phases/etcd/local_test.go | 26 ++++++-- cmd/kubeadm/app/phases/upgrade/staticpods.go | 16 ++--- .../app/phases/upgrade/staticpods_test.go | 12 +--- cmd/kubeadm/app/preflight/checks.go | 45 +++++++------ cmd/kubeadm/app/preflight/checks_test.go | 20 +++--- .../testdata/conversion/master/internal.yaml | 16 ++--- .../testdata/conversion/master/v1alpha2.yaml | 9 +-- .../testdata/defaulting/master/defaulted.yaml | 9 +-- cmd/kubeadm/app/util/staticpod/utils.go | 4 +- cmd/kubeadm/app/util/staticpod/utils_test.go | 18 +++-- 32 files changed, 429 insertions(+), 245 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index 3bd46500aa1..13416b31e9e 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -19,7 +19,7 @@ package fuzzer import ( "time" - "github.com/google/gofuzz" + fuzz "github.com/google/gofuzz" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" @@ -41,15 +41,12 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { obj.Networking.DNSDomain = "foo" obj.CertificatesDir = "foo" obj.APIServerCertSANs = []string{"foo"} - obj.Etcd.ServerCertSANs = []string{"foo"} - obj.Etcd.PeerCertSANs = []string{"foo"} + obj.Token = "foo" obj.CRISocket = "foo" obj.TokenTTL = &metav1.Duration{Duration: 1 * time.Hour} obj.TokenUsages = []string{"foo"} obj.TokenGroups = []string{"foo"} - obj.Etcd.Image = "foo" - obj.Etcd.DataDir = "foo" obj.ImageRepository = "foo" obj.CIImageRepository = "" obj.UnifiedControlPlaneImage = "foo" @@ -62,7 +59,16 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { MountPath: "foo", Writable: false, }} - obj.Etcd.ExtraArgs = map[string]string{"foo": "foo"} + obj.Etcd.Local = &kubeadm.LocalEtcd{ + Image: "foo", + DataDir: "foo", + ServerCertSANs: []string{"foo"}, + PeerCertSANs: []string{"foo"}, + ExtraArgs: map[string]string{"foo": "foo"}, + } + // Note: We don't set values here for obj.Etcd.External, as these are mutually exlusive. 
+ // And to make sure the fuzzer doesn't set a random value for obj.Etcd.External, we let + // kubeadmapi.Etcd implement fuzz.Interface (we handle that ourselves) obj.KubeletConfiguration = kubeadm.KubeletConfiguration{ BaseConfig: &kubeletconfigv1beta1.KubeletConfiguration{ StaticPodPath: "foo", diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index 17cbb15a67d..ea062c62184 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -17,6 +17,8 @@ limitations under the License. package kubeadm import ( + fuzz "github.com/google/gofuzz" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" @@ -160,6 +162,49 @@ type Networking struct { // Etcd contains elements describing Etcd configuration. type Etcd struct { + + // Local provides configuration knobs for configuring the local etcd instance + // Local and External are mutually exclusive + Local *LocalEtcd + + // External describes how to connect to an external etcd cluster + // Local and External are mutually exclusive + External *ExternalEtcd +} + +// Fuzz is a dummy function here to get the roundtrip tests working in cmd/kubeadm/app/apis/kubeadm/fuzzer working. +// As we split the monolith-etcd struct into two smaller pieces with pointers and they are mutually exclusive, roundtrip +// tests that randomize all values in this struct isn't feasible. Instead, we override the fuzzing function for .Etcd with +// this func by letting Etcd implement the fuzz.Interface interface. As this func does nothing, we rely on the values given +// in fuzzer/fuzzer.go for the roundtrip tests, which is exactly what we want. +// TODO: Remove this function when we remove the v1alpha1 API +func (e Etcd) Fuzz(c fuzz.Continue) {} + +// LocalEtcd describes that kubeadm should run an etcd cluster locally +type LocalEtcd struct { + + // Image specifies which container image to use for running etcd. + // If empty, automatically populated by kubeadm using the image + // repository and default etcd version. + Image string + + // DataDir is the directory etcd will place its data. + // Defaults to "/var/lib/etcd". + DataDir string + + // ExtraArgs are extra arguments provided to the etcd binary + // when run inside a static pod. + ExtraArgs map[string]string + + // ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert. + ServerCertSANs []string + // PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert. + PeerCertSANs []string +} + +// ExternalEtcd describes an external etcd cluster +type ExternalEtcd struct { + // Endpoints of etcd members. Useful for using external etcd. // If not provided, kubeadm will run etcd in a static pod. Endpoints []string @@ -169,22 +214,6 @@ type Etcd struct { CertFile string // KeyFile is an SSL key file used to secure etcd communication. KeyFile string - // DataDir is the directory etcd will place its data. - // Defaults to "/var/lib/etcd". - DataDir string - // ExtraArgs are extra arguments provided to the etcd binary - // when run inside a static pod. - ExtraArgs map[string]string - // Image specifies which container image to use for running etcd. - // If empty, automatically populated by kubeadm using the image - // repository and default etcd version. - Image string - // ServerCertSANs sets extra Subject Alternative Names for the etcd server - // signing cert. 
This is currently used for the etcd static-pod. - ServerCertSANs []string - // PeerCertSANs sets extra Subject Alternative Names for the etcd peer - // signing cert. This is currently used for the etcd static-pod. - PeerCertSANs []string } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go index dcbb59a19d6..d5492c3333a 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go @@ -30,6 +30,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error { err := scheme.AddConversionFuncs( Convert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration, Convert_v1alpha1_Etcd_To_kubeadm_Etcd, + Convert_kubeadm_Etcd_To_v1alpha1_Etcd, ) if err != nil { return err @@ -56,10 +57,47 @@ func Convert_v1alpha1_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conver return err } + // The .Etcd schema changed between v1alpha1 and v1alpha2 API types. The change was to basically only split up the fields into two sub-structs, which can be seen here + if len(in.Endpoints) != 0 { + out.External = &kubeadm.ExternalEtcd{ + Endpoints: in.Endpoints, + CAFile: in.CAFile, + CertFile: in.CertFile, + KeyFile: in.KeyFile, + } + } else { + out.Local = &kubeadm.LocalEtcd{ + Image: in.Image, + DataDir: in.DataDir, + ExtraArgs: in.ExtraArgs, + ServerCertSANs: in.ServerCertSANs, + PeerCertSANs: in.PeerCertSANs, + } + } + // No need to transfer information about .Etcd.Selfhosted to v1alpha2 return nil } +// no-op, as we don't support converting from newer API to old alpha API +func Convert_kubeadm_Etcd_To_v1alpha1_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error { + + if in.External != nil { + out.Endpoints = in.External.Endpoints + out.CAFile = in.External.CAFile + out.CertFile = in.External.CertFile + out.KeyFile = in.External.KeyFile + } else { + out.Image = in.Local.Image + out.DataDir = in.Local.DataDir + out.ExtraArgs = in.Local.ExtraArgs + out.ServerCertSANs = in.Local.ServerCertSANs + out.PeerCertSANs = in.Local.PeerCertSANs + } + + return nil +} + // UpgradeCloudProvider handles the removal of .CloudProvider as smoothly as possible func UpgradeCloudProvider(in *MasterConfiguration, out *kubeadm.MasterConfiguration) { if len(in.CloudProvider) != 0 { diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go index 946d3f2e6e8..6ada8fc6146 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go @@ -119,19 +119,28 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { obj.ImageRepository = DefaultImageRepository } - if obj.Etcd.DataDir == "" { - obj.Etcd.DataDir = DefaultEtcdDataDir - } - if obj.ClusterName == "" { obj.ClusterName = DefaultClusterName } SetDefaults_KubeletConfiguration(obj) + SetDefaults_Etcd(obj) SetDefaults_ProxyConfiguration(obj) SetDefaults_AuditPolicyConfiguration(obj) } +// SetDefaults_Etcd assigns default values for the Proxy +func SetDefaults_Etcd(obj *MasterConfiguration) { + if obj.Etcd.External == nil && obj.Etcd.Local == nil { + obj.Etcd.Local = &LocalEtcd{} + } + if obj.Etcd.Local != nil { + if obj.Etcd.Local.DataDir == "" { + obj.Etcd.Local.DataDir = DefaultEtcdDataDir + } + } +} + // SetDefaults_ProxyConfiguration assigns default values for the Proxy func SetDefaults_ProxyConfiguration(obj *MasterConfiguration) { if obj.KubeProxy.Config == nil 
{ diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go index 289f1a70afe..dd48f2b9277 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go @@ -153,6 +153,41 @@ type Networking struct { // Etcd contains elements describing Etcd configuration. type Etcd struct { + + // Local provides configuration knobs for configuring the local etcd instance + // Local and External are mutually exclusive + Local *LocalEtcd `json:"local,omitempty"` + + // External describes how to connect to an external etcd cluster + // Local and External are mutually exclusive + External *ExternalEtcd `json:"external,omitempty"` +} + +// LocalEtcd describes that kubeadm should run an etcd cluster locally +type LocalEtcd struct { + + // Image specifies which container image to use for running etcd. + // If empty, automatically populated by kubeadm using the image + // repository and default etcd version. + Image string `json:"image"` + + // DataDir is the directory etcd will place its data. + // Defaults to "/var/lib/etcd". + DataDir string `json:"dataDir"` + + // ExtraArgs are extra arguments provided to the etcd binary + // when run inside a static pod. + ExtraArgs map[string]string `json:"extraArgs,omitempty"` + + // ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert. + ServerCertSANs []string `json:"serverCertSANs,omitempty"` + // PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert. + PeerCertSANs []string `json:"peerCertSANs,omitempty"` +} + +// ExternalEtcd describes an external etcd cluster +type ExternalEtcd struct { + // Endpoints of etcd members. Useful for using external etcd. // If not provided, kubeadm will run etcd in a static pod. Endpoints []string `json:"endpoints"` @@ -162,20 +197,6 @@ type Etcd struct { CertFile string `json:"certFile"` // KeyFile is an SSL key file used to secure etcd communication. KeyFile string `json:"keyFile"` - // DataDir is the directory etcd will place its data. - // Defaults to "/var/lib/etcd". - DataDir string `json:"dataDir"` - // ExtraArgs are extra arguments provided to the etcd binary - // when run inside a static pod. - ExtraArgs map[string]string `json:"extraArgs,omitempty"` - // Image specifies which container image to use for running etcd. - // If empty, automatically populated by kubeadm using the image - // repository and default etcd version. - Image string `json:"image"` - // ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert. - ServerCertSANs []string `json:"serverCertSANs,omitempty"` - // PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert. - PeerCertSANs []string `json:"peerCertSANs,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index a038a723591..116bfce6c8d 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -53,8 +53,6 @@ func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateNetworking(&c.Networking, field.NewPath("networking"))...) allErrs = append(allErrs, ValidateCertSANs(c.APIServerCertSANs, field.NewPath("apiServerCertSANs"))...) 
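// Editor's note (not part of this patch): a minimal, self-contained sketch of the defaulting
// behaviour introduced by SetDefaults_Etcd earlier in this commit. When the user configures
// neither a local nor an external etcd section, kubeadm falls back to a local etcd with the
// default data directory. The struct names below are simplified stand-ins for the real kubeadm
// API types, not the types from the tree.
package main

import "fmt"

const defaultEtcdDataDir = "/var/lib/etcd"

type localEtcd struct{ DataDir string }
type externalEtcd struct{ Endpoints []string }

type etcd struct {
	Local    *localEtcd
	External *externalEtcd
}

// applyEtcdDefaults mirrors the shape of SetDefaults_Etcd: keep whatever the user
// configured, otherwise default to a locally managed etcd instance.
func applyEtcdDefaults(e *etcd) {
	if e.Local == nil && e.External == nil {
		e.Local = &localEtcd{}
	}
	if e.Local != nil && e.Local.DataDir == "" {
		e.Local.DataDir = defaultEtcdDataDir
	}
}

func main() {
	cfg := etcd{} // nothing configured by the user
	applyEtcdDefaults(&cfg)
	fmt.Println(cfg.Local.DataDir) // "/var/lib/etcd"
}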
- allErrs = append(allErrs, ValidateCertSANs(c.Etcd.ServerCertSANs, field.NewPath("etcd").Child("serverCertSANs"))...) - allErrs = append(allErrs, ValidateCertSANs(c.Etcd.PeerCertSANs, field.NewPath("etcd").Child("peerCertSANs"))...) allErrs = append(allErrs, ValidateAbsolutePath(c.CertificatesDir, field.NewPath("certificatesDir"))...) allErrs = append(allErrs, ValidateNodeName(c.NodeName, field.NewPath("nodeName"))...) allErrs = append(allErrs, ValidateToken(c.Token, field.NewPath("token"))...) @@ -63,6 +61,7 @@ func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList allErrs = append(allErrs, ValidateFeatureGates(c.FeatureGates, field.NewPath("featureGates"))...) allErrs = append(allErrs, ValidateAPIEndpoint(&c.API, field.NewPath("api"))...) allErrs = append(allErrs, ValidateProxy(c.KubeProxy.Config, field.NewPath("kubeProxy").Child("config"))...) + allErrs = append(allErrs, ValidateEtcd(&c.Etcd, field.NewPath("etcd"))...) if features.Enabled(c.FeatureGates, features.DynamicKubeletConfig) { allErrs = append(allErrs, ValidateKubeletConfiguration(&c.KubeletConfiguration, field.NewPath("kubeletConfiguration"))...) } @@ -222,6 +221,54 @@ func ValidateTokenUsages(usages []string, fldPath *field.Path) field.ErrorList { return allErrs } +// ValidateEtcd validates the .Etcd sub-struct. +func ValidateEtcd(e *kubeadm.Etcd, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + localPath := fldPath.Child("local") + externalPath := fldPath.Child("external") + + if e.Local == nil && e.External == nil { + allErrs = append(allErrs, field.Invalid(fldPath, "", "either .Etcd.Local or .Etcd.External is required")) + return allErrs + } + if e.Local != nil && e.External != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "", ".Etcd.Local and .Etcd.External are mutually exclusive")) + return allErrs + } + if e.Local != nil { + allErrs = append(allErrs, ValidateAbsolutePath(e.Local.DataDir, localPath.Child("dataDir"))...) + allErrs = append(allErrs, ValidateCertSANs(e.Local.ServerCertSANs, localPath.Child("serverCertSANs"))...) + allErrs = append(allErrs, ValidateCertSANs(e.Local.PeerCertSANs, localPath.Child("peerCertSANs"))...) + } + if e.External != nil { + requireHTTPS := true + // Only allow the http scheme if no certs/keys are passed + if e.External.CAFile == "" && e.External.CertFile == "" && e.External.KeyFile == "" { + requireHTTPS = false + } + // Require either none or both of the cert/key pair + if (e.External.CertFile == "" && e.External.KeyFile != "") || (e.External.CertFile != "" && e.External.KeyFile == "") { + allErrs = append(allErrs, field.Invalid(externalPath, "", "either both or none of .Etcd.External.CertFile and .Etcd.External.KeyFile must be set")) + } + // If the cert and key are specified, require the VA as well + if e.External.CertFile != "" && e.External.KeyFile != "" && e.External.CAFile == "" { + allErrs = append(allErrs, field.Invalid(externalPath, "", "setting .Etcd.External.CertFile and .Etcd.External.KeyFile requires .Etcd.External.CAFile")) + } + + allErrs = append(allErrs, ValidateURLs(e.External.Endpoints, requireHTTPS, externalPath.Child("endpoints"))...) + if e.External.CAFile != "" { + allErrs = append(allErrs, ValidateAbsolutePath(e.External.CAFile, externalPath.Child("caFile"))...) + } + if e.External.CertFile != "" { + allErrs = append(allErrs, ValidateAbsolutePath(e.External.CertFile, externalPath.Child("certFile"))...) 
+ } + if e.External.KeyFile != "" { + allErrs = append(allErrs, ValidateAbsolutePath(e.External.KeyFile, externalPath.Child("keyFile"))...) + } + } + return allErrs +} + // ValidateCertSANs validates alternative names func ValidateCertSANs(altnames []string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -233,6 +280,21 @@ func ValidateCertSANs(altnames []string, fldPath *field.Path) field.ErrorList { return allErrs } +// ValidateURLs validates the URLs given in the string slice, makes sure they are parseable. Optionally, it can enforcs HTTPS usage. +func ValidateURLs(urls []string, requireHTTPS bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, urlstr := range urls { + u, err := url.Parse(urlstr) + if err != nil || u.Scheme == "" { + allErrs = append(allErrs, field.Invalid(fldPath, urlstr, "not a valid URL")) + } + if requireHTTPS && u.Scheme != "https" { + allErrs = append(allErrs, field.Invalid(fldPath, urlstr, "the URL must be using the HTTPS scheme")) + } + } + return allErrs +} + // ValidateIPFromString validates ip address func ValidateIPFromString(ipaddr string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index 8c51a354000..b0dd24a683b 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -457,6 +457,11 @@ func TestValidateMasterConfiguration(t *testing.T) { AdvertiseAddress: "1.2.3.4", BindPort: 6443, }, + Etcd: kubeadm.Etcd{ + Local: &kubeadm.LocalEtcd{ + DataDir: "/some/path", + }, + }, KubeProxy: kubeadm.KubeProxy{ Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ BindAddress: "192.168.59.103", @@ -498,6 +503,11 @@ func TestValidateMasterConfiguration(t *testing.T) { AdvertiseAddress: "1:2:3::4", BindPort: 3446, }, + Etcd: kubeadm.Etcd{ + Local: &kubeadm.LocalEtcd{ + DataDir: "/some/path", + }, + }, KubeProxy: kubeadm.KubeProxy{ Config: &kubeproxyconfigv1alpha1.KubeProxyConfiguration{ BindAddress: "192.168.59.103", diff --git a/cmd/kubeadm/app/cmd/config_test.go b/cmd/kubeadm/app/cmd/config_test.go index f68b504af92..622d6983e26 100644 --- a/cmd/kubeadm/app/cmd/config_test.go +++ b/cmd/kubeadm/app/cmd/config_test.go @@ -135,7 +135,9 @@ func TestConfigImagesListRunWithoutPath(t *testing.T) { name: "external etcd configuration", cfg: kubeadmapiv1alpha2.MasterConfiguration{ Etcd: kubeadmapiv1alpha2.Etcd{ - Endpoints: []string{"hi"}, + External: &kubeadmapiv1alpha2.ExternalEtcd{ + Endpoints: []string{"https://some.etcd.com:2379"}, + }, }, }, expectedImages: defaultNumberOfImages - 1, diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 7c03b5b4c49..e763d8fd7c0 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -95,7 +95,7 @@ var ( - {{ .APIServerImage }} - {{ .ControllerManagerImage }} - {{ .SchedulerImage }} - - {{ .EtcdImage }} (only if no external etcd endpoints are configured) +{{ .EtcdImage }} - You can check or miligate this in beforehand with "kubeadm config images pull" to make sure the images are downloaded locally and cached. 
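// Editor's note (not part of this patch): a hypothetical snippet showing how the ValidateEtcd
// and ValidateURLs helpers added in the validation.go hunk above behave for the two new
// configuration shapes. It assumes the in-tree import paths of the packages touched by this
// commit; treat it as an illustrative sketch rather than a test from the tree.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
	kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
)

func main() {
	// Setting local and external etcd at the same time is rejected: the two
	// sub-structs are mutually exclusive.
	both := kubeadm.Etcd{
		Local:    &kubeadm.LocalEtcd{DataDir: "/var/lib/etcd"},
		External: &kubeadm.ExternalEtcd{Endpoints: []string{"https://10.0.0.1:2379"}},
	}
	fmt.Println(validation.ValidateEtcd(&both, field.NewPath("etcd")))

	// An external cluster with a client cert/key pair but no CA file is also rejected,
	// because the CA is required once client certificates are supplied.
	external := kubeadm.Etcd{
		External: &kubeadm.ExternalEtcd{
			Endpoints: []string{"https://10.0.0.1:2379"},
			CertFile:  "/etc/etcd/client.crt",
			KeyFile:   "/etc/etcd/client.key",
		},
	}
	fmt.Println(validation.ValidateEtcd(&external, field.NewPath("etcd")))
}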
@@ -338,7 +338,7 @@ func (i *Init) Run(out io.Writer) error { return fmt.Errorf("error creating init static pod manifest files: %v", err) } // Add etcd static pod spec only if external etcd is not configured - if len(i.cfg.Etcd.Endpoints) == 0 { + if i.cfg.Etcd.External == nil { glog.V(1).Infof("[init] no external etcd found. Creating manifest for local etcd static pod") if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(manifestDir, i.cfg); err != nil { return fmt.Errorf("error creating local etcd static pod manifest file: %v", err) @@ -380,7 +380,12 @@ func (i *Init) Run(out io.Writer) error { "APIServerImage": images.GetCoreImage(kubeadmconstants.KubeAPIServer, i.cfg.GetControlPlaneImageRepository(), i.cfg.KubernetesVersion, i.cfg.UnifiedControlPlaneImage), "ControllerManagerImage": images.GetCoreImage(kubeadmconstants.KubeControllerManager, i.cfg.GetControlPlaneImageRepository(), i.cfg.KubernetesVersion, i.cfg.UnifiedControlPlaneImage), "SchedulerImage": images.GetCoreImage(kubeadmconstants.KubeScheduler, i.cfg.GetControlPlaneImageRepository(), i.cfg.KubernetesVersion, i.cfg.UnifiedControlPlaneImage), - "EtcdImage": images.GetCoreImage(kubeadmconstants.Etcd, i.cfg.ImageRepository, i.cfg.KubernetesVersion, i.cfg.Etcd.Image), + } + // Set .EtcdImage conditionally + if i.cfg.Etcd.Local != nil { + ctx["EtcdImage"] = fmt.Sprintf(" - %s", images.GetCoreImage(kubeadmconstants.Etcd, i.cfg.ImageRepository, i.cfg.KubernetesVersion, i.cfg.Etcd.Local.Image)) + } else { + ctx["EtcdImage"] = "" } kubeletFailTempl.Execute(out, ctx) diff --git a/cmd/kubeadm/app/cmd/upgrade/common_test.go b/cmd/kubeadm/app/cmd/upgrade/common_test.go index e9864154cbe..ac170b04bb6 100644 --- a/cmd/kubeadm/app/cmd/upgrade/common_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/common_test.go @@ -36,6 +36,11 @@ func TestPrintConfiguration(t *testing.T) { { cfg: &kubeadmapi.MasterConfiguration{ KubernetesVersion: "v1.7.1", + Etcd: kubeadmapi.Etcd{ + Local: &kubeadmapi.LocalEtcd{ + DataDir: "/some/path", + }, + }, }, expectedBytes: []byte(`[upgrade/config] Configuration used: api: @@ -48,12 +53,9 @@ func TestPrintConfiguration(t *testing.T) { path: "" certificatesDir: "" etcd: - caFile: "" - certFile: "" - dataDir: "" - endpoints: null - image: "" - keyFile: "" + local: + dataDir: /some/path + image: "" imageRepository: "" kind: MasterConfiguration kubeProxy: {} @@ -74,6 +76,11 @@ func TestPrintConfiguration(t *testing.T) { Networking: kubeadmapi.Networking{ ServiceSubnet: "10.96.0.1/12", }, + Etcd: kubeadmapi.Etcd{ + External: &kubeadmapi.ExternalEtcd{ + Endpoints: []string{"https://one-etcd-instance:2379"}, + }, + }, }, expectedBytes: []byte(`[upgrade/config] Configuration used: api: @@ -86,12 +93,12 @@ func TestPrintConfiguration(t *testing.T) { path: "" certificatesDir: "" etcd: - caFile: "" - certFile: "" - dataDir: "" - endpoints: null - image: "" - keyFile: "" + external: + caFile: "" + certFile: "" + endpoints: + - https://one-etcd-instance:2379 + keyFile: "" imageRepository: "" kind: MasterConfiguration kubeProxy: {} diff --git a/cmd/kubeadm/app/cmd/upgrade/plan.go b/cmd/kubeadm/app/cmd/upgrade/plan.go index 0f21dae3671..b769709f370 100644 --- a/cmd/kubeadm/app/cmd/upgrade/plan.go +++ b/cmd/kubeadm/app/cmd/upgrade/plan.go @@ -94,13 +94,13 @@ func RunPlan(flags *planFlags) error { // Currently this is the only method we have for distinguishing // external etcd vs static pod etcd - isExternalEtcd := len(upgradeVars.cfg.Etcd.Endpoints) > 0 + isExternalEtcd := upgradeVars.cfg.Etcd.External != nil if isExternalEtcd { 
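// Editor's note (not part of this patch): the surrounding external-etcd branch hands the
// CAFile/CertFile/KeyFile of ExternalEtcd to an etcd client. As an illustration only, this
// hypothetical helper shows one conventional way such a triple is loaded into a crypto/tls
// configuration; it is not the etcdutil implementation that kubeadm actually calls here.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
)

// newEtcdTLSConfig builds a client tls.Config from optional CA and client cert/key paths.
func newEtcdTLSConfig(caFile, certFile, keyFile string) (*tls.Config, error) {
	cfg := &tls.Config{}
	if caFile != "" {
		caPEM, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, fmt.Errorf("could not read CA file %s: %v", caFile, err)
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(caPEM) {
			return nil, fmt.Errorf("no certificates found in %s", caFile)
		}
		cfg.RootCAs = pool
	}
	if certFile != "" && keyFile != "" {
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, fmt.Errorf("could not load client cert/key pair: %v", err)
		}
		cfg.Certificates = []tls.Certificate{cert}
	}
	return cfg, nil
}

func main() {
	// Paths are placeholders; with empty CA/cert/key inputs the helper simply returns an empty config.
	cfg, err := newEtcdTLSConfig("", "", "")
	fmt.Println(cfg, err)
}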
client, err := etcdutil.New( - upgradeVars.cfg.Etcd.Endpoints, - upgradeVars.cfg.Etcd.CAFile, - upgradeVars.cfg.Etcd.CertFile, - upgradeVars.cfg.Etcd.KeyFile) + upgradeVars.cfg.Etcd.External.Endpoints, + upgradeVars.cfg.Etcd.External.CAFile, + upgradeVars.cfg.Etcd.External.CertFile, + upgradeVars.cfg.Etcd.External.KeyFile) if err != nil { return err } diff --git a/cmd/kubeadm/app/images/images.go b/cmd/kubeadm/app/images/images.go index 3b25bd8bf25..999180828cd 100644 --- a/cmd/kubeadm/app/images/images.go +++ b/cmd/kubeadm/app/images/images.go @@ -56,8 +56,8 @@ func GetAllImages(cfg *kubeadmapi.MasterConfiguration) []string { imgs = append(imgs, fmt.Sprintf("%v/pause-%v:%v", cfg.ImageRepository, runtime.GOARCH, "3.1")) // if etcd is not external then add the image as it will be required - if len(cfg.Etcd.Endpoints) == 0 { - imgs = append(imgs, GetCoreImage(constants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Image)) + if cfg.Etcd.Local != nil { + imgs = append(imgs, GetCoreImage(constants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Local.Image)) } dnsImage := fmt.Sprintf("%v/k8s-dns-kube-dns-%v:%v", cfg.ImageRepository, runtime.GOARCH, dns.GetDNSVersion(nil, constants.KubeDNS)) diff --git a/cmd/kubeadm/app/phases/certs/certs.go b/cmd/kubeadm/app/phases/certs/certs.go index b67f4655c0e..b4d27a2e0db 100644 --- a/cmd/kubeadm/app/phases/certs/certs.go +++ b/cmd/kubeadm/app/phases/certs/certs.go @@ -51,8 +51,7 @@ func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error { CreateAPIServerEtcdClientCertAndKeyFiles, } - // Currently this is the only way we have to identify static pod etcd vs external etcd - if len(cfg.Etcd.Endpoints) == 0 { + if cfg.Etcd.Local != nil { certActions = append(certActions, etcdCertActions...) 
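// Editor's note (not part of this patch): a small illustrative sketch of the pattern the callers
// in this commit now rely on. With the monolithic Etcd struct split into two mutually exclusive
// pointers, code such as GetAllImages and CreatePKIAssets branches on which pointer is set instead
// of guessing from len(Endpoints). The types below are simplified stand-ins, not the kubeadm API.
package main

import (
	"errors"
	"fmt"
)

type etcd struct {
	Local    *struct{ Image string }
	External *struct{ Endpoints []string }
}

// etcdMode reports how the cluster's etcd is provisioned, rejecting ambiguous configurations.
func etcdMode(e etcd) (string, error) {
	switch {
	case e.Local != nil && e.External != nil:
		return "", errors.New("local and external etcd are mutually exclusive")
	case e.External != nil:
		return "external", nil
	case e.Local != nil:
		return "local (static pod managed by kubeadm)", nil
	default:
		return "", errors.New("no etcd configuration given")
	}
}

func main() {
	mode, err := etcdMode(etcd{Local: &struct{ Image string }{}})
	fmt.Println(mode, err) // "local (static pod managed by kubeadm)" <nil>
}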
} diff --git a/cmd/kubeadm/app/phases/certs/certs_test.go b/cmd/kubeadm/app/phases/certs/certs_test.go index 502683d675b..c6fbdcca840 100644 --- a/cmd/kubeadm/app/phases/certs/certs_test.go +++ b/cmd/kubeadm/app/phases/certs/certs_test.go @@ -325,9 +325,11 @@ func TestNewEtcdServerCertAndKey(t *testing.T) { cfg := &kubeadmapi.MasterConfiguration{ Etcd: kubeadmapi.Etcd{ - ServerCertSANs: []string{ - proxy, - proxyIP, + Local: &kubeadmapi.LocalEtcd{ + ServerCertSANs: []string{ + proxy, + proxyIP, + }, }, }, } @@ -358,9 +360,11 @@ func TestNewEtcdPeerCertAndKey(t *testing.T) { API: kubeadmapi.API{AdvertiseAddress: addr}, NodeName: hostname, Etcd: kubeadmapi.Etcd{ - PeerCertSANs: []string{ - proxy, - proxyIP, + Local: &kubeadmapi.LocalEtcd{ + PeerCertSANs: []string{ + proxy, + proxyIP, + }, }, }, } @@ -693,13 +697,18 @@ func TestCreateCertificateFilesMethods(t *testing.T) { cfg := &kubeadmapi.MasterConfiguration{ API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, + Etcd: kubeadmapi.Etcd{Local: &kubeadmapi.LocalEtcd{}}, Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, NodeName: "valid-hostname", CertificatesDir: tmpdir, } if test.externalEtcd { - cfg.Etcd.Endpoints = []string{"192.168.1.1:2379"} + if cfg.Etcd.External == nil { + cfg.Etcd.External = &kubeadmapi.ExternalEtcd{} + } + cfg.Etcd.Local = nil + cfg.Etcd.External.Endpoints = []string{"192.168.1.1:2379"} } // executes setup func (if necessary) diff --git a/cmd/kubeadm/app/phases/certs/doc.go b/cmd/kubeadm/app/phases/certs/doc.go index 0a60f992dcc..6f9801fd513 100644 --- a/cmd/kubeadm/app/phases/certs/doc.go +++ b/cmd/kubeadm/app/phases/certs/doc.go @@ -24,8 +24,8 @@ package certs From MasterConfiguration .API.AdvertiseAddress is an optional parameter that can be passed for an extra addition to the SAN IPs .APIServerCertSANs is an optional parameter for adding DNS names and IPs to the API Server serving cert SAN - .Etcd.ServerCertSANs is an optional parameter for adding DNS names and IPs to the etcd serving cert SAN - .Etcd.PeerCertSANs is an optional parameter for adding DNS names and IPs to the etcd peer cert SAN + .Etcd.Local.ServerCertSANs is an optional parameter for adding DNS names and IPs to the etcd serving cert SAN + .Etcd.Local.PeerCertSANs is an optional parameter for adding DNS names and IPs to the etcd peer cert SAN .Networking.DNSDomain is needed for knowing which DNS name the internal kubernetes service has .Networking.ServiceSubnet is needed for knowing which IP the internal kubernetes service is going to point to .CertificatesDir is required for knowing where all certificates should be stored diff --git a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go index 6cebbe3a92e..39ca139cba7 100644 --- a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go +++ b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go @@ -316,7 +316,9 @@ func GetEtcdAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNames, e IPs: []net.IP{net.IPv4(127, 0, 0, 1)}, } - appendSANsToAltNames(altNames, cfg.Etcd.ServerCertSANs, kubeadmconstants.EtcdServerCertName) + if cfg.Etcd.Local != nil { + appendSANsToAltNames(altNames, cfg.Etcd.Local.ServerCertSANs, kubeadmconstants.EtcdServerCertName) + } return altNames, nil } @@ -338,7 +340,9 @@ func GetEtcdPeerAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltName IPs: []net.IP{advertiseAddress}, } - appendSANsToAltNames(altNames, cfg.Etcd.PeerCertSANs, kubeadmconstants.EtcdPeerCertName) + if 
cfg.Etcd.Local != nil { + appendSANsToAltNames(altNames, cfg.Etcd.Local.PeerCertSANs, kubeadmconstants.EtcdPeerCertName) + } return altNames, nil } diff --git a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go index 2ff497c1550..9ff34b0b20a 100644 --- a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go +++ b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go @@ -508,11 +508,13 @@ func TestGetEtcdAltNames(t *testing.T) { proxyIP := "10.10.10.100" cfg := &kubeadmapi.MasterConfiguration{ Etcd: kubeadmapi.Etcd{ - ServerCertSANs: []string{ - proxy, - proxyIP, - "1.2.3.L", - "invalid,commas,in,DNS", + Local: &kubeadmapi.LocalEtcd{ + ServerCertSANs: []string{ + proxy, + proxyIP, + "1.2.3.L", + "invalid,commas,in,DNS", + }, }, }, } @@ -562,11 +564,13 @@ func TestGetEtcdPeerAltNames(t *testing.T) { API: kubeadmapi.API{AdvertiseAddress: advertiseIP}, NodeName: hostname, Etcd: kubeadmapi.Etcd{ - PeerCertSANs: []string{ - proxy, - proxyIP, - "1.2.3.L", - "invalid,commas,in,DNS", + Local: &kubeadmapi.LocalEtcd{ + PeerCertSANs: []string{ + proxy, + proxyIP, + "1.2.3.L", + "invalid,commas,in,DNS", + }, }, }, } diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go index 473472f50ee..17ff9de4390 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -169,16 +169,16 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string { command := []string{"kube-apiserver"} // If the user set endpoints for an external etcd cluster - if len(cfg.Etcd.Endpoints) > 0 { - defaultArguments["etcd-servers"] = strings.Join(cfg.Etcd.Endpoints, ",") + if cfg.Etcd.External != nil { + defaultArguments["etcd-servers"] = strings.Join(cfg.Etcd.External.Endpoints, ",") // Use any user supplied etcd certificates - if cfg.Etcd.CAFile != "" { - defaultArguments["etcd-cafile"] = cfg.Etcd.CAFile + if cfg.Etcd.External.CAFile != "" { + defaultArguments["etcd-cafile"] = cfg.Etcd.External.CAFile } - if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" { - defaultArguments["etcd-certfile"] = cfg.Etcd.CertFile - defaultArguments["etcd-keyfile"] = cfg.Etcd.KeyFile + if cfg.Etcd.External.CertFile != "" && cfg.Etcd.External.KeyFile != "" { + defaultArguments["etcd-certfile"] = cfg.Etcd.External.CertFile + defaultArguments["etcd-keyfile"] = cfg.Etcd.External.KeyFile } } else { // Default to etcd static pod on localhost @@ -186,17 +186,6 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string { defaultArguments["etcd-cafile"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdCACertName) defaultArguments["etcd-certfile"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerEtcdClientCertName) defaultArguments["etcd-keyfile"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerEtcdClientKeyName) - - // Warn for unused user supplied variables - if cfg.Etcd.CAFile != "" { - glog.Warningf("[controlplane] configuration for %s CAFile, %s, is unused without providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.CAFile, kubeadmconstants.Etcd) - } - if cfg.Etcd.CertFile != "" { - glog.Warningf("[controlplane] configuration for %s CertFile, %s, is unused without providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.CertFile, kubeadmconstants.Etcd) - } - if cfg.Etcd.KeyFile != "" { - glog.Warningf("[controlplane] configuration for %s KeyFile, %s, is unused without 
providing Endpoints for external %s\n", kubeadmconstants.Etcd, cfg.Etcd.KeyFile, kubeadmconstants.Etcd) - } } if features.Enabled(cfg.FeatureGates, features.HighAvailability) { diff --git a/cmd/kubeadm/app/phases/controlplane/manifests_test.go b/cmd/kubeadm/app/phases/controlplane/manifests_test.go index ea26780ff50..3e1a7325d33 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests_test.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests_test.go @@ -181,50 +181,11 @@ func TestGetAPIServerCommand(t *testing.T) { "--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key", }, }, - { - name: "custom etcd cert and key files", - cfg: &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "4.3.2.1"}, - Networking: kubeadmapi.Networking{ServiceSubnet: "bar"}, - Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"}, - CertificatesDir: testCertsDir, - }, - expected: []string{ - "kube-apiserver", - "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", - "--service-cluster-ip-range=bar", - "--service-account-key-file=" + testCertsDir + "/sa.pub", - "--client-ca-file=" + testCertsDir + "/ca.crt", - "--tls-cert-file=" + testCertsDir + "/apiserver.crt", - "--tls-private-key-file=" + testCertsDir + "/apiserver.key", - "--kubelet-client-certificate=" + testCertsDir + "/apiserver-kubelet-client.crt", - "--kubelet-client-key=" + testCertsDir + "/apiserver-kubelet-client.key", - "--enable-bootstrap-token-auth=true", - "--secure-port=123", - "--allow-privileged=true", - "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname", - "--proxy-client-cert-file=/var/lib/certs/front-proxy-client.crt", - "--proxy-client-key-file=/var/lib/certs/front-proxy-client.key", - "--requestheader-username-headers=X-Remote-User", - "--requestheader-group-headers=X-Remote-Group", - "--requestheader-extra-headers-prefix=X-Remote-Extra-", - "--requestheader-client-ca-file=" + testCertsDir + "/front-proxy-ca.crt", - "--requestheader-allowed-names=front-proxy-client", - "--authorization-mode=Node,RBAC", - "--advertise-address=4.3.2.1", - "--etcd-servers=https://127.0.0.1:2379", - "--etcd-cafile=" + testCertsDir + "/etcd/ca.crt", - "--etcd-certfile=" + testCertsDir + "/apiserver-etcd-client.crt", - "--etcd-keyfile=" + testCertsDir + "/apiserver-etcd-client.key", - }, - }, { name: "ignores the audit policy if the feature gate is not enabled", cfg: &kubeadmapi.MasterConfiguration{ API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "4.3.2.1"}, Networking: kubeadmapi.Networking{ServiceSubnet: "bar"}, - Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"}, CertificatesDir: testCertsDir, AuditPolicyConfiguration: kubeadmapi.AuditPolicyConfiguration{ Path: "/foo/bar", @@ -267,7 +228,6 @@ func TestGetAPIServerCommand(t *testing.T) { cfg: &kubeadmapi.MasterConfiguration{ API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"}, Networking: kubeadmapi.Networking{ServiceSubnet: "bar"}, - Etcd: kubeadmapi.Etcd{CertFile: "fiz", KeyFile: "faz"}, CertificatesDir: testCertsDir, }, expected: []string{ @@ -303,10 +263,17 @@ func TestGetAPIServerCommand(t *testing.T) { { name: "an external etcd with custom ca, certs and keys", cfg: &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"}, - Networking: kubeadmapi.Networking{ServiceSubnet: "bar"}, - FeatureGates: 
map[string]bool{features.HighAvailability: true}, - Etcd: kubeadmapi.Etcd{Endpoints: []string{"https://8.6.4.1:2379", "https://8.6.4.2:2379"}, CAFile: "fuz", CertFile: "fiz", KeyFile: "faz"}, + API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"}, + Networking: kubeadmapi.Networking{ServiceSubnet: "bar"}, + FeatureGates: map[string]bool{features.HighAvailability: true}, + Etcd: kubeadmapi.Etcd{ + External: &kubeadmapi.ExternalEtcd{ + Endpoints: []string{"https://8.6.4.1:2379", "https://8.6.4.2:2379"}, + CAFile: "fuz", + CertFile: "fiz", + KeyFile: "faz", + }, + }, CertificatesDir: testCertsDir, }, expected: []string{ @@ -343,9 +310,13 @@ func TestGetAPIServerCommand(t *testing.T) { { name: "an insecure etcd", cfg: &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"}, - Networking: kubeadmapi.Networking{ServiceSubnet: "bar"}, - Etcd: kubeadmapi.Etcd{Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:2380"}}, + API: kubeadmapi.API{BindPort: 123, AdvertiseAddress: "2001:db8::1"}, + Networking: kubeadmapi.Networking{ServiceSubnet: "bar"}, + Etcd: kubeadmapi.Etcd{ + External: &kubeadmapi.ExternalEtcd{ + Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:2380"}, + }, + }, CertificatesDir: testCertsDir, }, expected: []string{ diff --git a/cmd/kubeadm/app/phases/controlplane/volumes.go b/cmd/kubeadm/app/phases/controlplane/volumes.go index 5f8b967cdc0..9237d2c108a 100644 --- a/cmd/kubeadm/app/phases/controlplane/volumes.go +++ b/cmd/kubeadm/app/phases/controlplane/volumes.go @@ -62,8 +62,8 @@ func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.MasterConfiguration) c mounts.NewHostPathMount(kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeAuditPolicyLogVolumeName, cfg.AuditPolicyConfiguration.LogDir, kubeadmconstants.StaticPodAuditPolicyLogDir, false, &hostPathDirectoryOrCreate) } // If external etcd is specified, mount the directories needed for accessing the CA/serving certs and the private key - if len(cfg.Etcd.Endpoints) != 0 { - etcdVols, etcdVolMounts := getEtcdCertVolumes(cfg.Etcd, cfg.CertificatesDir) + if cfg.Etcd.External != nil { + etcdVols, etcdVolMounts := getEtcdCertVolumes(cfg.Etcd.External, cfg.CertificatesDir) mounts.AddHostPathMounts(kubeadmconstants.KubeAPIServer, etcdVols, etcdVolMounts) } @@ -178,7 +178,7 @@ func (c *controlPlaneHostPathMounts) addComponentVolumeMount(component string, v } // getEtcdCertVolumes returns the volumes/volumemounts needed for talking to an external etcd cluster -func getEtcdCertVolumes(etcdCfg kubeadmapi.Etcd, k8sCertificatesDir string) ([]v1.Volume, []v1.VolumeMount) { +func getEtcdCertVolumes(etcdCfg *kubeadmapi.ExternalEtcd, k8sCertificatesDir string) ([]v1.Volume, []v1.VolumeMount) { certPaths := []string{etcdCfg.CAFile, etcdCfg.CertFile, etcdCfg.KeyFile} certDirs := sets.NewString() for _, certPath := range certPaths { diff --git a/cmd/kubeadm/app/phases/controlplane/volumes_test.go b/cmd/kubeadm/app/phases/controlplane/volumes_test.go index 68e943a0dba..492dc16ab02 100644 --- a/cmd/kubeadm/app/phases/controlplane/volumes_test.go +++ b/cmd/kubeadm/app/phases/controlplane/volumes_test.go @@ -234,7 +234,7 @@ func TestGetEtcdCertVolumes(t *testing.T) { } for _, rt := range tests { - actualVol, actualVolMount := getEtcdCertVolumes(kubeadmapi.Etcd{ + actualVol, actualVolMount := getEtcdCertVolumes(&kubeadmapi.ExternalEtcd{ CAFile: rt.ca, CertFile: rt.cert, KeyFile: rt.key, @@ -525,10 +525,12 @@ func TestGetHostPathVolumesForTheControlPlane(t 
*testing.T) { cfg: &kubeadmapi.MasterConfiguration{ CertificatesDir: testCertsDir, Etcd: kubeadmapi.Etcd{ - Endpoints: []string{"foo"}, - CAFile: "/etc/certs/etcd/my-etcd-ca.crt", - CertFile: testCertsDir + "/etcd/my-etcd.crt", - KeyFile: "/var/lib/etcd/certs/my-etcd.key", + External: &kubeadmapi.ExternalEtcd{ + Endpoints: []string{"foo"}, + CAFile: "/etc/certs/etcd/my-etcd-ca.crt", + CertFile: testCertsDir + "/etcd/my-etcd.crt", + KeyFile: "/var/lib/etcd/certs/my-etcd.key", + }, }, }, vol: volMap2, diff --git a/cmd/kubeadm/app/phases/etcd/local.go b/cmd/kubeadm/app/phases/etcd/local.go index 8af1bd8db34..80000dabe1c 100644 --- a/cmd/kubeadm/app/phases/etcd/local.go +++ b/cmd/kubeadm/app/phases/etcd/local.go @@ -54,17 +54,17 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.Ma func GetEtcdPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.Pod { pathType := v1.HostPathDirectoryOrCreate etcdMounts := map[string]v1.Volume{ - etcdVolumeName: staticpodutil.NewVolume(etcdVolumeName, cfg.Etcd.DataDir, &pathType), + etcdVolumeName: staticpodutil.NewVolume(etcdVolumeName, cfg.Etcd.Local.DataDir, &pathType), certsVolumeName: staticpodutil.NewVolume(certsVolumeName, cfg.CertificatesDir+"/etcd", &pathType), } return staticpodutil.ComponentPod(v1.Container{ Name: kubeadmconstants.Etcd, Command: getEtcdCommand(cfg), - Image: images.GetCoreImage(kubeadmconstants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Image), + Image: images.GetCoreImage(kubeadmconstants.Etcd, cfg.ImageRepository, cfg.KubernetesVersion, cfg.Etcd.Local.Image), ImagePullPolicy: v1.PullIfNotPresent, // Mount the etcd datadir path read-write so etcd can store data in a more persistent manner VolumeMounts: []v1.VolumeMount{ - staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.DataDir, false), + staticpodutil.NewVolumeMount(etcdVolumeName, cfg.Etcd.Local.DataDir, false), staticpodutil.NewVolumeMount(certsVolumeName, cfg.CertificatesDir+"/etcd", false), }, LivenessProbe: staticpodutil.EtcdProbe( @@ -79,7 +79,7 @@ func getEtcdCommand(cfg *kubeadmapi.MasterConfiguration) []string { defaultArguments := map[string]string{ "listen-client-urls": "https://127.0.0.1:2379", "advertise-client-urls": "https://127.0.0.1:2379", - "data-dir": cfg.Etcd.DataDir, + "data-dir": cfg.Etcd.Local.DataDir, "cert-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerCertName), "key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerKeyName), "trusted-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdCACertName), @@ -92,6 +92,6 @@ func getEtcdCommand(cfg *kubeadmapi.MasterConfiguration) []string { } command := []string{"etcd"} - command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.Etcd.ExtraArgs)...) + command = append(command, kubeadmutil.BuildArgumentListFromMap(defaultArguments, cfg.Etcd.Local.ExtraArgs)...) 
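// Editor's note (not part of this patch): getEtcdCommand above merges a map of default flags with
// the user's Etcd.Local.ExtraArgs via kubeadmutil.BuildArgumentListFromMap. The snippet below is a
// simplified, hypothetical stand-in for that helper, written only to make the merging behaviour
// concrete: extra args win over defaults and everything is rendered as "--key=value" flags. It is
// not the real implementation.
package main

import (
	"fmt"
	"sort"
)

// buildArgs merges overrides into defaults and returns a deterministic flag list.
func buildArgs(defaults, overrides map[string]string) []string {
	merged := map[string]string{}
	for k, v := range defaults {
		merged[k] = v
	}
	for k, v := range overrides {
		merged[k] = v // user-supplied extra args take precedence
	}
	keys := make([]string, 0, len(merged))
	for k := range merged {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	args := make([]string, 0, len(keys))
	for _, k := range keys {
		args = append(args, fmt.Sprintf("--%s=%s", k, merged[k]))
	}
	return args
}

func main() {
	defaults := map[string]string{
		"data-dir":           "/var/lib/etcd",
		"listen-client-urls": "https://127.0.0.1:2379",
	}
	extra := map[string]string{"listen-client-urls": "https://10.0.1.10:2379"}
	fmt.Println(append([]string{"etcd"}, buildArgs(defaults, extra)...))
	// [etcd --data-dir=/var/lib/etcd --listen-client-urls=https://10.0.1.10:2379]
}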
return command } diff --git a/cmd/kubeadm/app/phases/etcd/local_test.go b/cmd/kubeadm/app/phases/etcd/local_test.go index ff0a484ac67..7c884abc53b 100644 --- a/cmd/kubeadm/app/phases/etcd/local_test.go +++ b/cmd/kubeadm/app/phases/etcd/local_test.go @@ -34,6 +34,12 @@ func TestGetEtcdPodSpec(t *testing.T) { // Creates a Master Configuration cfg := &kubeadmapi.MasterConfiguration{ KubernetesVersion: "v1.7.0", + Etcd: kubeadmapi.Etcd{ + Local: &kubeadmapi.LocalEtcd{ + DataDir: "/var/lib/etcd", + Image: "", + }, + }, } // Executes GetEtcdPodSpec @@ -54,6 +60,12 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) { // Creates a Master Configuration cfg := &kubeadmapi.MasterConfiguration{ KubernetesVersion: "v1.7.0", + Etcd: kubeadmapi.Etcd{ + Local: &kubeadmapi.LocalEtcd{ + DataDir: "/var/lib/etcd", + Image: "k8s.gcr.io/etcd", + }, + }, } // Execute createStaticPodFunction @@ -75,7 +87,7 @@ func TestGetEtcdCommand(t *testing.T) { }{ { cfg: &kubeadmapi.MasterConfiguration{ - Etcd: kubeadmapi.Etcd{DataDir: "/var/lib/etcd"}, + Etcd: kubeadmapi.Etcd{Local: &kubeadmapi.LocalEtcd{DataDir: "/var/lib/etcd"}}, }, expected: []string{ "etcd", @@ -96,10 +108,12 @@ func TestGetEtcdCommand(t *testing.T) { { cfg: &kubeadmapi.MasterConfiguration{ Etcd: kubeadmapi.Etcd{ - DataDir: "/var/lib/etcd", - ExtraArgs: map[string]string{ - "listen-client-urls": "https://10.0.1.10:2379", - "advertise-client-urls": "https://10.0.1.10:2379", + Local: &kubeadmapi.LocalEtcd{ + DataDir: "/var/lib/etcd", + ExtraArgs: map[string]string{ + "listen-client-urls": "https://10.0.1.10:2379", + "advertise-client-urls": "https://10.0.1.10:2379", + }, }, }, }, @@ -121,7 +135,7 @@ func TestGetEtcdCommand(t *testing.T) { }, { cfg: &kubeadmapi.MasterConfiguration{ - Etcd: kubeadmapi.Etcd{DataDir: "/etc/foo"}, + Etcd: kubeadmapi.Etcd{Local: &kubeadmapi.LocalEtcd{DataDir: "/etc/foo"}}, }, expected: []string{ "etcd", diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index 488a850636f..a041c9669cf 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -226,7 +226,7 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP // performEtcdStaticPodUpgrade performs upgrade of etcd, it returns bool which indicates fatal error or not and the actual error. 
func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, recoverManifests map[string]string, isTLSUpgrade bool, oldEtcdClient, newEtcdClient etcdutil.ClusterInterrogator) (bool, error) { // Add etcd static pod spec only if external etcd is not configured - if len(cfg.Etcd.Endpoints) != 0 { + if cfg.Etcd.External != nil { return false, fmt.Errorf("external etcd detected, won't try to change any etcd state") } @@ -238,7 +238,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM // Backing up etcd data store backupEtcdDir := pathMgr.BackupEtcdDir() - runningEtcdDir := cfg.Etcd.DataDir + runningEtcdDir := cfg.Etcd.Local.DataDir if err := util.CopyDir(runningEtcdDir, backupEtcdDir); err != nil { return true, fmt.Errorf("failed to back up etcd data: %v", err) } @@ -382,14 +382,14 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager } if oldEtcdClient == nil { - if len(cfg.Etcd.Endpoints) > 0 { + if cfg.Etcd.External != nil { // External etcd isExternalEtcd = true client, err := etcdutil.New( - cfg.Etcd.Endpoints, - cfg.Etcd.CAFile, - cfg.Etcd.CertFile, - cfg.Etcd.KeyFile, + cfg.Etcd.External.Endpoints, + cfg.Etcd.External.CAFile, + cfg.Etcd.External.CertFile, + cfg.Etcd.External.KeyFile, ) if err != nil { return fmt.Errorf("failed to create etcd client for external etcd: %v", err) @@ -482,7 +482,7 @@ func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr // When the folder contents are successfully rolled back, nil is returned, otherwise an error is returned. func rollbackEtcdData(cfg *kubeadmapi.MasterConfiguration, pathMgr StaticPodPathManager) error { backupEtcdDir := pathMgr.BackupEtcdDir() - runningEtcdDir := cfg.Etcd.DataDir + runningEtcdDir := cfg.Etcd.Local.DataDir if err := util.CopyDir(backupEtcdDir, runningEtcdDir); err != nil { // Let the user know there we're problems, but we tried to reçover diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 060f5e094be..44a299b5a3a 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -56,15 +56,9 @@ apiServerExtraArgs: null certificatesDir: %s controllerManagerExtraArgs: null etcd: - caFile: "" - certFile: "" - dataDir: %s - endpoints: null - extraArgs: null - image: "" - keyFile: "" - serverCertSANs: null - peerCertSANs: null + local: + dataDir: %s + image: "" featureFlags: null imageRepository: k8s.gcr.io kubernetesVersion: %s diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index d7cbbefb093..8b3c5cb6a98 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -689,8 +689,15 @@ func (ExternalEtcdVersionCheck) Name() string { } // Check validates external etcd version +// TODO: Use the official etcd Golang client for this instead? 
func (evc ExternalEtcdVersionCheck) Check() (warnings, errors []error) { glog.V(1).Infoln("validating the external etcd version") + + // Return quickly if the user isn't using external etcd + if evc.Etcd.External.Endpoints == nil { + return nil, nil + } + var config *tls.Config var err error if config, err = evc.configRootCAs(config); err != nil { @@ -703,7 +710,7 @@ func (evc ExternalEtcdVersionCheck) Check() (warnings, errors []error) { } client := evc.getHTTPClient(config) - for _, endpoint := range evc.Etcd.Endpoints { + for _, endpoint := range evc.Etcd.External.Endpoints { if _, err := url.Parse(endpoint); err != nil { errors = append(errors, fmt.Errorf("failed to parse external etcd endpoint %s : %v", endpoint, err)) continue @@ -739,10 +746,10 @@ func (evc ExternalEtcdVersionCheck) Check() (warnings, errors []error) { // configRootCAs configures and returns a reference to tls.Config instance if CAFile is provided func (evc ExternalEtcdVersionCheck) configRootCAs(config *tls.Config) (*tls.Config, error) { var CACertPool *x509.CertPool - if evc.Etcd.CAFile != "" { - CACert, err := ioutil.ReadFile(evc.Etcd.CAFile) + if evc.Etcd.External.CAFile != "" { + CACert, err := ioutil.ReadFile(evc.Etcd.External.CAFile) if err != nil { - return nil, fmt.Errorf("couldn't load external etcd's server certificate %s: %v", evc.Etcd.CAFile, err) + return nil, fmt.Errorf("couldn't load external etcd's server certificate %s: %v", evc.Etcd.External.CAFile, err) } CACertPool = x509.NewCertPool() CACertPool.AppendCertsFromPEM(CACert) @@ -759,11 +766,11 @@ func (evc ExternalEtcdVersionCheck) configRootCAs(config *tls.Config) (*tls.Conf // configCertAndKey configures and returns a reference to tls.Config instance if CertFile and KeyFile pair is provided func (evc ExternalEtcdVersionCheck) configCertAndKey(config *tls.Config) (*tls.Config, error) { var cert tls.Certificate - if evc.Etcd.CertFile != "" && evc.Etcd.KeyFile != "" { + if evc.Etcd.External.CertFile != "" && evc.Etcd.External.KeyFile != "" { var err error - cert, err = tls.LoadX509KeyPair(evc.Etcd.CertFile, evc.Etcd.KeyFile) + cert, err = tls.LoadX509KeyPair(evc.Etcd.External.CertFile, evc.Etcd.External.KeyFile) if err != nil { - return nil, fmt.Errorf("couldn't load external etcd's certificate and key pair %s, %s: %v", evc.Etcd.CertFile, evc.Etcd.KeyFile, err) + return nil, fmt.Errorf("couldn't load external etcd's certificate and key pair %s, %s: %v", evc.Etcd.External.CertFile, evc.Etcd.External.KeyFile, err) } if config == nil { config = &tls.Config{} @@ -874,26 +881,26 @@ func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfi ) } - if len(cfg.Etcd.Endpoints) == 0 { + if cfg.Etcd.Local != nil { // Only do etcd related checks when no external endpoints were specified checks = append(checks, PortOpenCheck{port: 2379}, - DirAvailableCheck{Path: cfg.Etcd.DataDir}, + DirAvailableCheck{Path: cfg.Etcd.Local.DataDir}, ) - } else { + } + + if cfg.Etcd.External != nil { // Only check etcd version when external endpoints are specified - if cfg.Etcd.CAFile != "" { - checks = append(checks, FileExistingCheck{Path: cfg.Etcd.CAFile}) + if cfg.Etcd.External.CAFile != "" { + checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.CAFile}) } - if cfg.Etcd.CertFile != "" { - checks = append(checks, FileExistingCheck{Path: cfg.Etcd.CertFile}) + if cfg.Etcd.External.CertFile != "" { + checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.CertFile}) } - if cfg.Etcd.KeyFile != "" { - checks = append(checks, 
FileExistingCheck{Path: cfg.Etcd.KeyFile}) + if cfg.Etcd.External.KeyFile != "" { + checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.KeyFile}) } - checks = append(checks, - ExternalEtcdVersionCheck{Etcd: cfg.Etcd}, - ) + checks = append(checks, ExternalEtcdVersionCheck{Etcd: cfg.Etcd}) } if ip := net.ParseIP(cfg.API.AdvertiseAddress); ip != nil { diff --git a/cmd/kubeadm/app/preflight/checks_test.go b/cmd/kubeadm/app/preflight/checks_test.go index 0c533a67f17..cfea0abc450 100644 --- a/cmd/kubeadm/app/preflight/checks_test.go +++ b/cmd/kubeadm/app/preflight/checks_test.go @@ -196,21 +196,21 @@ func TestRunInitMasterChecks(t *testing.T) { { name: "Test CA file exists if specfied", cfg: &kubeadmapi.MasterConfiguration{ - Etcd: kubeadmapi.Etcd{CAFile: "/foo"}, + Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CAFile: "/foo"}}, }, expected: false, }, { name: "Test Cert file exists if specfied", cfg: &kubeadmapi.MasterConfiguration{ - Etcd: kubeadmapi.Etcd{CertFile: "/foo"}, + Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CertFile: "/foo"}}, }, expected: false, }, { name: "Test Key file exists if specfied", cfg: &kubeadmapi.MasterConfiguration{ - Etcd: kubeadmapi.Etcd{CertFile: "/foo"}, + Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CertFile: "/foo"}}, }, expected: false, }, @@ -319,7 +319,7 @@ func TestConfigRootCAs(t *testing.T) { t.Errorf("failed configRootCAs:\n\texpected: succeed writing contents to temp CA file %s\n\tactual:%v", f.Name(), err) } - c := ExternalEtcdVersionCheck{Etcd: kubeadmapi.Etcd{CAFile: f.Name()}} + c := ExternalEtcdVersionCheck{Etcd: kubeadmapi.Etcd{External: &kubeadmapi.ExternalEtcd{CAFile: f.Name()}}} config, err := c.configRootCAs(nil) if err != nil { @@ -367,10 +367,14 @@ func TestConfigCertAndKey(t *testing.T) { err, ) } - c := ExternalEtcdVersionCheck{Etcd: kubeadmapi.Etcd{ - CertFile: certFile.Name(), - KeyFile: keyFile.Name(), - }} + c := ExternalEtcdVersionCheck{ + Etcd: kubeadmapi.Etcd{ + External: &kubeadmapi.ExternalEtcd{ + CertFile: certFile.Name(), + KeyFile: keyFile.Name(), + }, + }, + } config, err := c.configCertAndKey(nil) if err != nil { diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml index aa8c9942bbb..cf4d1d1c370 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml @@ -17,15 +17,13 @@ ClusterName: kubernetes ControllerManagerExtraArgs: null ControllerManagerExtraVolumes: null Etcd: - CAFile: "" - CertFile: "" - DataDir: /var/lib/etcd - Endpoints: null - ExtraArgs: null - Image: "" - KeyFile: "" - PeerCertSANs: null - ServerCertSANs: null + External: null + Local: + DataDir: /var/lib/etcd + ExtraArgs: null + Image: "" + PeerCertSANs: null + ServerCertSANs: null FeatureGates: null ImageRepository: k8s.gcr.io KubeProxy: diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml index de6b2724910..959229b286a 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml @@ -13,12 +13,9 @@ certificatesDir: /etc/kubernetes/pki clusterName: kubernetes criSocket: /var/run/dockershim.sock etcd: - caFile: "" - certFile: "" - dataDir: /var/lib/etcd - endpoints: null - image: "" - keyFile: "" + local: + dataDir: 
/var/lib/etcd + image: "" imageRepository: k8s.gcr.io kind: MasterConfiguration kubeProxy: diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml index a852a56a357..7db1119f2f4 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml @@ -11,12 +11,9 @@ certificatesDir: /var/lib/kubernetes/pki clusterName: kubernetes criSocket: /var/run/criruntime.sock etcd: - caFile: "" - certFile: "" - dataDir: /var/lib/etcd - endpoints: null - image: "" - keyFile: "" + local: + dataDir: /var/lib/etcd + image: "" imageRepository: my-company.com kind: MasterConfiguration kubeProxy: diff --git a/cmd/kubeadm/app/util/staticpod/utils.go b/cmd/kubeadm/app/util/staticpod/utils.go index e4ecb7cd16a..d09518e11ab 100644 --- a/cmd/kubeadm/app/util/staticpod/utils.go +++ b/cmd/kubeadm/app/util/staticpod/utils.go @@ -242,8 +242,8 @@ func GetProbeAddress(cfg *kubeadmapi.MasterConfiguration, componentName string) return addr } case componentName == kubeadmconstants.Etcd: - if cfg.Etcd.ExtraArgs != nil { - if arg, exists := cfg.Etcd.ExtraArgs[etcdListenClientURLsArg]; exists { + if cfg.Etcd.Local != nil && cfg.Etcd.Local.ExtraArgs != nil { + if arg, exists := cfg.Etcd.Local.ExtraArgs[etcdListenClientURLsArg]; exists { // Use the first url in the listen-client-urls if multiple url's are specified. if strings.ContainsAny(arg, ",") { arg = strings.Split(arg, ",")[0] diff --git a/cmd/kubeadm/app/util/staticpod/utils_test.go b/cmd/kubeadm/app/util/staticpod/utils_test.go index 834e1f5a0c6..96c3d8dc419 100644 --- a/cmd/kubeadm/app/util/staticpod/utils_test.go +++ b/cmd/kubeadm/app/util/staticpod/utils_test.go @@ -207,8 +207,10 @@ func TestEtcdProbe(t *testing.T) { name: "valid etcd probe using listen-client-urls IPv4 addresses", cfg: &kubeadmapi.MasterConfiguration{ Etcd: kubeadmapi.Etcd{ - ExtraArgs: map[string]string{ - "listen-client-urls": "http://1.2.3.4:2379,http://4.3.2.1:2379"}, + Local: &kubeadmapi.LocalEtcd{ + ExtraArgs: map[string]string{ + "listen-client-urls": "http://1.2.3.4:2379,http://4.3.2.1:2379"}, + }, }, }, component: kubeadmconstants.Etcd, @@ -223,8 +225,10 @@ func TestEtcdProbe(t *testing.T) { name: "valid etcd probe using listen-client-urls IPv6 addresses", cfg: &kubeadmapi.MasterConfiguration{ Etcd: kubeadmapi.Etcd{ - ExtraArgs: map[string]string{ - "listen-client-urls": "http://[2001:db8::1]:2379,http://[2001:db8::2]:2379"}, + Local: &kubeadmapi.LocalEtcd{ + ExtraArgs: map[string]string{ + "listen-client-urls": "http://[2001:db8::1]:2379,http://[2001:db8::2]:2379"}, + }, }, }, component: kubeadmconstants.Etcd, @@ -239,8 +243,10 @@ func TestEtcdProbe(t *testing.T) { name: "valid IPv4 etcd probe using hostname for listen-client-urls", cfg: &kubeadmapi.MasterConfiguration{ Etcd: kubeadmapi.Etcd{ - ExtraArgs: map[string]string{ - "listen-client-urls": "http://localhost:2379"}, + Local: &kubeadmapi.LocalEtcd{ + ExtraArgs: map[string]string{ + "listen-client-urls": "http://localhost:2379"}, + }, }, }, component: kubeadmconstants.Etcd, From 2d0efea35b26b50f738c410a45ea581d40619a36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 23 May 2018 21:13:42 +0300 Subject: [PATCH 138/307] autogenerated --- cmd/kubeadm/app/apis/kubeadm/BUILD | 1 + .../v1alpha1/zz_generated.conversion.go | 34 +++---- .../v1alpha2/zz_generated.conversion.go | 80 +++++++++++++---- 
.../kubeadm/v1alpha2/zz_generated.deepcopy.go | 88 +++++++++++++++---- .../app/apis/kubeadm/zz_generated.deepcopy.go | 88 +++++++++++++++---- 5 files changed, 212 insertions(+), 79 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/BUILD b/cmd/kubeadm/app/apis/kubeadm/BUILD index 2d64123a87d..d37bb68e4a7 100644 --- a/cmd/kubeadm/app/apis/kubeadm/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/BUILD @@ -17,6 +17,7 @@ go_library( deps = [ "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library", "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", + "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go index 0b06b5a9e41..e602d3aaafb 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go @@ -112,37 +112,25 @@ func Convert_kubeadm_AuditPolicyConfiguration_To_v1alpha1_AuditPolicyConfigurati } func autoConvert_v1alpha1_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error { - out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) - out.CAFile = in.CAFile - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.DataDir = in.DataDir - out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) - out.Image = in.Image + // WARNING: in.Endpoints requires manual conversion: does not exist in peer-type + // WARNING: in.CAFile requires manual conversion: does not exist in peer-type + // WARNING: in.CertFile requires manual conversion: does not exist in peer-type + // WARNING: in.KeyFile requires manual conversion: does not exist in peer-type + // WARNING: in.DataDir requires manual conversion: does not exist in peer-type + // WARNING: in.ExtraArgs requires manual conversion: does not exist in peer-type + // WARNING: in.Image requires manual conversion: does not exist in peer-type // WARNING: in.SelfHosted requires manual conversion: does not exist in peer-type - out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) - out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) + // WARNING: in.ServerCertSANs requires manual conversion: does not exist in peer-type + // WARNING: in.PeerCertSANs requires manual conversion: does not exist in peer-type return nil } func autoConvert_kubeadm_Etcd_To_v1alpha1_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error { - out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) - out.CAFile = in.CAFile - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.DataDir = in.DataDir - out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) - out.Image = in.Image - out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) - out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) + // WARNING: in.Local requires manual conversion: does not exist in peer-type + // WARNING: in.External requires manual conversion: does not exist in peer-type return nil } -// Convert_kubeadm_Etcd_To_v1alpha1_Etcd is an autogenerated conversion function. 
-func Convert_kubeadm_Etcd_To_v1alpha1_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error { - return autoConvert_kubeadm_Etcd_To_v1alpha1_Etcd(in, out, s) -} - func autoConvert_v1alpha1_HostPathMount_To_kubeadm_HostPathMount(in *HostPathMount, out *kubeadm.HostPathMount, s conversion.Scope) error { out.Name = in.Name out.HostPath = in.HostPath diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go index fa158471d1d..dd661e08773 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go @@ -46,12 +46,16 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_kubeadm_AuditPolicyConfiguration_To_v1alpha2_AuditPolicyConfiguration, Convert_v1alpha2_Etcd_To_kubeadm_Etcd, Convert_kubeadm_Etcd_To_v1alpha2_Etcd, + Convert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd, + Convert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd, Convert_v1alpha2_HostPathMount_To_kubeadm_HostPathMount, Convert_kubeadm_HostPathMount_To_v1alpha2_HostPathMount, Convert_v1alpha2_KubeProxy_To_kubeadm_KubeProxy, Convert_kubeadm_KubeProxy_To_v1alpha2_KubeProxy, Convert_v1alpha2_KubeletConfiguration_To_kubeadm_KubeletConfiguration, Convert_kubeadm_KubeletConfiguration_To_v1alpha2_KubeletConfiguration, + Convert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd, + Convert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd, Convert_v1alpha2_MasterConfiguration_To_kubeadm_MasterConfiguration, Convert_kubeadm_MasterConfiguration_To_v1alpha2_MasterConfiguration, Convert_v1alpha2_Networking_To_kubeadm_Networking, @@ -112,15 +116,8 @@ func Convert_kubeadm_AuditPolicyConfiguration_To_v1alpha2_AuditPolicyConfigurati } func autoConvert_v1alpha2_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error { - out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) - out.CAFile = in.CAFile - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.DataDir = in.DataDir - out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) - out.Image = in.Image - out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) - out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) + out.Local = (*kubeadm.LocalEtcd)(unsafe.Pointer(in.Local)) + out.External = (*kubeadm.ExternalEtcd)(unsafe.Pointer(in.External)) return nil } @@ -130,15 +127,8 @@ func Convert_v1alpha2_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conver } func autoConvert_kubeadm_Etcd_To_v1alpha2_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error { - out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) - out.CAFile = in.CAFile - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.DataDir = in.DataDir - out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) - out.Image = in.Image - out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) - out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) + out.Local = (*LocalEtcd)(unsafe.Pointer(in.Local)) + out.External = (*ExternalEtcd)(unsafe.Pointer(in.External)) return nil } @@ -147,6 +137,32 @@ func Convert_kubeadm_Etcd_To_v1alpha2_Etcd(in *kubeadm.Etcd, out *Etcd, s conver return autoConvert_kubeadm_Etcd_To_v1alpha2_Etcd(in, out, s) } +func autoConvert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd(in *ExternalEtcd, out *kubeadm.ExternalEtcd, s conversion.Scope) error { + out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) + out.CAFile = 
in.CAFile + out.CertFile = in.CertFile + out.KeyFile = in.KeyFile + return nil +} + +// Convert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd is an autogenerated conversion function. +func Convert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd(in *ExternalEtcd, out *kubeadm.ExternalEtcd, s conversion.Scope) error { + return autoConvert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd(in, out, s) +} + +func autoConvert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd(in *kubeadm.ExternalEtcd, out *ExternalEtcd, s conversion.Scope) error { + out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) + out.CAFile = in.CAFile + out.CertFile = in.CertFile + out.KeyFile = in.KeyFile + return nil +} + +// Convert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd is an autogenerated conversion function. +func Convert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd(in *kubeadm.ExternalEtcd, out *ExternalEtcd, s conversion.Scope) error { + return autoConvert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd(in, out, s) +} + func autoConvert_v1alpha2_HostPathMount_To_kubeadm_HostPathMount(in *HostPathMount, out *kubeadm.HostPathMount, s conversion.Scope) error { out.Name = in.Name out.HostPath = in.HostPath @@ -215,6 +231,34 @@ func Convert_kubeadm_KubeletConfiguration_To_v1alpha2_KubeletConfiguration(in *k return autoConvert_kubeadm_KubeletConfiguration_To_v1alpha2_KubeletConfiguration(in, out, s) } +func autoConvert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error { + out.Image = in.Image + out.DataDir = in.DataDir + out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) + out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) + out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) + return nil +} + +// Convert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd is an autogenerated conversion function. +func Convert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error { + return autoConvert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd(in, out, s) +} + +func autoConvert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error { + out.Image = in.Image + out.DataDir = in.DataDir + out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) + out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) + out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) + return nil +} + +// Convert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd is an autogenerated conversion function. +func Convert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error { + return autoConvert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd(in, out, s) +} + func autoConvert_v1alpha2_MasterConfiguration_To_kubeadm_MasterConfiguration(in *MasterConfiguration, out *kubeadm.MasterConfiguration, s conversion.Scope) error { if err := Convert_v1alpha2_API_To_kubeadm_API(&in.API, &out.API, s); err != nil { return err diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go index 108bfbd0dc3..af11b89b973 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go @@ -71,27 +71,23 @@ func (in *AuditPolicyConfiguration) DeepCopy() *AuditPolicyConfiguration { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *Etcd) DeepCopyInto(out *Etcd) { *out = *in - if in.Endpoints != nil { - in, out := &in.Endpoints, &out.Endpoints - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExtraArgs != nil { - in, out := &in.ExtraArgs, &out.ExtraArgs - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val + if in.Local != nil { + in, out := &in.Local, &out.Local + if *in == nil { + *out = nil + } else { + *out = new(LocalEtcd) + (*in).DeepCopyInto(*out) } } - if in.ServerCertSANs != nil { - in, out := &in.ServerCertSANs, &out.ServerCertSANs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PeerCertSANs != nil { - in, out := &in.PeerCertSANs, &out.PeerCertSANs - *out = make([]string, len(*in)) - copy(*out, *in) + if in.External != nil { + in, out := &in.External, &out.External + if *in == nil { + *out = nil + } else { + *out = new(ExternalEtcd) + (*in).DeepCopyInto(*out) + } } return } @@ -106,6 +102,27 @@ func (in *Etcd) DeepCopy() *Etcd { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEtcd. +func (in *ExternalEtcd) DeepCopy() *ExternalEtcd { + if in == nil { + return nil + } + out := new(ExternalEtcd) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HostPathMount) DeepCopyInto(out *HostPathMount) { *out = *in @@ -172,6 +189,39 @@ func (in *KubeletConfiguration) DeepCopy() *KubeletConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalEtcd) DeepCopyInto(out *LocalEtcd) { + *out = *in + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServerCertSANs != nil { + in, out := &in.ServerCertSANs, &out.ServerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PeerCertSANs != nil { + in, out := &in.PeerCertSANs, &out.PeerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalEtcd. +func (in *LocalEtcd) DeepCopy() *LocalEtcd { + if in == nil { + return nil + } + out := new(LocalEtcd) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MasterConfiguration) DeepCopyInto(out *MasterConfiguration) { *out = *in diff --git a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go index 0e080e0323d..ce680355710 100644 --- a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go @@ -71,27 +71,23 @@ func (in *AuditPolicyConfiguration) DeepCopy() *AuditPolicyConfiguration { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Etcd) DeepCopyInto(out *Etcd) { *out = *in - if in.Endpoints != nil { - in, out := &in.Endpoints, &out.Endpoints - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExtraArgs != nil { - in, out := &in.ExtraArgs, &out.ExtraArgs - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val + if in.Local != nil { + in, out := &in.Local, &out.Local + if *in == nil { + *out = nil + } else { + *out = new(LocalEtcd) + (*in).DeepCopyInto(*out) } } - if in.ServerCertSANs != nil { - in, out := &in.ServerCertSANs, &out.ServerCertSANs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PeerCertSANs != nil { - in, out := &in.PeerCertSANs, &out.PeerCertSANs - *out = make([]string, len(*in)) - copy(*out, *in) + if in.External != nil { + in, out := &in.External, &out.External + if *in == nil { + *out = nil + } else { + *out = new(ExternalEtcd) + (*in).DeepCopyInto(*out) + } } return } @@ -106,6 +102,27 @@ func (in *Etcd) DeepCopy() *Etcd { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) { + *out = *in + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEtcd. +func (in *ExternalEtcd) DeepCopy() *ExternalEtcd { + if in == nil { + return nil + } + out := new(ExternalEtcd) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HostPathMount) DeepCopyInto(out *HostPathMount) { *out = *in @@ -172,6 +189,39 @@ func (in *KubeletConfiguration) DeepCopy() *KubeletConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalEtcd) DeepCopyInto(out *LocalEtcd) { + *out = *in + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServerCertSANs != nil { + in, out := &in.ServerCertSANs, &out.ServerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PeerCertSANs != nil { + in, out := &in.PeerCertSANs, &out.PeerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalEtcd. +func (in *LocalEtcd) DeepCopy() *LocalEtcd { + if in == nil { + return nil + } + out := new(LocalEtcd) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MasterConfiguration) DeepCopyInto(out *MasterConfiguration) { *out = *in From 2de996856f1d570e40305689a1e6da9adc9b6919 Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Fri, 27 Apr 2018 18:02:04 -0700 Subject: [PATCH 139/307] Do not use DeepEqual to compare slices in test. This wraps DeepEqual with a helper that considers nil slices and empty slices to be equal. Scheduler code might use a nil slice or empty slice to represent an empty list, so tests should not be sensitive to the difference. Tests could fail because DeepEqual considers nil to be different from an empty slice. 
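For illustration only (this snippet is not part of the patch): the commit message's point is that reflect.DeepEqual reports a nil slice and an empty, non-nil slice as unequal. The helper added in this patch operates on []algorithm.PredicateFailureReason; the standalone sketch below shows the same idea over plain strings.

package main

import (
	"fmt"
	"reflect"
)

// slicesEqual treats a nil slice and an empty slice as equal, and otherwise
// defers to reflect.DeepEqual -- mirroring the helper added in this patch.
func slicesEqual(a, b []string) bool {
	if len(a) == 0 && len(b) == 0 {
		return true
	}
	return reflect.DeepEqual(a, b)
}

func main() {
	var nilReasons []string    // nil slice: "no failure reasons recorded"
	emptyReasons := []string{} // empty but non-nil slice

	fmt.Println(reflect.DeepEqual(nilReasons, emptyReasons)) // false: DeepEqual distinguishes them
	fmt.Println(slicesEqual(nilReasons, emptyReasons))       // true: the wrapper does not
}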
--- pkg/scheduler/core/equivalence_cache_test.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pkg/scheduler/core/equivalence_cache_test.go b/pkg/scheduler/core/equivalence_cache_test.go index 9654515c2b4..de5756bc2b2 100644 --- a/pkg/scheduler/core/equivalence_cache_test.go +++ b/pkg/scheduler/core/equivalence_cache_test.go @@ -378,6 +378,14 @@ func TestUpdateResult(t *testing.T) { } } +// slicesEqual wraps reflect.DeepEqual, but returns true when comparing nil and empty slice. +func slicesEqual(a, b []algorithm.PredicateFailureReason) bool { + if len(a) == 0 && len(b) == 0 { + return true + } + return reflect.DeepEqual(a, b) +} + func TestLookupResult(t *testing.T) { tests := []struct { name string @@ -504,9 +512,9 @@ func TestLookupResult(t *testing.T) { if fit != test.expectedPredicateItem.fit { t.Errorf("Failed: %s, expected fit: %v, but got: %v", test.name, test.cachedItem.fit, fit) } - if !reflect.DeepEqual(reasons, test.expectedPredicateItem.reasons) { + if !slicesEqual(reasons, test.expectedPredicateItem.reasons) { t.Errorf("Failed: %s, expected reasons: %v, but got: %v", - test.name, test.cachedItem.reasons, reasons) + test.name, test.expectedPredicateItem.reasons, reasons) } } } From 7288e8828f5bf659d41e71760f9d9923e405e2e4 Mon Sep 17 00:00:00 2001 From: David Eads Date: Wed, 23 May 2018 15:00:37 -0400 Subject: [PATCH 140/307] prevent zero for leader election timeouts --- .../client-go/tools/leaderelection/leaderelection.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go index e41b420c987..aed55574a8f 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -74,6 +74,16 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) { return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor") } + if lec.LeaseDuration < 1 { + return nil, fmt.Errorf("leaseDuration must be greater than zero") + } + if lec.RenewDeadline < 1 { + return nil, fmt.Errorf("renewDeadline must be greater than zero") + } + if lec.RetryPeriod < 1 { + return nil, fmt.Errorf("retryPeriod must be greater than zero") + } + if lec.Lock == nil { return nil, fmt.Errorf("Lock must not be nil.") } From dca376a03ef43df2714eafd431a76646346c9094 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Wed, 23 May 2018 12:03:27 -0700 Subject: [PATCH 141/307] Add KUBE_CGO_OVERRIDES env var to force enabling CGO --- hack/lib/golang.sh | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 9f229168a8a..88e0a8f6520 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -213,12 +213,24 @@ readonly KUBE_STATIC_LIBRARIES=( kubectl ) +# KUBE_CGO_OVERRIDES is a space-separated list of binaries which should be built +# with CGO enabled, assuming CGO is supported on the target platform. +# This overrides any entry in KUBE_STATIC_LIBRARIES. +IFS=" " read -ra KUBE_CGO_OVERRIDES <<< "${KUBE_CGO_OVERRIDES:-}" +readonly KUBE_CGO_OVERRIDES +# KUBE_STATIC_OVERRIDES is a space-separated list of binaries which should be +# built with CGO disabled. This is in addition to the list in +# KUBE_STATIC_LIBRARIES. 
+IFS=" " read -ra KUBE_STATIC_OVERRIDES <<< "${KUBE_STATIC_OVERRIDES:-}" +readonly KUBE_STATIC_OVERRIDES + kube::golang::is_statically_linked_library() { local e + if [[ -n "${KUBE_CGO_OVERRIDES:+x}" ]]; then + for e in "${KUBE_CGO_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 1; done; + fi for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done; - # Allow individual overrides--e.g., so that you can get a static build of - # kubectl for inclusion in a container. - if [ -n "${KUBE_STATIC_OVERRIDES:+x}" ]; then + if [[ -n "${KUBE_STATIC_OVERRIDES:+x}" ]]; then for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done; fi return 1; From e4ded2b3ecb3084bfb6111e81b2741bead8ccc14 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Wed, 23 May 2018 12:23:00 -0700 Subject: [PATCH 142/307] Explictly enable cgo when building kubectl for darwin from darwin --- hack/lib/golang.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 88e0a8f6520..c5d463483bf 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -226,6 +226,9 @@ readonly KUBE_STATIC_OVERRIDES kube::golang::is_statically_linked_library() { local e + # Explicitly enable cgo when building kubectl for darwin from darwin. + [[ "$(go env GOHOSTOS)" == "darwin" && "$(go env GOOS)" == "darwin" && + "$1" == *"/kubectl" ]] && return 1 if [[ -n "${KUBE_CGO_OVERRIDES:+x}" ]]; then for e in "${KUBE_CGO_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 1; done; fi From 818147d6fb474c5d7a0c614b55827b90a80429aa Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Wed, 23 May 2018 16:39:43 +0200 Subject: [PATCH 143/307] apiextensions: make CreateNewCustomResourceDefinition return created CRD --- .../test/integration/basic_test.go | 30 +++++++++---------- .../test/integration/finalization_test.go | 4 +-- .../test/integration/registration_test.go | 14 ++++----- .../test/integration/subresources_test.go | 18 +++++------ .../test/integration/testserver/resources.go | 18 +++++------ .../test/integration/validation_test.go | 18 +++++------ .../test/integration/versioning_test.go | 6 ++-- .../test/integration/yaml_test.go | 4 +-- test/e2e/apimachinery/crd_watch.go | 2 +- .../custom_resource_definition.go | 2 +- test/e2e/apimachinery/garbage_collector.go | 2 +- test/e2e/auth/audit.go | 2 +- test/e2e/framework/crd_util.go | 2 +- .../garbage_collector_test.go | 2 +- 14 files changed, 62 insertions(+), 62 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go index a4a93d8f084..39e8e6080fb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go @@ -51,7 +51,7 @@ func TestNamespaceScopedCRUD(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -70,7 +70,7 @@ func TestClusterScopedCRUD(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, 
apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -215,7 +215,7 @@ func testFieldSelector(t *testing.T, ns string, noxuDefinition *apiextensionsv1b if err != nil { t.Fatal(err) } - if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Version, initialListTypeMeta.GetAPIVersion(); e != a { + if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Versions[0].Name, initialListTypeMeta.GetAPIVersion(); e != a { t.Errorf("expected %v, got %v", e, a) } if e, a := noxuDefinition.Spec.Names.ListKind, initialListTypeMeta.GetKind(); e != a { @@ -270,7 +270,7 @@ func testFieldSelector(t *testing.T, ns string, noxuDefinition *apiextensionsv1b if err != nil { t.Fatal(err) } - if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Version, createdTypeMeta.GetAPIVersion(); e != a { + if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Versions[0].Name, createdTypeMeta.GetAPIVersion(); e != a { t.Errorf("expected %v, got %v", e, a) } if e, a := noxuDefinition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { @@ -357,7 +357,7 @@ func TestDiscovery(t *testing.T) { scope := apiextensionsv1beta1.NamespaceScoped noxuDefinition := testserver.NewNoxuCustomResourceDefinition(scope) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -404,7 +404,7 @@ func TestNoNamespaceReject(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -443,7 +443,7 @@ func TestSameNameDiffNamespace(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -464,7 +464,7 @@ func TestSelfLink(t *testing.T) { // namespace scoped noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -484,7 +484,7 @@ func TestSelfLink(t *testing.T) { // cluster scoped curletDefinition := testserver.NewCurletCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(curletDefinition, apiExtensionClient, dynamicClient) + curletDefinition, err = testserver.CreateNewCustomResourceDefinition(curletDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -510,7 +510,7 @@ func TestPreserveInt(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = 
testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -552,7 +552,7 @@ func TestPatch(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -612,7 +612,7 @@ func TestCrossNamespaceListWatch(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -748,13 +748,13 @@ func TestNameConflict(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } noxu2Definition := testserver.NewNoxu2CustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - _, err = apiExtensionClient.Apiextensions().CustomResourceDefinitions().Create(noxu2Definition) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(noxu2Definition) if err != nil { t.Fatal(err) } @@ -809,7 +809,7 @@ func TestStatusGetAndPatch(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go index fc62c5377e0..ae456713abd 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go @@ -36,7 +36,7 @@ func TestFinalization(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) require.NoError(t, err) ns := "not-the-default" @@ -100,7 +100,7 @@ func TestFinalizationAndDeletion(t *testing.T) { // Create a CRD. 
noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) require.NoError(t, err) // Create a CR with a finalizer. diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go index 9cdb3393f05..302559d59b5 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go @@ -122,7 +122,7 @@ func TestMultipleResourceInstances(t *testing.T) { ns := "not-the-default" noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -247,7 +247,7 @@ func TestMultipleRegistration(t *testing.T) { ns := "not-the-default" sameInstanceName := "foo" noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -266,7 +266,7 @@ func TestMultipleRegistration(t *testing.T) { } curletDefinition := testserver.NewCurletCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(curletDefinition, apiExtensionClient, dynamicClient) + curletDefinition, err = testserver.CreateNewCustomResourceDefinition(curletDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -303,7 +303,7 @@ func TestDeRegistrationAndReRegistration(t *testing.T) { ns := "not-the-default" sameInstanceName := "foo" func() { - err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -329,7 +329,7 @@ func TestDeRegistrationAndReRegistration(t *testing.T) { if _, err := testserver.GetCustomResourceDefinition(noxuDefinition, apiExtensionClient); err == nil || !errors.IsNotFound(err) { t.Fatalf("expected a NotFound error, got:%v", err) } - err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -406,7 +406,7 @@ func TestEtcdStorage(t *testing.T) { ns1 := "another-default-is-possible" curletDefinition := testserver.NewCurletCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(curletDefinition, apiExtensionClient, dynamicClient) + curletDefinition, err = testserver.CreateNewCustomResourceDefinition(curletDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -417,7 +417,7 @@ func TestEtcdStorage(t *testing.T) { ns2 := 
"the-cruel-default" noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go index 00fa47ad762..770ead2872e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go @@ -92,7 +92,7 @@ func TestStatusSubresource(t *testing.T) { defer close(stopCh) noxuDefinition := NewNoxuSubresourcesCRD(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -215,13 +215,13 @@ func TestScaleSubresource(t *testing.T) { // set invalid json path for specReplicasPath noxuDefinition.Spec.Subresources.Scale.SpecReplicasPath = "foo,bar" - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatalf("unexpected non-error: specReplicasPath should be a valid json path under .spec") } noxuDefinition.Spec.Subresources.Scale.SpecReplicasPath = ".spec.replicas" - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -342,7 +342,7 @@ func TestValidationSchema(t *testing.T) { noxuDefinition.Spec.Subresources = &apiextensionsv1beta1.CustomResourceSubresources{ Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{}, } - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatalf(`unexpected non-error, expected: must only have "properties" or "required" at the root if the status subresource is enabled`) } @@ -361,7 +361,7 @@ func TestValidationSchema(t *testing.T) { }, Required: []string{"spec"}, } - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatalf("unable to created crd %v: %v", noxuDefinition.Name, err) } @@ -407,7 +407,7 @@ func TestValidateOnlyStatus(t *testing.T) { OpenAPIV3Schema: schema, } - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -471,7 +471,7 @@ func TestSubresourcesDiscovery(t *testing.T) { } noxuDefinition := NewNoxuSubresourcesCRD(apiextensionsv1beta1.NamespaceScoped) - err = 
testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -546,7 +546,7 @@ func TestGeneration(t *testing.T) { defer close(stopCh) noxuDefinition := NewNoxuSubresourcesCRD(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -625,7 +625,7 @@ func TestSubresourcePatch(t *testing.T) { } noxuDefinition := NewNoxuSubresourcesCRD(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go index 2876bdc6170..b84d623dd81 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go @@ -211,10 +211,10 @@ func NewCurletInstance(namespace, name string) *unstructured.Unstructured { // the apiextension apiserver has installed the CRD. But it's not safe to watch // the created CR. Please call CreateNewCustomResourceDefinition if you need to // watch the CR. -func CreateNewCustomResourceDefinitionWatchUnsafe(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { - _, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) +func CreateNewCustomResourceDefinitionWatchUnsafe(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) { + crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) if err != nil { - return err + return nil, err } // wait until the resource appears in discovery @@ -231,13 +231,13 @@ func CreateNewCustomResourceDefinitionWatchUnsafe(crd *apiextensionsv1beta1.Cust return false, nil }) - return err + return crd, err } -func CreateNewCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface, dynamicClientSet dynamic.Interface) error { - err := CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionsClient) +func CreateNewCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface, dynamicClientSet dynamic.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) { + crd, err := CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionsClient) if err != nil { - return err + return nil, err } // This is only for a test. We need the watch cache to have a resource version that works for the test. 
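As a usage sketch (assuming the testserver fixtures used throughout these tests; this fragment is not part of the patch itself): callers now receive the CRD as persisted by the API server, so server-defaulted fields such as Spec.Versions can be read back without an extra GET.

// Hypothetical test fragment; imports mirror those of the test files above.
noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped)
noxuDefinition, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
	t.Fatal(err)
}
// The returned object carries server-populated defaults, which is what allows
// assertions like the Spec.Versions[0].Name comparisons seen earlier in this patch.
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
_ = apiVersion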
@@ -259,10 +259,10 @@ func CreateNewCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceD return false, nil }) if primingErr != nil { - return primingErr + return nil, primingErr } - return nil + return crd, nil } func checkForWatchCachePrimed(crd *apiextensionsv1beta1.CustomResourceDefinition, dynamicClientSet dynamic.Interface) error { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go index bf85a41b98d..ac7b608ad23 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go @@ -38,7 +38,7 @@ func TestForProperValidationErrors(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -176,7 +176,7 @@ func TestCustomResourceValidation(t *testing.T) { defer close(stopCh) noxuDefinition := newNoxuValidationCRD(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -197,7 +197,7 @@ func TestCustomResourceUpdateValidation(t *testing.T) { defer close(stopCh) noxuDefinition := newNoxuValidationCRD(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -240,7 +240,7 @@ func TestCustomResourceValidationErrors(t *testing.T) { defer close(stopCh) noxuDefinition := newNoxuValidationCRD(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -335,7 +335,7 @@ func TestCRValidationOnCRDUpdate(t *testing.T) { // set stricter schema noxuDefinition.Spec.Validation.OpenAPIV3Schema.Required = []string{"alpha", "beta", "epsilon"} - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -384,7 +384,7 @@ func TestForbiddenFieldsInSchema(t *testing.T) { noxuDefinition := newNoxuValidationCRD(apiextensionsv1beta1.NamespaceScoped) noxuDefinition.Spec.Validation.OpenAPIV3Schema.AdditionalProperties.Allows = false - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatalf("unexpected non-error: additionalProperties cannot be set to false") } @@ -395,7 +395,7 @@ func TestForbiddenFieldsInSchema(t *testing.T) { } 
noxuDefinition.Spec.Validation.OpenAPIV3Schema.AdditionalProperties.Allows = true - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatalf("unexpected non-error: uniqueItems cannot be set to true") } @@ -406,14 +406,14 @@ func TestForbiddenFieldsInSchema(t *testing.T) { UniqueItems: false, } - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatal("unexpected non-error: $ref cannot be non-empty string") } noxuDefinition.Spec.Validation.OpenAPIV3Schema.Ref = nil - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go index a4dbb0d8ce5..62d59f5dfd1 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go @@ -37,7 +37,7 @@ func TestVersionedNamspacedScopedCRD(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewMultipleVersionNoxuCRD(apiextensionsv1beta1.NamespaceScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -54,7 +54,7 @@ func TestVersionedClusterScopedCRD(t *testing.T) { defer close(stopCh) noxuDefinition := testserver.NewMultipleVersionNoxuCRD(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ -107,7 +107,7 @@ func testStoragedVersionInCRDStatus(t *testing.T, ns string, noxuDefinition *api defer close(stopCh) noxuDefinition.Spec.Versions = versionsV1Beta1Storage - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go index 09564304d11..2dc27955943 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/yaml_test.go @@ -51,7 +51,7 @@ func TestYAML(t *testing.T) { } noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } @@ 
-370,7 +370,7 @@ func TestYAMLSubresource(t *testing.T) { } noxuDefinition := NewNoxuSubresourcesCRD(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } diff --git a/test/e2e/apimachinery/crd_watch.go b/test/e2e/apimachinery/crd_watch.go index a101f549256..1f0ea85e3fb 100644 --- a/test/e2e/apimachinery/crd_watch.go +++ b/test/e2e/apimachinery/crd_watch.go @@ -64,7 +64,7 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() { } noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped) - err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient) + noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient) if err != nil { framework.Failf("failed to create CustomResourceDefinition: %v", err) } diff --git a/test/e2e/apimachinery/custom_resource_definition.go b/test/e2e/apimachinery/custom_resource_definition.go index e16e8607778..18f19b6847b 100644 --- a/test/e2e/apimachinery/custom_resource_definition.go +++ b/test/e2e/apimachinery/custom_resource_definition.go @@ -55,7 +55,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() { randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped) //create CRD and waits for the resource to be recognized and available. - err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient) + randomDefinition, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient) if err != nil { framework.Failf("failed to create CustomResourceDefinition: %v", err) } diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index d8a37575618..f9e3c74938c 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -917,7 +917,7 @@ var _ = SIGDescribe("Garbage collector", func() { framework.Failf("failed to delete CustomResourceDefinition: %v", err) } }() - err = apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, f.DynamicClient) + definition, err = apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, f.DynamicClient) if err != nil { framework.Failf("failed to create CustomResourceDefinition: %v", err) } diff --git a/test/e2e/auth/audit.go b/test/e2e/auth/audit.go index 80a4bbc2889..e45694624ab 100644 --- a/test/e2e/auth/audit.go +++ b/test/e2e/auth/audit.go @@ -619,7 +619,7 @@ var _ = SIGDescribe("Advanced Audit", func() { // Create and delete custom resource definition. 
{ func() { - err = testserver.CreateNewCustomResourceDefinition(crd, apiExtensionClient, f.DynamicClient) + crd, err = testserver.CreateNewCustomResourceDefinition(crd, apiExtensionClient, f.DynamicClient) framework.ExpectNoError(err, "failed to create custom resource definition") testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient) }, diff --git a/test/e2e/framework/crd_util.go b/test/e2e/framework/crd_util.go index 4a4acc8cb71..f7db7daef4b 100644 --- a/test/e2e/framework/crd_util.go +++ b/test/e2e/framework/crd_util.go @@ -76,7 +76,7 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) { crd := newCRDForTest(testcrd) //create CRD and waits for the resource to be recognized and available. - err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient) + crd, err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient) if err != nil { Failf("failed to create CustomResourceDefinition: %v", err) return nil, err diff --git a/test/integration/garbagecollector/garbage_collector_test.go b/test/integration/garbagecollector/garbage_collector_test.go index b5dace145b1..a6fce7efd3a 100644 --- a/test/integration/garbagecollector/garbage_collector_test.go +++ b/test/integration/garbagecollector/garbage_collector_test.go @@ -175,7 +175,7 @@ func createRandomCustomResourceDefinition( // use. definition := apiextensionstestserver.NewRandomNameCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped) - err := apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, dynamicClient) + definition, err := apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, dynamicClient) if err != nil { t.Fatalf("failed to create CustomResourceDefinition: %v", err) } From e341803d4f9855ad92b9db0d714fea0e3d391f81 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Wed, 23 May 2018 16:21:58 +0200 Subject: [PATCH 144/307] apiextensions: reduce verbose logs in removeDeadStorage We don't need one log line per version. --- .../pkg/apiserver/customresource_handler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 83706c4b2a6..20231bdf866 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -349,8 +349,8 @@ func (r *crdHandler) removeDeadStorage() { } } if !found { - for version, storage := range s.storages { - glog.V(4).Infof("Removing dead CRD storage for %v", s.requestScopes[version].Resource) + glog.V(4).Infof("Removing dead CRD storage for %s/%s", s.spec.Group, s.spec.Names.Kind) + for _, storage := range s.storages { // destroy only the main storage. Those for the subresources share cacher and etcd clients. 
storage.CustomResource.DestroyFunc() } From 7653fb68878461824ebd37e8c202278d071755e6 Mon Sep 17 00:00:00 2001 From: Cheng Xing Date: Wed, 23 May 2018 12:47:24 -0700 Subject: [PATCH 145/307] Moving Regional PD e2e tests to regular test suites --- test/e2e/storage/regional_pd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index c478a386a67..ff3c3858035 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -44,7 +44,7 @@ const ( statefulSetReadyTimeout = 3 * time.Minute ) -var _ = utils.SIGDescribe("Regional PD [Feature:RegionalPD]", func() { +var _ = utils.SIGDescribe("Regional PD", func() { f := framework.NewDefaultFramework("regional-pd") // filled in BeforeEach From 1816d4eca4999dd3e141bfa1e1dd15b6847f9f80 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Mon, 21 May 2018 20:14:17 -0700 Subject: [PATCH 146/307] conformance: normalize the test names To detect whether conformance tests have been changed, the test argument with the `ConformanceIt` is recorded to conformance.txt. This change remove additional tags (e.g., [Feature:]) and trim spaces, so that the detection is less-prone to noise. This change also updates conformance.txt with normalized names. --- test/conformance/testdata/conformance.txt | 206 +++++++++++----------- test/conformance/walk.go | 15 +- test/conformance/walk_test.go | 22 +++ 3 files changed, 139 insertions(+), 104 deletions(-) diff --git a/test/conformance/testdata/conformance.txt b/test/conformance/testdata/conformance.txt index 7c2fb28ba8f..9411ea594f1 100755 --- a/test/conformance/testdata/conformance.txt +++ b/test/conformance/testdata/conformance.txt @@ -1,4 +1,4 @@ -test/e2e/apimachinery/custom_resource_definition.go: "creating/deleting custom resource definition objects works " +test/e2e/apimachinery/custom_resource_definition.go: "creating/deleting custom resource definition objects works" test/e2e/apimachinery/garbage_collector.go: "should delete pods created by rc when not orphaning" test/e2e/apimachinery/garbage_collector.go: "should orphan pods created by rc if delete options say so" test/e2e/apimachinery/garbage_collector.go: "should delete RS created by deployment when not orphaning" @@ -11,53 +11,53 @@ test/e2e/apps/daemon_set.go: "should run and stop complex daemon" test/e2e/apps/daemon_set.go: "should retry creating failed daemon pods" test/e2e/apps/daemon_set.go: "should update pod when spec was updated and update strategy is RollingUpdate" test/e2e/apps/daemon_set.go: "should rollback without unnecessary restarts" -test/e2e/apps/rc.go: "should serve a basic image on each replica with a public image " -test/e2e/apps/replica_set.go: "should serve a basic image on each replica with a public image " +test/e2e/apps/rc.go: "should serve a basic image on each replica with a public image" +test/e2e/apps/replica_set.go: "should serve a basic image on each replica with a public image" test/e2e/apps/statefulset.go: "should perform rolling updates and roll backs of template modifications" test/e2e/apps/statefulset.go: "should perform canary updates and phased rolling updates of template modifications" test/e2e/apps/statefulset.go: "Scaling should happen in predictable order and halt if any stateful pod is unhealthy" test/e2e/apps/statefulset.go: "Burst scaling should run to completion even with unhealthy pods" test/e2e/apps/statefulset.go: "Should recreate evicted statefulset" -test/e2e/auth/service_accounts.go: "should mount an API token into 
pods " -test/e2e/auth/service_accounts.go: "should allow opting out of API token automount " -test/e2e/common/configmap.go: "should be consumable via environment variable " -test/e2e/common/configmap.go: "should be consumable via the environment " -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume " -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with defaultMode set " -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume as non-root " -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings " +test/e2e/auth/service_accounts.go: "should mount an API token into pods" +test/e2e/auth/service_accounts.go: "should allow opting out of API token automount" +test/e2e/common/configmap.go: "should be consumable via environment variable" +test/e2e/common/configmap.go: "should be consumable via the environment" +test/e2e/common/configmap_volume.go: "should be consumable from pods in volume" +test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with defaultMode set" +test/e2e/common/configmap_volume.go: "should be consumable from pods in volume as non-root" +test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings" test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings and Item mode set" -test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings as non-root " -test/e2e/common/configmap_volume.go: "updates should be reflected in volume " -test/e2e/common/configmap_volume.go: "optional updates should be reflected in volume " -test/e2e/common/configmap_volume.go: "should be consumable in multiple volumes in the same pod " -test/e2e/common/container_probe.go: "with readiness probe should not be ready before initial delay and never restart " -test/e2e/common/container_probe.go: "with readiness probe that fails should never be ready and never restart " -test/e2e/common/container_probe.go: "should be restarted with a exec \"cat /tmp/health\" liveness probe" -test/e2e/common/container_probe.go: "should *not* be restarted with a exec \"cat /tmp/health\" liveness probe" -test/e2e/common/container_probe.go: "should be restarted with a /healthz http liveness probe " -test/e2e/common/container_probe.go: "should have monotonically increasing restart count [Slow]" -test/e2e/common/container_probe.go: "should *not* be restarted with a /healthz http liveness probe " -test/e2e/common/docker_containers.go: "should use the image defaults if command and args are blank " -test/e2e/common/docker_containers.go: "should be able to override the image's default arguments (docker cmd) " -test/e2e/common/docker_containers.go: "should be able to override the image's default command (docker entrypoint) " -test/e2e/common/docker_containers.go: "should be able to override the image's default command and arguments " -test/e2e/common/downward_api.go: "should provide pod name, namespace and IP address as env vars " -test/e2e/common/downward_api.go: "should provide host IP as an env var " -test/e2e/common/downward_api.go: "should provide container's limits.cpu/memory and requests.cpu/memory as env vars " -test/e2e/common/downward_api.go: "should provide default limits.cpu/memory from node allocatable " -test/e2e/common/downward_api.go: "should provide pod UID as env vars " -test/e2e/common/downwardapi_volume.go: "should provide podname only " -test/e2e/common/downwardapi_volume.go: 
"should set DefaultMode on files " -test/e2e/common/downwardapi_volume.go: "should set mode on item file " -test/e2e/common/downwardapi_volume.go: "should update labels on modification " -test/e2e/common/downwardapi_volume.go: "should update annotations on modification " -test/e2e/common/downwardapi_volume.go: "should provide container's cpu limit " -test/e2e/common/downwardapi_volume.go: "should provide container's memory limit " -test/e2e/common/downwardapi_volume.go: "should provide container's cpu request " -test/e2e/common/downwardapi_volume.go: "should provide container's memory request " -test/e2e/common/downwardapi_volume.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set " -test/e2e/common/downwardapi_volume.go: "should provide node allocatable (memory) as default memory limit if the limit is not set " +test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings as non-root" +test/e2e/common/configmap_volume.go: "updates should be reflected in volume" +test/e2e/common/configmap_volume.go: "optional updates should be reflected in volume" +test/e2e/common/configmap_volume.go: "should be consumable in multiple volumes in the same pod" +test/e2e/common/container_probe.go: "with readiness probe should not be ready before initial delay and never restart" +test/e2e/common/container_probe.go: "with readiness probe that fails should never be ready and never restart" +test/e2e/common/container_probe.go: "should be restarted with a exec \\\"cat /tmp/health\\\" liveness probe" +test/e2e/common/container_probe.go: "should *not* be restarted with a exec \\\"cat /tmp/health\\\" liveness probe" +test/e2e/common/container_probe.go: "should be restarted with a /healthz http liveness probe" +test/e2e/common/container_probe.go: "should have monotonically increasing restart count" +test/e2e/common/container_probe.go: "should *not* be restarted with a /healthz http liveness probe" +test/e2e/common/docker_containers.go: "should use the image defaults if command and args are blank" +test/e2e/common/docker_containers.go: "should be able to override the image's default arguments (docker cmd)" +test/e2e/common/docker_containers.go: "should be able to override the image's default command (docker entrypoint)" +test/e2e/common/docker_containers.go: "should be able to override the image's default command and arguments" +test/e2e/common/downward_api.go: "should provide pod name, namespace and IP address as env vars" +test/e2e/common/downward_api.go: "should provide host IP as an env var" +test/e2e/common/downward_api.go: "should provide container's limits.cpu/memory and requests.cpu/memory as env vars" +test/e2e/common/downward_api.go: "should provide default limits.cpu/memory from node allocatable" +test/e2e/common/downward_api.go: "should provide pod UID as env vars" +test/e2e/common/downwardapi_volume.go: "should provide podname only" +test/e2e/common/downwardapi_volume.go: "should set DefaultMode on files" +test/e2e/common/downwardapi_volume.go: "should set mode on item file" +test/e2e/common/downwardapi_volume.go: "should update labels on modification" +test/e2e/common/downwardapi_volume.go: "should update annotations on modification" +test/e2e/common/downwardapi_volume.go: "should provide container's cpu limit" +test/e2e/common/downwardapi_volume.go: "should provide container's memory limit" +test/e2e/common/downwardapi_volume.go: "should provide container's cpu request" +test/e2e/common/downwardapi_volume.go: "should provide container's 
memory request" +test/e2e/common/downwardapi_volume.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set" +test/e2e/common/downwardapi_volume.go: "should provide node allocatable (memory) as default memory limit if the limit is not set" test/e2e/common/empty_dir.go: "volume on tmpfs should have the correct mode" test/e2e/common/empty_dir.go: "should support (root,0644,tmpfs)" test/e2e/common/empty_dir.go: "should support (root,0666,tmpfs)" @@ -72,20 +72,20 @@ test/e2e/common/empty_dir.go: "should support (root,0777,default)" test/e2e/common/empty_dir.go: "should support (non-root,0644,default)" test/e2e/common/empty_dir.go: "should support (non-root,0666,default)" test/e2e/common/empty_dir.go: "should support (non-root,0777,default)" -test/e2e/common/expansion.go: "should allow composing env vars into new env vars " -test/e2e/common/expansion.go: "should allow substituting values in a container's command " -test/e2e/common/expansion.go: "should allow substituting values in a container's args " +test/e2e/common/expansion.go: "should allow composing env vars into new env vars" +test/e2e/common/expansion.go: "should allow substituting values in a container's command" +test/e2e/common/expansion.go: "should allow substituting values in a container's args" test/e2e/common/host_path.go: "should give a volume the correct mode" -test/e2e/common/kubelet_etc_hosts.go: "should test kubelet managed /etc/hosts file " -test/e2e/common/networking.go: "should function for intra-pod communication: http " -test/e2e/common/networking.go: "should function for intra-pod communication: udp " -test/e2e/common/networking.go: "should function for node-pod communication: http " -test/e2e/common/networking.go: "should function for node-pod communication: udp " -test/e2e/common/pods.go: "should get a host IP " -test/e2e/common/pods.go: "should be submitted and removed " -test/e2e/common/pods.go: "should be updated " -test/e2e/common/pods.go: "should allow activeDeadlineSeconds to be updated " -test/e2e/common/pods.go: "should contain environment variables for services " +test/e2e/common/kubelet_etc_hosts.go: "should test kubelet managed /etc/hosts file" +test/e2e/common/networking.go: "should function for intra-pod communication: http" +test/e2e/common/networking.go: "should function for intra-pod communication: udp" +test/e2e/common/networking.go: "should function for node-pod communication: http" +test/e2e/common/networking.go: "should function for node-pod communication: udp" +test/e2e/common/pods.go: "should get a host IP" +test/e2e/common/pods.go: "should be submitted and removed" +test/e2e/common/pods.go: "should be updated" +test/e2e/common/pods.go: "should allow activeDeadlineSeconds to be updated" +test/e2e/common/pods.go: "should contain environment variables for services" test/e2e/common/projected.go: "should be consumable from pods in volume" test/e2e/common/projected.go: "should be consumable from pods in volume with defaultMode set" test/e2e/common/projected.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set" @@ -113,54 +113,54 @@ test/e2e/common/projected.go: "should provide container's cpu request" test/e2e/common/projected.go: "should provide container's memory request" test/e2e/common/projected.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set" test/e2e/common/projected.go: "should provide node allocatable (memory) as default memory limit if the limit is not set" 
-test/e2e/common/projected.go: "should project all components that make up the projection API [Projection]" -test/e2e/common/secrets.go: "should be consumable from pods in env vars " -test/e2e/common/secrets.go: "should be consumable via the environment " -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume " -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with defaultMode set " -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set " -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings " -test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings and Item Mode set " -test/e2e/common/secrets_volume.go: "should be consumable in multiple volumes in a pod " -test/e2e/common/secrets_volume.go: "optional updates should be reflected in volume " -test/e2e/kubectl/kubectl.go: "should create and stop a replication controller " -test/e2e/kubectl/kubectl.go: "should scale a replication controller " -test/e2e/kubectl/kubectl.go: "should do a rolling update of a replication controller " -test/e2e/kubectl/kubectl.go: "should create and stop a working application " -test/e2e/kubectl/kubectl.go: "should check if v1 is in available api versions " -test/e2e/kubectl/kubectl.go: "should check if Kubernetes master services is included in cluster-info " -test/e2e/kubectl/kubectl.go: "should check if kubectl describe prints relevant information for rc and pods " -test/e2e/kubectl/kubectl.go: "should create services for rc " -test/e2e/kubectl/kubectl.go: "should update the label on a resource " -test/e2e/kubectl/kubectl.go: "should be able to retrieve and filter logs " -test/e2e/kubectl/kubectl.go: "should add annotations for pods in rc " -test/e2e/kubectl/kubectl.go: "should check is all data is printed " -test/e2e/kubectl/kubectl.go: "should create an rc or deployment from an image " -test/e2e/kubectl/kubectl.go: "should create an rc from an image " -test/e2e/kubectl/kubectl.go: "should support rolling-update to same image " -test/e2e/kubectl/kubectl.go: "should create a deployment from an image " -test/e2e/kubectl/kubectl.go: "should create a job from an image when restart is OnFailure " -test/e2e/kubectl/kubectl.go: "should create a pod from an image when restart is Never " -test/e2e/kubectl/kubectl.go: "should update a single-container pod's image " -test/e2e/kubectl/kubectl.go: "should create a job from an image, then delete the job " -test/e2e/kubectl/kubectl.go: "should support proxy with --port 0 " -test/e2e/kubectl/kubectl.go: "should support --unix-socket=/path " -test/e2e/network/dns.go: "should provide DNS for the cluster " -test/e2e/network/dns.go: "should provide DNS for services " -test/e2e/network/proxy.go: "should proxy logs on node with explicit kubelet port using proxy subresource " -test/e2e/network/proxy.go: "should proxy logs on node using proxy subresource " -test/e2e/network/proxy.go: "should proxy through a service and a pod " -test/e2e/network/service.go: "should provide secure master service " -test/e2e/network/service.go: "should serve a basic endpoint from pods " -test/e2e/network/service.go: "should serve multiport endpoints from pods " -test/e2e/network/service_latency.go: "should not be very high " -test/e2e/node/events.go: "should be sent by kubelets and the scheduler about pods scheduling and running " -test/e2e/node/pods.go: "should be submitted and removed [Flaky]" 
-test/e2e/node/pods.go: "should be submitted and removed " -test/e2e/node/pre_stop.go: "should call prestop when killing a pod " -test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run " -test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching " -test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching " +test/e2e/common/projected.go: "should project all components that make up the projection API" +test/e2e/common/secrets.go: "should be consumable from pods in env vars" +test/e2e/common/secrets.go: "should be consumable via the environment" +test/e2e/common/secrets_volume.go: "should be consumable from pods in volume" +test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with defaultMode set" +test/e2e/common/secrets_volume.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set" +test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings" +test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings and Item Mode set" +test/e2e/common/secrets_volume.go: "should be consumable in multiple volumes in a pod" +test/e2e/common/secrets_volume.go: "optional updates should be reflected in volume" +test/e2e/kubectl/kubectl.go: "should create and stop a replication controller" +test/e2e/kubectl/kubectl.go: "should scale a replication controller" +test/e2e/kubectl/kubectl.go: "should do a rolling update of a replication controller" +test/e2e/kubectl/kubectl.go: "should create and stop a working application" +test/e2e/kubectl/kubectl.go: "should check if v1 is in available api versions" +test/e2e/kubectl/kubectl.go: "should check if Kubernetes master services is included in cluster-info" +test/e2e/kubectl/kubectl.go: "should check if kubectl describe prints relevant information for rc and pods" +test/e2e/kubectl/kubectl.go: "should create services for rc" +test/e2e/kubectl/kubectl.go: "should update the label on a resource" +test/e2e/kubectl/kubectl.go: "should be able to retrieve and filter logs" +test/e2e/kubectl/kubectl.go: "should add annotations for pods in rc" +test/e2e/kubectl/kubectl.go: "should check is all data is printed" +test/e2e/kubectl/kubectl.go: "should create an rc or deployment from an image" +test/e2e/kubectl/kubectl.go: "should create an rc from an image" +test/e2e/kubectl/kubectl.go: "should support rolling-update to same image" +test/e2e/kubectl/kubectl.go: "should create a deployment from an image" +test/e2e/kubectl/kubectl.go: "should create a job from an image when restart is OnFailure" +test/e2e/kubectl/kubectl.go: "should create a pod from an image when restart is Never" +test/e2e/kubectl/kubectl.go: "should update a single-container pod's image" +test/e2e/kubectl/kubectl.go: "should create a job from an image, then delete the job" +test/e2e/kubectl/kubectl.go: "should support proxy with --port 0" +test/e2e/kubectl/kubectl.go: "should support --unix-socket=/path" +test/e2e/network/dns.go: "should provide DNS for the cluster" +test/e2e/network/dns.go: "should provide DNS for services" +test/e2e/network/proxy.go: "should proxy logs on node with explicit kubelet port using proxy subresource" +test/e2e/network/proxy.go: "should proxy logs on node using proxy subresource" +test/e2e/network/proxy.go: "should proxy through a service and a pod" +test/e2e/network/service.go: "should provide secure master service" +test/e2e/network/service.go: "should serve a 
basic endpoint from pods" +test/e2e/network/service.go: "should serve multiport endpoints from pods" +test/e2e/network/service_latency.go: "should not be very high" +test/e2e/node/events.go: "should be sent by kubelets and the scheduler about pods scheduling and running" +test/e2e/node/pods.go: "should be submitted and removed" +test/e2e/node/pods.go: "should be submitted and removed" +test/e2e/node/pre_stop.go: "should call prestop when killing a pod" +test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run" +test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching" +test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching" test/e2e_node/kubelet_test.go: "it should print the output to logs" test/e2e_node/kubelet_test.go: "it should not write to root filesystem" test/e2e_node/lifecycle_hook_test.go: "should execute poststart exec hook properly" diff --git a/test/conformance/walk.go b/test/conformance/walk.go index a4bd553ce83..0cabab981fe 100644 --- a/test/conformance/walk.go +++ b/test/conformance/walk.go @@ -176,10 +176,11 @@ func (v *visitor) emit(arg ast.Expr) { return } + at.Value = normalizeTestName(at.Value) if *confDoc { v.convertToConformanceData(at) } else { - fmt.Printf("%s: %s\n", v.FileSet.Position(at.Pos()).Filename, at.Value) + fmt.Printf("%s: %q\n", v.FileSet.Position(at.Pos()).Filename, at.Value) } default: v.failf(at, "framework.ConformanceIt() called with non-literal argument") @@ -197,6 +198,18 @@ func (v *visitor) getDescription(value string) string { " " + strings.Trim(value, "\"") } +var ( + regexTag = regexp.MustCompile(`(\[[a-zA-Z0-9:-]+\])`) +) + +// normalizeTestName removes tags (e.g., [Feature:Foo]), double quotes and trim +// the spaces to normalize the test name. 
+func normalizeTestName(s string) string { + r := regexTag.ReplaceAllString(s, "") + r = strings.Trim(r, "\"") + return strings.TrimSpace(r) +} + // funcName converts a selectorExpr with two idents into a string, // x.y -> "x.y" func funcName(n ast.Expr) string { diff --git a/test/conformance/walk_test.go b/test/conformance/walk_test.go index 8fcb8c4a156..c4052b8e5af 100644 --- a/test/conformance/walk_test.go +++ b/test/conformance/walk_test.go @@ -93,3 +93,25 @@ func TestConformance(t *testing.T) { } } } + +func TestNormalizeTestNames(t *testing.T) { + testCases := []struct { + rawName string + normalizedName string + }{ + { + "should have monotonically increasing restart count [Slow]", + "should have monotonically increasing restart count", + }, + { + " should check is all data is printed ", + "should check is all data is printed", + }, + } + for i, tc := range testCases { + actualName := normalizeTestName(tc.rawName) + if actualName != tc.normalizedName { + t.Errorf("test case[%d]: expected normalized name %q, got %q", i, tc.normalizedName, actualName) + } + } +} From efc408944c1eba5e2709df8f5fd01971f18f989e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 23 May 2018 23:51:49 +0300 Subject: [PATCH 147/307] kubeadm: Improve the kubelet default configuration security-wise --- cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go | 1 + cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go | 9 +++++---- cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go | 9 +++++---- .../util/config/testdata/conversion/master/internal.yaml | 1 + .../util/config/testdata/conversion/master/v1alpha2.yaml | 1 + .../config/testdata/defaulting/master/defaulted.yaml | 1 + 6 files changed, 14 insertions(+), 8 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index 3bd46500aa1..54cb51b5263 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -79,6 +79,7 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { Enabled: utilpointer.BoolPtr(false), }, }, + RotateCertificates: true, }, } kubeletconfigv1beta1.SetDefaults_KubeletConfiguration(obj.KubeletConfiguration.BaseConfig) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go index 9b2b499f6cc..c0439cbc07a 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go @@ -247,12 +247,13 @@ func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) { obj.KubeletConfiguration.BaseConfig.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook // Let clients using other authentication methods like ServiceAccount tokens also access the kubelet API - // TODO: Enable in a future PR - // obj.KubeletConfiguration.BaseConfig.Authentication.Webhook.Enabled = utilpointer.BoolPtr(true) + obj.KubeletConfiguration.BaseConfig.Authentication.Webhook.Enabled = utilpointer.BoolPtr(true) // Disable the readonly port of the kubelet, in order to not expose unnecessary information - // TODO: Enable in a future PR - // obj.KubeletConfiguration.BaseConfig.ReadOnlyPort = 0 + obj.KubeletConfiguration.BaseConfig.ReadOnlyPort = 0 + + // Enables client certificate rotation for the kubelet + obj.KubeletConfiguration.BaseConfig.RotateCertificates = true // Serve a /healthz webserver on localhost:10248 that kubeadm can talk to obj.KubeletConfiguration.BaseConfig.HealthzBindAddress = "127.0.0.1" diff 
--git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go index 946d3f2e6e8..79782b841a1 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go @@ -210,12 +210,13 @@ func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) { obj.KubeletConfiguration.BaseConfig.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook // Let clients using other authentication methods like ServiceAccount tokens also access the kubelet API - // TODO: Enable in a future PR - // obj.KubeletConfiguration.BaseConfig.Authentication.Webhook.Enabled = utilpointer.BoolPtr(true) + obj.KubeletConfiguration.BaseConfig.Authentication.Webhook.Enabled = utilpointer.BoolPtr(true) // Disable the readonly port of the kubelet, in order to not expose unnecessary information - // TODO: Enable in a future PR - // obj.KubeletConfiguration.BaseConfig.ReadOnlyPort = 0 + obj.KubeletConfiguration.BaseConfig.ReadOnlyPort = 0 + + // Enables client certificate rotation for the kubelet + obj.KubeletConfiguration.BaseConfig.RotateCertificates = true // Serve a /healthz webserver on localhost:10248 that kubeadm can talk to obj.KubeletConfiguration.BaseConfig.HealthzBindAddress = "127.0.0.1" diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml index aa8c9942bbb..c4b757ab5af 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml @@ -130,6 +130,7 @@ KubeletConfiguration: registryBurst: 10 registryPullQPS: 5 resolvConf: /etc/resolv.conf + rotateCertificates: true runtimeRequestTimeout: 2m0s serializeImagePulls: true staticPodPath: /etc/kubernetes/manifests diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml index de6b2724910..7587218a3e7 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml @@ -123,6 +123,7 @@ kubeletConfiguration: registryBurst: 10 registryPullQPS: 5 resolvConf: /etc/resolv.conf + rotateCertificates: true runtimeRequestTimeout: 2m0s serializeImagePulls: true staticPodPath: /etc/kubernetes/manifests diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml index a852a56a357..6d7d199da63 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml @@ -118,6 +118,7 @@ kubeletConfiguration: registryBurst: 10 registryPullQPS: 5 resolvConf: /etc/resolv.conf + rotateCertificates: true runtimeRequestTimeout: 2m0s serializeImagePulls: true staticPodPath: /etc/kubernetes/manifests From 0a95581de08a9d83d803fbf760a2912272c71d46 Mon Sep 17 00:00:00 2001 From: Tim Wilfong Date: Wed, 23 May 2018 13:54:20 -0700 Subject: [PATCH 148/307] Update function hasClusterTag to fix issue #64230 Fixes issue #64230, by changing function hasClusterTag, in aws/tags.go, to ensure that a list of tags containing a tag with a key which matches clusterTagKey will return true even if a TagNameKubernetesClusterLegacy tag also exists in the list with a value other than the ClusterID. 
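Expressed as a minimal, self-contained sketch of that intended behaviour (the tag type, key names and helper below are illustrative stand-ins, not the provider's actual code): the tag list is scanned, a new-style per-cluster key wins outright, and the legacy tag's value is only consulted when no new-style key is present, so a stale legacy tag cannot hide a matching cluster tag.

package main

import "fmt"

// tag is a simplified stand-in for an EC2 key/value tag.
type tag struct{ Key, Value string }

// legacyClusterTagKey mirrors the older tag whose value must equal the cluster ID.
const legacyClusterTagKey = "KubernetesCluster"

// hasClusterTag reports whether the tags identify membership in clusterID.
// A tag with the new-style per-cluster key always wins; the legacy tag's
// verdict is used only if no new-style key appears anywhere in the list.
func hasClusterTag(clusterID string, tags []tag) bool {
	newStyleKey := "kubernetes.io/cluster/" + clusterID
	legacyMatched := false
	for _, t := range tags {
		if t.Key == newStyleKey {
			return true
		}
		if t.Key == legacyClusterTagKey {
			legacyMatched = t.Value == clusterID
		}
	}
	return legacyMatched
}

func main() {
	tags := []tag{
		{Key: legacyClusterTagKey, Value: "someone-elses-cluster"}, // stale legacy tag
		{Key: "kubernetes.io/cluster/my-cluster", Value: "owned"},  // current ownership tag
	}
	fmt.Println(hasClusterTag("my-cluster", tags)) // true: the legacy mismatch no longer hides it
}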
/sig aws --- pkg/cloudprovider/providers/aws/tags.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/tags.go b/pkg/cloudprovider/providers/aws/tags.go index 40947cb379f..2a293e7271f 100644 --- a/pkg/cloudprovider/providers/aws/tags.go +++ b/pkg/cloudprovider/providers/aws/tags.go @@ -137,14 +137,14 @@ func (t *awsTagging) hasClusterTag(tags []*ec2.Tag) bool { clusterTagKey := t.clusterTagKey() for _, tag := range tags { tagKey := aws.StringValue(tag.Key) + // Check if this is a newer-style cluster tag before checking if legacy tag value matches ClusterID + if tagKey == clusterTagKey { + return true + } // For 1.6, we continue to recognize the legacy tags, for the 1.5 -> 1.6 upgrade if tagKey == TagNameKubernetesClusterLegacy { return aws.StringValue(tag.Value) == t.ClusterID } - - if tagKey == clusterTagKey { - return true - } } return false } From fd1f19fc423880b2b292d6f9e6fca1e941e87994 Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Wed, 23 May 2018 16:12:54 -0700 Subject: [PATCH 149/307] add metadata to kubelet eviction event annotations --- pkg/controller/testutil/test_utils.go | 5 + pkg/kubelet/container/helpers.go | 7 ++ pkg/kubelet/eviction/eviction_manager.go | 13 +-- pkg/kubelet/eviction/helpers.go | 24 ++++- .../k8s.io/client-go/tools/record/event.go | 22 +++-- .../src/k8s.io/client-go/tools/record/fake.go | 4 + test/e2e_node/BUILD | 1 + test/e2e_node/eviction_test.go | 96 ++++++++++++++++--- 8 files changed, 140 insertions(+), 32 deletions(-) diff --git a/pkg/controller/testutil/test_utils.go b/pkg/controller/testutil/test_utils.go index 770df607c41..02119191028 100644 --- a/pkg/controller/testutil/test_utils.go +++ b/pkg/controller/testutil/test_utils.go @@ -365,6 +365,11 @@ func (f *FakeRecorder) Eventf(obj runtime.Object, eventtype, reason, messageFmt func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { } +// AnnotatedEventf emits a fake formatted event to the fake recorder +func (f *FakeRecorder) AnnotatedEventf(obj runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + f.Eventf(obj, eventtype, reason, messageFmt, args) +} + func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time, eventtype, reason, message string) { f.Lock() defer f.Unlock() diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 180a3e6df2d..399fa959f45 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -193,6 +193,13 @@ func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp } } +func (irecorder *innerEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + if ref, ok := irecorder.shouldRecordEvent(object); ok { + irecorder.recorder.AnnotatedEventf(ref, annotations, eventtype, reason, messageFmt, args...) + } + +} + // Pod must not be nil. 
func IsHostNetworkPod(pod *v1.Pod) bool { return pod.Spec.HostNetwork diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index b601fe4763d..cadc6afaa6c 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -429,7 +429,8 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act if !isHardEvictionThreshold(thresholdToReclaim) { gracePeriodOverride = m.config.MaxPodGracePeriodSeconds } - if m.evictPod(pod, gracePeriodOverride, evictionMessage(resourceToReclaim, pod, statsFunc)) { + message, annotations := evictionMessage(resourceToReclaim, pod, statsFunc) + if m.evictPod(pod, gracePeriodOverride, message, annotations) { return []*v1.Pod{pod} } } @@ -534,7 +535,7 @@ func (m *managerImpl) emptyDirLimitEviction(podStats statsapi.PodStats, pod *v1. used := podVolumeUsed[pod.Spec.Volumes[i].Name] if used != nil && size != nil && size.Sign() == 1 && used.Cmp(*size) > 0 { // the emptyDir usage exceeds the size limit, evict the pod - return m.evictPod(pod, 0, fmt.Sprintf(emptyDirMessage, pod.Spec.Volumes[i].Name, size.String())) + return m.evictPod(pod, 0, fmt.Sprintf(emptyDirMessage, pod.Spec.Volumes[i].Name, size.String()), nil) } } } @@ -566,7 +567,7 @@ func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStat podEphemeralStorageLimit := podLimits[v1.ResourceEphemeralStorage] if podEphemeralStorageTotalUsage.Cmp(podEphemeralStorageLimit) > 0 { // the total usage of pod exceeds the total size limit of containers, evict the pod - return m.evictPod(pod, 0, fmt.Sprintf(podEphemeralStorageMessage, podEphemeralStorageLimit.String())) + return m.evictPod(pod, 0, fmt.Sprintf(podEphemeralStorageMessage, podEphemeralStorageLimit.String()), nil) } return false } @@ -588,7 +589,7 @@ func (m *managerImpl) containerEphemeralStorageLimitEviction(podStats statsapi.P if ephemeralStorageThreshold, ok := thresholdsMap[containerStat.Name]; ok { if ephemeralStorageThreshold.Cmp(*containerUsed) < 0 { - return m.evictPod(pod, 0, fmt.Sprintf(containerEphemeralStorageMessage, containerStat.Name, ephemeralStorageThreshold.String())) + return m.evictPod(pod, 0, fmt.Sprintf(containerEphemeralStorageMessage, containerStat.Name, ephemeralStorageThreshold.String()), nil) } } @@ -596,7 +597,7 @@ func (m *managerImpl) containerEphemeralStorageLimitEviction(podStats statsapi.P return false } -func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg string) bool { +func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg string, annotations map[string]string) bool { // If the pod is marked as critical and static, and support for critical pod annotations is enabled, // do not evict such pods. Static pods are not re-admitted after evictions. // https://github.com/kubernetes/kubernetes/issues/40573 has more details. @@ -611,7 +612,7 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg Reason: Reason, } // record that we are evicting the pod - m.recorder.Eventf(pod, v1.EventTypeWarning, Reason, evictMsg) + m.recorder.AnnotatedEventf(pod, annotations, v1.EventTypeWarning, Reason, evictMsg) // this is a blocking call and should only return when the pod and its containers are killed. 
err := m.killPodFunc(pod, status, &gracePeriodOverride) if err != nil { diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index fd862ea1cef..1c309360a39 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -55,6 +55,12 @@ const ( // this prevents constantly updating the memcg notifier if synchronize // is run frequently. notifierRefreshInterval = 10 * time.Second + // OffendingContainersKey is the key in eviction event annotations for the list of container names which exceeded their requests + OffendingContainersKey = "offending_containers" + // OffendingContainersUsageKey is the key in eviction event annotations for the list of usage of containers which exceeded their requests + OffendingContainersUsageKey = "offending_containers_usage" + // StarvedResourceKey is the key for the starved resource in eviction event annotations + StarvedResourceKey = "starved_resource" ) var ( @@ -1053,12 +1059,15 @@ func buildSignalToNodeReclaimFuncs(imageGC ImageGC, containerGC ContainerGC, wit return signalToReclaimFunc } -// evictionMessage constructs a useful message about why an eviction occurred -func evictionMessage(resourceToReclaim v1.ResourceName, pod *v1.Pod, stats statsFunc) string { - message := fmt.Sprintf(message, resourceToReclaim) +// evictionMessage constructs a useful message about why an eviction occurred, and annotations to provide metadata about the eviction +func evictionMessage(resourceToReclaim v1.ResourceName, pod *v1.Pod, stats statsFunc) (message string, annotations map[string]string) { + annotations = make(map[string]string) + message = fmt.Sprintf(message, resourceToReclaim) + containers := []string{} + containerUsage := []string{} podStats, ok := stats(pod) if !ok { - return message + return } for _, containerStats := range podStats.Containers { for _, container := range pod.Spec.Containers { @@ -1077,11 +1086,16 @@ func evictionMessage(resourceToReclaim v1.ResourceName, pod *v1.Pod, stats stats } if usage != nil && usage.Cmp(requests) > 0 { message += fmt.Sprintf(containerMessage, container.Name, usage.String(), requests.String()) + containers = append(containers, container.Name) + containerUsage = append(containerUsage, usage.String()) } } } } - return message + annotations[OffendingContainersKey] = strings.Join(containers, ",") + annotations[OffendingContainersUsageKey] = strings.Join(containerUsage, ",") + annotations[StarvedResourceKey] = string(resourceToReclaim) + return } // thresholdStopCh is a ThresholdStopCh which can only be closed after notifierRefreshInterval time has passed diff --git a/staging/src/k8s.io/client-go/tools/record/event.go b/staging/src/k8s.io/client-go/tools/record/event.go index cc665d74e63..168dfa80c56 100644 --- a/staging/src/k8s.io/client-go/tools/record/event.go +++ b/staging/src/k8s.io/client-go/tools/record/event.go @@ -72,6 +72,9 @@ type EventRecorder interface { // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) + + // AnnotatedEventf is just like eventf, but with annotations attached + AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) } // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. 
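// Illustrative, stdlib-only sketch (not the kubelet's actual types) of how the
// eviction metadata introduced in this patch becomes event annotations: the
// offending container names and their usages are comma-joined under the keys
// defined in eviction/helpers.go, and the resulting map is what AnnotatedEventf
// attaches to the eviction event's ObjectMeta.Annotations.
package main

import (
	"fmt"
	"strings"
)

// containerUsage is a hypothetical stand-in for per-container stats.
type containerUsage struct {
	Name  string
	Usage string // e.g. "1200Mi"
}

func evictionAnnotations(starvedResource string, offenders []containerUsage) map[string]string {
	names := make([]string, 0, len(offenders))
	usages := make([]string, 0, len(offenders))
	for _, c := range offenders {
		names = append(names, c.Name)
		usages = append(usages, c.Usage)
	}
	return map[string]string{
		"starved_resource":           starvedResource,
		"offending_containers":       strings.Join(names, ","),
		"offending_containers_usage": strings.Join(usages, ","),
	}
}

func main() {
	fmt.Println(evictionAnnotations("memory", []containerUsage{{Name: "memory-hog", Usage: "1200Mi"}}))
}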
@@ -250,7 +253,7 @@ type recorderImpl struct { clock clock.Clock } -func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp metav1.Time, eventtype, reason, message string) { +func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { ref, err := ref.GetReference(recorder.scheme, object) if err != nil { glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) @@ -262,7 +265,7 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp met return } - event := recorder.makeEvent(ref, eventtype, reason, message) + event := recorder.makeEvent(ref, annotations, eventtype, reason, message) event.Source = recorder.source go func() { @@ -281,7 +284,7 @@ func validateEventType(eventtype string) bool { } func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { - recorder.generateEvent(object, metav1.Now(), eventtype, reason, message) + recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) } func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { @@ -289,10 +292,14 @@ func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, m } func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) + recorder.generateEvent(object, nil, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) } -func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event { +func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { t := metav1.Time{Time: recorder.clock.Now()} namespace := ref.Namespace if namespace == "" { @@ -300,8 +307,9 @@ func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, eventtype, reas } return &v1.Event{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), - Namespace: namespace, + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + Annotations: annotations, }, InvolvedObject: *ref, Reason: reason, diff --git a/staging/src/k8s.io/client-go/tools/record/fake.go b/staging/src/k8s.io/client-go/tools/record/fake.go index c0e8eedbb73..6e031daaff8 100644 --- a/staging/src/k8s.io/client-go/tools/record/fake.go +++ b/staging/src/k8s.io/client-go/tools/record/fake.go @@ -45,6 +45,10 @@ func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageF func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { } +func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + f.Eventf(object, eventtype, reason, messageFmt, args) +} + // NewFakeRecorder creates new fake event recorder with event 
channel with // buffer of given size. func NewFakeRecorder(bufferSize int) *FakeRecorder { diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 9113fa745d9..859fa688bc3 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -152,6 +152,7 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 21962c62f31..3e3a55d792a 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -20,11 +20,13 @@ import ( "fmt" "path/filepath" "strconv" + "strings" "time" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" nodeutil "k8s.io/kubernetes/pkg/api/v1/node" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig" @@ -50,11 +52,13 @@ const ( // pressure conditions often surface after evictions because the kubelet only updates // node conditions periodically. // we wait this period after evictions to make sure that we wait out this delay - pressureDelay = 20 * time.Second - testContextFmt = "when we run containers that should cause %s" - noPressure = v1.NodeConditionType("NoPressure") - lotsOfDisk = 10240 // 10 Gb in Mb - lotsOfFiles = 1000000000 // 1 billion + pressureDelay = 20 * time.Second + testContextFmt = "when we run containers that should cause %s" + noPressure = v1.NodeConditionType("NoPressure") + lotsOfDisk = 10240 // 10 Gb in Mb + lotsOfFiles = 1000000000 // 1 billion + resourceInodes = v1.ResourceName("inodes") + noStarvedResource = v1.ResourceName("none") ) // InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods. 
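// A trimmed, standalone sketch (hypothetical helper, not the e2e framework's
// code) of the per-event check that the verifyEvictionEvents addition performs:
// the starved_resource annotation must match the expected resource, and for
// memory evictions exactly one offending container with a recorded usage is
// expected.
package main

import (
	"fmt"
	"strings"
)

func checkEvictionEvent(annotations map[string]string, wantStarved, wantContainer string) error {
	if got := annotations["starved_resource"]; got != wantStarved {
		return fmt.Errorf("starved_resource = %q, want %q", got, wantStarved)
	}
	// Usage-based keys are only asserted for memory; ephemeral-storage evictions
	// may be caused by volume usage, in which case these annotations are absent.
	if wantStarved != "memory" {
		return nil
	}
	offenders := strings.Split(annotations["offending_containers"], ",")
	if len(offenders) != 1 || offenders[0] != wantContainer {
		return fmt.Errorf("offending_containers = %v, want [%s]", offenders, wantContainer)
	}
	if annotations["offending_containers_usage"] == "" {
		return fmt.Errorf("offending_containers_usage annotation is missing")
	}
	return nil
}

func main() {
	ann := map[string]string{
		"starved_resource":           "memory",
		"offending_containers":       "memory-hog",
		"offending_containers_usage": "1200Mi",
	}
	fmt.Println(checkEvictionEvent(ann, "memory", "memory-hog")) // <nil>
}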
@@ -62,6 +66,7 @@ const ( var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", func() { f := framework.NewDefaultFramework("inode-eviction-test") expectedNodeCondition := v1.NodeDiskPressure + expectedStarvedResource := resourceInodes pressureTimeout := 15 * time.Minute inodesConsumed := uint64(200000) Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { @@ -75,7 +80,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", fun initialConfig.EvictionHard = map[string]string{"nodefs.inodesFree": fmt.Sprintf("%d", inodesFree-inodesConsumed)} initialConfig.EvictionMinimumReclaim = map[string]string{} }) - runEvictionTest(f, pressureTimeout, expectedNodeCondition, logInodeMetrics, []podEvictSpec{ + runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logInodeMetrics, []podEvictSpec{ { evictionPriority: 1, pod: inodeConsumingPod("container-inode-hog", lotsOfFiles, nil), @@ -98,6 +103,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", f := framework.NewDefaultFramework("image-gc-eviction-test") pressureTimeout := 10 * time.Minute expectedNodeCondition := v1.NodeDiskPressure + expectedStarvedResource := resourceInodes inodesConsumed := uint64(100000) Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { @@ -112,7 +118,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", }) // Consume enough inodes to induce disk pressure, // but expect that image garbage collection can reduce it enough to avoid an eviction - runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{ + runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{ { evictionPriority: 0, pod: inodeConsumingPod("container-inode", 110000, nil), @@ -126,6 +132,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive]", func() { f := framework.NewDefaultFramework("memory-allocatable-eviction-test") expectedNodeCondition := v1.NodeMemoryPressure + expectedStarvedResource := v1.ResourceMemory pressureTimeout := 10 * time.Minute Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { @@ -140,7 +147,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru initialConfig.EnforceNodeAllocatable = []string{kubetypes.NodeAllocatableEnforcementKey} initialConfig.CgroupsPerQOS = true }) - runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, []podEvictSpec{ + runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, []podEvictSpec{ { evictionPriority: 1, pod: getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}), @@ -159,6 +166,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive f := framework.NewDefaultFramework("localstorage-eviction-test") pressureTimeout := 10 * time.Minute expectedNodeCondition := v1.NodeDiskPressure + expectedStarvedResource := v1.ResourceEphemeralStorage Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { 
diskConsumed := resource.MustParse("100Mi") @@ -167,7 +175,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive initialConfig.EvictionHard = map[string]string{"nodefs.available": fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))} initialConfig.EvictionMinimumReclaim = map[string]string{} }) - runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{ + runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{ { evictionPriority: 1, pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}), @@ -187,6 +195,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup f := framework.NewDefaultFramework("localstorage-eviction-test") pressureTimeout := 10 * time.Minute expectedNodeCondition := v1.NodeDiskPressure + expectedStarvedResource := v1.ResourceEphemeralStorage Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { diskConsumed := resource.MustParse("100Mi") @@ -204,7 +213,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup // setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty) initialConfig.EvictionHard = map[string]string{"memory.available": "0%"} }) - runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{ + runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{ { evictionPriority: 1, pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}), @@ -232,7 +241,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se useUnderLimit := 99 /* Mb */ containerLimit := v1.ResourceList{v1.ResourceEphemeralStorage: sizeLimit} - runEvictionTest(f, evictionTestTimeout, noPressure, logDiskMetrics, []podEvictSpec{ + runEvictionTest(f, evictionTestTimeout, noPressure, noStarvedResource, logDiskMetrics, []podEvictSpec{ { evictionPriority: 1, // This pod should be evicted because emptyDir (default storage type) usage violation pod: diskConsumingPod("emptydir-disk-sizelimit", useOverLimit, &v1.VolumeSource{ @@ -274,6 +283,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]", func() { f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test") expectedNodeCondition := v1.NodeMemoryPressure + expectedStarvedResource := v1.ResourceMemory pressureTimeout := 10 * time.Minute Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { @@ -310,7 +320,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ } systemPriority := int32(2147483647) specs[1].pod.Spec.Priority = &systemPriority - runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, specs) + runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logMemoryMetrics, specs) }) }) @@ -320,6 +330,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive]", func() { f := 
framework.NewDefaultFramework("priority-disk-eviction-ordering-test") expectedNodeCondition := v1.NodeDiskPressure + expectedStarvedResource := v1.ResourceEphemeralStorage pressureTimeout := 10 * time.Minute Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() { tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { @@ -358,7 +369,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser } systemPriority := int32(2147483647) specs[1].pod.Spec.Priority = &systemPriority - runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, specs) + runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, specs) }) }) @@ -377,7 +388,7 @@ type podEvictSpec struct { // It ensures that lower evictionPriority pods are always evicted before higher evictionPriority pods (2 evicted before 1, etc.) // It ensures that all pods with non-zero evictionPriority are eventually evicted. // runEvictionTest then cleans up the testing environment by deleting provided pods, and ensures that expectedNodeCondition no longer exists -func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, logFunc func(), testSpecs []podEvictSpec) { +func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expectedNodeCondition v1.NodeConditionType, expectedStarvedResource v1.ResourceName, logFunc func(), testSpecs []podEvictSpec) { // Place the remainder of the test within a context so that the kubelet config is set before and after the test. Context("", func() { BeforeEach(func() { @@ -442,6 +453,9 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey) return verifyEvictionOrdering(f, testSpecs) }, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil()) + + By("checking for correctly formatted eviction events") + verifyEvictionEvents(f, testSpecs, expectedStarvedResource) }) AfterEach(func() { @@ -549,6 +563,60 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er return fmt.Errorf("pods that should be evicted are still running") } +func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expectedStarvedResource v1.ResourceName) { + for _, spec := range testSpecs { + pod := spec.pod + if spec.evictionPriority != 0 { + selector := fields.Set{ + "involvedObject.kind": "Pod", + "involvedObject.name": pod.Name, + "involvedObject.namespace": f.Namespace.Name, + "reason": eviction.Reason, + }.AsSelector().String() + podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{FieldSelector: selector}) + Expect(err).To(BeNil(), "Unexpected error getting events during eviction test: %v", err) + Expect(len(podEvictEvents.Items)).To(Equal(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items)) + event := podEvictEvents.Items[0] + + if expectedStarvedResource != noStarvedResource { + // Check the eviction.StarvedResourceKey + starved, found := event.Annotations[eviction.StarvedResourceKey] + Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found", + pod.Name, expectedStarvedResource) + starvedResource := v1.ResourceName(starved) + Expect(starvedResource).To(Equal(expectedStarvedResource), "Expected to the starved_resource 
annotation on pod %s to contain %s, but got %s instead", + pod.Name, expectedStarvedResource, starvedResource) + + // We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present + if expectedStarvedResource == v1.ResourceMemory { + // Check the eviction.OffendingContainersKey + offendersString, found := event.Annotations[eviction.OffendingContainersKey] + Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found", + pod.Name) + offendingContainers := strings.Split(offendersString, ",") + Expect(len(offendingContainers)).To(Equal(1), "Expected to find the offending container's usage in the %s annotation, but no container was found", + eviction.OffendingContainersKey) + Expect(offendingContainers[0]).To(Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead", + pod.Spec.Containers[0].Name, eviction.OffendingContainersKey, offendingContainers[0]) + + // Check the eviction.OffendingContainersUsageKey + offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey] + Expect(found).To(BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found", + pod.Name) + offendingContainersUsage := strings.Split(offendingUsageString, ",") + Expect(len(offendingContainersUsage)).To(Equal(1), "Expected to find the offending container's usage in the %s annotation, but found %+v", + eviction.OffendingContainersUsageKey, offendingContainersUsage) + usageQuantity, err := resource.ParseQuantity(offendingContainersUsage[0]) + Expect(err).To(BeNil(), "Expected to be able to parse pod %s's %s annotation as a quantity, but got err: %v", pod.Name, eviction.OffendingContainersUsageKey, err) + request := pod.Spec.Containers[0].Resources.Requests[starvedResource] + Expect(usageQuantity.Cmp(request)).To(Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s", + usageQuantity.String(), pod.Name, request.String()) + } + } + } + } +} + // Returns TRUE if the node has the node condition, FALSE otherwise func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeConditionType) bool { localNodeStatus := getLocalNode(f).Status From ddbd9d20cbaf3636ad36700422f9f6f32befc9da Mon Sep 17 00:00:00 2001 From: WanLinghao Date: Wed, 23 May 2018 14:47:34 +0800 Subject: [PATCH 150/307] fix kubectl set subject --all option invalid bug --- hack/make-rules/test-cmd-util.sh | 12 ++++++++++++ pkg/kubectl/cmd/set/set_subject.go | 4 +++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index c2d6e8392b2..760a2a446c0 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -3828,6 +3828,12 @@ run_clusterroles_tests() { kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:' kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:' + # test `kubectl set subject clusterrolebinding --all` + kubectl set subject "${kube_flags[@]}" clusterrolebinding --all --user=test-all-user + kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:test-all-user:' + 
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:test-all-user:' + kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:test-all-user:' + # test `kubectl create rolebinding` # test `kubectl set subject rolebinding` kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin @@ -3849,6 +3855,12 @@ run_clusterroles_tests() { kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:' kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:' + # test `kubectl set subject rolebinding --all` + kubectl set subject "${kube_flags[@]}" rolebinding --all --user=test-all-user + kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:test-all-user:' + kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:test-all-user:' + kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:test-all-user:' + set +o nounset set +o errexit } diff --git a/pkg/kubectl/cmd/set/set_subject.go b/pkg/kubectl/cmd/set/set_subject.go index 10fd4031e10..eb0143a1911 100644 --- a/pkg/kubectl/cmd/set/set_subject.go +++ b/pkg/kubectl/cmd/set/set_subject.go @@ -262,7 +262,9 @@ func (o *SubjectOptions) Run(fn updateSubjects) error { continue } - return o.PrintObj(actual, o.Out) + if err := o.PrintObj(actual, o.Out); err != nil { + allErrs = append(allErrs, err) + } } return utilerrors.NewAggregate(allErrs) } From b020a4ba8c84f6a2817529a77e1c1d3bafeb705e Mon Sep 17 00:00:00 2001 From: WanLinghao Date: Thu, 24 May 2018 09:48:24 +0800 Subject: [PATCH 151/307] fix bugs that break processing when printing errors occur in kubectl --- pkg/kubectl/cmd/rollout/rollout_pause.go | 8 ++++++-- pkg/kubectl/cmd/rollout/rollout_resume.go | 8 ++++++-- pkg/kubectl/cmd/set/set_env.go | 4 ++-- pkg/kubectl/cmd/set/set_image.go | 4 ++-- pkg/kubectl/cmd/set/set_resources.go | 4 ++-- pkg/kubectl/cmd/set/set_serviceaccount.go | 4 ++-- pkg/kubectl/cmd/set/set_subject.go | 2 +- 7 files changed, 21 insertions(+), 13 deletions(-) diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index 65888248cd9..1037b7bd894 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -153,7 +153,9 @@ func (o PauseConfig) RunPause() error { allErrs = append(allErrs, err) continue } - printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out) + if err = printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out); err != nil { + allErrs = append(allErrs, err) + } continue } @@ -169,7 +171,9 @@ func (o PauseConfig) RunPause() error { allErrs = append(allErrs, err) continue } - printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out) + if err = printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out); err != nil { + allErrs = append(allErrs, err) + } } return utilerrors.NewAggregate(allErrs) diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index 16dde958d83..ed44a428ba2 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -158,7 +158,9 @@ func (o ResumeConfig) RunResume() error { 
allErrs = append(allErrs, err) continue } - printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out) + if err = printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out); err != nil { + allErrs = append(allErrs, err) + } } obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch) @@ -173,7 +175,9 @@ func (o ResumeConfig) RunResume() error { allErrs = append(allErrs, err) continue } - printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out) + if err = printer.PrintObj(cmdutil.AsDefaultVersionedOrOriginal(info.Object, info.Mapping), o.Out); err != nil { + allErrs = append(allErrs, err) + } } return utilerrors.NewAggregate(allErrs) diff --git a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index 87a83c7a10d..da60aa55bc3 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -415,7 +415,7 @@ func (o *EnvOptions) RunEnv() error { if o.Local || o.dryRun { if err := o.PrintObj(info.Object, o.Out); err != nil { - return err + allErrs = append(allErrs, err) } continue } @@ -433,7 +433,7 @@ func (o *EnvOptions) RunEnv() error { } if err := o.PrintObj(actual, o.Out); err != nil { - return err + allErrs = append(allErrs, err) } } return utilerrors.NewAggregate(allErrs) diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 540e40bebc9..b2f09e76dd6 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -276,7 +276,7 @@ func (o *SetImageOptions) Run() error { if o.Local || o.DryRun { if err := o.PrintObj(info.Object, o.Out); err != nil { - return err + allErrs = append(allErrs, err) } continue } @@ -289,7 +289,7 @@ func (o *SetImageOptions) Run() error { } if err := o.PrintObj(actual, o.Out); err != nil { - return err + allErrs = append(allErrs, err) } } return utilerrors.NewAggregate(allErrs) diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 1089b3f61f5..23f1cd35e3a 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -277,7 +277,7 @@ func (o *SetResourcesOptions) Run() error { if o.Local || o.DryRun { if err := o.PrintObj(info.Object, o.Out); err != nil { - return err + allErrs = append(allErrs, err) } continue } @@ -289,7 +289,7 @@ func (o *SetResourcesOptions) Run() error { } if err := o.PrintObj(actual, o.Out); err != nil { - return err + allErrs = append(allErrs, err) } } return utilerrors.NewAggregate(allErrs) diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index e33e12ac3b7..a1a956bab88 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -198,7 +198,7 @@ func (o *SetServiceAccountOptions) Run() error { } if o.local || o.dryRun { if err := o.PrintObj(info.Object, o.Out); err != nil { - return err + patchErrs = append(patchErrs, err) } continue } @@ -209,7 +209,7 @@ func (o *SetServiceAccountOptions) Run() error { } if err := o.PrintObj(actual, o.Out); err != nil { - return err + patchErrs = append(patchErrs, err) } } return utilerrors.NewAggregate(patchErrs) diff --git a/pkg/kubectl/cmd/set/set_subject.go b/pkg/kubectl/cmd/set/set_subject.go index 10fd4031e10..332453fc998 100644 --- a/pkg/kubectl/cmd/set/set_subject.go +++ b/pkg/kubectl/cmd/set/set_subject.go @@ -251,7 +251,7 @@ func (o *SubjectOptions) Run(fn updateSubjects) 
error { if o.Local || o.DryRun { if err := o.PrintObj(info.Object, o.Out); err != nil { - return err + allErrs = append(allErrs, err) } continue } From 506271b3635f1e43065f372f4718ac25c2aeb45e Mon Sep 17 00:00:00 2001 From: godliness Date: Thu, 17 May 2018 23:10:28 +0800 Subject: [PATCH 152/307] Optimize the locking in RunPredicate --- pkg/scheduler/core/equivalence_cache.go | 27 +++++++--- pkg/scheduler/core/equivalence_cache_test.go | 53 +++++++++++++------- 2 files changed, 53 insertions(+), 27 deletions(-) diff --git a/pkg/scheduler/core/equivalence_cache.go b/pkg/scheduler/core/equivalence_cache.go index 8322936ff8b..da50d2a81f7 100644 --- a/pkg/scheduler/core/equivalence_cache.go +++ b/pkg/scheduler/core/equivalence_cache.go @@ -35,7 +35,7 @@ import ( // 1. a map of AlgorithmCache with node name as key // 2. function to get equivalence pod type EquivalenceCache struct { - mu sync.Mutex + mu sync.RWMutex algorithmCache map[string]AlgorithmCache } @@ -72,9 +72,6 @@ func (ec *EquivalenceCache) RunPredicate( equivClassInfo *equivalenceClassInfo, cache schedulercache.Cache, ) (bool, []algorithm.PredicateFailureReason, error) { - ec.mu.Lock() - defer ec.mu.Unlock() - if nodeInfo == nil || nodeInfo.Node() == nil { // This may happen during tests. return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid") @@ -88,20 +85,32 @@ func (ec *EquivalenceCache) RunPredicate( if err != nil { return fit, reasons, err } - // Skip update if NodeInfo is stale. - if cache != nil && cache.IsUpToDate(nodeInfo) { - ec.updateResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, fit, reasons, equivClassInfo.hash) + if cache != nil { + ec.updateResult(pod.GetName(), predicateKey, fit, reasons, equivClassInfo.hash, cache, nodeInfo) } return fit, reasons, nil } // updateResult updates the cached result of a predicate. func (ec *EquivalenceCache) updateResult( - podName, nodeName, predicateKey string, + podName, predicateKey string, fit bool, reasons []algorithm.PredicateFailureReason, equivalenceHash uint64, + cache schedulercache.Cache, + nodeInfo *schedulercache.NodeInfo, ) { + ec.mu.Lock() + defer ec.mu.Unlock() + if nodeInfo == nil || nodeInfo.Node() == nil { + // This may happen during tests. + return + } + // Skip update if NodeInfo is stale. 
+ if !cache.IsUpToDate(nodeInfo) { + return + } + nodeName := nodeInfo.Node().GetName() if _, exist := ec.algorithmCache[nodeName]; !exist { ec.algorithmCache[nodeName] = AlgorithmCache{} } @@ -130,6 +139,8 @@ func (ec *EquivalenceCache) lookupResult( podName, nodeName, predicateKey string, equivalenceHash uint64, ) (bool, []algorithm.PredicateFailureReason, bool) { + ec.mu.RLock() + defer ec.mu.RUnlock() glog.V(5).Infof("Begin to calculate predicate: %v for pod: %s on node: %s based on equivalence cache", predicateKey, podName, nodeName) if hostPredicate, exist := ec.algorithmCache[nodeName][predicateKey][equivalenceHash]; exist { diff --git a/pkg/scheduler/core/equivalence_cache_test.go b/pkg/scheduler/core/equivalence_cache_test.go index 3b33917a14d..5411ffb0567 100644 --- a/pkg/scheduler/core/equivalence_cache_test.go +++ b/pkg/scheduler/core/equivalence_cache_test.go @@ -253,9 +253,7 @@ func TestRunPredicate(t *testing.T) { ecache := NewEquivalenceCache() equivClass := ecache.getEquivalenceClassInfo(pod) if test.expectCacheHit { - ecache.mu.Lock() - ecache.updateResult(pod.Name, node.Node().Name, "testPredicate", test.expectFit, test.expectedReasons, equivClass.hash) - ecache.mu.Unlock() + ecache.updateResult(pod.Name, "testPredicate", test.expectFit, test.expectedReasons, equivClass.hash, test.cache, node) } fit, reasons, err := ecache.RunPredicate(test.pred.predicate, "testPredicate", pod, meta, node, equivClass, test.cache) @@ -289,9 +287,7 @@ func TestRunPredicate(t *testing.T) { if !test.expectCacheHit && test.pred.callCount == 0 { t.Errorf("Predicate should be called") } - ecache.mu.Lock() _, _, invalid := ecache.lookupResult(pod.Name, node.Node().Name, "testPredicate", equivClass.hash) - ecache.mu.Unlock() if invalid && test.expectCacheWrite { t.Errorf("Cache write should happen") } @@ -316,6 +312,7 @@ func TestUpdateResult(t *testing.T) { equivalenceHash uint64 expectPredicateMap bool expectCacheItem HostPredicate + cache schedulercache.Cache }{ { name: "test 1", @@ -328,6 +325,7 @@ func TestUpdateResult(t *testing.T) { expectCacheItem: HostPredicate{ Fit: true, }, + cache: &upToDateCache{}, }, { name: "test 2", @@ -340,6 +338,7 @@ func TestUpdateResult(t *testing.T) { expectCacheItem: HostPredicate{ Fit: false, }, + cache: &upToDateCache{}, }, } for _, test := range tests { @@ -354,16 +353,18 @@ func TestUpdateResult(t *testing.T) { test.equivalenceHash: predicateItem, } } - ecache.mu.Lock() + + node := schedulercache.NewNodeInfo() + node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}) ecache.updateResult( test.pod, - test.nodeName, test.predicateKey, test.fit, test.reasons, test.equivalenceHash, + test.cache, + node, ) - ecache.mu.Unlock() cachedMapItem, ok := ecache.algorithmCache[test.nodeName][test.predicateKey] if !ok { @@ -390,6 +391,7 @@ func TestLookupResult(t *testing.T) { expectedInvalidPredicateKey bool expectedInvalidEquivalenceHash bool expectedPredicateItem predicateItemType + cache schedulercache.Cache }{ { name: "test 1", @@ -407,6 +409,7 @@ func TestLookupResult(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{}, }, + cache: &upToDateCache{}, }, { name: "test 2", @@ -423,6 +426,7 @@ func TestLookupResult(t *testing.T) { fit: true, reasons: []algorithm.PredicateFailureReason{}, }, + cache: &upToDateCache{}, }, { name: "test 3", @@ -440,6 +444,7 @@ func TestLookupResult(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, }, + cache: &upToDateCache{}, }, { name: 
"test 4", @@ -458,22 +463,24 @@ func TestLookupResult(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{}, }, + cache: &upToDateCache{}, }, } for _, test := range tests { ecache := NewEquivalenceCache() + node := schedulercache.NewNodeInfo() + node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}) // set cached item to equivalence cache - ecache.mu.Lock() ecache.updateResult( test.podName, - test.nodeName, test.predicateKey, test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, + test.cache, + node, ) - ecache.mu.Unlock() // if we want to do invalid, invalid the cached item if test.expectedInvalidPredicateKey { predicateKeys := sets.NewString() @@ -481,13 +488,11 @@ func TestLookupResult(t *testing.T) { ecache.InvalidateCachedPredicateItem(test.nodeName, predicateKeys) } // calculate predicate with equivalence cache - ecache.mu.Lock() fit, reasons, invalid := ecache.lookupResult(test.podName, test.nodeName, test.predicateKey, test.equivalenceHashForCalPredicate, ) - ecache.mu.Unlock() // returned invalid should match expectedInvalidPredicateKey or expectedInvalidEquivalenceHash if test.equivalenceHashForUpdatePredicate != test.equivalenceHashForCalPredicate { if invalid != test.expectedInvalidEquivalenceHash { @@ -637,6 +642,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { nodeName string equivalenceHashForUpdatePredicate uint64 cachedItem predicateItemType + cache schedulercache.Cache }{ { podName: "testPod", @@ -648,6 +654,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { predicates.ErrPodNotFitsHostPorts, }, }, + cache: &upToDateCache{}, }, { podName: "testPod", @@ -659,6 +666,7 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { predicates.ErrPodNotFitsHostPorts, }, }, + cache: &upToDateCache{}, }, { podName: "testPod", @@ -667,22 +675,24 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { cachedItem: predicateItemType{ fit: true, }, + cache: &upToDateCache{}, }, } ecache := NewEquivalenceCache() for _, test := range tests { + node := schedulercache.NewNodeInfo() + node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}}) // set cached item to equivalence cache - ecache.mu.Lock() ecache.updateResult( test.podName, - test.nodeName, testPredicate, test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, + test.cache, + node, ) - ecache.mu.Unlock() } // invalidate cached predicate for all nodes @@ -708,6 +718,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { nodeName string equivalenceHashForUpdatePredicate uint64 cachedItem predicateItemType + cache schedulercache.Cache }{ { podName: "testPod", @@ -717,6 +728,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, }, + cache: &upToDateCache{}, }, { podName: "testPod", @@ -726,6 +738,7 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, }, + cache: &upToDateCache{}, }, { podName: "testPod", @@ -734,22 +747,24 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { cachedItem: predicateItemType{ fit: true, }, + cache: &upToDateCache{}, }, } ecache := NewEquivalenceCache() for _, test := range tests { + node := schedulercache.NewNodeInfo() + node.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: 
test.nodeName}}) // set cached item to equivalence cache - ecache.mu.Lock() ecache.updateResult( test.podName, - test.nodeName, testPredicate, test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, + test.cache, + node, ) - ecache.mu.Unlock() } for _, test := range tests { From cf86cb77eb90e35c91796172e24d7a8341691f80 Mon Sep 17 00:00:00 2001 From: Guoliang Wang Date: Wed, 23 May 2018 11:28:54 +0800 Subject: [PATCH 153/307] Move unrelated methods from the factory to helper --- pkg/kubectl/cmd/expose.go | 5 +- pkg/kubectl/cmd/util/BUILD | 4 -- pkg/kubectl/cmd/util/factory.go | 18 ------ pkg/kubectl/cmd/util/factory_client_access.go | 54 ---------------- pkg/kubectl/cmd/util/factory_test.go | 43 ------------- pkg/kubectl/polymorphichelpers/BUILD | 2 + pkg/kubectl/polymorphichelpers/interface.go | 14 +++++ .../mapbasedselectorforobject.go | 63 +++++++++++++++++++ .../polymorphichelpers/protocolsforobject.go | 63 +++++++++++++++++++ 9 files changed, 145 insertions(+), 121 deletions(-) create mode 100644 pkg/kubectl/polymorphichelpers/mapbasedselectorforobject.go create mode 100644 pkg/kubectl/polymorphichelpers/protocolsforobject.go diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index ba696073588..d36d47d736d 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -192,9 +192,10 @@ func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) e o.Builder = f.NewBuilder() o.CanBeExposed = polymorphichelpers.CanBeExposedFn o.ClientForMapping = f.ClientForMapping - o.MapBasedSelectorForObject = f.MapBasedSelectorForObject + o.MapBasedSelectorForObject = polymorphichelpers.MapBasedSelectorForObjectFn + o.ProtocolsForObject = polymorphichelpers.ProtocolsForObjectFn o.PortsForObject = polymorphichelpers.PortsForObjectFn - o.ProtocolsForObject = f.ProtocolsForObject + o.Mapper, err = f.ToRESTMapper() if err != nil { return err diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 4b83c6451f6..0fc82b7eaa2 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -17,7 +17,6 @@ go_library( deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", - "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/kubectl:go_default_library", "//pkg/kubectl/cmd/templates:go_default_library", @@ -69,15 +68,12 @@ go_test( "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", "//pkg/apis/core:go_default_library", - "//pkg/kubectl:go_default_library", - "//pkg/kubectl/genericclioptions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 04e64f65ffa..b89cdf070c2 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -22,7 +22,6 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" - 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -76,13 +75,6 @@ type ClientAccessFactory interface { // and which implements the common patterns for CLI interactions with generic resources. NewBuilder() *resource.Builder - // MapBasedSelectorForObject returns the map-based selector associated with the provided object. If a - // new set-based selector is provided, an error is returned if the selector cannot be converted to a - // map-based selector - MapBasedSelectorForObject(object runtime.Object) (string, error) - // ProtocolsForObject returns the mapping associated with the provided object - ProtocolsForObject(object runtime.Object) (map[string]string, error) - // SuggestedPodTemplateResources returns a list of resource types that declare a pod template SuggestedPodTemplateResources() []schema.GroupResource @@ -154,16 +146,6 @@ func makePortsString(ports []api.ServicePort, useNodePort bool) string { return strings.Join(pieces, ",") } -func getProtocols(spec api.PodSpec) map[string]string { - result := make(map[string]string) - for _, container := range spec.Containers { - for _, port := range container.Ports { - result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) - } - } - return result -} - // Extracts the protocols exposed by a service from the given service spec. func getServiceProtocols(spec api.ServiceSpec) map[string]string { result := make(map[string]string) diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index 0477aa45ad3..c31a1e107c8 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -40,8 +40,6 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" @@ -118,58 +116,6 @@ func (f *ring0Factory) RESTClient() (*restclient.RESTClient, error) { return restclient.RESTClientFor(clientConfig) } -func (f *ring0Factory) MapBasedSelectorForObject(object runtime.Object) (string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return kubectl.MakeLabels(t.Spec.Selector), nil - case *api.Pod: - if len(t.Labels) == 0 { - return "", fmt.Errorf("the pod has no labels and cannot be exposed") - } - return kubectl.MakeLabels(t.Labels), nil - case *api.Service: - if t.Spec.Selector == nil { - return "", fmt.Errorf("the service has no pod selector set") - } - return kubectl.MakeLabels(t.Spec.Selector), nil - case *extensions.Deployment: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. - if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) - } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil - case *extensions.ReplicaSet: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. 
- if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) - } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil - default: - return "", fmt.Errorf("cannot extract pod selector from %T", object) - } -} - -func (f *ring0Factory) ProtocolsForObject(object runtime.Object) (map[string]string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return getProtocols(t.Spec.Template.Spec), nil - case *api.Pod: - return getProtocols(t.Spec), nil - case *api.Service: - return getServiceProtocols(t.Spec), nil - case *extensions.Deployment: - return getProtocols(t.Spec.Template.Spec), nil - case *extensions.ReplicaSet: - return getProtocols(t.Spec.Template.Spec), nil - default: - return nil, fmt.Errorf("cannot extract protocols from %T", object) - } -} - func (f *ring0Factory) SuggestedPodTemplateResources() []schema.GroupResource { return []schema.GroupResource{ {Resource: "replicationcontroller"}, diff --git a/pkg/kubectl/cmd/util/factory_test.go b/pkg/kubectl/cmd/util/factory_test.go index 910a8b2da67..06d9ac87062 100644 --- a/pkg/kubectl/cmd/util/factory_test.go +++ b/pkg/kubectl/cmd/util/factory_test.go @@ -17,54 +17,11 @@ limitations under the License. package util import ( - "strings" "testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/genericclioptions" ) -func TestProtocolsForObject(t *testing.T) { - f := NewFactory(genericclioptions.NewTestConfigFlags()) - - pod := &api.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, - Spec: api.PodSpec{ - Containers: []api.Container{ - { - Ports: []api.ContainerPort{ - { - ContainerPort: 101, - Protocol: api.ProtocolTCP, - }, - { - ContainerPort: 102, - Protocol: api.ProtocolUDP, - }, - }, - }, - }, - }, - } - - expected := sets.NewString("101/TCP", "102/UDP") - protocolsMap, err := f.ProtocolsForObject(pod) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - protocolsString := kubectl.MakeProtocols(protocolsMap) - protocolsStrings := strings.Split(protocolsString, ",") - got := sets.NewString(protocolsStrings...) - - if !expected.Equal(got) { - t.Fatalf("Protocols mismatch! 
Expected %v, got %v", expected, got) - } -} - func TestMakePortsString(t *testing.T) { tests := []struct { ports []api.ServicePort diff --git a/pkg/kubectl/polymorphichelpers/BUILD b/pkg/kubectl/polymorphichelpers/BUILD index b50030a3c86..08132fceba2 100644 --- a/pkg/kubectl/polymorphichelpers/BUILD +++ b/pkg/kubectl/polymorphichelpers/BUILD @@ -10,9 +10,11 @@ go_library( "historyviewer.go", "interface.go", "logsforobject.go", + "mapbasedselectorforobject.go", "objectpauser.go", "objectresumer.go", "portsforobject.go", + "protocolsforobject.go", "rollbacker.go", "statusviewer.go", "updatepodspec.go", diff --git a/pkg/kubectl/polymorphichelpers/interface.go b/pkg/kubectl/polymorphichelpers/interface.go index e2ecab3c20f..06f40df60be 100644 --- a/pkg/kubectl/polymorphichelpers/interface.go +++ b/pkg/kubectl/polymorphichelpers/interface.go @@ -60,6 +60,20 @@ type UpdatePodSpecForObjectFunc func(obj runtime.Object, fn func(*v1.PodSpec) er // UpdatePodSpecForObjectFn gives a way to easily override the function for unit testing if needed var UpdatePodSpecForObjectFn UpdatePodSpecForObjectFunc = updatePodSpecForObject +// MapBasedSelectorForObjectFunc will call the provided function on the map-based selector for the object, +// return "" if object is not supported, or return an error. +type MapBasedSelectorForObjectFunc func(object runtime.Object) (string, error) + +// MapBasedSelectorForObjectFn gives a way to easily override the function for unit testing if needed +var MapBasedSelectorForObjectFn MapBasedSelectorForObjectFunc = mapBasedSelectorForObject + +// ProtocolsForObjectFunc will call the provided function on the protocols for the object, +// return nil-map if no protocols for the object, or return an error. +type ProtocolsForObjectFunc func(object runtime.Object) (map[string]string, error) + +// ProtocolsForObjectFn gives a way to easily override the function for unit testing if needed +var ProtocolsForObjectFn ProtocolsForObjectFunc = protocolsForObject + // PortsForObjectFunc returns the ports associated with the provided object type PortsForObjectFunc func(object runtime.Object) ([]string, error) diff --git a/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject.go b/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject.go new file mode 100644 index 00000000000..dd12941babc --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package polymorphichelpers + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/kubectl" +) + +// mapBasedSelectorForObject returns the map-based selector associated with the provided object. 
If a +// new set-based selector is provided, an error is returned if the selector cannot be converted to a +// map-based selector +func mapBasedSelectorForObject(object runtime.Object) (string, error) { + // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) + switch t := object.(type) { + case *api.ReplicationController: + return kubectl.MakeLabels(t.Spec.Selector), nil + case *api.Pod: + if len(t.Labels) == 0 { + return "", fmt.Errorf("the pod has no labels and cannot be exposed") + } + return kubectl.MakeLabels(t.Labels), nil + case *api.Service: + if t.Spec.Selector == nil { + return "", fmt.Errorf("the service has no pod selector set") + } + return kubectl.MakeLabels(t.Spec.Selector), nil + case *extensions.Deployment: + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + case *extensions.ReplicaSet: + // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals + // operator, DoubleEquals operator and In operator with only one element in the set. + if len(t.Spec.Selector.MatchExpressions) > 0 { + return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) + } + return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil + default: + return "", fmt.Errorf("cannot extract pod selector from %T", object) + } +} diff --git a/pkg/kubectl/polymorphichelpers/protocolsforobject.go b/pkg/kubectl/polymorphichelpers/protocolsforobject.go new file mode 100644 index 00000000000..de6d23d2a8e --- /dev/null +++ b/pkg/kubectl/polymorphichelpers/protocolsforobject.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package polymorphichelpers + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/extensions" +) + +func protocolsForObject(object runtime.Object) (map[string]string, error) { + // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) + switch t := object.(type) { + case *api.ReplicationController: + return getProtocols(t.Spec.Template.Spec), nil + case *api.Pod: + return getProtocols(t.Spec), nil + case *api.Service: + return getServiceProtocols(t.Spec), nil + case *extensions.Deployment: + return getProtocols(t.Spec.Template.Spec), nil + case *extensions.ReplicaSet: + return getProtocols(t.Spec.Template.Spec), nil + default: + return nil, fmt.Errorf("cannot extract protocols from %T", object) + } +} + +func getProtocols(spec api.PodSpec) map[string]string { + result := make(map[string]string) + for _, container := range spec.Containers { + for _, port := range container.Ports { + result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) + } + } + return result +} + +// Extracts the protocols exposed by a service from the given service spec. +func getServiceProtocols(spec api.ServiceSpec) map[string]string { + result := make(map[string]string) + for _, servicePort := range spec.Ports { + result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) + } + return result +} From 00dc6b5ed87e2b5d935ad1d0fe8d7976f27e535d Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 23 May 2018 14:06:32 +0800 Subject: [PATCH 154/307] Check LoadBalancingRulePropertiesFormat for azure load balancers --- .../providers/azure/azure_loadbalancer.go | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 22c6f71a7cb..520a8faef17 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "math" + "reflect" "strconv" "strings" @@ -1255,13 +1256,30 @@ func findProbe(probes []network.Probe, probe network.Probe) bool { func findRule(rules []network.LoadBalancingRule, rule network.LoadBalancingRule) bool { for _, existingRule := range rules { - if strings.EqualFold(*existingRule.Name, *rule.Name) { + if strings.EqualFold(*existingRule.Name, *rule.Name) && + equalLoadBalancingRulePropertiesFormat(existingRule.LoadBalancingRulePropertiesFormat, rule.LoadBalancingRulePropertiesFormat) { return true } } return false } +// equalLoadBalancingRulePropertiesFormat checks whether the provided LoadBalancingRulePropertiesFormat are equal. +// Note: only fields used in reconcileLoadBalancer are considered. +func equalLoadBalancingRulePropertiesFormat(s, t *network.LoadBalancingRulePropertiesFormat) bool { + if s == nil || t == nil { + return false + } + + return reflect.DeepEqual(s.Protocol, t.Protocol) && + reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) && + reflect.DeepEqual(s.BackendAddressPool, t.BackendAddressPool) && + reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) && + reflect.DeepEqual(s.FrontendPort, t.FrontendPort) && + reflect.DeepEqual(s.BackendPort, t.BackendPort) && + reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP) +} + // This compares rule's Name, Protocol, SourcePortRange, DestinationPortRange, SourceAddressPrefix, Access, and Direction. 
// Note that it compares rule's DestinationAddressPrefix only when it's not consolidated rule as such rule does not have DestinationAddressPrefix defined. // We intentionally do not compare DestinationAddressPrefixes in consolidated case because reconcileSecurityRule has to consider the two rules equal, From b9b6a9e98d7070beddf18f1a6a36bc2b5016f355 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 23 May 2018 13:49:09 +0800 Subject: [PATCH 155/307] Add verbose logs for azure cloud provider --- .../providers/azure/azure_backoff.go | 14 ++-- .../providers/azure/azure_instances.go | 2 +- .../providers/azure/azure_loadbalancer.go | 69 ++++++++++--------- .../providers/azure/azure_routes.go | 32 ++++----- .../providers/azure/azure_standard.go | 12 ++-- 5 files changed, 66 insertions(+), 63 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 8bf55f63fd2..6272fd67bfd 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -55,10 +55,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua return true, cloudprovider.InstanceNotFound } if retryErr != nil { - glog.Errorf("backoff: failure, will retry,err=%v", retryErr) + glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr) return false, nil } - glog.V(2).Info("backoff: success") + glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name) return true, nil }) if err == wait.ErrWaitTimeout { @@ -99,10 +99,10 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, var retryErr error ip, publicIP, retryErr = az.getIPForMachine(name) if retryErr != nil { - glog.Errorf("backoff: failure, will retry,err=%v", retryErr) + glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr) return false, nil } - glog.V(2).Info("backoff: success") + glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name) return true, nil }) return ip, publicIP, err @@ -304,11 +304,11 @@ func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName st // A wait.ConditionFunc function to deal with common HTTP backoff response conditions func processRetryResponse(resp autorest.Response, err error) (bool, error) { if isSuccessHTTPResponse(resp) { - glog.V(2).Infof("backoff: success, HTTP response=%d", resp.StatusCode) + glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode) return true, nil } if shouldRetryAPIRequest(resp, err) { - glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err) + glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err) // suppress the error object so that backoff process continues return false, nil } @@ -361,7 +361,7 @@ func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) { } if shouldRetryHTTPRequest(resp, err) { - glog.Errorf("backoff: failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err) + glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err) // suppress the error object so that backoff process continues return false, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_instances.go b/pkg/cloudprovider/providers/azure/azure_instances.go index 9f5de04c023..804553c3268 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/pkg/cloudprovider/providers/azure/azure_instances.go @@ -33,7 +33,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) { ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName) if err != nil { - glog.V(2).Infof("NodeAddresses(%s) abort backoff", nodeName) + glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err) return nil, err } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 520a8faef17..dcf398ae9a4 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -135,7 +135,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser if lbStatus != nil && len(lbStatus.Ingress) > 0 { serviceIP = &lbStatus.Ingress[0].IP } - glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancer for %s with IP %s, wantLb = true", service.Name, logSafe(serviceIP)) + glog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP)) if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil { return nil, err } @@ -165,7 +165,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri return err } - glog.V(10).Infof("Calling reconcileSecurityGroup from EnsureLoadBalancerDeleted for %s with IP %s, wantLb = false", service.Name, serviceIPToCleanup) + glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup) if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil { return err } @@ -262,7 +262,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(3).Infof("selectLoadBalancer(%s): isInternal(%s) - start", serviceName, isInternal) + glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%s) - start", serviceName, isInternal) vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes) if err != nil { glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) @@ -318,10 +318,11 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) { if lb == nil { - glog.V(10).Info("getServiceLoadBalancerStatus lb is nil") + glog.V(10).Info("getServiceLoadBalancerStatus: lb is nil") return nil, nil } if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil { + glog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil") return nil, nil } isInternal := requiresInternalLoadBalancer(service) @@ -353,6 +354,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L } } + glog.V(2).Infof("getServiceLoadBalancerStatus gets 
ingress IP %q from frontendIPConfiguration %q for service %q", *lbIP, lbFrontendIPConfigName, serviceName) return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: *lbIP}}}, nil } } @@ -446,7 +448,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } } - glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name) + glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name) glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip) if err != nil { @@ -472,13 +474,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(2).Infof("reconcileLoadBalancer(%s) - wantLb(%t): started", serviceName, wantLb) + glog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb) lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb) if err != nil { + glog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err) return nil, err } lbName := *lb.Name - glog.V(2).Infof("reconcileLoadBalancer(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) + glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) lbFrontendIPConfigName := getFrontendIPConfigName(service, subnet(service)) lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName) lbBackendPoolName := getBackendPoolName(clusterName) @@ -496,18 +499,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, foundBackendPool := false for _, bp := range newBackendPools { if strings.EqualFold(*bp.Name, lbBackendPoolName) { - glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. 
not adding anything", serviceName, wantLb) foundBackendPool = true break } else { - glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name) } } if !foundBackendPool { newBackendPools = append(newBackendPools, network.BackendAddressPool{ Name: to.StringPtr(lbBackendPoolName), }) - glog.V(10).Infof("reconcile(%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb) dirtyLb = true lb.BackendAddressPools = &newBackendPools @@ -525,7 +528,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for i := len(newConfigs) - 1; i >= 0; i-- { config := newConfigs[i] if serviceOwnsFrontendIP(config, service) { - glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName) + glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName) newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) dirtyConfigs = true } @@ -535,7 +538,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for i := len(newConfigs) - 1; i >= 0; i-- { config := newConfigs[i] if serviceOwnsFrontendIP(config, service) && !strings.EqualFold(*config.Name, lbFrontendIPConfigName) { - glog.V(3).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) + glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) dirtyConfigs = true } @@ -599,7 +602,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, Name: to.StringPtr(lbFrontendIPConfigName), FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties, }) - glog.V(10).Infof("reconcile(%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName) dirtyConfigs = true } } @@ -700,15 +703,15 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for i := len(updatedProbes) - 1; i >= 0; i-- { existingProbe := updatedProbes[i] if serviceOwnsRule(service, *existingProbe.Name) { - glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name) keepProbe := false if findProbe(expectedProbes, existingProbe) { - glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name) keepProbe = true } if !keepProbe { updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...) 
- glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name) dirtyProbes = true } } @@ -717,11 +720,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for _, expectedProbe := range expectedProbes { foundProbe := false if findProbe(updatedProbes, expectedProbe) { - glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name) foundProbe = true } if !foundProbe { - glog.V(10).Infof("reconcile(%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name) updatedProbes = append(updatedProbes, expectedProbe) dirtyProbes = true } @@ -742,13 +745,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, existingRule := updatedRules[i] if serviceOwnsRule(service, *existingRule.Name) { keepRule := false - glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) if findRule(expectedRules, existingRule) { - glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) keepRule = true } if !keepRule { - glog.V(3).Infof("reconcile(%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) + glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) updatedRules = append(updatedRules[:i], updatedRules[i+1:]...) 
dirtyRules = true } @@ -758,11 +761,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for _, expectedRule := range expectedRules { foundRule := false if findRule(updatedRules, expectedRule) { - glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) foundRule = true } if !foundRule { - glog.V(10).Infof("reconcile(%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name) + glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name) updatedRules = append(updatedRules, expectedRule) dirtyRules = true } @@ -779,7 +782,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 { // When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself, // because an Azure load balancer cannot have an empty FrontendIPConfigurations collection - glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) + glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) @@ -792,18 +795,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) // Remove the LB. - glog.V(10).Infof("az.DeleteLBWithRetry(%q): start", lbName) + glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName) err = az.DeleteLBWithRetry(lbName) if err != nil { - glog.V(2).Infof("delete(%s) abort backoff: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) + glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) return nil, err } glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName) } else { - glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName) + glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) err := az.CreateOrUpdateLBWithRetry(*lb) if err != nil { - glog.V(2).Infof("ensure(%s) abort backoff: lb(%s) - updating", serviceName, lbName) + glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName) return nil, err } @@ -811,7 +814,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Refresh updated lb which will be used later in other places. 
newLB, exist, err := az.getAzureLoadBalancer(lbName) if err != nil { - glog.V(2).Infof("getAzureLoadBalancer(%s) failed: %v", lbName, err) + glog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err) return nil, err } if !exist { @@ -831,7 +834,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, } } - glog.V(2).Infof("ensure(%s): lb(%s) finished", serviceName, lbName) + glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName) return lb, nil } @@ -1025,7 +1028,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, if dirtySg { sg.SecurityRules = &updatedRules - glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name) + glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name) glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) err := az.CreateOrUpdateSGWithRetry(sg) if err != nil { @@ -1213,7 +1216,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want // This is the only case we should preserve the // Public ip resource with match service tag } else { - glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName) + glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName) glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName) if err != nil { @@ -1227,7 +1230,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want if err != nil { return nil, err } - glog.V(2).Infof("ensure(%s): pip(%s) - finished", serviceName, pipName) + glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName) } } diff --git a/pkg/cloudprovider/providers/azure/azure_routes.go b/pkg/cloudprovider/providers/azure/azure_routes.go index 0230ed6f5dd..7f627a98a1d 100644 --- a/pkg/cloudprovider/providers/azure/azure_routes.go +++ b/pkg/cloudprovider/providers/azure/azure_routes.go @@ -30,7 +30,7 @@ import ( // ListRoutes lists all managed routes that belong to the specified clusterName func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { - glog.V(10).Infof("list: START clusterName=%q", clusterName) + glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) routeTable, existsRouteTable, err := az.getRouteTable() return processRoutes(routeTable, existsRouteTable, err) } @@ -50,7 +50,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl for i, route := range *routeTable.Routes { instance := mapRouteNameToNodeName(*route.Name) cidr := *route.AddressPrefix - glog.V(10).Infof("list: * instance=%q, cidr=%q", instance, cidr) + glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr) kubeRoutes[i] = &cloudprovider.Route{ Name: *route.Name, @@ -60,13 +60,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl } } - glog.V(10).Info("list: FINISH") + glog.V(10).Info("ListRoutes: FINISH") return kubeRoutes, nil } func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error { if _, existsRouteTable, err := az.getRouteTable(); err != nil { - glog.V(2).Infof("create error: couldn't get routetable. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return err } else if existsRouteTable { return nil @@ -81,17 +81,17 @@ func (az *Cloud) createRouteTable() error { RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{}, } - glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName) + glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName) ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable) glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName) + glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName) retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable) if retryErr != nil { err = retryErr - glog.V(2).Infof("create abort backoff: creating routetable. routeTableName=%q", az.RouteTableName) + glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName) } } if err != nil { @@ -107,7 +107,7 @@ func (az *Cloud) createRouteTable() error { // route.Name will be ignored, although the cloud-provider may use nameHint // to create a more user-meaningful name. func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error { - glog.V(2).Infof("create: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil { return err } @@ -126,31 +126,31 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s }, } - glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route) glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("create backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.CreateOrUpdateRouteWithRetry(route) if retryErr != nil { err = retryErr - glog.V(2).Infof("create abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) } } if err != nil { return err } - glog.V(2).Infof("create: route created. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return nil } // DeleteRoute deletes the specified managed route // Route should be as returned by ListRoutes func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error { - glog.V(2).Infof("delete: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) ctx, cancel := getContextWithCancel() defer cancel() @@ -159,18 +159,18 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("delete backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.DeleteRouteWithRetry(routeName) if retryErr != nil { err = retryErr - glog.V(2).Infof("delete abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) } } if err != nil { return err } - glog.V(2).Infof("delete: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + glog.V(2).Infof("DeleteRoute: route deleted. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return nil } diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go index e30579febc8..0aa4fe7479f 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_standard.go @@ -173,7 +173,7 @@ func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.Transpor securityProto = network.SecurityRuleProtocolUDP return &transportProto, &securityProto, nil, nil default: - return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers") + return &transportProto, &securityProto, &probeProto, fmt.Errorf("only TCP and UDP are supported for Azure LoadBalancers") } } @@ -285,7 +285,7 @@ outer: return smallest, nil } - return -1, fmt.Errorf("SecurityGroup priorities are exhausted") + return -1, fmt.Errorf("securityGroup priorities are exhausted") } func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) { @@ -372,10 +372,10 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) } if err != nil { if as.CloudProviderBackoff { - glog.V(2).Infof("InstanceID(%s) backing off", name) + glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name) machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name)) if err != nil { - glog.V(2).Infof("InstanceID(%s) abort backoff", name) + glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name) return "", err } } else { @@ -400,7 +400,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) { machine, err := as.getVirtualMachine(types.NodeName(name)) if err != nil { - glog.Errorf("error: as.GetInstanceTypeByNodeName(%s), as.getVirtualMachine(%s) err=%v", name, name, err) + glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err) return "", err } @@ -437,7 +437,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) ipConfig, err := getPrimaryIPConfig(nic) if err != nil { - glog.Errorf("error: as.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", name, nic, err) + glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err) return "", "", err } From a50ac290292991d69f704527a1279c09842d56c8 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 23 May 2018 14:06:49 +0800 Subject: [PATCH 156/307] Add unit tests for findRule() --- .../azure/azure_loadbalancer_test.go | 113 ++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go index 45c3ddfc682..ee0ed967de2 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go @@ -97,3 +97,116 @@ func TestFindProbe(t *testing.T) { assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) } } + +func TestFindRule(t *testing.T) { + tests := []struct { + msg string + existingRule []network.LoadBalancingRule + curRule network.LoadBalancingRule + expected bool + }{ + { + msg: "empty existing rules should return false", + expected: false, + }, + { + msg: "rule names unmatch should return false", + existingRule: 
[]network.LoadBalancingRule{ + { + Name: to.StringPtr("httpProbe1"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + FrontendPort: to.Int32Ptr(1), + }, + }, + }, + curRule: network.LoadBalancingRule{ + Name: to.StringPtr("httpProbe2"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + FrontendPort: to.Int32Ptr(1), + }, + }, + expected: false, + }, + { + msg: "rule names match while frontend ports unmatch should return false", + existingRule: []network.LoadBalancingRule{ + { + Name: to.StringPtr("httpProbe"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + FrontendPort: to.Int32Ptr(1), + }, + }, + }, + curRule: network.LoadBalancingRule{ + Name: to.StringPtr("httpProbe"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + FrontendPort: to.Int32Ptr(2), + }, + }, + expected: false, + }, + { + msg: "rule names match while backend ports unmatch should return false", + existingRule: []network.LoadBalancingRule{ + { + Name: to.StringPtr("httpProbe"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + BackendPort: to.Int32Ptr(1), + }, + }, + }, + curRule: network.LoadBalancingRule{ + Name: to.StringPtr("httpProbe"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + BackendPort: to.Int32Ptr(2), + }, + }, + expected: false, + }, + { + msg: "rule names match while LoadDistribution unmatch should return false", + existingRule: []network.LoadBalancingRule{ + { + Name: to.StringPtr("probe1"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + LoadDistribution: network.Default, + }, + }, + }, + curRule: network.LoadBalancingRule{ + Name: to.StringPtr("probe2"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + LoadDistribution: network.SourceIP, + }, + }, + expected: false, + }, + { + msg: "both rule names and LoadBalancingRulePropertiesFormats match should return true", + existingRule: []network.LoadBalancingRule{ + { + Name: to.StringPtr("matchName"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + BackendPort: to.Int32Ptr(2), + FrontendPort: to.Int32Ptr(2), + LoadDistribution: network.SourceIP, + }, + }, + }, + curRule: network.LoadBalancingRule{ + Name: to.StringPtr("matchName"), + LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ + BackendPort: to.Int32Ptr(2), + FrontendPort: to.Int32Ptr(2), + LoadDistribution: network.SourceIP, + }, + }, + expected: true, + }, + } + + for i, test := range tests { + findResult := findRule(test.existingRule, test.curRule) + assert.Equal(t, test.expected, findResult, fmt.Sprintf("TestCase[%d]: %s", i, test.msg)) + } +} From f66d4e183fcfd62e0e3123a007a2a62ab9c980c5 Mon Sep 17 00:00:00 2001 From: Rene Treffer Date: Sun, 29 Apr 2018 13:54:19 +0200 Subject: [PATCH 157/307] Always masquerade node-originating traffic with a service VIP source ip --- pkg/proxy/ipvs/proxier.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index b5a4dd95d1e..9142cbdd38c 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -1218,17 +1218,25 @@ func (proxier *Proxier) writeIptablesRules() { "-A", string(kubeServicesChain), "-m", "comment", "--comment", proxier.ipsetList[kubeClusterIPSet].getComment(), "-m", "set", "--match-set", kubeClusterIPSet, - "dst,dst", ) if 
proxier.masqueradeAll { - writeLine(proxier.natRules, append(args, "-j", string(KubeMarkMasqChain))...) + writeLine(proxier.natRules, append(args, "dst,dst", "-j", string(KubeMarkMasqChain))...) } else if len(proxier.clusterCIDR) > 0 { // This masquerades off-cluster traffic to a service VIP. The idea // is that you can establish a static route for your Service range, // routing to any node, and that node will bridge into the Service // for you. Since that might bounce off-node, we masquerade here. // If/when we support "Local" policy for VIPs, we should update this. - writeLine(proxier.natRules, append(args, "! -s", proxier.clusterCIDR, "-j", string(KubeMarkMasqChain))...) + writeLine(proxier.natRules, append(args, "dst,dst", "! -s", proxier.clusterCIDR, "-j", string(KubeMarkMasqChain))...) + } else { + // Masquerade all OUTPUT traffic coming from a service ip. + // The kube dummy interface has all service VIPs assigned which + // results in the service VIP being picked as the source IP to reach + // a VIP. This leads to a connection from VIP: to + // VIP:. + // Always masquerading OUTPUT (node-originating) traffic with a VIP + // source ip and service port destination fixes the outgoing connections. + writeLine(proxier.natRules, append(args, "src,dst", "-j", string(KubeMarkMasqChain))...) } } From 5caf141650dd6aa9e146aca92226f5a526415d30 Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Thu, 24 May 2018 16:33:12 +0800 Subject: [PATCH 158/307] resourcequota return StatusError when timeout --- plugin/pkg/admission/resourcequota/BUILD | 1 + plugin/pkg/admission/resourcequota/controller.go | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/plugin/pkg/admission/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/BUILD index fea03de5446..66a1b10f63b 100644 --- a/plugin/pkg/admission/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/BUILD @@ -32,6 +32,7 @@ go_library( "//plugin/pkg/admission/resourcequota/apis/resourcequota/validation:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/controller.go b/plugin/pkg/admission/resourcequota/controller.go index 0f245b7e3da..0f254365880 100644 --- a/plugin/pkg/admission/resourcequota/controller.go +++ b/plugin/pkg/admission/resourcequota/controller.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -531,7 +532,7 @@ func (e *quotaEvaluator) Evaluate(a admission.Attributes) error { evaluator := e.registry.Get(gr) if evaluator == nil { // create an object count evaluator if no evaluator previously registered - // note, we do not need aggregate usage here, so we pass a nil infomer func + // note, we do not need aggregate usage here, so we pass a nil informer func evaluator = generic.NewObjectCountEvaluator(false, gr, nil, "") e.registry.Add(evaluator) glog.Infof("quota admission added evaluator for: %s", gr) @@ -549,7 +550,7 @@ func (e *quotaEvaluator) Evaluate(a admission.Attributes) error { select { case <-waiter.finished: case <-time.After(10 * 
time.Second): - return fmt.Errorf("timeout") + return apierrors.NewInternalError(fmt.Errorf("resource quota evaluates timeout")) } return waiter.result From 6f76247eb6f834d9fb8915741721069a845dc5ab Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Thu, 24 May 2018 16:57:09 +0800 Subject: [PATCH 159/307] replace `__internal` with runtime.APIVersionInternal --- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 2 +- .../serializer/versioning/versioning_test.go | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/scheme.go b/staging/src/k8s.io/apimachinery/pkg/runtime/scheme.go index 450011f15f5..59163d77715 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -122,7 +122,7 @@ func (s *Scheme) nameFunc(t reflect.Type) string { for _, gvk := range gvks { internalGV := gvk.GroupVersion() - internalGV.Version = "__internal" // this is hacky and maybe should be passed in + internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in internalGVK := internalGV.WithKind(gvk.Kind) if internalType, exists := s.gvkToType[internalGVK]; exists { diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_test.go index 43c24265788..f79b2a7cbd4 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning_test.go @@ -129,25 +129,25 @@ func TestDecode(t *testing.T) { }{ { serializer: &mockSerializer{actual: gvk1}, - convertor: &checkConvertor{groupVersion: schema.GroupVersion{Group: "other", Version: "__internal"}}, + convertor: &checkConvertor{groupVersion: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}}, expectedGVK: gvk1, - decodes: schema.GroupVersion{Group: "other", Version: "__internal"}, + decodes: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}, }, { serializer: &mockSerializer{actual: gvk1, obj: decodable1}, - convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "other", Version: "__internal"}}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}}, expectedGVK: gvk1, sameObject: decodable2, - decodes: schema.GroupVersion{Group: "other", Version: "__internal"}, + decodes: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}, }, // defaultGVK.Group is allowed to force a conversion to the destination group { serializer: &mockSerializer{actual: gvk1, obj: decodable1}, defaultGVK: &schema.GroupVersionKind{Group: "force"}, - convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "force", Version: "__internal"}}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "force", Version: runtime.APIVersionInternal}}, expectedGVK: gvk1, sameObject: decodable2, - decodes: schema.GroupVersion{Group: "force", Version: "__internal"}, + decodes: schema.GroupVersion{Group: "force", Version: runtime.APIVersionInternal}, }, // uses direct conversion for into when objects differ { @@ -184,10 +184,10 @@ func TestDecode(t *testing.T) { into: &runtime.VersionedObjects{Objects: []runtime.Object{}}, serializer: 
&mockSerializer{actual: gvk1, obj: decodable1}, - convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "other", Version: "__internal"}}, + convertor: &checkConvertor{in: decodable1, obj: decodable2, groupVersion: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}}, expectedGVK: gvk1, expectedObject: &runtime.VersionedObjects{Objects: []runtime.Object{decodable1, decodable2}}, - decodes: schema.GroupVersion{Group: "other", Version: "__internal"}, + decodes: schema.GroupVersion{Group: "other", Version: runtime.APIVersionInternal}, }, // decode into the same version as the serialized object From 4471d0321fd2d7fbf18cfa0419711c5ee04f9680 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Thu, 24 May 2018 09:37:20 +0200 Subject: [PATCH 160/307] apimachinery: remove unused UnstructuredObjectConverter --- .../pkg/apis/meta/v1/unstructured/helpers.go | 42 ------------------- 1 file changed, 42 deletions(-) diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 60f0573623a..fc138e75aa9 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -18,7 +18,6 @@ package unstructured import ( gojson "encoding/json" - "errors" "fmt" "io" "strings" @@ -450,44 +449,3 @@ func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { } return err } - -// UnstructuredObjectConverter is an ObjectConverter for use with -// Unstructured objects. Since it has no schema or type information, -// it will only succeed for no-op conversions. This is provided as a -// sane implementation for APIs that require an object converter. -type UnstructuredObjectConverter struct{} - -func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { - unstructIn, ok := in.(*Unstructured) - if !ok { - return fmt.Errorf("input type %T in not valid for unstructured conversion", in) - } - - unstructOut, ok := out.(*Unstructured) - if !ok { - return fmt.Errorf("output type %T in not valid for unstructured conversion", out) - } - - // maybe deep copy the map? It is documented in the - // ObjectConverter interface that this function is not - // guaranteed to not mutate the input. Or maybe set the input - // object to nil. - unstructOut.Object = unstructIn.Object - return nil -} - -func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { - if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { - gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) - if !ok { - // TODO: should this be a typed error? 
- return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) - } - in.GetObjectKind().SetGroupVersionKind(gvk) - } - return in, nil -} - -func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return "", "", errors.New("unstructured cannot convert field labels") -} From 5a06ad2d0fb431eade61bbd10d04ff0dd7823944 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Thu, 24 May 2018 14:43:44 +0800 Subject: [PATCH 161/307] Add reason message logs for non-exist resources --- .../providers/azure/azure_vmss_cache.go | 6 ++-- .../providers/azure/azure_wrap.go | 31 ++++++++++++------- .../providers/azure/azure_wrap_test.go | 2 +- 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_cache.go b/pkg/cloudprovider/providers/azure/azure_vmss_cache.go index e224845f30c..5542b7ca0b8 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss_cache.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss_cache.go @@ -67,12 +67,13 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) { ctx, cancel := getContextWithCancel() defer cancel() result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key) - exists, realErr := checkResourceExistsFromError(err) + exists, message, realErr := checkResourceExistsFromError(err) if realErr != nil { return nil, realErr } if !exists { + glog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message) return nil, nil } @@ -147,12 +148,13 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) { ctx, cancel := getContextWithCancel() defer cancel() result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, ss.ResourceGroup, ssName, instanceID) - exists, realErr := checkResourceExistsFromError(err) + exists, message, realErr := checkResourceExistsFromError(err) if realErr != nil { return nil, realErr } if !exists { + glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) return nil, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 2c30e287220..53c17871e88 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -25,6 +25,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/cloudprovider" @@ -40,18 +41,18 @@ var ( // checkExistsFromError inspects an error and returns a true if err is nil, // false if error is an autorest.Error with StatusCode=404 and will return the // error back if error is another status code or another type of error. 
-func checkResourceExistsFromError(err error) (bool, error) { +func checkResourceExistsFromError(err error) (bool, string, error) { if err == nil { - return true, nil + return true, "", nil } v, ok := err.(autorest.DetailedError) if !ok { - return false, err + return false, "", err } if v.StatusCode == http.StatusNotFound { - return false, nil + return false, err.Error(), nil } - return false, v + return false, "", v } // If it is StatusNotFound return nil, @@ -104,15 +105,17 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi } var realErr error + var message string ctx, cancel := getContextWithCancel() defer cancel() pip, err = az.PublicIPAddressesClient.Get(ctx, resourceGroup, pipName, "") - exists, realErr = checkResourceExistsFromError(err) + exists, message, realErr = checkResourceExistsFromError(err) if realErr != nil { return pip, false, realErr } if !exists { + glog.V(2).Infof("Public IP %q not found with message: %q", pipName, message) return pip, false, nil } @@ -121,6 +124,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet network.Subnet, exists bool, err error) { var realErr error + var message string var rg string if len(az.VnetResourceGroup) > 0 { @@ -132,12 +136,13 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet ctx, cancel := getContextWithCancel() defer cancel() subnet, err = az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "") - exists, realErr = checkResourceExistsFromError(err) + exists, message, realErr = checkResourceExistsFromError(err) if realErr != nil { return subnet, false, realErr } if !exists { + glog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message) return subnet, false, nil } @@ -181,12 +186,13 @@ func (az *Cloud) newVMCache() (*timedCache, error) { ctx, cancel := getContextWithCancel() defer cancel() vm, err := az.VirtualMachinesClient.Get(ctx, az.ResourceGroup, key, compute.InstanceView) - exists, realErr := checkResourceExistsFromError(err) + exists, message, realErr := checkResourceExistsFromError(err) if realErr != nil { return nil, realErr } if !exists { + glog.V(2).Infof("Virtual machine %q not found with message: %q", key, message) return nil, nil } @@ -202,12 +208,13 @@ func (az *Cloud) newLBCache() (*timedCache, error) { defer cancel() lb, err := az.LoadBalancerClient.Get(ctx, az.ResourceGroup, key, "") - exists, realErr := checkResourceExistsFromError(err) + exists, message, realErr := checkResourceExistsFromError(err) if realErr != nil { return nil, realErr } if !exists { + glog.V(2).Infof("Load balancer %q not found with message: %q", key, message) return nil, nil } @@ -222,12 +229,13 @@ func (az *Cloud) newNSGCache() (*timedCache, error) { ctx, cancel := getContextWithCancel() defer cancel() nsg, err := az.SecurityGroupsClient.Get(ctx, az.ResourceGroup, key, "") - exists, realErr := checkResourceExistsFromError(err) + exists, message, realErr := checkResourceExistsFromError(err) if realErr != nil { return nil, realErr } if !exists { + glog.V(2).Infof("Security group %q not found with message: %q", key, message) return nil, nil } @@ -242,12 +250,13 @@ func (az *Cloud) newRouteTableCache() (*timedCache, error) { ctx, cancel := getContextWithCancel() defer cancel() rt, err := az.RouteTablesClient.Get(ctx, az.ResourceGroup, key, "") - exists, realErr := checkResourceExistsFromError(err) + exists, message, realErr := 
checkResourceExistsFromError(err) if realErr != nil { return nil, realErr } if !exists { + glog.V(2).Infof("Route table %q not found with message: %q", key, message) return nil, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_wrap_test.go b/pkg/cloudprovider/providers/azure/azure_wrap_test.go index 380194ba9c3..5ab090c2a47 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap_test.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap_test.go @@ -42,7 +42,7 @@ func TestExtractNotFound(t *testing.T) { } for _, test := range tests { - exists, err := checkResourceExistsFromError(test.err) + exists, _, err := checkResourceExistsFromError(test.err) if test.exists != exists { t.Errorf("expected: %v, saw: %v", test.exists, exists) } From 91d403f3843e34668ca0f31c5aa569376f1a64bd Mon Sep 17 00:00:00 2001 From: lichuqiang Date: Mon, 9 Apr 2018 11:14:59 +0800 Subject: [PATCH 162/307] cache update for dynamic provisioning --- .../scheduler_assume_cache.go | 31 +++ .../scheduler_assume_cache_test.go | 200 ++++++++++++++++-- .../scheduler_binder_cache.go | 93 ++++++-- .../scheduler_binder_cache_test.go | 72 +++++-- 4 files changed, 345 insertions(+), 51 deletions(-) diff --git a/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go b/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go index 3b2352beac2..b04b402bff4 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go +++ b/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go @@ -371,3 +371,34 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume } return pvs } + +// PVCAssumeCache is a AssumeCache for PersistentVolumeClaim objects +type PVCAssumeCache interface { + AssumeCache + + // GetPVC returns the PVC from the cache with the same + // namespace and the same name of the specified pod. 
+ // pvcKey is the result of MetaNamespaceKeyFunc on PVC obj + GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) +} + +type pvcAssumeCache struct { + *assumeCache +} + +func NewPVCAssumeCache(informer cache.SharedIndexInformer) PVCAssumeCache { + return &pvcAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolumeClaim", "namespace", cache.MetaNamespaceIndexFunc)} +} + +func (c *pvcAssumeCache) GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) { + obj, err := c.Get(pvcKey) + if err != nil { + return nil, err + } + + pvc, ok := obj.(*v1.PersistentVolumeClaim) + if !ok { + return nil, &errWrongType{"v1.PersistentVolumeClaim", obj} + } + return pvc, nil +} diff --git a/pkg/controller/volume/persistentvolume/scheduler_assume_cache_test.go b/pkg/controller/volume/persistentvolume/scheduler_assume_cache_test.go index 467daffe747..c6c0f1f0ccd 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_assume_cache_test.go +++ b/pkg/controller/volume/persistentvolume/scheduler_assume_cache_test.go @@ -36,6 +36,33 @@ func makePV(name, version, storageClass string) *v1.PersistentVolume { } } +func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) { + pvList := cache.ListPVs(storageClassName) + if len(pvList) != len(expectedPVs) { + t.Errorf("ListPVs() returned %v PVs, expected %v", len(pvList), len(expectedPVs)) + } + for _, pv := range pvList { + expectedPV, ok := expectedPVs[pv.Name] + if !ok { + t.Errorf("ListPVs() returned unexpected PV %q", pv.Name) + } + if expectedPV != pv { + t.Errorf("ListPVs() returned PV %p, expected %p", pv, expectedPV) + } + } +} + +func verifyPV(cache PVAssumeCache, name string, expectedPV *v1.PersistentVolume) error { + pv, err := cache.GetPV(name) + if err != nil { + return err + } + if pv != expectedPV { + return fmt.Errorf("GetPV() returned %p, expected %p", pv, expectedPV) + } + return nil +} + func TestAssumePV(t *testing.T) { scenarios := map[string]struct { oldPV *v1.PersistentVolume @@ -276,29 +303,170 @@ func TestAssumeUpdatePVCache(t *testing.T) { } } -func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) { - pvList := cache.ListPVs(storageClassName) - if len(pvList) != len(expectedPVs) { - t.Errorf("ListPVs() returned %v PVs, expected %v", len(pvList), len(expectedPVs)) +func makeClaim(name, version, namespace string) *v1.PersistentVolumeClaim { + return &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + ResourceVersion: version, + Annotations: map[string]string{}, + }, } - for _, pv := range pvList { - expectedPV, ok := expectedPVs[pv.Name] +} + +func verifyPVC(cache PVCAssumeCache, pvcKey string, expectedPVC *v1.PersistentVolumeClaim) error { + pvc, err := cache.GetPVC(pvcKey) + if err != nil { + return err + } + if pvc != expectedPVC { + return fmt.Errorf("GetPVC() returned %p, expected %p", pvc, expectedPVC) + } + return nil +} + +func TestAssumePVC(t *testing.T) { + scenarios := map[string]struct { + oldPVC *v1.PersistentVolumeClaim + newPVC *v1.PersistentVolumeClaim + shouldSucceed bool + }{ + "success-same-version": { + oldPVC: makeClaim("pvc1", "5", "ns1"), + newPVC: makeClaim("pvc1", "5", "ns1"), + shouldSucceed: true, + }, + "success-new-higher-version": { + oldPVC: makeClaim("pvc1", "5", "ns1"), + newPVC: makeClaim("pvc1", "6", "ns1"), + shouldSucceed: true, + }, + "fail-old-not-found": { + oldPVC: makeClaim("pvc2", "5", "ns1"), + 
newPVC: makeClaim("pvc1", "5", "ns1"), + shouldSucceed: false, + }, + "fail-new-lower-version": { + oldPVC: makeClaim("pvc1", "5", "ns1"), + newPVC: makeClaim("pvc1", "4", "ns1"), + shouldSucceed: false, + }, + "fail-new-bad-version": { + oldPVC: makeClaim("pvc1", "5", "ns1"), + newPVC: makeClaim("pvc1", "a", "ns1"), + shouldSucceed: false, + }, + "fail-old-bad-version": { + oldPVC: makeClaim("pvc1", "a", "ns1"), + newPVC: makeClaim("pvc1", "5", "ns1"), + shouldSucceed: false, + }, + } + + for name, scenario := range scenarios { + cache := NewPVCAssumeCache(nil) + internal_cache, ok := cache.(*pvcAssumeCache) if !ok { - t.Errorf("ListPVs() returned unexpected PV %q", pv.Name) + t.Fatalf("Failed to get internal cache") } - if expectedPV != pv { - t.Errorf("ListPVs() returned PV %p, expected %p", pv, expectedPV) + + // Add oldPVC to cache + internal_cache.add(scenario.oldPVC) + if err := verifyPVC(cache, getPVCName(scenario.oldPVC), scenario.oldPVC); err != nil { + t.Errorf("Failed to GetPVC() after initial update: %v", err) + continue + } + + // Assume newPVC + err := cache.Assume(scenario.newPVC) + if scenario.shouldSucceed && err != nil { + t.Errorf("Test %q failed: Assume() returned error %v", name, err) + } + if !scenario.shouldSucceed && err == nil { + t.Errorf("Test %q failed: Assume() returned success but expected error", name) + } + + // Check that GetPVC returns correct PVC + expectedPV := scenario.newPVC + if !scenario.shouldSucceed { + expectedPV = scenario.oldPVC + } + if err := verifyPVC(cache, getPVCName(scenario.oldPVC), expectedPV); err != nil { + t.Errorf("Failed to GetPVC() after initial update: %v", err) } } } -func verifyPV(cache PVAssumeCache, name string, expectedPV *v1.PersistentVolume) error { - pv, err := cache.GetPV(name) - if err != nil { - return err +func TestRestorePVC(t *testing.T) { + cache := NewPVCAssumeCache(nil) + internal_cache, ok := cache.(*pvcAssumeCache) + if !ok { + t.Fatalf("Failed to get internal cache") } - if pv != expectedPV { - return fmt.Errorf("GetPV() returned %p, expected %p", pv, expectedPV) + + oldPVC := makeClaim("pvc1", "5", "ns1") + newPVC := makeClaim("pvc1", "5", "ns1") + + // Restore PVC that doesn't exist + cache.Restore("nothing") + + // Add oldPVC to cache + internal_cache.add(oldPVC) + if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil { + t.Fatalf("Failed to GetPVC() after initial update: %v", err) + } + + // Restore PVC + cache.Restore(getPVCName(oldPVC)) + if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil { + t.Fatalf("Failed to GetPVC() after iniital restore: %v", err) + } + + // Assume newPVC + if err := cache.Assume(newPVC); err != nil { + t.Fatalf("Assume() returned error %v", err) + } + if err := verifyPVC(cache, getPVCName(oldPVC), newPVC); err != nil { + t.Fatalf("Failed to GetPVC() after Assume: %v", err) + } + + // Restore PVC + cache.Restore(getPVCName(oldPVC)) + if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil { + t.Fatalf("Failed to GetPVC() after restore: %v", err) + } +} + +func TestAssumeUpdatePVCCache(t *testing.T) { + cache := NewPVCAssumeCache(nil) + internal_cache, ok := cache.(*pvcAssumeCache) + if !ok { + t.Fatalf("Failed to get internal cache") + } + + pvcName := "test-pvc0" + pvcNamespace := "test-ns" + + // Add a PVC + pvc := makeClaim(pvcName, "1", pvcNamespace) + internal_cache.add(pvc) + if err := verifyPVC(cache, getPVCName(pvc), pvc); err != nil { + t.Fatalf("failed to get PVC: %v", err) + } + + // Assume PVC + newPVC := pvc.DeepCopy() + 
newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node" + if err := cache.Assume(newPVC); err != nil { + t.Fatalf("failed to assume PVC: %v", err) + } + if err := verifyPVC(cache, getPVCName(pvc), newPVC); err != nil { + t.Fatalf("failed to get PVC after assume: %v", err) + } + + // Add old PVC + internal_cache.add(pvc) + if err := verifyPVC(cache, getPVCName(pvc), newPVC); err != nil { + t.Fatalf("failed to get PVC after old PVC added: %v", err) } - return nil } diff --git a/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go b/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go index 8a0a7796085..c523011795f 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go +++ b/pkg/controller/volume/persistentvolume/scheduler_binder_cache.go @@ -30,27 +30,41 @@ type PodBindingCache interface { // pod and node. UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) - // DeleteBindings will remove all cached bindings for the given pod. - DeleteBindings(pod *v1.Pod) - // GetBindings will return the cached bindings for the given pod and node. GetBindings(pod *v1.Pod, node string) []*bindingInfo + + // UpdateProvisionedPVCs will update the cache with the given provisioning decisions + // for the pod and node. + UpdateProvisionedPVCs(pod *v1.Pod, node string, provisionings []*v1.PersistentVolumeClaim) + + // GetProvisionedPVCs will return the cached provisioning decisions for the given pod and node. + GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim + + // DeleteBindings will remove all cached bindings and provisionings for the given pod. + // TODO: separate the func if it is needed to delete bindings/provisionings individually + DeleteBindings(pod *v1.Pod) } type podBindingCache struct { mutex sync.Mutex // Key = pod name - // Value = nodeBindings - bindings map[string]nodeBindings + // Value = nodeDecisions + bindingDecisions map[string]nodeDecisions } // Key = nodeName -// Value = array of bindingInfo -type nodeBindings map[string][]*bindingInfo +// Value = bindings & provisioned PVCs of the node +type nodeDecisions map[string]nodeDecision + +// A decision includes bindingInfo and provisioned PVCs of the node +type nodeDecision struct { + bindings []*bindingInfo + provisionings []*v1.PersistentVolumeClaim +} func NewPodBindingCache() PodBindingCache { - return &podBindingCache{bindings: map[string]nodeBindings{}} + return &podBindingCache{bindingDecisions: map[string]nodeDecisions{}} } func (c *podBindingCache) DeleteBindings(pod *v1.Pod) { @@ -58,7 +72,7 @@ func (c *podBindingCache) DeleteBindings(pod *v1.Pod) { defer c.mutex.Unlock() podName := getPodName(pod) - delete(c.bindings, podName) + delete(c.bindingDecisions, podName) } func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) { @@ -66,12 +80,20 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b defer c.mutex.Unlock() podName := getPodName(pod) - nodeBinding, ok := c.bindings[podName] + decisions, ok := c.bindingDecisions[podName] if !ok { - nodeBinding = nodeBindings{} - c.bindings[podName] = nodeBinding + decisions = nodeDecisions{} + c.bindingDecisions[podName] = decisions } - nodeBinding[node] = bindings + decision, ok := decisions[node] + if !ok { + decision = nodeDecision{ + bindings: bindings, + } + } else { + decision.bindings = bindings + } + decisions[node] = decision } func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo { @@ -79,9 
+101,50 @@ func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo { defer c.mutex.Unlock() podName := getPodName(pod) - nodeBindings, ok := c.bindings[podName] + decisions, ok := c.bindingDecisions[podName] if !ok { return nil } - return nodeBindings[node] + decision, ok := decisions[node] + if !ok { + return nil + } + return decision.bindings +} + +func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs []*v1.PersistentVolumeClaim) { + c.mutex.Lock() + defer c.mutex.Unlock() + + podName := getPodName(pod) + decisions, ok := c.bindingDecisions[podName] + if !ok { + decisions = nodeDecisions{} + c.bindingDecisions[podName] = decisions + } + decision, ok := decisions[node] + if !ok { + decision = nodeDecision{ + provisionings: pvcs, + } + } else { + decision.provisionings = pvcs + } + decisions[node] = decision +} + +func (c *podBindingCache) GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim { + c.mutex.Lock() + defer c.mutex.Unlock() + + podName := getPodName(pod) + decisions, ok := c.bindingDecisions[podName] + if !ok { + return nil + } + decision, ok := decisions[node] + if !ok { + return nil + } + return decision.provisionings } diff --git a/pkg/controller/volume/persistentvolume/scheduler_binder_cache_test.go b/pkg/controller/volume/persistentvolume/scheduler_binder_cache_test.go index d39d823c26d..65086274cc3 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_binder_cache_test.go +++ b/pkg/controller/volume/persistentvolume/scheduler_binder_cache_test.go @@ -26,32 +26,37 @@ import ( func TestUpdateGetBindings(t *testing.T) { scenarios := map[string]struct { - updateBindings []*bindingInfo - updatePod string - updateNode string + updateBindings []*bindingInfo + updateProvisionings []*v1.PersistentVolumeClaim + updatePod string + updateNode string - getBindings []*bindingInfo - getPod string - getNode string + getBindings []*bindingInfo + getProvisionings []*v1.PersistentVolumeClaim + getPod string + getNode string }{ "no-pod": { getPod: "pod1", getNode: "node1", }, "no-node": { - updatePod: "pod1", - updateNode: "node1", - updateBindings: []*bindingInfo{}, - getPod: "pod1", - getNode: "node2", + updatePod: "pod1", + updateNode: "node1", + updateBindings: []*bindingInfo{}, + updateProvisionings: []*v1.PersistentVolumeClaim{}, + getPod: "pod1", + getNode: "node2", }, "binding-exists": { - updatePod: "pod1", - updateNode: "node1", - updateBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}}, - getPod: "pod1", - getNode: "node1", - getBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}}, + updatePod: "pod1", + updateNode: "node1", + updateBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}}, + updateProvisionings: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}}, + getPod: "pod1", + getNode: "node1", + getBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}}, + getProvisionings: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}}, }, } @@ -61,6 +66,7 @@ func TestUpdateGetBindings(t *testing.T) { // Perform updates updatePod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: scenario.updatePod, Namespace: "ns"}} cache.UpdateBindings(updatePod, scenario.updateNode, scenario.updateBindings) + cache.UpdateProvisionedPVCs(updatePod, scenario.updateNode, 
scenario.updateProvisionings) // Verify updated bindings bindings := cache.GetBindings(updatePod, scenario.updateNode) @@ -68,45 +74,71 @@ func TestUpdateGetBindings(t *testing.T) { t.Errorf("Test %v failed: returned bindings after update different. Got %+v, expected %+v", name, bindings, scenario.updateBindings) } + // Verify updated provisionings + provisionings := cache.GetProvisionedPVCs(updatePod, scenario.updateNode) + if !reflect.DeepEqual(provisionings, scenario.updateProvisionings) { + t.Errorf("Test %v failed: returned provisionings after update different. Got %+v, expected %+v", name, provisionings, scenario.updateProvisionings) + } + // Get bindings getPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: scenario.getPod, Namespace: "ns"}} bindings = cache.GetBindings(getPod, scenario.getNode) if !reflect.DeepEqual(bindings, scenario.getBindings) { t.Errorf("Test %v failed: unexpected bindings returned. Got %+v, expected %+v", name, bindings, scenario.updateBindings) } + + // Get provisionings + provisionings = cache.GetProvisionedPVCs(getPod, scenario.getNode) + if !reflect.DeepEqual(provisionings, scenario.getProvisionings) { + t.Errorf("Test %v failed: unexpected bindings returned. Got %+v, expected %+v", name, provisionings, scenario.getProvisionings) + } } } func TestDeleteBindings(t *testing.T) { initialBindings := []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}} + initialProvisionings := []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}} cache := NewPodBindingCache() pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns"}} - // Get nil bindings + // Get nil bindings and provisionings bindings := cache.GetBindings(pod, "node1") if bindings != nil { t.Errorf("Test failed: expected initial nil bindings, got %+v", bindings) } + provisionings := cache.GetProvisionedPVCs(pod, "node1") + if provisionings != nil { + t.Errorf("Test failed: expected initial nil provisionings, got %+v", provisionings) + } // Delete nothing cache.DeleteBindings(pod) // Perform updates cache.UpdateBindings(pod, "node1", initialBindings) + cache.UpdateProvisionedPVCs(pod, "node1", initialProvisionings) - // Get bindings + // Get bindings and provisionings bindings = cache.GetBindings(pod, "node1") if !reflect.DeepEqual(bindings, initialBindings) { t.Errorf("Test failed: expected bindings %+v, got %+v", initialBindings, bindings) } + provisionings = cache.GetProvisionedPVCs(pod, "node1") + if !reflect.DeepEqual(provisionings, initialProvisionings) { + t.Errorf("Test failed: expected provisionings %+v, got %+v", initialProvisionings, provisionings) + } // Delete cache.DeleteBindings(pod) - // Get bindings + // Get bindings and provisionings bindings = cache.GetBindings(pod, "node1") if bindings != nil { t.Errorf("Test failed: expected nil bindings, got %+v", bindings) } + provisionings = cache.GetProvisionedPVCs(pod, "node1") + if provisionings != nil { + t.Errorf("Test failed: expected nil provisionings, got %+v", provisionings) + } } From 95b530366aeea7c379c50424f58bcd2f27bed921 Mon Sep 17 00:00:00 2001 From: lichuqiang Date: Mon, 9 Apr 2018 14:37:41 +0800 Subject: [PATCH 163/307] Add dynamic provisioning process --- .../volume/persistentvolume/pv_controller.go | 8 + .../persistentvolume/scheduler_binder.go | 190 ++++++++-- .../persistentvolume/scheduler_binder_test.go | 357 +++++++++++++++--- pkg/features/kube_features.go | 7 + .../authorizer/rbac/bootstrappolicy/policy.go | 12 +- 5 files changed, 492 insertions(+), 
82 deletions(-) diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 87a7ef2f70a..ac796ce481a 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -135,6 +135,14 @@ const annDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" // a volume for this PVC. const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner" +// This annotation is added to a PVC that has been triggered by scheduler to +// be dynamically provisioned. Its value is the name of the selected node. +const annSelectedNode = "volume.alpha.kubernetes.io/selected-node" + +// If the provisioner name in a storage class is set to "kubernetes.io/no-provisioner", +// then dynamic provisioning is not supported by the storage. +const notSupportedProvisioner = "kubernetes.io/no-provisioner" + // CloudVolumeCreatedForClaimNamespaceTag is a name of a tag attached to a real volume in cloud (e.g. AWS EBS or GCE PD) // with namespace of a persistent volume claim used to create this volume. const CloudVolumeCreatedForClaimNamespaceTag = "kubernetes.io/created-for/pvc/namespace" diff --git a/pkg/controller/volume/persistentvolume/scheduler_binder.go b/pkg/controller/volume/persistentvolume/scheduler_binder.go index ad4cf139c89..0c5ccc9ec42 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_binder.go +++ b/pkg/controller/volume/persistentvolume/scheduler_binder.go @@ -24,10 +24,12 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" coreinformers "k8s.io/client-go/informers/core/v1" storageinformers "k8s.io/client-go/informers/storage/v1" clientset "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/features" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -58,24 +60,30 @@ type SchedulerVolumeBinder interface { // If a PVC is bound, it checks if the PV's NodeAffinity matches the Node. // Otherwise, it tries to find an available PV to bind to the PVC. // - // It returns true if there are matching PVs that can satisfy all of the Pod's PVCs, and returns true - // if bound volumes satisfy the PV NodeAffinity. + // It returns true if all of the Pod's PVCs have matching PVs or can be dynamic provisioned, + // and returns true if bound volumes satisfy the PV NodeAffinity. // // This function is called by the volume binding scheduler predicate and can be called in parallel FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error) - // AssumePodVolumes will take the PV matches for unbound PVCs and update the PV cache assuming + // AssumePodVolumes will: + // 1. Take the PV matches for unbound PVCs and update the PV cache assuming // that the PV is prebound to the PVC. + // 2. Take the PVCs that need provisioning and update the PVC cache with related + // annotations set. // - // It returns true if all volumes are fully bound, and returns true if any volume binding API operation needs - // to be done afterwards. + // It returns true if all volumes are fully bound, and returns true if any volume binding/provisioning + // API operation needs to be done afterwards. // // This function will modify assumedPod with the node name. // This function is called serially. 
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, bindingRequired bool, err error) - // BindPodVolumes will initiate the volume binding by making the API call to prebind the PV + // BindPodVolumes will: + // 1. Initiate the volume binding by making the API call to prebind the PV // to its matching PVC. + // 2. Trigger the volume provisioning by making the API call to set related + // annotations on the PVC // // This function can be called in parallel. BindPodVolumes(assumedPod *v1.Pod) error @@ -87,8 +95,7 @@ type SchedulerVolumeBinder interface { type volumeBinder struct { ctrl *PersistentVolumeController - // TODO: Need AssumeCache for PVC for dynamic provisioning - pvcCache corelisters.PersistentVolumeClaimLister + pvcCache PVCAssumeCache pvCache PVAssumeCache // Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes. @@ -111,7 +118,7 @@ func NewVolumeBinder( b := &volumeBinder{ ctrl: ctrl, - pvcCache: pvcInformer.Lister(), + pvcCache: NewPVCAssumeCache(pvcInformer.Informer()), pvCache: NewPVAssumeCache(pvInformer.Informer()), podBindingCache: NewPodBindingCache(), } @@ -123,7 +130,7 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache { return b.podBindingCache } -// FindPodVolumes caches the matching PVs per node in podBindingCache +// FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) { podName := getPodName(pod) @@ -135,8 +142,8 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume boundVolumesSatisfied = true // The pod's volumes need to be processed in one call to avoid the race condition where - // volumes can get bound in between calls. - boundClaims, unboundClaims, unboundClaimsImmediate, err := b.getPodVolumes(pod) + // volumes can get bound/provisioned in between calls. + boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod) if err != nil { return false, false, err } @@ -154,20 +161,32 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume } } - // Find PVs for unbound volumes - if len(unboundClaims) > 0 { - unboundVolumesSatisfied, err = b.findMatchingVolumes(pod, unboundClaims, node) + if len(claimsToBind) > 0 { + var claimsToProvision []*v1.PersistentVolumeClaim + unboundVolumesSatisfied, claimsToProvision, err = b.findMatchingVolumes(pod, claimsToBind, node) if err != nil { return false, false, err } + + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) { + // Try to provision for unbound volumes + if !unboundVolumesSatisfied { + unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node) + if err != nil { + return false, false, err + } + } + } } return unboundVolumesSatisfied, boundVolumesSatisfied, nil } -// AssumePodVolumes will take the cached matching PVs in podBindingCache for the chosen node -// and update the pvCache with the new prebound PV. It will update podBindingCache again -// with the PVs that need an API update. +// AssumePodVolumes will take the cached matching PVs and PVCs to provision +// in podBindingCache for the chosen node, and: +// 1. Update the pvCache with the new prebound PV. +// 2. Update the pvcCache with the new PVCs with annotations set +// It will update podBindingCache again with the PVs and PVCs that need an API update. 
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound, bindingRequired bool, err error) { podName := getPodName(assumedPod) @@ -179,6 +198,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al } assumedPod.Spec.NodeName = nodeName + // Assume PV claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName) newBindings := []*bindingInfo{} @@ -206,23 +226,48 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al } } - if len(newBindings) == 0 { - // Don't update cached bindings if no API updates are needed. This can happen if we - // previously updated the PV object and are waiting for the PV controller to finish binding. - glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: PVs already assumed", podName, nodeName) - return false, false, nil + // Don't update cached bindings if no API updates are needed. This can happen if we + // previously updated the PV object and are waiting for the PV controller to finish binding. + if len(newBindings) != 0 { + bindingRequired = true + b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings) } - b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings) - return false, true, nil + // Assume PVCs + claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName) + + newProvisionedPVCs := []*v1.PersistentVolumeClaim{} + for _, claim := range claimsToProvision { + // The claims from method args can be pointing to watcher cache. We must not + // modify these, therefore create a copy. + claimClone := claim.DeepCopy() + metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annSelectedNode, nodeName) + err = b.pvcCache.Assume(claimClone) + if err != nil { + b.revertAssumedPVs(newBindings) + b.revertAssumedPVCs(newProvisionedPVCs) + return + } + + newProvisionedPVCs = append(newProvisionedPVCs, claimClone) + } + + if len(newProvisionedPVCs) != 0 { + bindingRequired = true + b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs) + } + + return } -// BindPodVolumes gets the cached bindings in podBindingCache and makes the API update for those PVs. +// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache +// and makes the API update for those PVs/PVCs. func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error { podName := getPodName(assumedPod) glog.V(4).Infof("BindPodVolumes for pod %q", podName) bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName) + claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName) // Do the actual prebinding. Let the PV controller take care of the rest // There is no API rollback if the actual binding fails @@ -232,6 +277,20 @@ func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error { if err != nil { // only revert assumed cached updates for volumes we haven't successfully bound b.revertAssumedPVs(bindings[i:]) + // Revert all of the assumed cached updates for claims, + // since no actual API update will be done + b.revertAssumedPVCs(claimsToProvision) + return err + } + } + + // Update claims objects to trigger volume provisioning. 
Let the PV controller take care of the rest + // The PV controller is expected to signal back by removing related annotations if actual provisioning fails + for i, claim := range claimsToProvision { + if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil { + glog.V(4).Infof("updating PersistentVolumeClaim[%s] failed: %v", getPVCName(claim), err) + // only revert assumed cached updates for claims we haven't successfully updated + b.revertAssumedPVCs(claimsToProvision[i:]) return err } } @@ -253,7 +312,13 @@ func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFull } pvcName := vol.PersistentVolumeClaim.ClaimName - pvc, err := b.pvcCache.PersistentVolumeClaims(namespace).Get(pvcName) + claim := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: namespace, + }, + } + pvc, err := b.pvcCache.GetPVC(getPVCName(claim)) if err != nil || pvc == nil { return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcName, err) } @@ -342,14 +407,18 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node return true, nil } -func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingInfo, node *v1.Node) (foundMatches bool, err error) { +// findMatchingVolumes tries to find matching volumes for given claims, +// and returns unbound claims for further provisioning. +func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingInfo, node *v1.Node) (foundMatches bool, unboundClaims []*v1.PersistentVolumeClaim, err error) { podName := getPodName(pod) - // Sort all the claims by increasing size request to get the smallest fits sort.Sort(byPVCSize(claimsToBind)) chosenPVs := map[string]*v1.PersistentVolume{} + foundMatches = true + matchedClaims := []*bindingInfo{} + for _, bindingInfo := range claimsToBind { // Get storage class name from each PVC storageClassName := "" @@ -362,21 +431,68 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI // Find a matching PV bindingInfo.pv, err = findMatchingVolume(bindingInfo.pvc, allPVs, node, chosenPVs, true) if err != nil { - return false, err + return false, nil, err } if bindingInfo.pv == nil { glog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, getPVCName(bindingInfo.pvc), node.Name) - return false, nil + unboundClaims = append(unboundClaims, bindingInfo.pvc) + foundMatches = false + continue } // matching PV needs to be excluded so we don't select it again chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv + matchedClaims = append(matchedClaims, bindingInfo) glog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, getPVCName(bindingInfo.pvc), node.Name, podName) } // Mark cache with all the matches for each PVC for this node - b.podBindingCache.UpdateBindings(pod, node.Name, claimsToBind) - glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name) + if len(matchedClaims) > 0 { + b.podBindingCache.UpdateBindings(pod, node.Name, matchedClaims) + } + + if foundMatches { + glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name) + } + + return +} + +// checkVolumeProvisions checks given unbound claims (the claims have gone through func +// findMatchingVolumes, and do not have matching volumes for binding), and returns true +// if all of the claims are eligible for dynamic provisioning.
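// A claim is only eligible when its StorageClass names a usable provisioner.
// As a sketch (illustrative values, reusing the storage/metav1 aliases used
// elsewhere in this patch), a class such as
//
//	waitMode := storage.VolumeBindingWaitForFirstConsumer
//	class := storage.StorageClass{
//		ObjectMeta:        metav1.ObjectMeta{Name: "local-storage"},
//		Provisioner:       "kubernetes.io/no-provisioner",
//		VolumeBindingMode: &waitMode,
//	}
//
// has no dynamic provisioner, so claims referencing it make this check return false.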
+func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied bool, err error) { + podName := getPodName(pod) + provisionedClaims := []*v1.PersistentVolumeClaim{} + + for _, claim := range claimsToProvision { + className := v1helper.GetPersistentVolumeClaimClass(claim) + if className == "" { + return false, fmt.Errorf("no class for claim %q", getPVCName(claim)) + } + + class, err := b.ctrl.classLister.Get(className) + if err != nil { + return false, fmt.Errorf("failed to find storage class %q", className) + } + provisioner := class.Provisioner + if provisioner == "" || provisioner == notSupportedProvisioner { + glog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, getPVCName(claim)) + return false, nil + } + + // TODO: Check if the node can satisfy the topology requirement in the class + + // TODO: Check if capacity of the node domain in the storage class + // can satisfy resource requirement of given claim + + provisionedClaims = append(provisionedClaims, claim) + + } + glog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name) + + // Mark cache with all the PVCs that need provisioning for this node + b.podBindingCache.UpdateProvisionedPVCs(pod, node.Name, provisionedClaims) return true, nil } @@ -387,6 +503,12 @@ func (b *volumeBinder) revertAssumedPVs(bindings []*bindingInfo) { } } +func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) { + for _, claim := range claims { + b.pvcCache.Restore(getPVCName(claim)) + } +} + type bindingInfo struct { // Claim that needs to be bound pvc *v1.PersistentVolumeClaim diff --git a/pkg/controller/volume/persistentvolume/scheduler_binder_test.go b/pkg/controller/volume/persistentvolume/scheduler_binder_test.go index ad8d2efa960..98bc9e8bcd2 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_binder_test.go +++ b/pkg/controller/volume/persistentvolume/scheduler_binder_test.go @@ -33,20 +33,23 @@ import ( "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/controller" ) var ( - unboundPVC = makeTestPVC("unbound-pvc", "1G", pvcUnbound, "", &waitClass) - unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", pvcUnbound, "", &waitClass) - preboundPVC = makeTestPVC("prebound-pvc", "1G", pvcPrebound, "pv-node1a", &waitClass) - boundPVC = makeTestPVC("bound-pvc", "1G", pvcBound, "pv-bound", &waitClass) - boundPVC2 = makeTestPVC("bound-pvc2", "1G", pvcBound, "pv-bound2", &waitClass) - badPVC = makeBadPVC() - immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", pvcUnbound, "", &immediateClass) - immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", pvcBound, "pv-bound-immediate", &immediateClass) + unboundPVC = makeTestPVC("unbound-pvc", "1G", pvcUnbound, "", "1", &waitClass) + unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", pvcUnbound, "", "1", &waitClass) + preboundPVC = makeTestPVC("prebound-pvc", "1G", pvcPrebound, "pv-node1a", "1", &waitClass) + boundPVC = makeTestPVC("bound-pvc", "1G", pvcBound, "pv-bound", "1", &waitClass) + boundPVC2 = makeTestPVC("bound-pvc2", "1G", pvcBound, "pv-bound2", "1", &waitClass) + badPVC = makeBadPVC() + immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", pvcUnbound, "", "1", &immediateClass) + immediateBoundPVC = makeTestPVC("immediate-bound-pvc", 
"1G", pvcBound, "pv-bound-immediate", "1", &immediateClass) + provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", pvcUnbound, "", "1", &waitClass) + provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "1", &waitClass) + provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "2", &waitClass) + noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", pvcUnbound, "", "1", &provisionNotSupportClass) pvNoNode = makeTestPV("pv-no-node", "", "1G", "1", nil, waitClass) pvNode1a = makeTestPV("pv-node1a", "node1", "5G", "1", nil, waitClass) @@ -68,10 +71,12 @@ var ( binding1aBound = makeBinding(unboundPVC, pvNode1aBound) binding1bBound = makeBinding(unboundPVC2, pvNode1bBound) - waitClass = "waitClass" - immediateClass = "immediateClass" + waitClass = "waitClass" + immediateClass = "immediateClass" + provisionNotSupportClass = "provisionNotSupportedClass" - nodeLabelKey = "nodeKey" + nodeLabelKey = "nodeKey" + nodeLabelValue = "node1" ) type testEnv struct { @@ -80,7 +85,7 @@ type testEnv struct { binder SchedulerVolumeBinder internalBinder *volumeBinder internalPVCache *pvAssumeCache - internalPVCCache cache.Indexer + internalPVCCache *pvcAssumeCache } func newTestBinder(t *testing.T) *testEnv { @@ -106,6 +111,7 @@ func newTestBinder(t *testing.T) *testEnv { Name: waitClass, }, VolumeBindingMode: &waitMode, + Provisioner: "test-provisioner", }, { ObjectMeta: metav1.ObjectMeta{ @@ -113,6 +119,13 @@ func newTestBinder(t *testing.T) *testEnv { }, VolumeBindingMode: &immediateMode, }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: provisionNotSupportClass, + }, + VolumeBindingMode: &waitMode, + Provisioner: "kubernetes.io/no-provisioner", + }, } for _, class := range classes { if err := classInformer.Informer().GetIndexer().Add(class); err != nil { @@ -132,22 +145,31 @@ func newTestBinder(t *testing.T) *testEnv { t.Fatalf("Failed to convert to internal PV cache") } + pvcCache := internalBinder.pvcCache + internalPVCCache, ok := pvcCache.(*pvcAssumeCache) + if !ok { + t.Fatalf("Failed to convert to internal PVC cache") + } + return &testEnv{ client: client, reactor: reactor, binder: binder, internalBinder: internalBinder, internalPVCache: internalPVCache, - internalPVCCache: pvcInformer.Informer().GetIndexer(), + internalPVCCache: internalPVCCache, } } -func (env *testEnv) initClaims(t *testing.T, pvcs []*v1.PersistentVolumeClaim) { - for _, pvc := range pvcs { - err := env.internalPVCCache.Add(pvc) - if err != nil { - t.Fatalf("Failed to add PVC %q to internal cache: %v", pvc.Name, err) +func (env *testEnv) initClaims(cachedPVCs []*v1.PersistentVolumeClaim, apiPVCs []*v1.PersistentVolumeClaim) { + internalPVCCache := env.internalPVCCache + for _, pvc := range cachedPVCs { + internalPVCCache.add(pvc) + if apiPVCs == nil { + env.reactor.claims[pvc.Name] = pvc } + } + for _, pvc := range apiPVCs { env.reactor.claims[pvc.Name] = pvc } } @@ -166,7 +188,7 @@ func (env *testEnv) initVolumes(cachedPVs []*v1.PersistentVolume, apiPVs []*v1.P } -func (env *testEnv) assumeVolumes(t *testing.T, name, node string, pod *v1.Pod, bindings []*bindingInfo) { +func (env *testEnv) assumeVolumes(t *testing.T, name, node string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) { pvCache := env.internalBinder.pvCache for _, binding := range bindings { if err := pvCache.Assume(binding.pv); err != nil { @@ -175,20 +197,38 @@ func (env *testEnv) assumeVolumes(t *testing.T, name, node string, pod *v1.Pod, } 
env.internalBinder.podBindingCache.UpdateBindings(pod, node, bindings) + + pvcCache := env.internalBinder.pvcCache + for _, pvc := range provisionings { + if err := pvcCache.Assume(pvc); err != nil { + t.Fatalf("Failed to setup test %q: error: %v", name, err) + } + } + + env.internalBinder.podBindingCache.UpdateProvisionedPVCs(pod, node, provisionings) } -func (env *testEnv) initPodCache(pod *v1.Pod, node string, bindings []*bindingInfo) { +func (env *testEnv) initPodCache(pod *v1.Pod, node string, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) { cache := env.internalBinder.podBindingCache cache.UpdateBindings(pod, node, bindings) + + cache.UpdateProvisionedPVCs(pod, node, provisionings) } -func (env *testEnv) validatePodCache(t *testing.T, name, node string, pod *v1.Pod, expectedBindings []*bindingInfo) { +func (env *testEnv) validatePodCache(t *testing.T, name, node string, pod *v1.Pod, expectedBindings []*bindingInfo, expectedProvisionings []*v1.PersistentVolumeClaim) { cache := env.internalBinder.podBindingCache bindings := cache.GetBindings(pod, node) if !reflect.DeepEqual(expectedBindings, bindings) { t.Errorf("Test %q failed: Expected bindings %+v, got %+v", name, expectedBindings, bindings) } + + provisionedClaims := cache.GetProvisionedPVCs(pod, node) + + if !reflect.DeepEqual(expectedProvisionings, provisionedClaims) { + t.Errorf("Test %q failed: Expected provisionings %+v, got %+v", name, expectedProvisionings, provisionedClaims) + } + } func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod) []*bindingInfo { @@ -196,7 +236,7 @@ func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod) return cache.GetBindings(pod, node) } -func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo) { +func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) { // TODO: Check binding cache // Check pv cache @@ -218,9 +258,23 @@ func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindi t.Errorf("Test %q failed: expected PV.ClaimRef.Namespace %q, got %q", name, b.pvc.Namespace, pv.Spec.ClaimRef.Namespace) } } + + // Check pvc cache + pvcCache := env.internalBinder.pvcCache + for _, p := range provisionings { + pvcKey := getPVCName(p) + pvc, err := pvcCache.GetPVC(pvcKey) + if err != nil { + t.Errorf("Test %q failed: GetPVC %q returned error: %v", name, pvcKey, err) + continue + } + if pvc.Annotations[annSelectedNode] != nodeLabelValue { + t.Errorf("Test %q failed: expected annSelectedNode of pvc %q to be %q, but got %q", name, pvcKey, nodeLabelValue, pvc.Annotations[annSelectedNode]) + } + } } -func (env *testEnv) validateFailedAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo) { +func (env *testEnv) validateFailedAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) { // All PVs have been unmodified in cache pvCache := env.internalBinder.pvCache for _, b := range bindings { @@ -230,6 +284,20 @@ func (env *testEnv) validateFailedAssume(t *testing.T, name string, pod *v1.Pod, t.Errorf("Test %q failed: PV %q was modified in cache", name, b.pv.Name) } } + + // Check pvc cache + pvcCache := env.internalBinder.pvcCache + for _, p := range provisionings { + pvcKey := getPVCName(p) + pvc, err := pvcCache.GetPVC(pvcKey) + if err != nil { + t.Errorf("Test %q failed: GetPVC %q returned error: %v", name, pvcKey, 
err) + continue + } + if pvc.Annotations[annSelectedNode] != "" { + t.Errorf("Test %q failed: expected annSelectedNode of pvc %q empty, but got %q", name, pvcKey, pvc.Annotations[annSelectedNode]) + } + } } func (env *testEnv) validateBind( @@ -257,20 +325,46 @@ func (env *testEnv) validateBind( } } +func (env *testEnv) validateProvision( + t *testing.T, + name string, + pod *v1.Pod, + expectedPVCs []*v1.PersistentVolumeClaim, + expectedAPIPVCs []*v1.PersistentVolumeClaim) { + + // Check pvc cache + pvcCache := env.internalBinder.pvcCache + for _, pvc := range expectedPVCs { + cachedPVC, err := pvcCache.GetPVC(getPVCName(pvc)) + if err != nil { + t.Errorf("Test %q failed: GetPVC %q returned error: %v", name, getPVCName(pvc), err) + } + if !reflect.DeepEqual(cachedPVC, pvc) { + t.Errorf("Test %q failed: cached PVC check failed [A-expected, B-got]:\n%s", name, diff.ObjectDiff(pvc, cachedPVC)) + } + } + + // Check reactor for API updates + if err := env.reactor.checkClaims(expectedAPIPVCs); err != nil { + t.Errorf("Test %q failed: API reactor validation failed: %v", name, err) + } +} + const ( pvcUnbound = iota pvcPrebound pvcBound ) -func makeTestPVC(name, size string, pvcBoundState int, pvName string, className *string) *v1.PersistentVolumeClaim { +func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim { pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "testns", UID: types.UID("pvc-uid"), - ResourceVersion: "1", + ResourceVersion: resourceVersion, SelfLink: testapi.Default.SelfLink("pvc", name), + Annotations: map[string]string{}, }, Spec: v1.PersistentVolumeClaimSpec{ Resources: v1.ResourceRequirements{ @@ -389,7 +483,15 @@ func makeBinding(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) *bindin return &bindingInfo{pvc: pvc, pv: pv} } -func TestFindPodVolumes(t *testing.T) { +func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim { + res := pvc.DeepCopy() + // Add provision related annotations + res.Annotations[annSelectedNode] = nodeLabelValue + + return res +} + +func TestFindPodVolumesWithoutProvisioning(t *testing.T) { scenarios := map[string]struct { // Inputs pvs []*v1.PersistentVolume @@ -470,10 +572,11 @@ func TestFindPodVolumes(t *testing.T) { expectedBound: true, }, "two-unbound-pvcs,partial-match": { - podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2}, - pvs: []*v1.PersistentVolume{pvNode1a}, - expectedUnbound: false, - expectedBound: true, + podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2}, + pvs: []*v1.PersistentVolume{pvNode1a}, + expectedBindings: []*bindingInfo{binding1a}, + expectedUnbound: false, + expectedBound: true, }, "one-bound,one-unbound": { podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC}, @@ -552,7 +655,7 @@ func TestFindPodVolumes(t *testing.T) { if scenario.cachePVCs == nil { scenario.cachePVCs = scenario.podPVCs } - testEnv.initClaims(t, scenario.cachePVCs) + testEnv.initClaims(scenario.cachePVCs, scenario.cachePVCs) // b. 
Generate pod with given claims if scenario.pod == nil { @@ -575,16 +678,126 @@ func TestFindPodVolumes(t *testing.T) { if unboundSatisfied != scenario.expectedUnbound { t.Errorf("Test %q failed: expected unboundSatsified %v, got %v", name, scenario.expectedUnbound, unboundSatisfied) } - testEnv.validatePodCache(t, name, testNode.Name, scenario.pod, scenario.expectedBindings) + testEnv.validatePodCache(t, name, testNode.Name, scenario.pod, scenario.expectedBindings, nil) + } +} + +func TestFindPodVolumesWithProvisioning(t *testing.T) { + scenarios := map[string]struct { + // Inputs + pvs []*v1.PersistentVolume + podPVCs []*v1.PersistentVolumeClaim + // If nil, use pod PVCs + cachePVCs []*v1.PersistentVolumeClaim + // If nil, makePod with podPVCs + pod *v1.Pod + + // Expected podBindingCache fields + expectedBindings []*bindingInfo + expectedProvisions []*v1.PersistentVolumeClaim + + // Expected return values + expectedUnbound bool + expectedBound bool + shouldFail bool + }{ + "one-provisioned": { + podPVCs: []*v1.PersistentVolumeClaim{provisionedPVC}, + expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, + expectedUnbound: true, + expectedBound: true, + }, + "two-unbound-pvcs,one-matched,one-provisioned": { + podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC}, + pvs: []*v1.PersistentVolume{pvNode1a}, + expectedBindings: []*bindingInfo{binding1a}, + expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, + expectedUnbound: true, + expectedBound: true, + }, + "one-bound,one-provisioned": { + podPVCs: []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC}, + pvs: []*v1.PersistentVolume{pvBound}, + expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, + expectedUnbound: true, + expectedBound: true, + }, + "immediate-unbound-pvc": { + podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC}, + expectedUnbound: false, + expectedBound: false, + shouldFail: true, + }, + "one-immediate-bound,one-provisioned": { + podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC}, + pvs: []*v1.PersistentVolume{pvBoundImmediate}, + expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, + expectedUnbound: true, + expectedBound: true, + }, + "invalid-provisioner": { + podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC}, + expectedUnbound: false, + expectedBound: true, + }, + } + + // Set VolumeScheduling and DynamicProvisioningScheduling feature gate + utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,DynamicProvisioningScheduling=true") + defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,DynamicProvisioningScheduling=false") + + testNode := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + Labels: map[string]string{ + nodeLabelKey: "node1", + }, + }, + } + + for name, scenario := range scenarios { + // Setup + testEnv := newTestBinder(t) + testEnv.initVolumes(scenario.pvs, scenario.pvs) + + // a. Init pvc cache + if scenario.cachePVCs == nil { + scenario.cachePVCs = scenario.podPVCs + } + testEnv.initClaims(scenario.cachePVCs, scenario.cachePVCs) + + // b. 
Generate pod with given claims + if scenario.pod == nil { + scenario.pod = makePod(scenario.podPVCs) + } + + // Execute + unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode) + + // Validate + if !scenario.shouldFail && err != nil { + t.Errorf("Test %q failed: returned error: %v", name, err) + } + if scenario.shouldFail && err == nil { + t.Errorf("Test %q failed: returned success but expected error", name) + } + if boundSatisfied != scenario.expectedBound { + t.Errorf("Test %q failed: expected boundSatsified %v, got %v", name, scenario.expectedBound, boundSatisfied) + } + if unboundSatisfied != scenario.expectedUnbound { + t.Errorf("Test %q failed: expected unboundSatsified %v, got %v", name, scenario.expectedUnbound, unboundSatisfied) + } + testEnv.validatePodCache(t, name, testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions) } } func TestAssumePodVolumes(t *testing.T) { scenarios := map[string]struct { // Inputs - podPVCs []*v1.PersistentVolumeClaim - pvs []*v1.PersistentVolume - bindings []*bindingInfo + podPVCs []*v1.PersistentVolumeClaim + pvs []*v1.PersistentVolume + bindings []*bindingInfo + provisionedPVCs []*v1.PersistentVolumeClaim // Expected return values shouldFail bool @@ -636,6 +849,21 @@ func TestAssumePodVolumes(t *testing.T) { shouldFail: true, expectedBindingRequired: true, }, + "one-binding, one-pvc-provisioned": { + podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC}, + bindings: []*bindingInfo{binding1a}, + pvs: []*v1.PersistentVolume{pvNode1a}, + provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC}, + expectedBindingRequired: true, + }, + "one-binding, one-provision-tmpupdate-failed": { + podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion}, + bindings: []*bindingInfo{binding1a}, + pvs: []*v1.PersistentVolume{pvNode1a}, + provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2}, + shouldFail: true, + expectedBindingRequired: true, + }, } for name, scenario := range scenarios { @@ -643,9 +871,9 @@ func TestAssumePodVolumes(t *testing.T) { // Setup testEnv := newTestBinder(t) - testEnv.initClaims(t, scenario.podPVCs) + testEnv.initClaims(scenario.podPVCs, scenario.podPVCs) pod := makePod(scenario.podPVCs) - testEnv.initPodCache(pod, "node1", scenario.bindings) + testEnv.initPodCache(pod, "node1", scenario.bindings, scenario.provisionedPVCs) testEnv.initVolumes(scenario.pvs, scenario.pvs) // Execute @@ -668,9 +896,9 @@ func TestAssumePodVolumes(t *testing.T) { scenario.expectedBindings = scenario.bindings } if scenario.shouldFail { - testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings) + testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs) } else { - testEnv.validateAssume(t, name, pod, scenario.expectedBindings) + testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs) } } } @@ -683,11 +911,20 @@ func TestBindPodVolumes(t *testing.T) { // if nil, use cachedPVs apiPVs []*v1.PersistentVolume + provisionedPVCs []*v1.PersistentVolumeClaim + cachedPVCs []*v1.PersistentVolumeClaim + // if nil, use cachedPVCs + apiPVCs []*v1.PersistentVolumeClaim + // Expected return values shouldFail bool expectedPVs []*v1.PersistentVolume // if nil, use expectedPVs expectedAPIPVs []*v1.PersistentVolume + + expectedPVCs []*v1.PersistentVolumeClaim + // if nil, use expectedPVCs + expectedAPIPVCs []*v1.PersistentVolumeClaim }{ "all-bound": {}, "not-fully-bound": { @@ -711,6 
+948,30 @@ func TestBindPodVolumes(t *testing.T) { expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion}, shouldFail: true, }, + "one-provisioned-pvc": { + provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)}, + cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC}, + expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)}, + }, + "provision-api-update-failed": { + provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), addProvisionAnn(provisionedPVC2)}, + cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVC2}, + apiPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVCHigherVersion}, + expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVC2}, + expectedAPIPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVCHigherVersion}, + shouldFail: true, + }, + "bingding-succeed, provision-api-update-failed": { + bindings: []*bindingInfo{binding1aBound}, + cachedPVs: []*v1.PersistentVolume{pvNode1a}, + expectedPVs: []*v1.PersistentVolume{pvNode1aBound}, + provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), addProvisionAnn(provisionedPVC2)}, + cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVC2}, + apiPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVCHigherVersion}, + expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVC2}, + expectedAPIPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVCHigherVersion}, + shouldFail: true, + }, } for name, scenario := range scenarios { glog.V(5).Infof("Running test case %q", name) @@ -721,8 +982,12 @@ func TestBindPodVolumes(t *testing.T) { if scenario.apiPVs == nil { scenario.apiPVs = scenario.cachedPVs } + if scenario.apiPVCs == nil { + scenario.apiPVCs = scenario.cachedPVCs + } testEnv.initVolumes(scenario.cachedPVs, scenario.apiPVs) - testEnv.assumeVolumes(t, name, "node1", pod, scenario.bindings) + testEnv.initClaims(scenario.cachedPVCs, scenario.apiPVCs) + testEnv.assumeVolumes(t, name, "node1", pod, scenario.bindings, scenario.provisionedPVCs) // Execute err := testEnv.binder.BindPodVolumes(pod) @@ -737,7 +1002,11 @@ func TestBindPodVolumes(t *testing.T) { if scenario.expectedAPIPVs == nil { scenario.expectedAPIPVs = scenario.expectedPVs } + if scenario.expectedAPIPVCs == nil { + scenario.expectedAPIPVCs = scenario.expectedPVCs + } testEnv.validateBind(t, name, pod, scenario.expectedPVs, scenario.expectedAPIPVs) + testEnv.validateProvision(t, name, pod, scenario.expectedPVCs, scenario.expectedAPIPVCs) } } @@ -753,7 +1022,7 @@ func TestFindAssumeVolumes(t *testing.T) { // Setup testEnv := newTestBinder(t) testEnv.initVolumes(pvs, pvs) - testEnv.initClaims(t, podPVCs) + testEnv.initClaims(podPVCs, podPVCs) pod := makePod(podPVCs) testNode := &v1.Node{ @@ -787,7 +1056,7 @@ func TestFindAssumeVolumes(t *testing.T) { if !bindingRequired { t.Errorf("Test failed: binding not required") } - testEnv.validateAssume(t, "assume", pod, expectedBindings) + testEnv.validateAssume(t, "assume", pod, expectedBindings, nil) // After assume, claimref should be set on pv expectedBindings = testEnv.getPodBindings(t, "after-assume", testNode.Name, pod) @@ -803,6 +1072,6 @@ func TestFindAssumeVolumes(t *testing.T) { if !unboundSatisfied { t.Errorf("Test failed: couldn't find PVs for all PVCs") } - testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings) 
+ testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, nil) } } diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index d773374ec43..002bcc4297d 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -279,6 +279,12 @@ const ( // A node which has closer cpu,memory utilization and volume count is favoured by scheduler // while making decisions. BalanceAttachedNodeVolumes utilfeature.Feature = "BalanceAttachedNodeVolumes" + + // owner: @lichuqiang + // alpha: v1.11 + // + // Extend the default scheduler to be aware of volume topology and handle PV provisioning + DynamicProvisioningScheduling utilfeature.Feature = "DynamicProvisioningScheduling" ) func init() { @@ -327,6 +333,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, + DynamicProvisioningScheduling: {Default: false, PreRelease: utilfeature.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 5aed8c6819f..912ab05a1b3 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -506,12 +506,16 @@ func ClusterRoles() []rbacv1.ClusterRole { } if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + rules := []rbacv1.PolicyRule{ + rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), + rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(), + } + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) { + rules = append(rules, rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie()) + } roles = append(roles, rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "system:volume-scheduler"}, - Rules: []rbacv1.PolicyRule{ - rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), - rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(), - }, + Rules: rules, }) } From 446f36559e933d3eeea666bd2165db21b7bb419e Mon Sep 17 00:00:00 2001 From: lichuqiang Date: Sat, 21 Apr 2018 11:26:26 +0800 Subject: [PATCH 164/307] pv_controller change for provisioning --- .../volume/persistentvolume/pv_controller.go | 43 ++++++++++++++++-- .../persistentvolume/pv_controller_test.go | 45 +++++++++++++++++-- 2 files changed, 82 insertions(+), 6 deletions(-) diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index ac796ce481a..a46c75510dd 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -285,6 +285,16 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV return false, nil } + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) { + // When feature DynamicProvisioningScheduling enabled, + // Scheduler signal to the PV controller to start dynamic + // provisioning by setting the "annSelectedNode" 
annotation + // in the PVC + if _, ok := claim.Annotations[annSelectedNode]; ok { + return false, nil + } + } + className := v1helper.GetPersistentVolumeClaimClass(claim) if className == "" { return false, nil @@ -299,8 +309,6 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className) } - // TODO: add check to handle dynamic provisioning later - return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil } @@ -328,7 +336,6 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol // OBSERVATION: pvc is "Pending", will retry switch { case delayBinding: - // TODO: Skip dynamic provisioning for now ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.WaitForFirstConsumer, "waiting for first consumer to be created before binding") case v1helper.GetPersistentVolumeClaimClass(claim) != "": if err = ctrl.provisionClaim(claim); err != nil { @@ -1428,9 +1435,16 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis } opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision") + // TODO: modify the Provision() interface to pass in the allowed topology information + // of the provisioned volume. volume, err = provisioner.Provision() opComplete(&err) if err != nil { + // Other places of failure have nothing to do with DynamicProvisioningScheduling, + // so just let the controller retry in the next sync. We only call + // rescheduleProvisioning here when the underlying provisioning actually failed. + ctrl.rescheduleProvisioning(claim) + strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) @@ -1521,6 +1535,29 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis } } +// rescheduleProvisioning signals back to the scheduler to retry dynamic provisioning +// by removing the annSelectedNode annotation. +func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.PersistentVolumeClaim) { + if _, ok := claim.Annotations[annSelectedNode]; !ok { + // Provisioning not triggered by the scheduler, skip + return + } + + // The claim from method args can be pointing to watcher cache. We must not + // modify it, therefore create a copy. + newClaim := claim.DeepCopy() + delete(newClaim.Annotations, annSelectedNode) + // Try to update the PVC object + if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(newClaim); err != nil { + glog.V(4).Infof("Failed to delete annotation 'annSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err) + return + } + if _, err := ctrl.storeClaimUpdate(newClaim); err != nil { + // We will get a "claim updated" event soon; this is not a big error + glog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err) + } +} + // getProvisionedVolumeNameForClaim returns PV.Name for the provisioned volume. // The name must be unique.
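// (The generated name is derived from the claim's UID, typically of the form
// "pvc-<uid>", which is what guarantees uniqueness across claims.)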
func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim *v1.PersistentVolumeClaim) string { diff --git a/pkg/controller/volume/persistentvolume/pv_controller_test.go b/pkg/controller/volume/persistentvolume/pv_controller_test.go index 5454fe26ece..48b7c1d983b 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_test.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_test.go @@ -312,8 +312,8 @@ func TestDelayBinding(t *testing.T) { } } - // When feature gate is disabled, should always be delayed - name := "feature-disabled" + // When volumeScheduling feature gate is disabled, should always be delayed + name := "volumeScheduling-feature-disabled" shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode)) if err != nil { t.Errorf("Test %q returned error: %v", name, err) @@ -322,7 +322,7 @@ func TestDelayBinding(t *testing.T) { t.Errorf("Test %q returned true, expected false", name) } - // Enable feature gate + // Enable volumeScheduling feature gate utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true") defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false") @@ -338,4 +338,43 @@ func TestDelayBinding(t *testing.T) { t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay) } } + + // When dynamicProvisioningScheduling feature gate is disabled, should be delayed, + // even if the pvc has selectedNode annotation. + provisionedClaim := makePVCClass(&classWaitMode) + provisionedClaim.Annotations = map[string]string{annSelectedNode: "node-name"} + name = "dynamicProvisioningScheduling-feature-disabled" + shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim) + if err != nil { + t.Errorf("Test %q returned error: %v", name, err) + } + if !shouldDelay { + t.Errorf("Test %q returned false, expected true", name) + } + + // Enable DynamicProvisioningScheduling feature gate + utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=true") + defer utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=false") + + // When the pvc does not have selectedNode annotation, should be delayed, + // even if dynamicProvisioningScheduling feature gate is enabled. + name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-not-set" + shouldDelay, err = ctrl.shouldDelayBinding(makePVCClass(&classWaitMode)) + if err != nil { + t.Errorf("Test %q returned error: %v", name, err) + } + if !shouldDelay { + t.Errorf("Test %q returned false, expected true", name) + } + + // Should not be delayed when dynamicProvisioningScheduling feature gate is enabled, + // and the pvc has selectedNode annotation. + name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-set" + shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim) + if err != nil { + t.Errorf("Test %q returned error: %v", name, err) + } + if shouldDelay { + t.Errorf("Test %q returned true, expected false", name) + } } From b01699b9f748a0f1ce565e0415bf0964808d2de5 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Wed, 23 May 2018 16:53:13 +0200 Subject: [PATCH 165/307] apiextensions: unify multi- and mono-versioned test helpers --- .../test/integration/basic_test.go | 255 ++++++++++-------- .../test/integration/registration_test.go | 32 +-- .../test/integration/subresources_test.go | 2 +- .../test/integration/testserver/resources.go | 28 +- .../test/integration/validation_test.go | 9 +- .../test/integration/versioning_test.go | 171 +----------- 6 files changed, 162 insertions(+), 335 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go index 39e8e6080fb..063e4d8882e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go @@ -81,124 +81,163 @@ func TestClusterScopedCRUD(t *testing.T) { } func testSimpleCRUD(t *testing.T, ns string, noxuDefinition *apiextensionsv1beta1.CustomResourceDefinition, dynamicClient dynamic.Interface) { - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) - initialList, err := noxuResourceClient.List(metav1.ListOptions{}) - if err != nil { - t.Fatal(err) - } - if e, a := 0, len(initialList.Items); e != a { - t.Errorf("expected %v, got %v", e, a) - } - initialListTypeMeta, err := meta.TypeAccessor(initialList) - if err != nil { - t.Fatal(err) - } - if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Version, initialListTypeMeta.GetAPIVersion(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := noxuDefinition.Spec.Names.ListKind, initialListTypeMeta.GetKind(); e != a { - t.Errorf("expected %v, got %v", e, a) + noxuResourceClients := map[string]dynamic.ResourceInterface{} + noxuWatchs := map[string]watch.Interface{} + disabledVersions := map[string]bool{} + for _, v := range noxuDefinition.Spec.Versions { + disabledVersions[v.Name] = !v.Served } + for _, v := range noxuDefinition.Spec.Versions { + noxuResourceClients[v.Name] = NewNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name) - initialListListMeta, err := meta.ListAccessor(initialList) - if err != nil { - t.Fatal(err) - } - noxuWatch, err := noxuResourceClient.Watch(metav1.ListOptions{ResourceVersion: initialListListMeta.GetResourceVersion()}) - if err != nil { - t.Fatal(err) - } - defer noxuWatch.Stop() - - createdNoxuInstance, err := instantiateCustomResource(t, testserver.NewNoxuInstance(ns, "foo"), noxuResourceClient, noxuDefinition) - if err != nil { - t.Fatalf("unable to create noxu Instance:%v", err) - } - - select { - case watchEvent := <-noxuWatch.ResultChan(): - if e, a := watch.Added, watchEvent.Type; e != a { - t.Errorf("expected %v, got %v", e, a) - break + noxuWatch, err := noxuResourceClients[v.Name].Watch(metav1.ListOptions{}) + if disabledVersions[v.Name] { + if err == nil { + t.Errorf("expected the watch creation fail for disabled version %s", v.Name) + } + } else { + if err != nil { + t.Fatal(err) + } + noxuWatchs[v.Name] = noxuWatch } - createdObjectMeta, err := meta.Accessor(watchEvent.Object) + } + defer func() { + for _, w := range noxuWatchs { + w.Stop() + } + }() + + for version, noxuResourceClient := range noxuResourceClients { + createdNoxuInstance, err := instantiateVersionedCustomResource(t, testserver.NewVersionedNoxuInstance(ns, "foo", version), noxuResourceClient, noxuDefinition, version) + if disabledVersions[version] { + if err == nil { + 
t.Errorf("expected the CR creation fail for disabled version %s", version) + } + continue + } + if err != nil { + t.Fatalf("unable to create noxu Instance:%v", err) + } + if e, a := noxuDefinition.Spec.Group+"/"+version, createdNoxuInstance.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + for watchVersion, noxuWatch := range noxuWatchs { + select { + case watchEvent := <-noxuWatch.ResultChan(): + if e, a := watch.Added, watchEvent.Type; e != a { + t.Errorf("expected %v, got %v", e, a) + break + } + createdObjectMeta, err := meta.Accessor(watchEvent.Object) + if err != nil { + t.Fatal(err) + } + // it should have a UUID + if len(createdObjectMeta.GetUID()) == 0 { + t.Errorf("missing uuid: %#v", watchEvent.Object) + } + if e, a := ns, createdObjectMeta.GetNamespace(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + createdTypeMeta, err := meta.TypeAccessor(watchEvent.Object) + if err != nil { + t.Fatal(err) + } + if e, a := noxuDefinition.Spec.Group+"/"+watchVersion, createdTypeMeta.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := noxuDefinition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + case <-time.After(5 * time.Second): + t.Errorf("missing watch event") + } + } + + // Check get for all versions + for version2, noxuResourceClient2 := range noxuResourceClients { + // Get test + gottenNoxuInstance, err := noxuResourceClient2.Get("foo", metav1.GetOptions{}) + + if disabledVersions[version2] { + if err == nil { + t.Errorf("expected the get operation fail for disabled version %s", version2) + } + } else { + if err != nil { + t.Fatal(err) + } + + if e, a := version2, gottenNoxuInstance.GroupVersionKind().Version; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + } + + // List test + listWithItem, err := noxuResourceClient2.List(metav1.ListOptions{}) + if disabledVersions[version2] { + if err == nil { + t.Errorf("expected the list operation fail for disabled version %s", version2) + } + } else { + if err != nil { + t.Fatal(err) + } + if e, a := 1, len(listWithItem.Items); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := version2, listWithItem.GroupVersionKind().Version; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := version2, listWithItem.Items[0].GroupVersionKind().Version; !reflect.DeepEqual(e, a) { + t.Errorf("expected %v, got %v", e, a) + } + } + } + + // Delete test + if err := noxuResourceClient.Delete("foo", metav1.NewDeleteOptions(0)); err != nil { + t.Fatal(err) + } + + listWithoutItem, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) } - // it should have a UUID - if len(createdObjectMeta.GetUID()) == 0 { - t.Errorf("missing uuid: %#v", watchEvent.Object) - } - if e, a := ns, createdObjectMeta.GetNamespace(); e != a { + if e, a := 0, len(listWithoutItem.Items); e != a { t.Errorf("expected %v, got %v", e, a) } - createdTypeMeta, err := meta.TypeAccessor(watchEvent.Object) - if err != nil { + + for _, noxuWatch := range noxuWatchs { + select { + case watchEvent := <-noxuWatch.ResultChan(): + if e, a := watch.Deleted, watchEvent.Type; e != a { + t.Errorf("expected %v, got %v", e, a) + break + } + deletedObjectMeta, err := meta.Accessor(watchEvent.Object) + if err != nil { + t.Fatal(err) + } + // it should have a UUID + createdObjectMeta, err := meta.Accessor(createdNoxuInstance) + if err != nil { + t.Fatal(err) + } + if e, a := createdObjectMeta.GetUID(), 
deletedObjectMeta.GetUID(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + + case <-time.After(5 * time.Second): + t.Errorf("missing watch event") + } + } + + // Delete test + if err := noxuResourceClient.DeleteCollection(metav1.NewDeleteOptions(0), metav1.ListOptions{}); err != nil { t.Fatal(err) } - if e, a := noxuDefinition.Spec.Group+"/"+noxuDefinition.Spec.Version, createdTypeMeta.GetAPIVersion(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := noxuDefinition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - - case <-time.After(5 * time.Second): - t.Errorf("missing watch event") - } - - gottenNoxuInstance, err := noxuResourceClient.Get("foo", metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - if e, a := createdNoxuInstance, gottenNoxuInstance; !reflect.DeepEqual(e, a) { - t.Errorf("expected %v, got %v", e, a) - } - - listWithItem, err := noxuResourceClient.List(metav1.ListOptions{}) - if err != nil { - t.Fatal(err) - } - if e, a := 1, len(listWithItem.Items); e != a { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := *createdNoxuInstance, listWithItem.Items[0]; !reflect.DeepEqual(e, a) { - t.Errorf("expected %v, got %v", e, a) - } - - if err := noxuResourceClient.Delete("foo", nil); err != nil { - t.Fatal(err) - } - - listWithoutItem, err := noxuResourceClient.List(metav1.ListOptions{}) - if err != nil { - t.Fatal(err) - } - if e, a := 0, len(listWithoutItem.Items); e != a { - t.Errorf("expected %v, got %v", e, a) - } - - select { - case watchEvent := <-noxuWatch.ResultChan(): - if e, a := watch.Deleted, watchEvent.Type; e != a { - t.Errorf("expected %v, got %v", e, a) - break - } - deletedObjectMeta, err := meta.Accessor(watchEvent.Object) - if err != nil { - t.Fatal(err) - } - // it should have a UUID - createdObjectMeta, err := meta.Accessor(createdNoxuInstance) - if err != nil { - t.Fatal(err) - } - if e, a := createdObjectMeta.GetUID(), deletedObjectMeta.GetUID(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - - case <-time.After(5 * time.Second): - t.Errorf("missing watch event") } } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go index 302559d59b5..736c21061f0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go @@ -42,30 +42,7 @@ import ( ) func instantiateCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) { - createdInstance, err := client.Create(instanceToCreate) - if err != nil { - t.Logf("%#v", createdInstance) - return nil, err - } - createdObjectMeta, err := meta.Accessor(createdInstance) - if err != nil { - t.Fatal(err) - } - // it should have a UUID - if len(createdObjectMeta.GetUID()) == 0 { - t.Errorf("missing uuid: %#v", createdInstance) - } - createdTypeMeta, err := meta.TypeAccessor(createdInstance) - if err != nil { - t.Fatal(err) - } - if e, a := definition.Spec.Group+"/"+definition.Spec.Version, createdTypeMeta.GetAPIVersion(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := definition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - return createdInstance, nil + return 
instantiateVersionedCustomResource(t, instanceToCreate, client, definition, definition.Spec.Versions[0].Name) } func instantiateVersionedCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition, version string) (*unstructured.Unstructured, error) { @@ -105,12 +82,7 @@ func NewNamespacedCustomResourceVersionedClient(ns string, client dynamic.Interf } func NewNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) dynamic.ResourceInterface { - gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural} - - if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped { - return client.Resource(gvr).Namespace(ns) - } - return client.Resource(gvr) + return NewNamespacedCustomResourceVersionedClient(ns, client, crd, crd.Spec.Versions[0].Name) } func TestMultipleResourceInstances(t *testing.T) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go index 770ead2872e..c861eace5c9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go @@ -342,7 +342,7 @@ func TestValidationSchema(t *testing.T) { noxuDefinition.Spec.Subresources = &apiextensionsv1beta1.CustomResourceSubresources{ Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{}, } - noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatalf(`unexpected non-error, expected: must only have "properties" or "required" at the root if the status subresource is enabled`) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go index b84d623dd81..b6d9da11ee6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go @@ -80,10 +80,10 @@ func NewNoxuCustomResourceDefinition(scope apiextensionsv1beta1.ResourceScope) * } } -func NewNoxuInstance(namespace, name string) *unstructured.Unstructured { +func NewVersionedNoxuInstance(namespace, name, version string) *unstructured.Unstructured { return &unstructured.Unstructured{ Object: map[string]interface{}{ - "apiVersion": "mygroup.example.com/v1beta1", + "apiVersion": "mygroup.example.com/" + version, "kind": "WishIHadChosenNoxu", "metadata": map[string]interface{}{ "namespace": namespace, @@ -100,6 +100,10 @@ func NewNoxuInstance(namespace, name string) *unstructured.Unstructured { } } +func NewNoxuInstance(namespace, name string) *unstructured.Unstructured { + return NewVersionedNoxuInstance(namespace, name, "v1beta1") +} + func NewMultipleVersionNoxuCRD(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { return &apiextensionsv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: "noxus.mygroup.example.com"}, @@ -136,26 +140,6 @@ func NewMultipleVersionNoxuCRD(scope apiextensionsv1beta1.ResourceScope) *apiext } } -func 
NewVersionedNoxuInstance(namespace, name, version string) *unstructured.Unstructured { - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "mygroup.example.com/" + version, - "kind": "WishIHadChosenNoxu", - "metadata": map[string]interface{}{ - "namespace": namespace, - "name": name, - }, - "content": map[string]interface{}{ - "key": "value", - }, - "num": map[string]interface{}{ - "num1": noxuInstanceNum, - "num2": 1000000, - }, - }, - } -} - func NewNoxu2CustomResourceDefinition(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { return &apiextensionsv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{Name: "noxus2.mygroup.example.com"}, diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go index ac7b608ad23..0855def133a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go @@ -54,8 +54,7 @@ func TestForProperValidationErrors(t *testing.T) { { name: "bad version", instanceFn: func() *unstructured.Unstructured { - instance := testserver.NewNoxuInstance(ns, "foo") - instance.Object["apiVersion"] = "mygroup.example.com/v2" + instance := testserver.NewVersionedNoxuInstance(ns, "foo", "v2") return instance }, expectedError: "the API version in the data (mygroup.example.com/v2) does not match the expected API version (mygroup.example.com/v1beta1)", @@ -384,7 +383,7 @@ func TestForbiddenFieldsInSchema(t *testing.T) { noxuDefinition := newNoxuValidationCRD(apiextensionsv1beta1.NamespaceScoped) noxuDefinition.Spec.Validation.OpenAPIV3Schema.AdditionalProperties.Allows = false - noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatalf("unexpected non-error: additionalProperties cannot be set to false") } @@ -395,7 +394,7 @@ func TestForbiddenFieldsInSchema(t *testing.T) { } noxuDefinition.Spec.Validation.OpenAPIV3Schema.AdditionalProperties.Allows = true - noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatalf("unexpected non-error: uniqueItems cannot be set to true") } @@ -406,7 +405,7 @@ func TestForbiddenFieldsInSchema(t *testing.T) { UniqueItems: false, } - noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) + _, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err == nil { t.Fatal("unexpected non-error: $ref cannot be non-empty string") } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go index 62d59f5dfd1..b9983394a84 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go @@ -19,14 +19,9 @@ package integration import ( "reflect" "testing" - "time" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" 
"k8s.io/apiextensions-apiserver/test/integration/testserver" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" ) func TestVersionedNamspacedScopedCRD(t *testing.T) { @@ -43,7 +38,7 @@ func TestVersionedNamspacedScopedCRD(t *testing.T) { } ns := "not-the-default" - testSimpleVersionedCRUD(t, ns, noxuDefinition, dynamicClient) + testSimpleCRUD(t, ns, noxuDefinition, dynamicClient) } func TestVersionedClusterScopedCRD(t *testing.T) { @@ -60,7 +55,7 @@ func TestVersionedClusterScopedCRD(t *testing.T) { } ns := "" - testSimpleVersionedCRUD(t, ns, noxuDefinition, dynamicClient) + testSimpleCRUD(t, ns, noxuDefinition, dynamicClient) } func TestStoragedVersionInNamespacedCRDStatus(t *testing.T) { @@ -140,165 +135,3 @@ func testStoragedVersionInCRDStatus(t *testing.T, ns string, noxuDefinition *api t.Fatal(err) } } - -func testSimpleVersionedCRUD(t *testing.T, ns string, noxuDefinition *apiextensionsv1beta1.CustomResourceDefinition, dynamicClient dynamic.Interface) { - noxuResourceClients := map[string]dynamic.ResourceInterface{} - noxuWatchs := map[string]watch.Interface{} - disbaledVersions := map[string]bool{} - for _, v := range noxuDefinition.Spec.Versions { - disbaledVersions[v.Name] = !v.Served - } - for _, v := range noxuDefinition.Spec.Versions { - noxuResourceClients[v.Name] = NewNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name) - - noxuWatch, err := noxuResourceClients[v.Name].Watch(metav1.ListOptions{}) - if disbaledVersions[v.Name] { - if err == nil { - t.Errorf("expected the watch creation fail for disabled version %s", v.Name) - } - } else { - if err != nil { - t.Fatal(err) - } - noxuWatchs[v.Name] = noxuWatch - } - } - defer func() { - for _, w := range noxuWatchs { - w.Stop() - } - }() - - for version, noxuResourceClient := range noxuResourceClients { - createdNoxuInstance, err := instantiateVersionedCustomResource(t, testserver.NewVersionedNoxuInstance(ns, "foo", version), noxuResourceClient, noxuDefinition, version) - if disbaledVersions[version] { - if err == nil { - t.Errorf("expected the CR creation fail for disabled version %s", version) - } - continue - } - if err != nil { - t.Fatalf("unable to create noxu Instance:%v", err) - } - if e, a := noxuDefinition.Spec.Group+"/"+version, createdNoxuInstance.GetAPIVersion(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - for watchVersion, noxuWatch := range noxuWatchs { - select { - case watchEvent := <-noxuWatch.ResultChan(): - if e, a := watch.Added, watchEvent.Type; e != a { - t.Errorf("expected %v, got %v", e, a) - break - } - createdObjectMeta, err := meta.Accessor(watchEvent.Object) - if err != nil { - t.Fatal(err) - } - // it should have a UUID - if len(createdObjectMeta.GetUID()) == 0 { - t.Errorf("missing uuid: %#v", watchEvent.Object) - } - if e, a := ns, createdObjectMeta.GetNamespace(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - createdTypeMeta, err := meta.TypeAccessor(watchEvent.Object) - if err != nil { - t.Fatal(err) - } - if e, a := noxuDefinition.Spec.Group+"/"+watchVersion, createdTypeMeta.GetAPIVersion(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := noxuDefinition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - case <-time.After(5 * time.Second): - t.Errorf("missing watch event") - } - } - - // Check get for all versions - for version2, noxuResourceClient2 := range noxuResourceClients { - // Get 
test - gottenNoxuInstance, err := noxuResourceClient2.Get("foo", metav1.GetOptions{}) - - if disbaledVersions[version2] { - if err == nil { - t.Errorf("expected the get operation fail for disabled version %s", version2) - } - } else { - if err != nil { - t.Fatal(err) - } - - if e, a := version2, gottenNoxuInstance.GroupVersionKind().Version; !reflect.DeepEqual(e, a) { - t.Errorf("expected %v, got %v", e, a) - } - } - - // List test - listWithItem, err := noxuResourceClient2.List(metav1.ListOptions{}) - if disbaledVersions[version2] { - if err == nil { - t.Errorf("expected the list operation fail for disabled version %s", version2) - } - } else { - if err != nil { - t.Fatal(err) - } - if e, a := 1, len(listWithItem.Items); e != a { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := version2, listWithItem.GroupVersionKind().Version; !reflect.DeepEqual(e, a) { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := version2, listWithItem.Items[0].GroupVersionKind().Version; !reflect.DeepEqual(e, a) { - t.Errorf("expected %v, got %v", e, a) - } - } - } - - // Delete test - if err := noxuResourceClient.Delete("foo", metav1.NewDeleteOptions(0)); err != nil { - t.Fatal(err) - } - - listWithoutItem, err := noxuResourceClient.List(metav1.ListOptions{}) - if err != nil { - t.Fatal(err) - } - if e, a := 0, len(listWithoutItem.Items); e != a { - t.Errorf("expected %v, got %v", e, a) - } - - for _, noxuWatch := range noxuWatchs { - select { - case watchEvent := <-noxuWatch.ResultChan(): - if e, a := watch.Deleted, watchEvent.Type; e != a { - t.Errorf("expected %v, got %v", e, a) - break - } - deletedObjectMeta, err := meta.Accessor(watchEvent.Object) - if err != nil { - t.Fatal(err) - } - // it should have a UUID - createdObjectMeta, err := meta.Accessor(createdNoxuInstance) - if err != nil { - t.Fatal(err) - } - if e, a := createdObjectMeta.GetUID(), deletedObjectMeta.GetUID(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - - case <-time.After(5 * time.Second): - t.Errorf("missing watch event") - } - } - - // Delete test - if err := noxuResourceClient.DeleteCollection(metav1.NewDeleteOptions(0), metav1.ListOptions{}); err != nil { - t.Fatal(err) - } - - } -} From d6d2668f28a2edc3fed1b01a985093bc004b43b1 Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Thu, 24 May 2018 11:47:00 +0200 Subject: [PATCH 166/307] apiextensions: cleanup test/integration helpers --- .../test/integration/BUILD | 18 ++++ .../test/integration/basic_test.go | 24 ++--- .../test/integration/finalization_test.go | 6 +- .../test/integration/helpers.go | 94 +++++++++++++++++++ .../test/integration/registration_test.go | 63 ++----------- .../test/integration/subresources_test.go | 10 +- .../test/integration/testserver/resources.go | 23 ----- .../test/integration/validation_test.go | 12 +-- .../test/integration/versioning_test.go | 5 +- 9 files changed, 150 insertions(+), 105 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD index 5a37caa82ec..6c5adbefbb4 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD @@ -2,6 +2,7 @@ package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", + "go_library", "go_test", ) @@ -16,6 +17,7 @@ go_test( "versioning_test.go", "yaml_test.go", ], + embed = [":go_default_library"], tags = ["integration"], deps = [ "//vendor/github.com/coreos/etcd/clientv3:go_default_library", @@ -54,3 +56,19 @@ filegroup( ], tags = ["automanaged"], ) + +go_library( + name = "go_default_library", + srcs = ["helpers.go"], + importpath = "k8s.io/apiextensions-apiserver/test/integration", + deps = [ + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/client-go/dynamic:go_default_library", + ], +) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go index 063e4d8882e..73585b42c42 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/basic_test.go @@ -88,7 +88,7 @@ func testSimpleCRUD(t *testing.T, ns string, noxuDefinition *apiextensionsv1beta disabledVersions[v.Name] = !v.Served } for _, v := range noxuDefinition.Spec.Versions { - noxuResourceClients[v.Name] = NewNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name) + noxuResourceClients[v.Name] = newNamespacedCustomResourceVersionedClient(ns, dynamicClient, noxuDefinition, v.Name) noxuWatch, err := noxuResourceClients[v.Name].Watch(metav1.ListOptions{}) if disabledVersions[v.Name] { @@ -242,7 +242,7 @@ func testSimpleCRUD(t *testing.T, ns string, noxuDefinition *apiextensionsv1beta } func testFieldSelector(t *testing.T, ns string, noxuDefinition *apiextensionsv1beta1.CustomResourceDefinition, dynamicClient dynamic.Interface) { - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) initialList, 
err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) @@ -449,7 +449,7 @@ func TestNoNamespaceReject(t *testing.T) { } ns := "" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) initialList, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) @@ -509,7 +509,7 @@ func TestSelfLink(t *testing.T) { } ns := "not-the-default" - noxuNamespacedResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) noxuInstanceToCreate := testserver.NewNoxuInstance(ns, "foo") createdNoxuInstance, err := noxuNamespacedResourceClient.Create(noxuInstanceToCreate) @@ -528,7 +528,7 @@ func TestSelfLink(t *testing.T) { t.Fatal(err) } - curletResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, curletDefinition) + curletResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, curletDefinition) curletInstanceToCreate := testserver.NewCurletInstance(ns, "foo") createdCurletInstance, err := curletResourceClient.Create(curletInstanceToCreate) @@ -555,7 +555,7 @@ func TestPreserveInt(t *testing.T) { } ns := "not-the-default" - noxuNamespacedResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) noxuInstanceToCreate := testserver.NewNoxuInstance(ns, "foo") createdNoxuInstance, err := noxuNamespacedResourceClient.Create(noxuInstanceToCreate) @@ -597,7 +597,7 @@ func TestPatch(t *testing.T) { } ns := "not-the-default" - noxuNamespacedResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) noxuInstanceToCreate := testserver.NewNoxuInstance(ns, "foo") createdNoxuInstance, err := noxuNamespacedResourceClient.Create(noxuInstanceToCreate) @@ -657,7 +657,7 @@ func TestCrossNamespaceListWatch(t *testing.T) { } ns := "" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) initialList, err := noxuResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) @@ -679,13 +679,13 @@ func TestCrossNamespaceListWatch(t *testing.T) { instances := make(map[string]*unstructured.Unstructured) ns1 := "namespace-1" - noxuNamespacedResourceClient1 := NewNamespacedCustomResourceClient(ns1, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient1 := newNamespacedCustomResourceClient(ns1, dynamicClient, noxuDefinition) instances[ns1] = createInstanceWithNamespaceHelper(t, ns1, "foo1", noxuNamespacedResourceClient1, noxuDefinition) noxuNamespacesWatch1, err := noxuNamespacedResourceClient1.Watch(metav1.ListOptions{ResourceVersion: initialListListMeta.GetResourceVersion()}) defer noxuNamespacesWatch1.Stop() ns2 := "namespace-2" - noxuNamespacedResourceClient2 := NewNamespacedCustomResourceClient(ns2, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient2 := newNamespacedCustomResourceClient(ns2, dynamicClient, noxuDefinition) instances[ns2] = createInstanceWithNamespaceHelper(t, ns2, "foo2", noxuNamespacedResourceClient2, noxuDefinition) noxuNamespacesWatch2, err := 
noxuNamespacedResourceClient2.Watch(metav1.ListOptions{ResourceVersion: initialListListMeta.GetResourceVersion()}) defer noxuNamespacesWatch2.Stop() @@ -800,7 +800,7 @@ func TestNameConflict(t *testing.T) { // A NameConflict occurs err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - crd, err := testserver.GetCustomResourceDefinition(noxu2Definition, apiExtensionClient) + crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxu2Definition.Name, metav1.GetOptions{}) if err != nil { return false, err } @@ -823,7 +823,7 @@ func TestNameConflict(t *testing.T) { // Names are now accepted err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - crd, err := testserver.GetCustomResourceDefinition(noxu2Definition, apiExtensionClient) + crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxu2Definition.Name, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go index ae456713abd..4dcf8d727bb 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/finalization_test.go @@ -41,7 +41,7 @@ func TestFinalization(t *testing.T) { ns := "not-the-default" name := "foo123" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) instance := testserver.NewNoxuInstance(ns, name) instance.SetFinalizers([]string{"noxu.example.com/finalizer"}) @@ -106,7 +106,7 @@ func TestFinalizationAndDeletion(t *testing.T) { // Create a CR with a finalizer. ns := "not-the-default" name := "foo123" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) instance := testserver.NewNoxuInstance(ns, name) instance.SetFinalizers([]string{"noxu.example.com/finalizer"}) @@ -156,7 +156,7 @@ func TestFinalizationAndDeletion(t *testing.T) { } err = wait.Poll(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { - _, err = testserver.GetCustomResourceDefinition(noxuDefinition, apiExtensionClient) + _, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}) return errors.IsNotFound(err), err }) if !errors.IsNotFound(err) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go new file mode 100644 index 00000000000..5aa231d4551 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/helpers.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "fmt" + "testing" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" +) + +func instantiateCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) { + return instantiateVersionedCustomResource(t, instanceToCreate, client, definition, definition.Spec.Versions[0].Name) +} + +func instantiateVersionedCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition, version string) (*unstructured.Unstructured, error) { + createdInstance, err := client.Create(instanceToCreate) + if err != nil { + t.Logf("%#v", createdInstance) + return nil, err + } + createdObjectMeta, err := meta.Accessor(createdInstance) + if err != nil { + t.Fatal(err) + } + // it should have a UUID + if len(createdObjectMeta.GetUID()) == 0 { + t.Errorf("missing uuid: %#v", createdInstance) + } + createdTypeMeta, err := meta.TypeAccessor(createdInstance) + if err != nil { + t.Fatal(err) + } + if e, a := definition.Spec.Group+"/"+version, createdTypeMeta.GetAPIVersion(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + if e, a := definition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { + t.Errorf("expected %v, got %v", e, a) + } + return createdInstance, nil +} + +func newNamespacedCustomResourceVersionedClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition, version string) dynamic.ResourceInterface { + gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version, Resource: crd.Spec.Names.Plural} + + if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped { + return client.Resource(gvr).Namespace(ns) + } + return client.Resource(gvr) +} + +func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) dynamic.ResourceInterface { + return newNamespacedCustomResourceVersionedClient(ns, client, crd, crd.Spec.Versions[0].Name) +} + +// updateCustomResourceDefinitionWithRetry updates a CRD, retrying up to 5 times on version conflict errors. 
+func updateCustomResourceDefinitionWithRetry(client clientset.Interface, name string, update func(*apiextensionsv1beta1.CustomResourceDefinition)) (*apiextensionsv1beta1.CustomResourceDefinition, error) { + for i := 0; i < 5; i++ { + crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get CustomResourceDefinition %q: %v", name, err) + } + update(crd) + crd, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) + if err == nil { + return crd, nil + } + if !errors.IsConflict(err) { + return nil, fmt.Errorf("failed to update CustomResourceDefinition %q: %v", name, err) + } + } + return nil, fmt.Errorf("too many retries after conflicts updating CustomResourceDefinition %q", name) +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go index 736c21061f0..9cb33ac70e3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/registration_test.go @@ -36,55 +36,10 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" ) -func instantiateCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) { - return instantiateVersionedCustomResource(t, instanceToCreate, client, definition, definition.Spec.Versions[0].Name) -} - -func instantiateVersionedCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition, version string) (*unstructured.Unstructured, error) { - createdInstance, err := client.Create(instanceToCreate) - if err != nil { - t.Logf("%#v", createdInstance) - return nil, err - } - createdObjectMeta, err := meta.Accessor(createdInstance) - if err != nil { - t.Fatal(err) - } - // it should have a UUID - if len(createdObjectMeta.GetUID()) == 0 { - t.Errorf("missing uuid: %#v", createdInstance) - } - createdTypeMeta, err := meta.TypeAccessor(createdInstance) - if err != nil { - t.Fatal(err) - } - if e, a := definition.Spec.Group+"/"+version, createdTypeMeta.GetAPIVersion(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := definition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - return createdInstance, nil -} - -func NewNamespacedCustomResourceVersionedClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition, version string) dynamic.ResourceInterface { - gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version, Resource: crd.Spec.Names.Plural} - - if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped { - return client.Resource(gvr).Namespace(ns) - } - return client.Resource(gvr) -} - -func NewNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) dynamic.ResourceInterface { - return NewNamespacedCustomResourceVersionedClient(ns, client, crd, crd.Spec.Versions[0].Name) -} - func TestMultipleResourceInstances(t *testing.T) { 
stopCh, apiExtensionClient, dynamicClient, err := testserver.StartDefaultServerWithClients() if err != nil { @@ -98,7 +53,7 @@ func TestMultipleResourceInstances(t *testing.T) { if err != nil { t.Fatal(err) } - noxuNamespacedResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) noxuList, err := noxuNamespacedResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) @@ -223,7 +178,7 @@ func TestMultipleRegistration(t *testing.T) { if err != nil { t.Fatal(err) } - noxuNamespacedResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) createdNoxuInstance, err := instantiateCustomResource(t, testserver.NewNoxuInstance(ns, sameInstanceName), noxuNamespacedResourceClient, noxuDefinition) if err != nil { t.Fatalf("unable to create noxu Instance:%v", err) @@ -242,7 +197,7 @@ func TestMultipleRegistration(t *testing.T) { if err != nil { t.Fatal(err) } - curletNamespacedResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, curletDefinition) + curletNamespacedResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, curletDefinition) createdCurletInstance, err := instantiateCustomResource(t, testserver.NewCurletInstance(ns, sameInstanceName), curletNamespacedResourceClient, curletDefinition) if err != nil { t.Fatalf("unable to create noxu Instance:%v", err) @@ -279,14 +234,14 @@ func TestDeRegistrationAndReRegistration(t *testing.T) { if err != nil { t.Fatal(err) } - noxuNamespacedResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) if _, err := instantiateCustomResource(t, testserver.NewNoxuInstance(ns, sameInstanceName), noxuNamespacedResourceClient, noxuDefinition); err != nil { t.Fatal(err) } if err := testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient); err != nil { t.Fatal(err) } - if _, err := testserver.GetCustomResourceDefinition(noxuDefinition, apiExtensionClient); err == nil || !errors.IsNotFound(err) { + if _, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}); err == nil || !errors.IsNotFound(err) { t.Fatalf("expected a NotFound error, got:%v", err) } if _, err = noxuNamespacedResourceClient.List(metav1.ListOptions{}); err == nil || !errors.IsNotFound(err) { @@ -298,14 +253,14 @@ func TestDeRegistrationAndReRegistration(t *testing.T) { }() func() { - if _, err := testserver.GetCustomResourceDefinition(noxuDefinition, apiExtensionClient); err == nil || !errors.IsNotFound(err) { + if _, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}); err == nil || !errors.IsNotFound(err) { t.Fatalf("expected a NotFound error, got:%v", err) } noxuDefinition, err := testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient) if err != nil { t.Fatal(err) } - noxuNamespacedResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) initialList, err := noxuNamespacedResourceClient.List(metav1.ListOptions{}) if err != nil { t.Fatal(err) 
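
[Editorial aside, not part of the patch: the new helper `updateCustomResourceDefinitionWithRetry` introduced above is meant to be driven with an idempotent mutation, since the closure is re-applied on every conflict retry. A hedged usage sketch, assuming the usual integration-test context of a `*testing.T` and an `apiExtensionClient` created through the testserver helpers (mirroring the call site in the validation tests later in this series):

    // Sketch only: relax the CRD's required fields and retry on version conflicts.
    crd, err := updateCustomResourceDefinitionWithRetry(apiExtensionClient, "noxus.mygroup.example.com",
        func(crd *apiextensionsv1beta1.CustomResourceDefinition) {
            // Applied afresh on each retry, so keep the mutation idempotent.
            crd.Spec.Validation.OpenAPIV3Schema.Required = []string{"alpha"}
        })
    if err != nil {
        t.Fatal(err)
    }
    _ = crd // the returned object reflects the successfully persisted update

The helper returns the persisted CRD on success, so callers can assert on the updated spec without issuing another Get.]
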
@@ -382,7 +337,7 @@ func TestEtcdStorage(t *testing.T) { if err != nil { t.Fatal(err) } - curletNamespacedResourceClient := NewNamespacedCustomResourceClient(ns1, dynamicClient, curletDefinition) + curletNamespacedResourceClient := newNamespacedCustomResourceClient(ns1, dynamicClient, curletDefinition) if _, err := instantiateCustomResource(t, testserver.NewCurletInstance(ns1, "bar"), curletNamespacedResourceClient, curletDefinition); err != nil { t.Fatalf("unable to create curlet cluster scoped Instance:%v", err) } @@ -393,7 +348,7 @@ func TestEtcdStorage(t *testing.T) { if err != nil { t.Fatal(err) } - noxuNamespacedResourceClient := NewNamespacedCustomResourceClient(ns2, dynamicClient, noxuDefinition) + noxuNamespacedResourceClient := newNamespacedCustomResourceClient(ns2, dynamicClient, noxuDefinition) if _, err := instantiateCustomResource(t, testserver.NewNoxuInstance(ns2, "foo"), noxuNamespacedResourceClient, noxuDefinition); err != nil { t.Fatalf("unable to create noxu namespace scoped Instance:%v", err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go index c861eace5c9..da40416a338 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/subresources_test.go @@ -98,7 +98,7 @@ func TestStatusSubresource(t *testing.T) { } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) _, err = instantiateCustomResource(t, NewNoxuSubresourceInstance(ns, "foo"), noxuResourceClient, noxuDefinition) if err != nil { t.Fatalf("unable to create noxu instance: %v", err) @@ -227,7 +227,7 @@ func TestScaleSubresource(t *testing.T) { } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) _, err = instantiateCustomResource(t, NewNoxuSubresourceInstance(ns, "foo"), noxuResourceClient, noxuDefinition) if err != nil { t.Fatalf("unable to create noxu instance: %v", err) @@ -412,7 +412,7 @@ func TestValidateOnlyStatus(t *testing.T) { t.Fatal(err) } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) // set .spec.num = 10 and .status.num = 10 noxuInstance := NewNoxuSubresourceInstance(ns, "foo") @@ -552,7 +552,7 @@ func TestGeneration(t *testing.T) { } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) _, err = instantiateCustomResource(t, NewNoxuSubresourceInstance(ns, "foo"), noxuResourceClient, noxuDefinition) if err != nil { t.Fatalf("unable to create noxu instance: %v", err) @@ -631,7 +631,7 @@ func TestSubresourcePatch(t *testing.T) { } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) _, err = instantiateCustomResource(t, NewNoxuSubresourceInstance(ns, "foo"), noxuResourceClient, noxuDefinition) if 
err != nil { t.Fatalf("unable to create noxu instance: %v", err) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go index b6d9da11ee6..b5c24fe1a12 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver/resources.go @@ -315,25 +315,6 @@ func checkForWatchCachePrimed(crd *apiextensionsv1beta1.CustomResourceDefinition } } -// UpdateCustomResourceDefinition updates a CRD, retrying up to 5 times on version conflict errors. -func UpdateCustomResourceDefinition(client clientset.Interface, name string, update func(*apiextensionsv1beta1.CustomResourceDefinition)) (*apiextensionsv1beta1.CustomResourceDefinition, error) { - for i := 0; i < 5; i++ { - crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to get CustomResourceDefinition %q: %v", name, err) - } - update(crd) - crd, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) - if err == nil { - return crd, nil - } - if !errors.IsConflict(err) { - return nil, fmt.Errorf("failed to update CustomResourceDefinition %q: %v", name, err) - } - } - return nil, fmt.Errorf("too many retries after conflicts updating CustomResourceDefinition %q", name) -} - func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { if err := apiExtensionsClient.Apiextensions().CustomResourceDefinitions().Delete(crd.Name, nil); err != nil { return err @@ -357,10 +338,6 @@ func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefi return err } -func GetCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) { - return apiExtensionsClient.Apiextensions().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) -} - func CreateNewScaleClient(crd *apiextensionsv1beta1.CustomResourceDefinition, config *rest.Config) (scale.ScalesGetter, error) { discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) if err != nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go index 0855def133a..cb761330dd6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/validation_test.go @@ -44,7 +44,7 @@ func TestForProperValidationErrors(t *testing.T) { } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) tests := []struct { name string @@ -181,7 +181,7 @@ func TestCustomResourceValidation(t *testing.T) { } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) _, err = instantiateCustomResource(t, newNoxuValidationInstance(ns, "foo"), noxuResourceClient, noxuDefinition) if err != nil { t.Fatalf("unable to create noxu instance: %v", err) @@ -202,7 +202,7 @@ func 
TestCustomResourceUpdateValidation(t *testing.T) { } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) _, err = instantiateCustomResource(t, newNoxuValidationInstance(ns, "foo"), noxuResourceClient, noxuDefinition) if err != nil { t.Fatalf("unable to create noxu instance: %v", err) @@ -245,7 +245,7 @@ func TestCustomResourceValidationErrors(t *testing.T) { } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) tests := []struct { name string @@ -339,7 +339,7 @@ func TestCRValidationOnCRDUpdate(t *testing.T) { t.Fatal(err) } ns := "not-the-default" - noxuResourceClient := NewNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) + noxuResourceClient := newNamespacedCustomResourceClient(ns, dynamicClient, noxuDefinition) // CR is rejected _, err = instantiateCustomResource(t, newNoxuValidationInstance(ns, "foo"), noxuResourceClient, noxuDefinition) @@ -348,7 +348,7 @@ func TestCRValidationOnCRDUpdate(t *testing.T) { } // update the CRD to a less stricter schema - _, err = testserver.UpdateCustomResourceDefinition(apiExtensionClient, "noxus.mygroup.example.com", func(crd *apiextensionsv1beta1.CustomResourceDefinition) { + _, err = updateCustomResourceDefinitionWithRetry(apiExtensionClient, "noxus.mygroup.example.com", func(crd *apiextensionsv1beta1.CustomResourceDefinition) { crd.Spec.Validation.OpenAPIV3Schema.Required = []string{"alpha", "beta"} }) if err != nil { diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go index b9983394a84..55d0c17af22 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/versioning_test.go @@ -22,6 +22,7 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apiextensions-apiserver/test/integration/testserver" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestVersionedNamspacedScopedCRD(t *testing.T) { @@ -108,7 +109,7 @@ func testStoragedVersionInCRDStatus(t *testing.T, ns string, noxuDefinition *api } // The storage version list should be initilized to storage version - crd, err := testserver.GetCustomResourceDefinition(noxuDefinition, apiExtensionClient) + crd, err := apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -122,7 +123,7 @@ func testStoragedVersionInCRDStatus(t *testing.T, ns string, noxuDefinition *api if err != nil { t.Fatal(err) } - crd, err = testserver.GetCustomResourceDefinition(noxuDefinition, apiExtensionClient) + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(noxuDefinition.Name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } From 8d8b47596e6e71ae705ed2fd1e946e32a98a42f0 Mon Sep 17 00:00:00 2001 From: Sandeep Rajan Date: Thu, 24 May 2018 08:12:50 -0400 Subject: [PATCH 167/307] update manifest fix test --- cluster/addons/dns/coredns.yaml.base | 8 +++++- cluster/addons/dns/coredns.yaml.in | 8 +++++- cluster/addons/dns/coredns.yaml.sed | 8 +++++- .../app/phases/addons/dns/manifests.go | 6 ++++ 
cmd/kubeadm/app/phases/addons/dns/versions.go | 6 ++-- .../app/phases/upgrade/compute_test.go | 28 +++++++++---------- 6 files changed, 44 insertions(+), 20 deletions(-) diff --git a/cluster/addons/dns/coredns.yaml.base b/cluster/addons/dns/coredns.yaml.base index 012ad34dca0..9732029ae92 100644 --- a/cluster/addons/dns/coredns.yaml.base +++ b/cluster/addons/dns/coredns.yaml.base @@ -66,6 +66,7 @@ data: prometheus :9153 proxy . /etc/resolv.conf cache 30 + reload } --- apiVersion: extensions/v1beta1 @@ -103,7 +104,7 @@ spec: operator: "Exists" containers: - name: coredns - image: coredns/coredns:1.0.6 + image: coredns/coredns:1.1.3 imagePullPolicy: IfNotPresent resources: limits: @@ -122,6 +123,9 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP livenessProbe: httpGet: path: /health @@ -145,6 +149,8 @@ kind: Service metadata: name: kube-dns namespace: kube-system + annotations: + prometheus.io/scrape: "true" labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" diff --git a/cluster/addons/dns/coredns.yaml.in b/cluster/addons/dns/coredns.yaml.in index 5ccf70bd0cd..25d005aac99 100644 --- a/cluster/addons/dns/coredns.yaml.in +++ b/cluster/addons/dns/coredns.yaml.in @@ -66,6 +66,7 @@ data: prometheus :9153 proxy . /etc/resolv.conf cache 30 + reload } --- apiVersion: extensions/v1beta1 @@ -103,7 +104,7 @@ spec: operator: "Exists" containers: - name: coredns - image: coredns/coredns:1.0.6 + image: coredns/coredns:1.1.3 imagePullPolicy: IfNotPresent resources: limits: @@ -122,6 +123,9 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP livenessProbe: httpGet: path: /health @@ -145,6 +149,8 @@ kind: Service metadata: name: kube-dns namespace: kube-system + annotations: + prometheus.io/scrape: "true" labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" diff --git a/cluster/addons/dns/coredns.yaml.sed b/cluster/addons/dns/coredns.yaml.sed index d635755e770..42105f905d1 100644 --- a/cluster/addons/dns/coredns.yaml.sed +++ b/cluster/addons/dns/coredns.yaml.sed @@ -66,6 +66,7 @@ data: prometheus :9153 proxy . 
/etc/resolv.conf cache 30 + reload } --- apiVersion: extensions/v1beta1 @@ -103,7 +104,7 @@ spec: operator: "Exists" containers: - name: coredns - image: coredns/coredns:1.0.6 + image: coredns/coredns:1.1.3 imagePullPolicy: IfNotPresent resources: limits: @@ -122,6 +123,9 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP livenessProbe: httpGet: path: /health @@ -145,6 +149,8 @@ kind: Service metadata: name: kube-dns namespace: kube-system + annotations: + prometheus.io/scrape: "true" labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" diff --git a/cmd/kubeadm/app/phases/addons/dns/manifests.go b/cmd/kubeadm/app/phases/addons/dns/manifests.go index 4184555da68..2a765f634ec 100644 --- a/cmd/kubeadm/app/phases/addons/dns/manifests.go +++ b/cmd/kubeadm/app/phases/addons/dns/manifests.go @@ -196,6 +196,8 @@ metadata: kubernetes.io/name: "KubeDNS" name: kube-dns namespace: kube-system + annotations: + prometheus.io/scrape: "true" # Without this resourceVersion value, an update of the Service between versions will yield: # Service "kube-dns" is invalid: metadata.resourceVersion: Invalid value: "": must be specified for an update resourceVersion: "0" @@ -264,6 +266,9 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP livenessProbe: httpGet: path: /health @@ -303,6 +308,7 @@ data: prometheus :9153 proxy . {{ .UpstreamNameserver }} cache 30 + reload }{{ .StubDomain }} ` // CoreDNSClusterRole is the CoreDNS ClusterRole manifest diff --git a/cmd/kubeadm/app/phases/addons/dns/versions.go b/cmd/kubeadm/app/phases/addons/dns/versions.go index e72eb325fb3..a12d28dfd61 100644 --- a/cmd/kubeadm/app/phases/addons/dns/versions.go +++ b/cmd/kubeadm/app/phases/addons/dns/versions.go @@ -23,13 +23,13 @@ import ( const ( kubeDNSVersion = "1.14.10" - coreDNSVersion = "1.0.6" + coreDNSVersion = "1.1.3" ) // GetDNSVersion returns the right kube-dns version for a specific k8s version func GetDNSVersion(kubeVersion *version.Version, dns string) string { - // v1.9.0+ uses kube-dns 1.14.10 - // v1.9.0+ uses CoreDNS 1.0.6 if feature gate "CoreDNS" is enabled. + // v1.9.0+ uses kube-dns 1.14.10, if feature gate "CoreDNS" is disabled. + // v1.9.0+ uses CoreDNS 1.1.3. 
// In the future when the version is bumped at HEAD; add conditional logic to return the right versions // Also, the version might be bumped for different k8s releases on the same branch diff --git a/cmd/kubeadm/app/phases/upgrade/compute_test.go b/cmd/kubeadm/app/phases/upgrade/compute_test.go index 7050ed636af..ee17ee76a87 100644 --- a/cmd/kubeadm/app/phases/upgrade/compute_test.go +++ b/cmd/kubeadm/app/phases/upgrade/compute_test.go @@ -168,7 +168,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.10.3", KubeadmVersion: "v1.10.3", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.1.12", }, }, @@ -207,7 +207,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.10.3", KubeadmVersion: "v1.10.3", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.1.12", }, }, @@ -246,7 +246,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.11.0", KubeadmVersion: "v1.11.0", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -285,7 +285,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.10.5", KubeadmVersion: "v1.10.5", // Note: The kubeadm version mustn't be "downgraded" here DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.1.12", }, }, @@ -305,7 +305,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.11.1", KubeadmVersion: "v1.11.1", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -364,7 +364,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.11.0-alpha.2", KubeadmVersion: "v1.11.0-alpha.2", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -404,7 +404,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.11.0-alpha.2", KubeadmVersion: "v1.11.0-alpha.2", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -445,7 +445,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.11.0-beta.1", KubeadmVersion: "v1.11.0-beta.1", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -486,7 +486,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.11.0-rc.1", KubeadmVersion: "v1.11.0-rc.1", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -527,7 +527,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.11.6-rc.1", KubeadmVersion: "v1.11.6-rc.1", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -568,7 +568,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.11.0-rc.1", KubeadmVersion: "v1.11.0-rc.1", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -588,7 +588,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.12.0-alpha.2", KubeadmVersion: "v1.12.0-alpha.2", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -641,7 +641,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.12.1", KubeadmVersion: "v1.12.1", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, }, @@ -678,7 +678,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.12.0", KubeadmVersion: "v1.12.0", DNSType: "coredns", - DNSVersion: "1.0.6", + DNSVersion: "1.1.3", EtcdVersion: "3.2.18", }, 
}, From 1a1d7205b44a5cee6c542617ff27446ebfbee7ce Mon Sep 17 00:00:00 2001 From: wojtekt Date: Thu, 24 May 2018 14:05:59 +0200 Subject: [PATCH 168/307] Trigger function for secrets --- pkg/registry/core/secret/storage/storage.go | 2 +- pkg/registry/core/secret/strategy.go | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/pkg/registry/core/secret/storage/storage.go b/pkg/registry/core/secret/storage/storage.go index 20c6413b605..d6c91ab48e7 100644 --- a/pkg/registry/core/secret/storage/storage.go +++ b/pkg/registry/core/secret/storage/storage.go @@ -46,7 +46,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)}, } - options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: secret.GetAttrs} + options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: secret.GetAttrs, TriggerFunc: secret.SecretNameTriggerFunc} if err := store.CompleteWithOptions(options); err != nil { panic(err) // TODO: Propagate error up } diff --git a/pkg/registry/core/secret/strategy.go b/pkg/registry/core/secret/strategy.go index c0725f33f68..d52fd42abc2 100644 --- a/pkg/registry/core/secret/strategy.go +++ b/pkg/registry/core/secret/strategy.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" - apistorage "k8s.io/apiserver/pkg/storage" + pkgstorage "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" @@ -107,14 +107,21 @@ func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, bool, error) { } // Matcher returns a generic matcher for a given label and field selector. 
-func Matcher(label labels.Selector, field fields.Selector) apistorage.SelectionPredicate { - return apistorage.SelectionPredicate{ - Label: label, - Field: field, - GetAttrs: GetAttrs, +func Matcher(label labels.Selector, field fields.Selector) pkgstorage.SelectionPredicate { + return pkgstorage.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: GetAttrs, + IndexFields: []string{"metadata.name"}, } } +func SecretNameTriggerFunc(obj runtime.Object) []pkgstorage.MatchValue { + secret := obj.(*api.Secret) + result := pkgstorage.MatchValue{IndexName: "metadata.name", Value: secret.ObjectMeta.Name} + return []pkgstorage.MatchValue{result} +} + // SelectableFields returns a field set that can be used for filter selection func SelectableFields(obj *api.Secret) fields.Set { objectMetaFieldsSet := generic.ObjectMetaFieldsSet(&obj.ObjectMeta, true) From cf29a302582e7746c561e7365031ee6b5e79872f Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Wed, 23 May 2018 17:43:04 -0400 Subject: [PATCH 169/307] move Describer from factory --- pkg/kubectl/cmd/describe.go | 5 +- pkg/kubectl/cmd/testing/fake.go | 7 +++ pkg/kubectl/cmd/util/factory.go | 3 -- .../cmd/util/factory_object_mapping.go | 44 ---------------- pkg/kubectl/cmd/util/helpers.go | 52 +++++++++++++++++++ 5 files changed, 63 insertions(+), 48 deletions(-) diff --git a/pkg/kubectl/cmd/describe.go b/pkg/kubectl/cmd/describe.go index 73dcd3dace3..848f82100f8 100644 --- a/pkg/kubectl/cmd/describe.go +++ b/pkg/kubectl/cmd/describe.go @@ -137,7 +137,10 @@ func (o *DescribeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ o.BuilderArgs = args - o.Describer = f.Describer + o.Describer = func(mapping *meta.RESTMapping) (printers.Describer, error) { + return cmdutil.DescriberFn(f, mapping) + } + o.NewBuilder = f.NewBuilder // include the uninitialized objects by default diff --git a/pkg/kubectl/cmd/testing/fake.go b/pkg/kubectl/cmd/testing/fake.go index ce678c31665..59a4d947b58 100644 --- a/pkg/kubectl/cmd/testing/fake.go +++ b/pkg/kubectl/cmd/testing/fake.go @@ -271,10 +271,17 @@ func NewTestFactory() *TestFactory { WithClientConfig(clientConfig). WithRESTMapper(testRESTMapper()) + restConfig, err := clientConfig.ClientConfig() + if err != nil { + panic(fmt.Sprintf("unable to create a fake restclient config: %v", err)) + } + return &TestFactory{ Factory: cmdutil.NewFactory(configFlags), FakeDynamicClient: fakedynamic.NewSimpleDynamicClient(legacyscheme.Scheme), tempConfigFile: tmpFile, + + ClientConfigVal: restConfig, } } diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 04e64f65ffa..10c50389346 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -35,7 +35,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/validation" - "k8s.io/kubernetes/pkg/printers" ) // Factory provides abstractions that allow the Kubectl command to be extended across multiple types @@ -102,8 +101,6 @@ type ObjectMappingFactory interface { ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) // Returns a RESTClient for working with Unstructured objects. UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) - // Returns a Describer for displaying the specified RESTMapping type or an error. - Describer(mapping *meta.RESTMapping) (printers.Describer, error) // Returns a schema that can validate objects stored on disk. 
Validator(validate bool) (validation.Schema, error) diff --git a/pkg/kubectl/cmd/util/factory_object_mapping.go b/pkg/kubectl/cmd/util/factory_object_mapping.go index aa7105a1bd0..a4769978215 100644 --- a/pkg/kubectl/cmd/util/factory_object_mapping.go +++ b/pkg/kubectl/cmd/util/factory_object_mapping.go @@ -19,19 +19,15 @@ limitations under the License. package util import ( - "fmt" "sync" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/client-go/dynamic" restclient "k8s.io/client-go/rest" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" openapivalidation "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/validation" - "k8s.io/kubernetes/pkg/printers" - printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" ) type ring1Factory struct { @@ -91,46 +87,6 @@ func (f *ring1Factory) UnstructuredClientForMapping(mapping *meta.RESTMapping) ( return restclient.RESTClientFor(cfg) } -func (f *ring1Factory) Describer(mapping *meta.RESTMapping) (printers.Describer, error) { - clientConfig, err := f.clientAccessFactory.ToRESTConfig() - if err != nil { - return nil, err - } - // try to get a describer - if describer, ok := printersinternal.DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok { - return describer, nil - } - // if this is a kind we don't have a describer for yet, go generic if possible - if genericDescriber, genericErr := genericDescriber(f.clientAccessFactory, mapping); genericErr == nil { - return genericDescriber, nil - } - // otherwise return an unregistered error - return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String()) -} - -// helper function to make a generic describer, or return an error -func genericDescriber(clientAccessFactory ClientAccessFactory, mapping *meta.RESTMapping) (printers.Describer, error) { - clientConfig, err := clientAccessFactory.ToRESTConfig() - if err != nil { - return nil, err - } - - // used to fetch the resource - dynamicClient, err := dynamic.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - - // used to get events for the resource - clientSet, err := clientAccessFactory.ClientSet() - if err != nil { - return nil, err - } - eventsClient := clientSet.Core() - - return printersinternal.GenericDescriberFor(mapping, dynamicClient, eventsClient), nil -} - func (f *ring1Factory) Validator(validate bool) (validation.Schema, error) { if !validate { return validation.NullSchema{}, nil diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index 1e5aa7dc6a1..b4640f67b25 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -37,9 +37,14 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/dynamic" "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/printers" + printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" utilexec "k8s.io/utils/exec" ) @@ -621,3 +626,50 @@ func ShouldIncludeUninitialized(cmd *cobra.Command, includeUninitialized bool) b } return shouldIncludeUninitialized } + +// DescriberFunc gives a way to display the specified RESTMapping type +type 
DescriberFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (printers.Describer, error) + +// DescriberFn gives a way to easily override the function for unit testing if needed +var DescriberFn DescriberFunc = describer + +// Returns a Describer for displaying the specified RESTMapping type or an error. +func describer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (printers.Describer, error) { + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + // try to get a describer + if describer, ok := printersinternal.DescriberFor(mapping.GroupVersionKind.GroupKind(), clientConfig); ok { + return describer, nil + } + // if this is a kind we don't have a describer for yet, go generic if possible + if genericDescriber, genericErr := genericDescriber(restClientGetter, mapping); genericErr == nil { + return genericDescriber, nil + } + // otherwise return an unregistered error + return nil, fmt.Errorf("no description has been implemented for %s", mapping.GroupVersionKind.String()) +} + +// helper function to make a generic describer, or return an error +func genericDescriber(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (printers.Describer, error) { + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + + // used to fetch the resource + dynamicClient, err := dynamic.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + + // used to get events for the resource + clientSet, err := internalclientset.NewForConfig(clientConfig) + if err != nil { + return nil, err + } + + eventsClient := clientSet.Core() + return printersinternal.GenericDescriberFor(mapping, dynamicClient, eventsClient), nil +} From 6d117383fc575bfb084c00a11b22ec97b279f490 Mon Sep 17 00:00:00 2001 From: David Eads Date: Thu, 24 May 2018 10:50:49 -0400 Subject: [PATCH 170/307] fix describer tests --- pkg/kubectl/cmd/cmd_test.go | 14 ------- pkg/kubectl/cmd/describe_test.go | 70 +++++++++++++++++++++++++++----- pkg/kubectl/cmd/testing/BUILD | 1 - pkg/kubectl/cmd/testing/fake.go | 6 --- 4 files changed, 60 insertions(+), 31 deletions(-) diff --git a/pkg/kubectl/cmd/cmd_test.go b/pkg/kubectl/cmd/cmd_test.go index 742d8926841..8ae9c605401 100644 --- a/pkg/kubectl/cmd/cmd_test.go +++ b/pkg/kubectl/cmd/cmd_test.go @@ -39,7 +39,6 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/printers" ) // This init should be removed after switching this command and its tests to user external types. 
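
[Editorial aside, not part of the patch: the helpers.go change above exposes `DescriberFn` as a package-level variable precisely so tests can swap it out, and the describe tests below all follow the same save/override/restore idiom. A rough, self-contained sketch of that idiom, using a made-up `describeFn` hook rather than the real kubectl types:

    package main

    import "fmt"

    // describeFn is a package-level hook, loosely analogous to cmdutil.DescriberFn:
    // production code calls through the variable, and tests temporarily replace it.
    var describeFn = func(name string) string {
        return "real description of " + name
    }

    // withStubbedDescriber saves the current hook, installs a stub, runs body,
    // and restores the original in a deferred call so the override cannot leak.
    func withStubbedDescriber(stub func(string) string, body func()) {
        oldFn := describeFn
        defer func() { describeFn = oldFn }()
        describeFn = stub
        body()
    }

    func main() {
        withStubbedDescriber(func(string) string { return "test output" }, func() {
            fmt.Println(describeFn("pod/foo")) // prints "test output"
        })
        fmt.Println(describeFn("pod/foo")) // original hook restored
    }

The deferred restore matters because tests in the same package share package-level state; without it, one test's stub would leak into the next.]
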
@@ -116,19 +115,6 @@ func testData() (*api.PodList, *api.ServiceList, *api.ReplicationControllerList) return pods, svc, rc } -type testDescriber struct { - Name, Namespace string - Settings printers.DescriberSettings - Output string - Err error -} - -func (t *testDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (output string, err error) { - t.Namespace, t.Name = namespace, name - t.Settings = describerSettings - return t.Output, t.Err -} - func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser { return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj)))) } diff --git a/pkg/kubectl/cmd/describe_test.go b/pkg/kubectl/cmd/describe_test.go index 6ecc1eb909d..ecc0fc8c4ed 100644 --- a/pkg/kubectl/cmd/describe_test.go +++ b/pkg/kubectl/cmd/describe_test.go @@ -22,20 +22,29 @@ import ( "strings" "testing" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/rest/fake" "k8s.io/kubernetes/pkg/api/legacyscheme" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/scheme" + "k8s.io/kubernetes/pkg/printers" ) // Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. func TestDescribeUnknownSchemaObject(t *testing.T) { d := &testDescriber{Output: "test output"} + oldFn := cmdutil.DescriberFn + defer func() { + cmdutil.DescriberFn = oldFn + }() + cmdutil.DescriberFn = d.describerFor + tf := cmdtesting.NewTestFactory() defer tf.Cleanup() _, _, codec := cmdtesting.NewExternalScheme() - tf.DescriberVal = d + tf.UnstructuredClient = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, cmdtesting.NewInternalType("", "", "foo"))}, @@ -59,11 +68,16 @@ func TestDescribeUnknownSchemaObject(t *testing.T) { // Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. func TestDescribeUnknownNamespacedSchemaObject(t *testing.T) { d := &testDescriber{Output: "test output"} + oldFn := cmdutil.DescriberFn + defer func() { + cmdutil.DescriberFn = oldFn + }() + cmdutil.DescriberFn = d.describerFor + tf := cmdtesting.NewTestFactory() defer tf.Cleanup() _, _, codec := cmdtesting.NewExternalScheme() - tf.DescriberVal = d tf.UnstructuredClient = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, cmdtesting.NewInternalNamespacedType("", "", "foo", "non-default"))}, @@ -85,13 +99,18 @@ func TestDescribeUnknownNamespacedSchemaObject(t *testing.T) { } func TestDescribeObject(t *testing.T) { + d := &testDescriber{Output: "test output"} + oldFn := cmdutil.DescriberFn + defer func() { + cmdutil.DescriberFn = oldFn + }() + cmdutil.DescriberFn = d.describerFor + _, _, rc := testData() tf := cmdtesting.NewTestFactory() defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
- d := &testDescriber{Output: "test output"} - tf.DescriberVal = d tf.UnstructuredClient = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { @@ -122,13 +141,18 @@ func TestDescribeObject(t *testing.T) { } func TestDescribeListObjects(t *testing.T) { + d := &testDescriber{Output: "test output"} + oldFn := cmdutil.DescriberFn + defer func() { + cmdutil.DescriberFn = oldFn + }() + cmdutil.DescriberFn = d.describerFor + pods, _, _ := testData() tf := cmdtesting.NewTestFactory() defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) - d := &testDescriber{Output: "test output"} - tf.DescriberVal = d tf.UnstructuredClient = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, @@ -145,13 +169,18 @@ func TestDescribeListObjects(t *testing.T) { } func TestDescribeObjectShowEvents(t *testing.T) { + d := &testDescriber{Output: "test output"} + oldFn := cmdutil.DescriberFn + defer func() { + cmdutil.DescriberFn = oldFn + }() + cmdutil.DescriberFn = d.describerFor + pods, _, _ := testData() tf := cmdtesting.NewTestFactory() defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) - d := &testDescriber{Output: "test output"} - tf.DescriberVal = d tf.UnstructuredClient = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, @@ -167,13 +196,18 @@ func TestDescribeObjectShowEvents(t *testing.T) { } func TestDescribeObjectSkipEvents(t *testing.T) { + d := &testDescriber{Output: "test output"} + oldFn := cmdutil.DescriberFn + defer func() { + cmdutil.DescriberFn = oldFn + }() + cmdutil.DescriberFn = d.describerFor + pods, _, _ := testData() tf := cmdtesting.NewTestFactory() defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
- d := &testDescriber{Output: "test output"} - tf.DescriberVal = d tf.UnstructuredClient = &fake.RESTClient{ NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, @@ -215,3 +249,19 @@ func TestDescribeHelpMessage(t *testing.T) { t.Errorf("Expected not to contain: \n %v\nGot:\n %v\n", unexpected, got) } } + +type testDescriber struct { + Name, Namespace string + Settings printers.DescriberSettings + Output string + Err error +} + +func (t *testDescriber) Describe(namespace, name string, describerSettings printers.DescriberSettings) (output string, err error) { + t.Namespace, t.Name = namespace, name + t.Settings = describerSettings + return t.Output, t.Err +} +func (t *testDescriber) describerFor(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (printers.Describer, error) { + return t, nil +} diff --git a/pkg/kubectl/cmd/testing/BUILD b/pkg/kubectl/cmd/testing/BUILD index 673c74c85e1..8c615c2cf7f 100644 --- a/pkg/kubectl/cmd/testing/BUILD +++ b/pkg/kubectl/cmd/testing/BUILD @@ -18,7 +18,6 @@ go_library( "//pkg/kubectl/genericclioptions:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/validation:go_default_library", - "//pkg/printers:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library", diff --git a/pkg/kubectl/cmd/testing/fake.go b/pkg/kubectl/cmd/testing/fake.go index 59a4d947b58..839ee29dbea 100644 --- a/pkg/kubectl/cmd/testing/fake.go +++ b/pkg/kubectl/cmd/testing/fake.go @@ -52,7 +52,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/validation" - "k8s.io/kubernetes/pkg/printers" ) // +k8s:deepcopy-gen=true @@ -238,7 +237,6 @@ type TestFactory struct { Client kubectl.RESTClient ScaleGetter scaleclient.ScalesGetter UnstructuredClient kubectl.RESTClient - DescriberVal printers.Describer Namespace string ClientConfigVal *restclient.Config CommandVal string @@ -312,10 +310,6 @@ func (f *TestFactory) UnstructuredClientForMapping(mapping *meta.RESTMapping) (r return f.UnstructuredClient, nil } -func (f *TestFactory) Describer(*meta.RESTMapping) (printers.Describer, error) { - return f.DescriberVal, nil -} - func (f *TestFactory) Validator(validate bool) (validation.Schema, error) { return validation.NullSchema{}, nil } From 8ae4b537867711e5041cf3661d0a542b17945b9d Mon Sep 17 00:00:00 2001 From: jennybuckley Date: Thu, 24 May 2018 09:54:03 -0700 Subject: [PATCH 171/307] Update version of k8s.io/kube-openapi --- Godeps/Godeps.json | 16 ++-- .../kube-openapi/pkg/builder/openapi.go | 51 ++++++---- .../kube-openapi/pkg/generators/extension.go | 96 +++++++++++++++---- .../kube-openapi/pkg/generators/openapi.go | 11 +-- 4 files changed, 123 insertions(+), 51 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 7e937c367ea..6327cde93d6 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -3334,35 +3334,35 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": 
"k8s.io/kube-openapi/pkg/common", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto/validation", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/utils/clock", diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go index 78714e8b2ae..e4fe7c62ed9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go @@ -45,6 +45,32 @@ type openAPI struct { // BuildOpenAPISpec builds OpenAPI spec given a list of webservices (containing routes) and common.Config to customize it. func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec.Swagger, error) { + o := newOpenAPI(config) + err := o.buildPaths(webServices) + if err != nil { + return nil, err + } + return o.finalizeSwagger() +} + +// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it. +func BuildOpenAPIDefinitionsForResource(model interface{}, config *common.Config) (*spec.Definitions, error) { + o := newOpenAPI(config) + // We can discard the return value of toSchema because all we care about is the side effect of calling it. + // All the models created for this resource get added to o.swagger.Definitions + _, err := o.toSchema(model) + if err != nil { + return nil, err + } + swagger, err := o.finalizeSwagger() + if err != nil { + return nil, err + } + return &swagger.Definitions, nil +} + +// newOpenAPI sets up the openAPI object so we can build the spec. +func newOpenAPI(config *common.Config) openAPI { o := openAPI{ config: config, swagger: &spec.Swagger{ @@ -56,16 +82,6 @@ func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) }, }, } - - err := o.init(webServices) - if err != nil { - return nil, err - } - - return o.swagger, nil -} - -func (o *openAPI) init(webServices []*restful.WebService) error { if o.config.GetOperationIDAndTags == nil { o.config.GetOperationIDAndTags = func(r *restful.Route) (string, []string, error) { return r.Operation, nil, nil @@ -83,22 +99,25 @@ func (o *openAPI) init(webServices []*restful.WebService) error { if o.config.CommonResponses == nil { o.config.CommonResponses = map[int]spec.Response{} } - err := o.buildPaths(webServices) - if err != nil { - return err - } + return o +} + +// finalizeSwagger is called after the spec is built and returns the final spec. +// NOTE: finalizeSwagger also make changes to the final spec, as specified in the config. 
+func (o *openAPI) finalizeSwagger() (*spec.Swagger, error) { if o.config.SecurityDefinitions != nil { o.swagger.SecurityDefinitions = *o.config.SecurityDefinitions o.swagger.Security = o.config.DefaultSecurity } if o.config.PostProcessSpec != nil { + var err error o.swagger, err = o.config.PostProcessSpec(o.swagger) if err != nil { - return err + return nil, err } } - return nil + return o.swagger, nil } func getCanonicalizeTypeName(t reflect.Type) string { diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go index 7f3602408a3..befe38db248 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go @@ -27,18 +27,33 @@ import ( const extensionPrefix = "x-kubernetes-" -// Extension tag to openapi extension -var tagToExtension = map[string]string{ - "patchMergeKey": "x-kubernetes-patch-merge-key", - "patchStrategy": "x-kubernetes-patch-strategy", - "listType": "x-kubernetes-list-type", - "listMapKey": "x-kubernetes-list-map-keys", +// extensionAttributes encapsulates common traits for particular extensions. +type extensionAttributes struct { + xName string + kind types.Kind + allowedValues sets.String } -// Enum values per extension -var allowedExtensionValues = map[string]sets.String{ - "x-kubernetes-patch-strategy": sets.NewString("merge", "retainKeys"), - "x-kubernetes-list-type": sets.NewString("atomic", "set", "map"), +// Extension tag to openapi extension attributes +var tagToExtension = map[string]extensionAttributes{ + "patchMergeKey": extensionAttributes{ + xName: "x-kubernetes-patch-merge-key", + kind: types.Slice, + }, + "patchStrategy": extensionAttributes{ + xName: "x-kubernetes-patch-strategy", + kind: types.Slice, + allowedValues: sets.NewString("merge", "retainKeys"), + }, + "listMapKey": extensionAttributes{ + xName: "x-kubernetes-list-map-keys", + kind: types.Slice, + }, + "listType": extensionAttributes{ + xName: "x-kubernetes-list-type", + kind: types.Slice, + allowedValues: sets.NewString("atomic", "set", "map"), + }, } // Extension encapsulates information necessary to generate an OpenAPI extension. @@ -48,10 +63,25 @@ type extension struct { values []string // Example: [atomic] } +func (e extension) hasAllowedValues() bool { + return tagToExtension[e.idlTag].allowedValues.Len() > 0 +} + +func (e extension) allowedValues() sets.String { + return tagToExtension[e.idlTag].allowedValues +} + +func (e extension) hasKind() bool { + return len(tagToExtension[e.idlTag].kind) > 0 +} + +func (e extension) kind() types.Kind { + return tagToExtension[e.idlTag].kind +} + func (e extension) validateAllowedValues() error { // allowedValues not set means no restrictions on values. - allowedValues, exists := allowedExtensionValues[e.xName] - if !exists { + if !e.hasAllowedValues() { return nil } // Check for missing value. @@ -59,6 +89,7 @@ func (e extension) validateAllowedValues() error { return fmt.Errorf("%s needs a value, none given.", e.idlTag) } // For each extension value, validate that it is allowed. + allowedValues := e.allowedValues() if !allowedValues.HasAll(e.values...) { return fmt.Errorf("%v not allowed for %s. Allowed values: %v", e.values, e.idlTag, allowedValues.List()) @@ -66,6 +97,18 @@ func (e extension) validateAllowedValues() error { return nil } +func (e extension) validateType(kind types.Kind) error { + // If this extension class has no kind, then don't validate the type. 
+ if !e.hasKind() { + return nil + } + if kind != e.kind() { + return fmt.Errorf("tag %s on type %v; only allowed on type %v", + e.idlTag, kind, e.kind()) + } + return nil +} + func (e extension) hasMultipleValues() bool { return len(e.values) > 1 } @@ -82,7 +125,9 @@ func sortedMapKeys(m map[string][]string) []string { return keys } -// Parses comments to return openapi extensions. +// Parses comments to return openapi extensions. Returns a list of +// extensions which parsed correctly, as well as a list of the +// parse errors. Validating extensions is performed separately. // NOTE: Non-empty errors does not mean extensions is empty. func parseExtensions(comments []string) ([]extension, []error) { extensions := []extension{} @@ -108,21 +153,30 @@ func parseExtensions(comments []string) ([]extension, []error) { // Next, generate extensions from "idlTags" (e.g. +listType) tagValues := types.ExtractCommentTags("+", comments) for _, idlTag := range sortedMapKeys(tagValues) { - xName, exists := tagToExtension[idlTag] + xAttrs, exists := tagToExtension[idlTag] if !exists { continue } values := tagValues[idlTag] e := extension{ - idlTag: idlTag, // listType - xName: xName, // x-kubernetes-list-type - values: values, // [atomic] - } - if err := e.validateAllowedValues(); err != nil { - // For now, only log the extension validation errors. - errors = append(errors, err) + idlTag: idlTag, // listType + xName: xAttrs.xName, // x-kubernetes-list-type + values: values, // [atomic] } extensions = append(extensions, e) } return extensions, errors } + +func validateMemberExtensions(extensions []extension, m *types.Member) []error { + errors := []error{} + for _, e := range extensions { + if err := e.validateAllowedValues(); err != nil { + errors = append(errors, err) + } + if err := e.validateType(m.Type.Kind); err != nil { + errors = append(errors, err) + } + } + return errors +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index deeac757197..13e1e0985af 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -432,7 +432,7 @@ func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { extensions, errors := parseExtensions(t.CommentLines) // Initially, we will only log struct extension errors. if len(errors) > 0 { - for e := range errors { + for _, e := range errors { glog.V(2).Infof("[%s]: %s\n", t.String(), e) } } @@ -442,17 +442,16 @@ func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { } func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type) error { - extensions, errors := parseExtensions(m.CommentLines) + extensions, parseErrors := parseExtensions(m.CommentLines) + validationErrors := validateMemberExtensions(extensions, m) + errors := append(parseErrors, validationErrors...) // Initially, we will only log member extension errors. if len(errors) > 0 { errorPrefix := fmt.Sprintf("[%s] %s:", parent.String(), m.String()) - for e := range errors { + for _, e := range errors { glog.V(2).Infof("%s %s\n", errorPrefix, e) } } - // TODO(seans3): Validate member extensions here. - // Example: listType extension is only on a Slice. 
- // Example: cross-extension validation - listMapKey only makes sense with listType=map g.emitExtensions(extensions) return nil } From dee088586a76b876c473418efba8190be7fa6b26 Mon Sep 17 00:00:00 2001 From: jennybuckley Date: Thu, 24 May 2018 09:55:19 -0700 Subject: [PATCH 172/307] Expose openapi schema to handlers --- .../apiserver/pkg/endpoints/groupversion.go | 4 + .../apiserver/pkg/endpoints/handlers/rest.go | 2 + .../apiserver/pkg/endpoints/installer.go | 12 ++ .../apiserver/pkg/server/genericapiserver.go | 1 + .../pkg/server/genericapiserver_test.go | 39 ++++- .../apiserver/pkg/util/openapi/proto.go | 142 ++++++++++++++++++ .../apiserver/pkg/util/openapi/proto_test.go | 77 ++++++++++ 7 files changed, 276 insertions(+), 1 deletion(-) create mode 100644 staging/src/k8s.io/apiserver/pkg/util/openapi/proto.go create mode 100644 staging/src/k8s.io/apiserver/pkg/util/openapi/proto_test.go diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/groupversion.go b/staging/src/k8s.io/apiserver/pkg/endpoints/groupversion.go index 7060eb73897..23d13adc3d3 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/groupversion.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/groupversion.go @@ -30,6 +30,7 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/endpoints/discovery" "k8s.io/apiserver/pkg/registry/rest" + openapicommon "k8s.io/kube-openapi/pkg/common" ) // APIGroupVersion is a helper for exposing rest.Storage objects as http.Handlers via go-restful @@ -77,6 +78,9 @@ type APIGroupVersion struct { // EnableAPIResponseCompression indicates whether API Responses should support compression // if the client requests it via Accept-Encoding EnableAPIResponseCompression bool + + // OpenAPIConfig lets the individual handlers build a subset of the OpenAPI schema before they are installed. + OpenAPIConfig *openapicommon.Config } // InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container. diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index 4da38f43b14..942e53483ff 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -39,6 +39,7 @@ import ( "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" + openapiproto "k8s.io/kube-openapi/pkg/util/proto" ) // RequestScope encapsulates common fields across all RESTful handler methods. 
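The next hunk stores the resolved schema on RequestScope so individual handlers can consult it. For orientation, each OpenAPI definition carries its group, version and kind in the x-kubernetes-group-version-kind extension, a list of {group, version, kind} maps, and a holder of the proto.Schema can reach the raw extension as sketched here; gvkExtensionOf is an illustrative name, and the real lookup and parsing live in the new k8s.io/apiserver/pkg/util/openapi package added later in this patch.

package handlers

import (
	"k8s.io/kube-openapi/pkg/util/proto"
)

// gvkExtensionOf returns the raw x-kubernetes-group-version-kind extension
// attached to a schema, if present. Sketch only; not part of the patch.
func gvkExtensionOf(s proto.Schema) (interface{}, bool) {
	ext, ok := s.GetExtensions()["x-kubernetes-group-version-kind"]
	return ext, ok
}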
@@ -55,6 +56,7 @@ type RequestScope struct { UnsafeConvertor runtime.ObjectConvertor TableConvertor rest.TableConvertor + OpenAPISchema openapiproto.Schema Resource schema.GroupVersionResource Kind schema.GroupVersionKind diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go index 0158d28f611..3edd09dcdf9 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go @@ -39,6 +39,8 @@ import ( "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/registry/rest" genericfilters "k8s.io/apiserver/pkg/server/filters" + utilopenapi "k8s.io/apiserver/pkg/util/openapi" + openapibuilder "k8s.io/kube-openapi/pkg/builder" ) const ( @@ -495,6 +497,16 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if a.group.MetaGroupVersion != nil { reqScope.MetaGroupVersion = *a.group.MetaGroupVersion } + if a.group.OpenAPIConfig != nil { + openAPIDefinitions, err := openapibuilder.BuildOpenAPIDefinitionsForResource(defaultVersionedObject, a.group.OpenAPIConfig) + if err != nil { + return nil, fmt.Errorf("unable to build openapi definitions for %v: %v", fqKindToRegister, err) + } + reqScope.OpenAPISchema, err = utilopenapi.ToProtoSchema(openAPIDefinitions, fqKindToRegister) + if err != nil { + return nil, fmt.Errorf("unable to get openapi schema for %v: %v", fqKindToRegister, err) + } + } for _, action := range actions { producedObject := storageMeta.ProducesObject(action.Verb) if producedObject == nil { diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go index 10a8ddff6e5..9beba735d33 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -426,6 +426,7 @@ func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupV Admit: s.admissionControl, MinRequestTimeout: s.minRequestTimeout, EnableAPIResponseCompression: s.enableAPIResponseCompression, + OpenAPIConfig: s.openAPIConfig, } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go index fbe7235c2c2..8439d8cf75a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go @@ -31,6 +31,7 @@ import ( "testing" "time" + openapi "github.com/go-openapi/spec" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -78,9 +79,45 @@ func init() { examplev1.AddToScheme(scheme) } +func buildTestOpenAPIDefinition() kubeopenapi.OpenAPIDefinition { + return kubeopenapi.OpenAPIDefinition{ + Schema: openapi.Schema{ + SchemaProps: openapi.SchemaProps{ + Description: "Description", + Properties: map[string]openapi.Schema{}, + }, + VendorExtensible: openapi.VendorExtensible{ + Extensions: openapi.Extensions{ + "x-kubernetes-group-version-kind": []map[string]string{ + { + "group": "", + "version": "v1", + "kind": "Getter", + }, + { + "group": "batch", + "version": "v1", + "kind": "Getter", + }, + { + "group": "extensions", + "version": "v1", + "kind": "Getter", + }, + }, + }, + }, + }, + } +} + func testGetOpenAPIDefinitions(_ kubeopenapi.ReferenceCallback) map[string]kubeopenapi.OpenAPIDefinition { return map[string]kubeopenapi.OpenAPIDefinition{ - "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": {}, + 
"k8s.io/apimachinery/pkg/apis/meta/v1.Status": {}, + "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions": {}, + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": {}, + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": buildTestOpenAPIDefinition(), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList": {}, } } diff --git a/staging/src/k8s.io/apiserver/pkg/util/openapi/proto.go b/staging/src/k8s.io/apiserver/pkg/util/openapi/proto.go new file mode 100644 index 00000000000..5641d1a141f --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/util/openapi/proto.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "encoding/json" + "fmt" + + "github.com/go-openapi/spec" + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + "github.com/googleapis/gnostic/compiler" + yaml "gopkg.in/yaml.v2" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + // groupVersionKindExtensionKey is the key used to lookup the + // GroupVersionKind value for an object definition from the + // definition's "extensions" map. + groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" +) + +// ToProtoSchema builds the proto formatted schema from an OpenAPI spec +func ToProtoSchema(openAPIDefinitions *spec.Definitions, gvk schema.GroupVersionKind) (proto.Schema, error) { + openAPISpec := newMinimalValidOpenAPISpec() + openAPISpec.Definitions = *openAPIDefinitions + + specBytes, err := json.MarshalIndent(openAPISpec, " ", " ") + if err != nil { + return nil, err + } + + var info yaml.MapSlice + err = yaml.Unmarshal(specBytes, &info) + if err != nil { + return nil, err + } + + doc, err := openapi_v2.NewDocument(info, compiler.NewContext("$root", nil)) + if err != nil { + return nil, err + } + + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, err + } + + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + return nil, fmt.Errorf("the ListModels function returned a model that can't be looked-up") + } + gvkList := parseGroupVersionKind(model) + for _, modelGVK := range gvkList { + if modelGVK == gvk { + return model, nil + } + } + } + + return nil, fmt.Errorf("no model found with a %v tag matching %v", groupVersionKindExtensionKey, gvk) +} + +// newMinimalValidOpenAPISpec creates a minimal openapi spec with only the required fields filled in +func newMinimalValidOpenAPISpec() *spec.Swagger { + return &spec.Swagger{ + SwaggerProps: spec.SwaggerProps{ + Swagger: "2.0", + Info: &spec.Info{ + InfoProps: spec.InfoProps{ + Title: "Kubernetes", + Version: "0.0.0", + }, + }, + }, + } +} + +// parseGroupVersionKind gets and parses GroupVersionKind from the extension. Returns empty if it doesn't have one. 
+func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/staging/src/k8s.io/apiserver/pkg/util/openapi/proto_test.go b/staging/src/k8s.io/apiserver/pkg/util/openapi/proto_test.go new file mode 100644 index 00000000000..64421a7ff8f --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/util/openapi/proto_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openapi + +import ( + "reflect" + "testing" + + "github.com/go-openapi/spec" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" +) + +// TestOpenAPIDefinitionsToProtoSchema tests the openapi parser +func TestOpenAPIDefinitionsToProtoSchema(t *testing.T) { + openAPIDefinitions := &spec.Definitions{ + "io.k8s.api.testgroup.v1.Foo": spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Description of Foos", + Properties: map[string]spec.Schema{}, + }, + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-group-version-kind": []map[string]string{ + { + "group": "testgroup.k8s.io", + "version": "v1", + "kind": "Foo", + }, + }, + }, + }, + }, + } + gvk := schema.GroupVersionKind{ + Group: "testgroup.k8s.io", + Version: "v1", + Kind: "Foo", + } + expectedSchema := &proto.Arbitrary{ + BaseSchema: proto.BaseSchema{ + Description: "Description of Foos", + Extensions: map[string]interface{}{ + "x-kubernetes-group-version-kind": []interface{}{ + map[interface{}]interface{}{ + "group": "testgroup.k8s.io", + "version": "v1", + "kind": "Foo", + }, + }, + }, + Path: proto.NewPath("io.k8s.api.testgroup.v1.Foo"), + }, + } + actualSchema, err := ToProtoSchema(openAPIDefinitions, gvk) + if err != nil { + t.Fatalf("expected ToProtoSchema not to return an error") + } + if !reflect.DeepEqual(expectedSchema, actualSchema) { + t.Fatalf("expected schema:\n%v\nbut got:\n%v", expectedSchema, actualSchema) + } +} From 349fbdfe9fbcad91fcecbfe4e71fa91d235dad6d Mon Sep 17 00:00:00 2001 From: jennybuckley Date: Thu, 24 May 2018 09:55:33 -0700 Subject: [PATCH 173/307] Run hack/update-all.sh --- staging/BUILD | 1 + .../Godeps/Godeps.json | 14 ++++--- .../k8s.io/apimachinery/Godeps/Godeps.json | 2 +- .../src/k8s.io/apiserver/Godeps/Godeps.json | 10 ++--- .../src/k8s.io/apiserver/pkg/endpoints/BUILD | 3 ++ .../apiserver/pkg/endpoints/handlers/BUILD | 1 + staging/src/k8s.io/apiserver/pkg/server/BUILD | 1 + .../k8s.io/apiserver/pkg/util/openapi/BUILD | 41 +++++++++++++++++++ .../src/k8s.io/client-go/Godeps/Godeps.json | 2 +- .../k8s.io/code-generator/Godeps/Godeps.json | 4 +- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 16 +++++--- .../sample-apiserver/Godeps/Godeps.json | 14 ++++--- .../sample-controller/Godeps/Godeps.json | 2 +- 13 files changed, 85 insertions(+), 26 deletions(-) create mode 100644 staging/src/k8s.io/apiserver/pkg/util/openapi/BUILD diff --git a/staging/BUILD b/staging/BUILD index 1f6015e3520..8403046c6fd 100644 --- a/staging/BUILD +++ b/staging/BUILD @@ -92,6 +92,7 @@ filegroup( "//staging/src/k8s.io/apiserver/pkg/util/flag:all-srcs", "//staging/src/k8s.io/apiserver/pkg/util/flushwriter:all-srcs", "//staging/src/k8s.io/apiserver/pkg/util/logs:all-srcs", + "//staging/src/k8s.io/apiserver/pkg/util/openapi:all-srcs", "//staging/src/k8s.io/apiserver/pkg/util/proxy:all-srcs", "//staging/src/k8s.io/apiserver/pkg/util/trace:all-srcs", "//staging/src/k8s.io/apiserver/pkg/util/webhook:all-srcs", diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 374a18f55a7..a90c044928e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -1466,6 +1466,10 @@ "ImportPath": "k8s.io/apiserver/pkg/util/flushwriter", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apiserver/pkg/util/openapi", + "Rev": 
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apiserver/pkg/util/trace", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -2024,23 +2028,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/apimachinery/pkg/api/equality", diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index 0607ae6e038..02e897967ad 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -180,7 +180,7 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" } ] } diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 138dc9e4c95..32519a909e2 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -1756,23 +1756,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/client-go/discovery", diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD index 843ae6bd7d6..ebd5e7a761f 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/BUILD @@ -79,6 +79,9 @@ go_library( "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/openapi:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/builder:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index bb30cf27326..55871a7c790 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ 
b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -80,6 +80,7 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server/httplog:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/wsstream:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/BUILD b/staging/src/k8s.io/apiserver/pkg/server/BUILD index dcefdb4b746..1dfabf8fa46 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/BUILD @@ -15,6 +15,7 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/staging/src/k8s.io/apiserver/pkg/util/openapi/BUILD b/staging/src/k8s.io/apiserver/pkg/util/openapi/BUILD new file mode 100644 index 00000000000..fd9dc89c835 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/util/openapi/BUILD @@ -0,0 +1,41 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["proto.go"], + importpath = "k8s.io/apiserver/pkg/util/openapi", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/go-openapi/spec:go_default_library", + "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", + "//vendor/github.com/googleapis/gnostic/compiler:go_default_library", + "//vendor/gopkg.in/yaml.v2:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["proto_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/go-openapi/spec:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", + ], +) diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index f1337ce7868..f314634d39f 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -588,7 +588,7 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" } ] } diff --git a/staging/src/k8s.io/code-generator/Godeps/Godeps.json b/staging/src/k8s.io/code-generator/Godeps/Godeps.json index ad8da9bbe1e..15a35e1fe01 100644 --- a/staging/src/k8s.io/code-generator/Godeps/Godeps.json +++ b/staging/src/k8s.io/code-generator/Godeps/Godeps.json @@ -260,11 +260,11 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/generators", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" } ] } diff --git 
a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index 843a6e7082f..f6f37f73e81 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -1138,6 +1138,10 @@ "ImportPath": "k8s.io/apiserver/pkg/util/logs", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apiserver/pkg/util/openapi", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apiserver/pkg/util/proxy", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -1672,27 +1676,27 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" } ] } diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index c4b92ce04bf..b3a5c9f4daf 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -1110,6 +1110,10 @@ "ImportPath": "k8s.io/apiserver/pkg/util/logs", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apiserver/pkg/util/openapi", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apiserver/pkg/util/trace", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -1636,23 +1640,23 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" } ] } diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index 520574a0a56..9d2e0acd719 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -1068,7 +1068,7 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "61db125d227fc9d4e373819a059516f32f7f23c7" + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" } ] } From 
d4e48fd78911ed3e5105d72a2c3e50e6a65695c9 Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Thu, 24 May 2018 09:49:20 -0700 Subject: [PATCH 174/307] graduate DynamicKubeletConfig feature to beta --- cluster/gce/util.sh | 3 +++ cmd/kubelet/app/options/options.go | 2 +- pkg/features/kube_features.go | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index cddafca5250..7c909bd0736 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -621,6 +621,9 @@ function construct-kubelet-flags { flags+=" --experimental-check-node-capabilities-before-mount=true" # Keep in sync with the mkdir command in configure-helper.sh (until the TODO is resolved) flags+=" --cert-dir=/var/lib/kubelet/pki/" + # Configure the directory that the Kubelet should use to store dynamic config checkpoints + flags+=" --dynamic-config-dir=/var/lib/kubelet/dynamic-config" + if [[ "${master}" == "true" ]]; then flags+=" ${MASTER_KUBELET_TEST_ARGS:-}" diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 2964754eb30..4989b4ac828 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -371,7 +371,7 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.StringVar(&f.RootDirectory, "root-dir", f.RootDirectory, "Directory path for managing kubelet files (volume mounts,etc).") - fs.Var(&f.DynamicConfigDir, "dynamic-config-dir", "The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. Presently, you must also enable the DynamicKubeletConfig feature gate to pass this flag.") + fs.Var(&f.DynamicConfigDir, "dynamic-config-dir", "The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The DynamicKubeletConfig feature gate must be enabled to pass this flag; this gate currently defaults to true because the feature is beta.") fs.BoolVar(&f.RegisterNode, "register-node", f.RegisterNode, "Register the node with the apiserver. If --kubeconfig is not provided, this flag is irrelevant, as the Kubelet won't have an apiserver to register with. Default=true.") fs.Var(utiltaints.NewTaintsVar(&f.RegisterWithTaints), "register-with-taints", "Register the node with the given list of taints (comma separated \"=:\"). No-op if register-node is false.") diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index d773374ec43..2ddface94f7 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -35,6 +35,7 @@ const ( // owner: @mtaufen // alpha: v1.4 + // beta: v1.11 DynamicKubeletConfig utilfeature.Feature = "DynamicKubeletConfig" // owner: @pweil- @@ -290,7 +291,7 @@ func init() { // available throughout Kubernetes binaries. 
var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ AppArmor: {Default: true, PreRelease: utilfeature.Beta}, - DynamicKubeletConfig: {Default: false, PreRelease: utilfeature.Alpha}, + DynamicKubeletConfig: {Default: true, PreRelease: utilfeature.Beta}, ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta}, ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha}, DevicePlugins: {Default: true, PreRelease: utilfeature.Beta}, From c875690d0d8a69a9dde735f9d3a59b2e0adab9a5 Mon Sep 17 00:00:00 2001 From: Zhen Wang Date: Thu, 24 May 2018 10:55:40 -0700 Subject: [PATCH 175/307] Use default seccomp profile for DNS addons. --- cluster/addons/dns/coredns.yaml.base | 2 ++ cluster/addons/dns/coredns.yaml.in | 2 ++ cluster/addons/dns/coredns.yaml.sed | 2 ++ cluster/addons/dns/kube-dns.yaml.base | 1 + cluster/addons/dns/kube-dns.yaml.in | 1 + cluster/addons/dns/kube-dns.yaml.sed | 1 + 6 files changed, 9 insertions(+) diff --git a/cluster/addons/dns/coredns.yaml.base b/cluster/addons/dns/coredns.yaml.base index 012ad34dca0..a392c38a7d0 100644 --- a/cluster/addons/dns/coredns.yaml.base +++ b/cluster/addons/dns/coredns.yaml.base @@ -94,6 +94,8 @@ spec: metadata: labels: k8s-app: kube-dns + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: serviceAccountName: coredns tolerations: diff --git a/cluster/addons/dns/coredns.yaml.in b/cluster/addons/dns/coredns.yaml.in index 5ccf70bd0cd..322650444f7 100644 --- a/cluster/addons/dns/coredns.yaml.in +++ b/cluster/addons/dns/coredns.yaml.in @@ -94,6 +94,8 @@ spec: metadata: labels: k8s-app: kube-dns + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: serviceAccountName: coredns tolerations: diff --git a/cluster/addons/dns/coredns.yaml.sed b/cluster/addons/dns/coredns.yaml.sed index d635755e770..9c4f037997b 100644 --- a/cluster/addons/dns/coredns.yaml.sed +++ b/cluster/addons/dns/coredns.yaml.sed @@ -94,6 +94,8 @@ spec: metadata: labels: k8s-app: kube-dns + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: serviceAccountName: coredns tolerations: diff --git a/cluster/addons/dns/kube-dns.yaml.base b/cluster/addons/dns/kube-dns.yaml.base index aa19b8822a7..fbd232f5115 100644 --- a/cluster/addons/dns/kube-dns.yaml.base +++ b/cluster/addons/dns/kube-dns.yaml.base @@ -83,6 +83,7 @@ spec: k8s-app: kube-dns annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical tolerations: diff --git a/cluster/addons/dns/kube-dns.yaml.in b/cluster/addons/dns/kube-dns.yaml.in index 3926631081a..74b73b55024 100644 --- a/cluster/addons/dns/kube-dns.yaml.in +++ b/cluster/addons/dns/kube-dns.yaml.in @@ -83,6 +83,7 @@ spec: k8s-app: kube-dns annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical tolerations: diff --git a/cluster/addons/dns/kube-dns.yaml.sed b/cluster/addons/dns/kube-dns.yaml.sed index cc9bf8de6b9..e692d5ac8e7 100644 --- a/cluster/addons/dns/kube-dns.yaml.sed +++ b/cluster/addons/dns/kube-dns.yaml.sed @@ -83,6 +83,7 @@ spec: k8s-app: kube-dns annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical tolerations: From e9c7fe57d33046c60ebddaccc4f604d89e7cecfc Mon 
Sep 17 00:00:00 2001 From: Zhen Wang Date: Thu, 24 May 2018 11:22:17 -0700 Subject: [PATCH 176/307] Use default seccomp profile for fluentd-elasticsearch addon --- cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml | 1 + cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index eecd6b4801b..6d8f959b7d2 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -71,6 +71,7 @@ spec: # Note that this does not guarantee admission on the nodes (#40573). annotations: scheduler.alpha.kubernetes.io/critical-pod: '' + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-node-critical serviceAccountName: fluentd-es diff --git a/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml b/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml index 8cd00fe52e5..d6653dd15b4 100644 --- a/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml +++ b/cluster/addons/fluentd-elasticsearch/kibana-deployment.yaml @@ -16,6 +16,8 @@ spec: metadata: labels: k8s-app: kibana-logging + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: containers: - name: kibana-logging From ccec4c507f8668f6d88afa837bb4d2f46656da64 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Thu, 24 May 2018 15:02:08 -0400 Subject: [PATCH 177/307] MapString* should return empty string for String() when null --- staging/src/k8s.io/apiserver/pkg/util/flag/map_string_bool.go | 3 +++ .../src/k8s.io/apiserver/pkg/util/flag/map_string_string.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/staging/src/k8s.io/apiserver/pkg/util/flag/map_string_bool.go b/staging/src/k8s.io/apiserver/pkg/util/flag/map_string_bool.go index 46ed43836f2..e5a01805b02 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flag/map_string_bool.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flag/map_string_bool.go @@ -39,6 +39,9 @@ func NewMapStringBool(m *map[string]bool) *MapStringBool { // String implements github.com/spf13/pflag.Value func (m *MapStringBool) String() string { + if m == nil || m.Map == nil { + return "" + } pairs := []string{} for k, v := range *m.Map { pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) diff --git a/staging/src/k8s.io/apiserver/pkg/util/flag/map_string_string.go b/staging/src/k8s.io/apiserver/pkg/util/flag/map_string_string.go index 00c550b0499..129470b3c2f 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flag/map_string_string.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flag/map_string_string.go @@ -50,6 +50,9 @@ func NewMapStringStringNoSplit(m *map[string]string) *MapStringString { // String implements github.com/spf13/pflag.Value func (m *MapStringString) String() string { + if m == nil || m.Map == nil { + return "" + } pairs := []string{} for k, v := range *m.Map { pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) From ebc254c40f9e4834d34ba6be39fb08f0dc4a265f Mon Sep 17 00:00:00 2001 From: "Lubomir I. Ivanov" Date: Thu, 24 May 2018 19:48:49 +0300 Subject: [PATCH 178/307] kubeadm: rename the `kube-dns` phases addon The command `kubeadm alpha phase addon` has a property called `kube-dns` which would install kube-dns, pre 1.11. In the case of 1.11 this property will install CoreDNS, because the property is also bound to the `CoreDNS` feature gate, which is now `true` by default.
Fix that by renaming the property to `coredns`, updating the Cobra info and also updating the unit tests. --- cmd/kubeadm/app/cmd/phases/addons.go | 18 +++++++++--------- cmd/kubeadm/app/cmd/phases/addons_test.go | 2 +- docs/.generated_docs | 4 ++-- ...md => kubeadm_alpha_phase_addon_coredns.md} | 0 ...s.1 => kubeadm-alpha-phase-addon-coredns.1} | 0 5 files changed, 12 insertions(+), 12 deletions(-) rename docs/admin/{kubeadm_alpha_phase_addon_kube-dns.md => kubeadm_alpha_phase_addon_coredns.md} (100%) rename docs/man/man1/{kubeadm-alpha-phase-addon-kube-dns.1 => kubeadm-alpha-phase-addon-coredns.1} (100%) diff --git a/cmd/kubeadm/app/cmd/phases/addons.go b/cmd/kubeadm/app/cmd/phases/addons.go index 4951b233827..26ce873ab9e 100644 --- a/cmd/kubeadm/app/cmd/phases/addons.go +++ b/cmd/kubeadm/app/cmd/phases/addons.go @@ -39,24 +39,24 @@ import ( var ( allAddonsLongDesc = normalizer.LongDesc(` - Installs the kube-dns and the kube-proxys addons components via the API server. + Installs the CoreDNS and the kube-proxys addons components via the API server. Please note that although the DNS server is deployed, it will not be scheduled until CNI is installed. ` + cmdutil.AlphaDisclaimer) allAddonsExample = normalizer.Examples(` - # Installs the kube-dns and the kube-proxys addons components via the API server, + # Installs the CoreDNS and the kube-proxys addons components via the API server, # functionally equivalent to what installed by kubeadm init. kubeadm alpha phase selfhosting from-staticpods `) - kubednsAddonsLongDesc = normalizer.LongDesc(` - Installs the kube-dns addon components via the API server. + corednsAddonsLongDesc = normalizer.LongDesc(` + Installs the CoreDNS addon components via the API server. Please note that although the DNS server is deployed, it will not be scheduled until CNI is installed. ` + cmdutil.AlphaDisclaimer) kubeproxyAddonsLongDesc = normalizer.LongDesc(` - Installs the kube-proxy addon components via the API server. + Installs the kube-proxy addon components via the API server. 
` + cmdutil.AlphaDisclaimer) ) @@ -116,9 +116,9 @@ func getAddonsSubCommands() []*cobra.Command { cmdFunc: EnsureAllAddons, }, { - use: "kube-dns", - short: "Installs the kube-dns addon to a Kubernetes cluster", - long: kubednsAddonsLongDesc, + use: "coredns", + short: "Installs the CoreDNS addon to a Kubernetes cluster", + long: corednsAddonsLongDesc, cmdFunc: dnsaddon.EnsureDNSAddon, }, { @@ -151,7 +151,7 @@ func getAddonsSubCommands() []*cobra.Command { cmd.Flags().StringVar(&cfg.Networking.PodSubnet, "pod-network-cidr", cfg.Networking.PodSubnet, `The range of IP addresses used for the Pod network`) } - if properties.use == "all" || properties.use == "kube-dns" { + if properties.use == "all" || properties.use == "coredns" { cmd.Flags().StringVar(&cfg.Networking.DNSDomain, "service-dns-domain", cfg.Networking.DNSDomain, `Alternative domain for services`) cmd.Flags().StringVar(&cfg.Networking.ServiceSubnet, "service-cidr", cfg.Networking.ServiceSubnet, `The range of IP address used for service VIPs`) cmd.Flags().StringVar(&featureGatesString, "feature-gates", featureGatesString, "A set of key=value pairs that describe feature gates for various features."+ diff --git a/cmd/kubeadm/app/cmd/phases/addons_test.go b/cmd/kubeadm/app/cmd/phases/addons_test.go index e2333ec305b..713e7f8dbda 100644 --- a/cmd/kubeadm/app/cmd/phases/addons_test.go +++ b/cmd/kubeadm/app/cmd/phases/addons_test.go @@ -56,7 +56,7 @@ func TestAddonsSubCommandsHasFlags(t *testing.T) { }, }, { - command: "kube-dns", + command: "coredns", additionalFlags: []string{ "service-dns-domain", "service-cidr", diff --git a/docs/.generated_docs b/docs/.generated_docs index 4cbec27e77f..91926436747 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -9,7 +9,7 @@ docs/admin/kubeadm_alpha.md docs/admin/kubeadm_alpha_phase.md docs/admin/kubeadm_alpha_phase_addon.md docs/admin/kubeadm_alpha_phase_addon_all.md -docs/admin/kubeadm_alpha_phase_addon_kube-dns.md +docs/admin/kubeadm_alpha_phase_addon_coredns.md docs/admin/kubeadm_alpha_phase_addon_kube-proxy.md docs/admin/kubeadm_alpha_phase_bootstrap-token.md docs/admin/kubeadm_alpha_phase_bootstrap-token_all.md @@ -86,7 +86,7 @@ docs/man/man1/kube-controller-manager.1 docs/man/man1/kube-proxy.1 docs/man/man1/kube-scheduler.1 docs/man/man1/kubeadm-alpha-phase-addon-all.1 -docs/man/man1/kubeadm-alpha-phase-addon-kube-dns.1 +docs/man/man1/kubeadm-alpha-phase-addon-coredns.1 docs/man/man1/kubeadm-alpha-phase-addon-kube-proxy.1 docs/man/man1/kubeadm-alpha-phase-addon.1 docs/man/man1/kubeadm-alpha-phase-bootstrap-token-all.1 diff --git a/docs/admin/kubeadm_alpha_phase_addon_kube-dns.md b/docs/admin/kubeadm_alpha_phase_addon_coredns.md similarity index 100% rename from docs/admin/kubeadm_alpha_phase_addon_kube-dns.md rename to docs/admin/kubeadm_alpha_phase_addon_coredns.md diff --git a/docs/man/man1/kubeadm-alpha-phase-addon-kube-dns.1 b/docs/man/man1/kubeadm-alpha-phase-addon-coredns.1 similarity index 100% rename from docs/man/man1/kubeadm-alpha-phase-addon-kube-dns.1 rename to docs/man/man1/kubeadm-alpha-phase-addon-coredns.1 From 9cbd54018fabd4865ac939c93c321de56f557e22 Mon Sep 17 00:00:00 2001 From: Andrew McDermott Date: Tue, 15 May 2018 13:29:05 +0100 Subject: [PATCH 179/307] Remove signal handler registration from pkg/kubelet The goal of this change is to remove the registration of signal handling from pkg/kubelet. We now pass in a stop channel. 
If you register a signal handler in `main()` to aid in a controlled and deliberate exit then the handler registered in `pkg/kubelet` often wins and the process exits immediately. This means all other signal handler registrations are currently racy if `DockerServer.Start()` is directly or indirectly invoked. This change also removes another signal handler registration from `NewAPIServerCommand()`; a stop channel is now passed to this function. --- cmd/genkubedocs/BUILD | 1 + cmd/genkubedocs/gen_kube_docs.go | 5 +- cmd/genman/BUILD | 1 + cmd/genman/gen_kube_man.go | 5 +- cmd/hyperkube/BUILD | 1 + cmd/hyperkube/main.go | 11 ++-- cmd/kube-apiserver/BUILD | 1 + cmd/kube-apiserver/apiserver.go | 3 +- cmd/kube-apiserver/app/server.go | 4 +- cmd/kubelet/BUILD | 1 + cmd/kubelet/app/server.go | 54 +++++++++++++------ cmd/kubelet/kubelet.go | 3 +- pkg/kubelet/dockershim/remote/BUILD | 1 - .../dockershim/remote/docker_server.go | 13 +++-- pkg/kubelet/kubelet.go | 6 +-- pkg/kubemark/BUILD | 1 + pkg/kubemark/hollow_kubelet.go | 3 +- 17 files changed, 72 insertions(+), 42 deletions(-) diff --git a/cmd/genkubedocs/BUILD b/cmd/genkubedocs/BUILD index fd5d3a064f2..9aebfb14251 100644 --- a/cmd/genkubedocs/BUILD +++ b/cmd/genkubedocs/BUILD @@ -31,6 +31,7 @@ go_library( "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/cobra/doc:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", ], ) diff --git a/cmd/genkubedocs/gen_kube_docs.go b/cmd/genkubedocs/gen_kube_docs.go index 975466f4418..47c2e0b87d6 100644 --- a/cmd/genkubedocs/gen_kube_docs.go +++ b/cmd/genkubedocs/gen_kube_docs.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra/doc" "github.com/spf13/pflag" + "k8s.io/apiserver/pkg/server" ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app" "k8s.io/kubernetes/cmd/genutils" apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app" @@ -53,7 +54,7 @@ func main() { switch module { case "kube-apiserver": // generate docs for kube-apiserver - apiserver := apiservapp.NewAPIServerCommand() + apiserver := apiservapp.NewAPIServerCommand(server.SetupSignalHandler()) doc.GenMarkdownTree(apiserver, outDir) case "kube-controller-manager": // generate docs for kube-controller-manager @@ -73,7 +74,7 @@ func main() { doc.GenMarkdownTree(scheduler, outDir) case "kubelet": // generate docs for kubelet - kubelet := kubeletapp.NewKubeletCommand() + kubelet := kubeletapp.NewKubeletCommand(server.SetupSignalHandler()) doc.GenMarkdownTree(kubelet, outDir) case "kubeadm": // resets global flags created by kubelet or other commands e.g. 
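A minimal sketch of the wiring this patch moves to, assuming only the NewKubeletCommand(stopCh <-chan struct{}) signature introduced above: the binary's main() registers the signal handler exactly once and passes the resulting stop channel down, so library packages such as pkg/kubelet never install handlers of their own.

package main

import (
	"os"

	"k8s.io/apiserver/pkg/server"
	"k8s.io/kubernetes/cmd/kubelet/app"
)

func main() {
	// SetupSignalHandler installs the SIGTERM/SIGINT handler once and returns
	// a channel that is closed when a signal is received.
	stopCh := server.SetupSignalHandler()

	// The command (and everything it starts, e.g. the dockershim gRPC server)
	// only reads from stopCh; it never registers signal handlers itself.
	command := app.NewKubeletCommand(stopCh)
	if err := command.Execute(); err != nil {
		os.Exit(1)
	}
}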
diff --git a/cmd/genman/BUILD b/cmd/genman/BUILD index e71a704e22f..82398a133d4 100644 --- a/cmd/genman/BUILD +++ b/cmd/genman/BUILD @@ -28,6 +28,7 @@ go_library( "//vendor/github.com/cpuguy83/go-md2man/md2man:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", ], ) diff --git a/cmd/genman/gen_kube_man.go b/cmd/genman/gen_kube_man.go index 8ccc994f31c..62ceaab5223 100644 --- a/cmd/genman/gen_kube_man.go +++ b/cmd/genman/gen_kube_man.go @@ -26,6 +26,7 @@ import ( mangen "github.com/cpuguy83/go-md2man/md2man" "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/apiserver/pkg/server" ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app" "k8s.io/kubernetes/cmd/genutils" apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app" @@ -62,7 +63,7 @@ func main() { switch module { case "kube-apiserver": // generate manpage for kube-apiserver - apiserver := apiservapp.NewAPIServerCommand() + apiserver := apiservapp.NewAPIServerCommand(server.SetupSignalHandler()) genMarkdown(apiserver, "", outDir) for _, c := range apiserver.Commands() { genMarkdown(c, "kube-apiserver", outDir) @@ -97,7 +98,7 @@ func main() { } case "kubelet": // generate manpage for kubelet - kubelet := kubeletapp.NewKubeletCommand() + kubelet := kubeletapp.NewKubeletCommand(server.SetupSignalHandler()) genMarkdown(kubelet, "", outDir) for _, c := range kubelet.Commands() { genMarkdown(c, "kubelet", outDir) diff --git a/cmd/hyperkube/BUILD b/cmd/hyperkube/BUILD index 9b17e5eb76d..7d60ed6833c 100644 --- a/cmd/hyperkube/BUILD +++ b/cmd/hyperkube/BUILD @@ -29,6 +29,7 @@ go_library( "//pkg/version/prometheus:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", ], diff --git a/cmd/hyperkube/main.go b/cmd/hyperkube/main.go index 1c5a276eae1..8482ab1c386 100644 --- a/cmd/hyperkube/main.go +++ b/cmd/hyperkube/main.go @@ -32,6 +32,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/apiserver/pkg/server" utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" cloudcontrollermanager "k8s.io/kubernetes/cmd/cloud-controller-manager/app" @@ -48,7 +49,7 @@ import ( func main() { rand.Seed(time.Now().UTC().UnixNano()) - hyperkubeCommand, allCommandFns := NewHyperKubeCommand() + hyperkubeCommand, allCommandFns := NewHyperKubeCommand(server.SetupSignalHandler()) // TODO: once we switch everything over to Cobra commands, we can go back to calling // utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the @@ -82,12 +83,12 @@ func commandFor(basename string, defaultCommand *cobra.Command, commands []func( return defaultCommand } -// NewCmdRequestProject implement the OpenShift cli RequestProject command. -func NewHyperKubeCommand() (*cobra.Command, []func() *cobra.Command) { +// NewHyperKubeCommand is the entry point for hyperkube +func NewHyperKubeCommand(stopCh <-chan struct{}) (*cobra.Command, []func() *cobra.Command) { // these have to be functions since the command is polymorphic. 
Cobra wants you to be top level // command to get executed apiserver := func() *cobra.Command { - ret := kubeapiserver.NewAPIServerCommand() + ret := kubeapiserver.NewAPIServerCommand(stopCh) // add back some unfortunate aliases that should be removed ret.Aliases = []string{"apiserver"} return ret @@ -111,7 +112,7 @@ func NewHyperKubeCommand() (*cobra.Command, []func() *cobra.Command) { return ret } kubectlCmd := func() *cobra.Command { return kubectl.NewDefaultKubectlCommand() } - kubelet := func() *cobra.Command { return kubelet.NewKubeletCommand() } + kubelet := func() *cobra.Command { return kubelet.NewKubeletCommand(stopCh) } cloudController := func() *cobra.Command { return cloudcontrollermanager.NewCloudControllerManagerCommand() } commandFns := []func() *cobra.Command{ diff --git a/cmd/kube-apiserver/BUILD b/cmd/kube-apiserver/BUILD index e13b255fd5b..78684bd716b 100644 --- a/cmd/kube-apiserver/BUILD +++ b/cmd/kube-apiserver/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/client/metrics/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", ], diff --git a/cmd/kube-apiserver/apiserver.go b/cmd/kube-apiserver/apiserver.go index 9d3ce436db8..762c09f6ddb 100644 --- a/cmd/kube-apiserver/apiserver.go +++ b/cmd/kube-apiserver/apiserver.go @@ -27,6 +27,7 @@ import ( "github.com/spf13/pflag" + "k8s.io/apiserver/pkg/server" utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/apiserver/pkg/util/logs" "k8s.io/kubernetes/cmd/kube-apiserver/app" @@ -37,7 +38,7 @@ import ( func main() { rand.Seed(time.Now().UTC().UnixNano()) - command := app.NewAPIServerCommand() + command := app.NewAPIServerCommand(server.SetupSignalHandler()) // TODO: once we switch everything over to Cobra commands, we can go back to calling // utilflag.InitFlags() (by removing its pflag.Parse() call). 
For now, we have to set the diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 86724e41a6a..3fb5ed71bb1 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -48,7 +48,6 @@ import ( "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authorization/authorizer" openapinamer "k8s.io/apiserver/pkg/endpoints/openapi" - "k8s.io/apiserver/pkg/server" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/filters" serveroptions "k8s.io/apiserver/pkg/server/options" @@ -109,7 +108,7 @@ const etcdRetryLimit = 60 const etcdRetryInterval = 1 * time.Second // NewAPIServerCommand creates a *cobra.Command object with default parameters -func NewAPIServerCommand() *cobra.Command { +func NewAPIServerCommand(stopCh <-chan struct{}) *cobra.Command { s := options.NewServerRunOptions() cmd := &cobra.Command{ Use: "kube-apiserver", @@ -132,7 +131,6 @@ cluster's shared state through which all other components interact.`, return utilerrors.NewAggregate(errs) } - stopCh := server.SetupSignalHandler() return Run(completedOptions, stopCh) }, } diff --git a/cmd/kubelet/BUILD b/cmd/kubelet/BUILD index e787c70f79e..690698e8bcb 100644 --- a/cmd/kubelet/BUILD +++ b/cmd/kubelet/BUILD @@ -21,6 +21,7 @@ go_library( "//cmd/kubelet/app:go_default_library", "//pkg/client/metrics/prometheus:go_default_library", "//pkg/version/prometheus:go_default_library", + "//vendor/k8s.io/apiserver/pkg/server:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library", ], ) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 10bbae78cfc..ca13dc51d30 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -103,7 +103,7 @@ const ( ) // NewKubeletCommand creates a *cobra.Command object with default parameters -func NewKubeletCommand() *cobra.Command { +func NewKubeletCommand(stopCh <-chan struct{}) *cobra.Command { cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError) cleanFlagSet.SetNormalizeFunc(flag.WordSepNormalizeFunc) kubeletFlags := options.NewKubeletFlags() @@ -248,14 +248,15 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // start the experimental docker shim, if enabled if kubeletServer.KubeletFlags.ExperimentalDockershim { - if err := RunDockershim(&kubeletServer.KubeletFlags, kubeletConfig); err != nil { + if err := RunDockershim(&kubeletServer.KubeletFlags, kubeletConfig, stopCh); err != nil { glog.Fatal(err) } + return } // run the kubelet glog.V(5).Infof("KubeletConfiguration: %#v", kubeletServer.KubeletConfiguration) - if err := Run(kubeletServer, kubeletDeps); err != nil { + if err := Run(kubeletServer, kubeletDeps, stopCh); err != nil { glog.Fatal(err) } }, @@ -399,13 +400,13 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err // The kubeDeps argument may be nil - if so, it is initialized from the settings on KubeletServer. // Otherwise, the caller is assumed to have set up the Dependencies object and a default one will // not be generated. 
-func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) error { +func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan struct{}) error { // To help debugging, immediately log version glog.Infof("Version: %+v", version.Get()) if err := initForOS(s.KubeletFlags.WindowsService); err != nil { return fmt.Errorf("failed OS init: %v", err) } - if err := run(s, kubeDeps); err != nil { + if err := run(s, kubeDeps, stopCh); err != nil { return fmt.Errorf("failed to run Kubelet: %v", err) } return nil @@ -462,7 +463,7 @@ func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) } } -func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { +func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan struct{}) (err error) { // Set global feature gates based on the value on the initial KubeletServer err = utilfeature.DefaultFeatureGate.SetFromMap(s.KubeletConfiguration.FeatureGates) if err != nil { @@ -717,7 +718,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { glog.Warning(err) } - if err := RunKubelet(&s.KubeletFlags, &s.KubeletConfiguration, kubeDeps, s.RunOnce); err != nil { + if err := RunKubelet(&s.KubeletFlags, &s.KubeletConfiguration, kubeDeps, s.RunOnce, stopCh); err != nil { return err } @@ -738,7 +739,13 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) { // If systemd is used, notify it that we have started go daemon.SdNotify(false, "READY=1") - <-done + select { + case <-done: + break + case <-stopCh: + break + } + return nil } @@ -877,7 +884,7 @@ func addChaosToClientConfig(s *options.KubeletServer, config *restclient.Config) // 2 Kubelet binary // 3 Standalone 'kubernetes' binary // Eventually, #2 will be replaced with instances of #3 -func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *kubelet.Dependencies, runOnce bool) error { +func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *kubelet.Dependencies, runOnce bool, stopCh <-chan struct{}) error { hostname := nodeutil.GetHostname(kubeFlags.HostnameOverride) // Query the cloud provider for our node name, default to hostname if kubeDeps.Cloud == nil nodeName, err := getNodeName(kubeDeps.Cloud, hostname) @@ -950,7 +957,8 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. 
kubeFlags.KeepTerminatedPodVolumes, kubeFlags.NodeLabels, kubeFlags.SeccompProfileRoot, - kubeFlags.BootstrapCheckpointPath) + kubeFlags.BootstrapCheckpointPath, + stopCh) if err != nil { return fmt.Errorf("failed to create kubelet: %v", err) } @@ -1034,7 +1042,8 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, keepTerminatedPodVolumes bool, nodeLabels map[string]string, seccompProfileRoot string, - bootstrapCheckpointPath string) (k kubelet.Bootstrap, err error) { + bootstrapCheckpointPath string, + stopCh <-chan struct{}) (k kubelet.Bootstrap, err error) { // TODO: block until all sources have delivered at least one update to the channel, or break the sync loop // up into "per source" synchronizations @@ -1067,7 +1076,8 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, keepTerminatedPodVolumes, nodeLabels, seccompProfileRoot, - bootstrapCheckpointPath) + bootstrapCheckpointPath, + stopCh) if err != nil { return nil, err } @@ -1130,7 +1140,7 @@ func BootstrapKubeletConfigController(dynamicConfigDir string, transform dynamic // RunDockershim only starts the dockershim in current process. This is only used for cri validate testing purpose // TODO(random-liu): Move this to a separate binary. -func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConfiguration) error { +func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConfiguration, stopCh <-chan struct{}) error { r := &f.ContainerRuntimeOptions // Initialize docker client configuration. @@ -1167,11 +1177,23 @@ func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConf } glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") server := dockerremote.NewDockerServer(f.RemoteRuntimeEndpoint, ds) - if err := server.Start(); err != nil { + if err := server.Start(stopCh); err != nil { return err } + streamingServer := &http.Server{ + Addr: net.JoinHostPort(c.Address, strconv.Itoa(int(c.Port))), + Handler: ds, + } + + go func() { + <-stopCh + streamingServer.Shutdown(context.Background()) + }() + // Start the streaming server - addr := net.JoinHostPort(c.Address, strconv.Itoa(int(c.Port))) - return http.ListenAndServe(addr, ds) + if err := streamingServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { + return err + } + return nil } diff --git a/cmd/kubelet/kubelet.go b/cmd/kubelet/kubelet.go index 3f5b2937779..8f254adb414 100644 --- a/cmd/kubelet/kubelet.go +++ b/cmd/kubelet/kubelet.go @@ -26,6 +26,7 @@ import ( "os" "time" + "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/util/logs" "k8s.io/kubernetes/cmd/kubelet/app" _ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration @@ -35,7 +36,7 @@ import ( func main() { rand.Seed(time.Now().UTC().UnixNano()) - command := app.NewKubeletCommand() + command := app.NewKubeletCommand(server.SetupSignalHandler()) logs.InitLogs() defer logs.FlushLogs() diff --git a/pkg/kubelet/dockershim/remote/BUILD b/pkg/kubelet/dockershim/remote/BUILD index 4ac86b9d370..a40ce09eaae 100644 --- a/pkg/kubelet/dockershim/remote/BUILD +++ b/pkg/kubelet/dockershim/remote/BUILD @@ -13,7 +13,6 @@ go_library( "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//pkg/kubelet/dockershim:go_default_library", "//pkg/kubelet/util:go_default_library", - "//pkg/util/interrupt:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", ], diff --git 
a/pkg/kubelet/dockershim/remote/docker_server.go b/pkg/kubelet/dockershim/remote/docker_server.go index 5e3fb14544a..1ac7560d41b 100644 --- a/pkg/kubelet/dockershim/remote/docker_server.go +++ b/pkg/kubelet/dockershim/remote/docker_server.go @@ -21,11 +21,9 @@ import ( "github.com/golang/glog" "google.golang.org/grpc" - runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/dockershim" "k8s.io/kubernetes/pkg/kubelet/util" - "k8s.io/kubernetes/pkg/util/interrupt" ) // maxMsgSize use 8MB as the default message size limit. @@ -51,7 +49,7 @@ func NewDockerServer(endpoint string, s dockershim.CRIService) *DockerServer { } // Start starts the dockershim grpc server. -func (s *DockerServer) Start() error { +func (s *DockerServer) Start(stopCh <-chan struct{}) error { // Start the internal service. if err := s.service.Start(); err != nil { glog.Errorf("Unable to start docker service") @@ -71,13 +69,14 @@ func (s *DockerServer) Start() error { runtimeapi.RegisterRuntimeServiceServer(s.server, s.service) runtimeapi.RegisterImageServiceServer(s.server, s.service) go func() { - // Use interrupt handler to make sure the server to be stopped properly. - h := interrupt.New(nil, s.Stop) - err := h.Run(func() error { return s.server.Serve(l) }) - if err != nil { + if err := s.server.Serve(l); err != nil { glog.Errorf("Failed to serve connections: %v", err) } }() + go func() { + <-stopCh + s.Stop() + }() return nil } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index e3d2df76fed..0229dcf155c 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -343,7 +343,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, keepTerminatedPodVolumes bool, nodeLabels map[string]string, seccompProfileRoot string, - bootstrapCheckpointPath string) (*Kubelet, error) { + bootstrapCheckpointPath string, + stopCh <-chan struct{}) (*Kubelet, error) { if rootDirectory == "" { return nil, fmt.Errorf("invalid root directory %q", rootDirectory) } @@ -619,9 +620,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q", remoteRuntimeEndpoint, remoteImageEndpoint) - glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") server := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds) - if err := server.Start(); err != nil { + if err := server.Start(stopCh); err != nil { return nil, err } diff --git a/pkg/kubemark/BUILD b/pkg/kubemark/BUILD index a11620af86f..b22cdbc47ca 100644 --- a/pkg/kubemark/BUILD +++ b/pkg/kubemark/BUILD @@ -46,6 +46,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go index 8e79abed32c..10cde110577 100644 --- a/pkg/kubemark/hollow_kubelet.go +++ b/pkg/kubemark/hollow_kubelet.go @@ -20,6 +20,7 @@ import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" kubeletapp "k8s.io/kubernetes/cmd/kubelet/app" "k8s.io/kubernetes/cmd/kubelet/app/options" @@ -89,7 +90,7 @@ func 
NewHollowKubelet( // Starts this HollowKubelet and blocks. func (hk *HollowKubelet) Run() { - if err := kubeletapp.RunKubelet(hk.KubeletFlags, hk.KubeletConfiguration, hk.KubeletDeps, false); err != nil { + if err := kubeletapp.RunKubelet(hk.KubeletFlags, hk.KubeletConfiguration, hk.KubeletDeps, false, wait.NeverStop); err != nil { glog.Fatalf("Failed to run HollowKubelet: %v. Exiting.", err) } select {} From ca58578b240a40da487f366b0200698616aa2017 Mon Sep 17 00:00:00 2001 From: Andrew McDermott Date: Thu, 24 May 2018 20:34:27 +0100 Subject: [PATCH 180/307] Resurrect lost log line --- pkg/kubelet/kubelet.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 0229dcf155c..421c0c98bea 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -620,6 +620,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q", remoteRuntimeEndpoint, remoteImageEndpoint) + glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") server := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds) if err := server.Start(stopCh); err != nil { return nil, err From 1f7671b18dbfad7a84835404ddbc9cdaa5ce09d8 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 24 May 2018 15:05:48 -0700 Subject: [PATCH 181/307] Pull gke-exec-auth-plugin binary on Nodes If the plugin URL is set and VM is not master, pull the plugin binary. --- cluster/gce/gci/configure.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index c950c045b82..d8bd2baebd1 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -264,6 +264,18 @@ runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock} EOF } +function install-exec-auth-plugin { + if [[ ! ${EXEC_AUTH_PLUGIN_URL:-} ]]; then + return + fi + local -r plugin_url="${EXEC_AUTH_PLUGIN_URL}" + local -r plugin_sha1="${EXEC_AUTH_PLUGIN_SHA1}" + + echo "Downloading gke-exec-auth-plugin binary" + download-or-bust "${plugin_sha1}" "${plugin_url}" + mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}" +} + function install-kube-manifests { # Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/. local dst_dir="${KUBE_HOME}/kube-manifests" @@ -403,6 +415,10 @@ function install-kube-binary-config { # Install crictl on each node. install-crictl + if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then + install-exec-auth-plugin + fi + # Clean up. 
rm -rf "${KUBE_HOME}/kubernetes" rm -f "${KUBE_HOME}/${server_binary_tar}" From d8d2a4e84c892ab476d3f5fc4d0b5a10487237b7 Mon Sep 17 00:00:00 2001 From: Tim Wilfong Date: Thu, 24 May 2018 16:21:01 -0700 Subject: [PATCH 182/307] fix space-vs-tab indent on comment line --- pkg/cloudprovider/providers/aws/tags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/aws/tags.go b/pkg/cloudprovider/providers/aws/tags.go index 2a293e7271f..2daff4d4e4c 100644 --- a/pkg/cloudprovider/providers/aws/tags.go +++ b/pkg/cloudprovider/providers/aws/tags.go @@ -137,7 +137,7 @@ func (t *awsTagging) hasClusterTag(tags []*ec2.Tag) bool { clusterTagKey := t.clusterTagKey() for _, tag := range tags { tagKey := aws.StringValue(tag.Key) - // Check if this is a newer-style cluster tag before checking if legacy tag value matches ClusterID + // Check if this is a newer-style cluster tag before checking if legacy tag value matches ClusterID if tagKey == clusterTagKey { return true } From 5139bb580ac3688640e570803c10b6c65b97141f Mon Sep 17 00:00:00 2001 From: Rohit Agarwal Date: Thu, 24 May 2018 17:53:07 -0700 Subject: [PATCH 183/307] Update nvidia-gpu-device-plugin to apps/v1 and use RollingUpdate updateStrategy. Even though RollingUpdate is the default updateStrategy, we need to specify it explicitly here because otherwise updating from extensions/v1beta1 to apps/v1 doesn't change the updateStrategy. --- cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml b/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml index f6e73cfdee5..7616b8e2589 100644 --- a/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml +++ b/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: nvidia-gpu-device-plugin @@ -7,6 +7,9 @@ metadata: k8s-app: nvidia-gpu-device-plugin addonmanager.kubernetes.io/mode: Reconcile spec: + selector: + matchLabels: + k8s-app: nvidia-gpu-device-plugin template: metadata: labels: @@ -52,3 +55,5 @@ spec: mountPath: /device-plugin - name: dev mountPath: /dev + updateStrategy: + type: RollingUpdate From fc1d9dbd181043003172adebcd23ea12e01f4b65 Mon Sep 17 00:00:00 2001 From: Tim Wilfong Date: Thu, 24 May 2018 19:23:12 -0700 Subject: [PATCH 184/307] Fix hasClusterTag to actually get behavior we want --- pkg/cloudprovider/providers/aws/tags.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/tags.go b/pkg/cloudprovider/providers/aws/tags.go index 2daff4d4e4c..43130c3601f 100644 --- a/pkg/cloudprovider/providers/aws/tags.go +++ b/pkg/cloudprovider/providers/aws/tags.go @@ -137,13 +137,13 @@ func (t *awsTagging) hasClusterTag(tags []*ec2.Tag) bool { clusterTagKey := t.clusterTagKey() for _, tag := range tags { tagKey := aws.StringValue(tag.Key) - // Check if this is a newer-style cluster tag before checking if legacy tag value matches ClusterID - if tagKey == clusterTagKey { + // For 1.6, we continue to recognize the legacy tags, for the 1.5 -> 1.6 upgrade + // Note that we want to continue traversing tag list if we see a legacy tag with value != ClusterID + if (tagKey == TagNameKubernetesClusterLegacy) && (aws.StringValue(tag.Value) == t.ClusterID) { return true } - // For 1.6, we continue to recognize the legacy tags, for the 1.5 -> 1.6 upgrade - if tagKey == 
TagNameKubernetesClusterLegacy { - return aws.StringValue(tag.Value) == t.ClusterID + if tagKey == clusterTagKey { + return true } } return false From 3bc8b09e78085da3ff5700fc6cb0ebb9e1878f4b Mon Sep 17 00:00:00 2001 From: Guoliang Wang Date: Tue, 22 May 2018 15:21:24 +0800 Subject: [PATCH 185/307] Move SuggestedPodTemplateResources from factory to set_resources --- pkg/kubectl/cmd/set/set_resources.go | 12 +++--------- pkg/kubectl/cmd/util/factory.go | 4 ---- pkg/kubectl/cmd/util/factory_client_access.go | 10 ---------- 3 files changed, 3 insertions(+), 23 deletions(-) diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 8a70ec56b4f..56707753217 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -18,11 +18,10 @@ package set import ( "fmt" - "strings" - - "github.com/spf13/cobra" "github.com/golang/glog" + "github.com/spf13/cobra" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -108,16 +107,11 @@ func NewResourcesOptions(streams genericclioptions.IOStreams) *SetResourcesOptio func NewCmdResources(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { o := NewResourcesOptions(streams) - resourceTypesWithPodTemplate := []string{} - for _, resource := range f.SuggestedPodTemplateResources() { - resourceTypesWithPodTemplate = append(resourceTypesWithPodTemplate, resource.Resource) - } - cmd := &cobra.Command{ Use: "resources (-f FILENAME | TYPE NAME) ([--limits=LIMITS & --requests=REQUESTS]", DisableFlagsInUseLine: true, Short: i18n.T("Update resource requests/limits on objects with pod templates"), - Long: fmt.Sprintf(resources_long, strings.Join(resourceTypesWithPodTemplate, ", ")), + Long: fmt.Sprintf(resources_long, cmdutil.SuggestApiResources("kubectl")), Example: resources_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index b89cdf070c2..d84132ab087 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -22,7 +22,6 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" @@ -75,9 +74,6 @@ type ClientAccessFactory interface { // and which implements the common patterns for CLI interactions with generic resources. NewBuilder() *resource.Builder - // SuggestedPodTemplateResources returns a list of resource types that declare a pod template - SuggestedPodTemplateResources() []schema.GroupResource - // Returns the default namespace to use in cases where no // other namespace is specified and whether the namespace was // overridden. 
diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index c31a1e107c8..320aec8ac35 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -116,16 +116,6 @@ func (f *ring0Factory) RESTClient() (*restclient.RESTClient, error) { return restclient.RESTClientFor(clientConfig) } -func (f *ring0Factory) SuggestedPodTemplateResources() []schema.GroupResource { - return []schema.GroupResource{ - {Resource: "replicationcontroller"}, - {Resource: "deployment"}, - {Resource: "daemonset"}, - {Resource: "job"}, - {Resource: "replicaset"}, - } -} - func (f *ring0Factory) DefaultNamespace() (string, bool, error) { return f.clientGetter.ToRawKubeConfigLoader().Namespace() } From f2cb23ad6a14169f5e2a82c5d3b1bd6ebc1074a5 Mon Sep 17 00:00:00 2001 From: Guoliang Wang Date: Fri, 25 May 2018 13:59:05 +0800 Subject: [PATCH 186/307] HandleError include the type of the error object --- .../apiserver/pkg/endpoints/handlers/responsewriters/status.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go index e37a8ea4786..99673077b2b 100755 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go @@ -61,7 +61,7 @@ func ErrorToAPIStatus(err error) *metav1.Status { // by REST storage - these typically indicate programmer // error by not using pkg/api/errors, or unexpected failure // cases. - runtime.HandleError(fmt.Errorf("apiserver received an error that is not an metav1.Status: %v", err)) + runtime.HandleError(fmt.Errorf("apiserver received an error that is not an metav1.Status: %#+v", err)) return &metav1.Status{ TypeMeta: metav1.TypeMeta{ Kind: "Status", From 096dda3768b9ff9032b054eaaf86c7e69b1b6113 Mon Sep 17 00:00:00 2001 From: Silvery Fu Date: Thu, 24 May 2018 23:29:23 -0700 Subject: [PATCH 187/307] Rename and add unit test for ImageSizes --- .../algorithm/priorities/image_locality.go | 2 +- pkg/scheduler/schedulercache/node_info.go | 4 +- .../schedulercache/node_info_test.go | 40 +++++++++++++++++++ 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/pkg/scheduler/algorithm/priorities/image_locality.go b/pkg/scheduler/algorithm/priorities/image_locality.go index e7128331029..06e93c5e8f2 100644 --- a/pkg/scheduler/algorithm/priorities/image_locality.go +++ b/pkg/scheduler/algorithm/priorities/image_locality.go @@ -72,7 +72,7 @@ func calculateScoreFromSize(sumSize int64) int { func totalImageSize(nodeInfo *schedulercache.NodeInfo, containers []v1.Container) int64 { var total int64 - imageSizes := nodeInfo.Images() + imageSizes := nodeInfo.ImageSizes() for _, container := range containers { if size, ok := imageSizes[container.Image]; ok { total += size diff --git a/pkg/scheduler/schedulercache/node_info.go b/pkg/scheduler/schedulercache/node_info.go index 2773437327f..60593c37bd5 100644 --- a/pkg/scheduler/schedulercache/node_info.go +++ b/pkg/scheduler/schedulercache/node_info.go @@ -293,8 +293,8 @@ func (n *NodeInfo) UsedPorts() util.HostPortInfo { return n.usedPorts } -// Images returns the image size information on this node. -func (n *NodeInfo) Images() map[string]int64 { +// ImageSizes returns the image size information on this node. 
+func (n *NodeInfo) ImageSizes() map[string]int64 { if n == nil { return nil } diff --git a/pkg/scheduler/schedulercache/node_info_test.go b/pkg/scheduler/schedulercache/node_info_test.go index 2444d6ce61f..34214d9ef00 100644 --- a/pkg/scheduler/schedulercache/node_info_test.go +++ b/pkg/scheduler/schedulercache/node_info_test.go @@ -235,6 +235,46 @@ func TestSetMaxResource(t *testing.T) { } } +func TestImageSizes(t *testing.T) { + ni := fakeNodeInfo() + ni.node = &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Status: v1.NodeStatus{ + Images: []v1.ContainerImage{ + { + Names: []string{ + "gcr.io/10", + "gcr.io/10:v1", + }, + SizeBytes: int64(10 * 1024 * 1024), + }, + { + Names: []string{ + "gcr.io/50", + "gcr.io/50:v1", + }, + SizeBytes: int64(50 * 1024 * 1024), + }, + }, + }, + } + + ni.updateImageSizes() + expected := map[string]int64{ + "gcr.io/10": 10 * 1024 * 1024, + "gcr.io/10:v1": 10 * 1024 * 1024, + "gcr.io/50": 50 * 1024 * 1024, + "gcr.io/50:v1": 50 * 1024 * 1024, + } + + imageSizes := ni.ImageSizes() + if !reflect.DeepEqual(expected, imageSizes) { + t.Errorf("expected: %#v, got: %#v", expected, imageSizes) + } +} + func TestNewNodeInfo(t *testing.T) { nodeName := "test-node" pods := []*v1.Pod{ From 83818ee0c8995d766a0d6c99aa75ac3a57484e9f Mon Sep 17 00:00:00 2001 From: Mayank Kumar Date: Thu, 24 May 2018 23:26:02 -0700 Subject: [PATCH 188/307] simplify else --- .../algorithm/priorities/node_affinity.go | 7 +++--- pkg/scheduler/schedulercache/node_info.go | 22 +++++++++---------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/pkg/scheduler/algorithm/priorities/node_affinity.go b/pkg/scheduler/algorithm/priorities/node_affinity.go index ca0a351650e..347f797d776 100644 --- a/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -37,12 +37,11 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s return schedulerapi.HostPriority{}, fmt.Errorf("node not found") } - var affinity *v1.Affinity + // default is the podspec. + affinity := pod.Spec.Affinity if priorityMeta, ok := meta.(*priorityMetadata); ok { + // We were able to parse metadata, use affinity from there. affinity = priorityMeta.affinity - } else { - // We couldn't parse metadata - fallback to the podspec. - affinity = pod.Spec.Affinity } var count int32 diff --git a/pkg/scheduler/schedulercache/node_info.go b/pkg/scheduler/schedulercache/node_info.go index 2773437327f..b5829cfca3d 100644 --- a/pkg/scheduler/schedulercache/node_info.go +++ b/pkg/scheduler/schedulercache/node_info.go @@ -603,18 +603,18 @@ func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod { } filtered := make([]*v1.Pod, 0, len(pods)) for _, p := range pods { - if p.Spec.NodeName == node.Name { - // If pod is on the given node, add it to 'filtered' only if it is present in nodeInfo. - podKey, _ := getPodKey(p) - for _, np := range n.Pods() { - npodkey, _ := getPodKey(np) - if npodkey == podKey { - filtered = append(filtered, p) - break - } - } - } else { + if p.Spec.NodeName != node.Name { filtered = append(filtered, p) + continue + } + // If pod is on the given node, add it to 'filtered' only if it is present in nodeInfo. 
+ podKey, _ := getPodKey(p) + for _, np := range n.Pods() { + npodkey, _ := getPodKey(np) + if npodkey == podKey { + filtered = append(filtered, p) + break + } } } return filtered From d4714f5c379fee08e93ed72b6db4578cd65ce9cd Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Fri, 25 May 2018 02:11:02 -0700 Subject: [PATCH 189/307] Update feature warning for log rotation flags. Signed-off-by: Lantao Liu --- cmd/kubelet/app/options/options.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 4989b4ac828..82d319e21d1 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -539,8 +539,8 @@ func AddKubeletConfigFlags(mainfs *pflag.FlagSet, c *kubeletconfig.KubeletConfig fs.BoolVar(&c.MakeIPTablesUtilChains, "make-iptables-util-chains", c.MakeIPTablesUtilChains, "If true, kubelet will ensure iptables utility rules are present on host.") fs.Int32Var(&c.IPTablesMasqueradeBit, "iptables-masquerade-bit", c.IPTablesMasqueradeBit, "The bit of the fwmark space to mark packets for SNAT. Must be within the range [0, 31]. Please match this parameter with corresponding parameter in kube-proxy.") fs.Int32Var(&c.IPTablesDropBit, "iptables-drop-bit", c.IPTablesDropBit, "The bit of the fwmark space to mark packets for dropping. Must be within the range [0, 31].") - fs.StringVar(&c.ContainerLogMaxSize, "container-log-max-size", c.ContainerLogMaxSize, " Set the maximum size (e.g. 10Mi) of container log file before it is rotated. This flag can only be used with --container-runtime=remote.") - fs.Int32Var(&c.ContainerLogMaxFiles, "container-log-max-files", c.ContainerLogMaxFiles, " Set the maximum number of container log files that can be present for a container. The number must be >= 2. This flag can only be used with --container-runtime=remote.") + fs.StringVar(&c.ContainerLogMaxSize, "container-log-max-size", c.ContainerLogMaxSize, " Set the maximum size (e.g. 10Mi) of container log file before it is rotated. This flag can only be used with --container-runtime=remote.") + fs.Int32Var(&c.ContainerLogMaxFiles, "container-log-max-files", c.ContainerLogMaxFiles, " Set the maximum number of container log files that can be present for a container. The number must be >= 2. This flag can only be used with --container-runtime=remote.") // Flags intended for testing, not recommended used in production environments. fs.Int64Var(&c.MaxOpenFiles, "max-open-files", c.MaxOpenFiles, "Number of files that can be opened by Kubelet process.") From cc210a4505c3ef4fa8c8e1cf2e3cf468a631caef Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Fri, 25 May 2018 18:03:22 +0800 Subject: [PATCH 190/307] fix toleration validation invalid error --- pkg/apis/core/validation/validation.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 6a01b7f6254..81fd33d183f 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -2749,7 +2749,7 @@ func validateTaintEffect(effect *core.TaintEffect, allowEmpty bool, fldPath *fie // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. 
// string(core.TaintEffectNoScheduleNoAdmit), } - allErrors = append(allErrors, field.NotSupported(fldPath, effect, validValues)) + allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues)) } return allErrors } From 5a099d70c9d2c68afc8f149b0119119d3a928c14 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Fri, 25 May 2018 14:02:59 +0200 Subject: [PATCH 191/307] Move Ceph server secret creation to common code. --- test/e2e/framework/volume_util.go | 25 ++++++++++- test/e2e/storage/volume_io.go | 32 +++----------- test/e2e/storage/volumes.go | 73 +++---------------------------- 3 files changed, 34 insertions(+), 96 deletions(-) diff --git a/test/e2e/framework/volume_util.go b/test/e2e/framework/volume_util.go index 1f0a8e019f7..9eb5269ad62 100644 --- a/test/e2e/framework/volume_util.go +++ b/test/e2e/framework/volume_util.go @@ -186,7 +186,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest } // CephRBD-specific wrapper for CreateStorageServer. -func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) { +func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, secret *v1.Secret, ip string) { config = VolumeTestConfig{ Namespace: namespace, Prefix: "rbd", @@ -205,7 +205,28 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo Logf("sleeping a bit to give ceph server time to initialize") time.Sleep(VolumeServerPodStartupSleep) - return config, pod, ip + // create secrets for the server + secret = &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: config.Prefix + "-secret", + }, + Data: map[string][]byte{ + // from test/images/volumes-tester/rbd/keyring + "key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="), + }, + Type: "kubernetes.io/rbd", + } + + secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret) + if err != nil { + Failf("Failed to create secrets for Ceph RBD: %v", err) + } + + return config, pod, secret, ip } // Wrapper for StartVolumeServer(). 
A storage server config is passed in, and a pod pointer diff --git a/test/e2e/storage/volume_io.go b/test/e2e/storage/volume_io.go index 04c803b78cd..9ed5e245fff 100644 --- a/test/e2e/storage/volume_io.go +++ b/test/e2e/storage/volume_io.go @@ -385,33 +385,11 @@ var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() { Describe("Ceph-RBD [Feature:Volumes]", func() { var ( secret *v1.Secret - name string ) testFile := "ceph-rbd_io_test" BeforeEach(func() { - config, serverPod, serverIP = framework.NewRBDServer(cs, ns) - name = config.Prefix + "-server" - - // create server secret - secret = &v1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Data: map[string][]byte{ - // from test/images/volumes-tester/rbd/keyring - "key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="), - }, - Type: "kubernetes.io/rbd", - } - var err error - secret, err = cs.CoreV1().Secrets(ns).Create(secret) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("BeforeEach: failed to create secret %q for Ceph-RBD: %v", name, err)) - + config, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns) volSource = v1.VolumeSource{ RBD: &v1.RBDVolumeSource{ CephMonitors: []string{serverIP}, @@ -419,7 +397,7 @@ var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() { RBDImage: "foo", RadosUser: "admin", SecretRef: &v1.LocalObjectReference{ - Name: name, + Name: secret.Name, }, FSType: "ext2", ReadOnly: false, @@ -428,13 +406,13 @@ var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() { }) AfterEach(func() { - framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", name) - secErr := cs.CoreV1().Secrets(ns).Delete(name, &metav1.DeleteOptions{}) + framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name) + secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{}) framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name) err := framework.DeletePodWithWait(f, cs, serverPod) if secErr != nil || err != nil { if secErr != nil { - framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", err) + framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr) } if err != nil { framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err) diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 3be74537258..0a4a2e944a8 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -57,7 +57,6 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere" - imageutils "k8s.io/kubernetes/test/utils/image" ) func DeleteCinderVolume(name string) error { @@ -200,34 +199,9 @@ var _ = utils.SIGDescribe("Volumes", func() { Describe("Ceph RBD [Feature:Volumes]", func() { It("should be mountable", func() { - config, _, serverIP := framework.NewRBDServer(cs, namespace.Name) + config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name) defer framework.VolumeTestCleanup(f, config) - - // create secrets for the server - secret := v1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: config.Prefix + "-secret", - }, - Data: map[string][]byte{ - // from test/images/volumes-tester/rbd/keyring - "key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="), - }, - Type: "kubernetes.io/rbd", - } - - secClient := 
cs.CoreV1().Secrets(config.Namespace) - - defer func() { - secClient.Delete(config.Prefix+"-secret", nil) - }() - - if _, err := secClient.Create(&secret); err != nil { - framework.Failf("Failed to create secrets for Ceph RBD: %v", err) - } + defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil) tests := []framework.VolumeTest{ { @@ -238,7 +212,7 @@ var _ = utils.SIGDescribe("Volumes", func() { RBDImage: "foo", RadosUser: "admin", SecretRef: &v1.LocalObjectReference{ - Name: config.Prefix + "-secret", + Name: secret.Name, }, FSType: "ext2", }, @@ -258,44 +232,9 @@ var _ = utils.SIGDescribe("Volumes", func() { //////////////////////////////////////////////////////////////////////// Describe("CephFS [Feature:Volumes]", func() { It("should be mountable", func() { - config := framework.VolumeTestConfig{ - Namespace: namespace.Name, - Prefix: "cephfs", - ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer), - ServerPorts: []int{6789}, - } - + config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name) defer framework.VolumeTestCleanup(f, config) - _, serverIP := framework.CreateStorageServer(cs, config) - By("sleeping a bit to give ceph server time to initialize") - time.Sleep(framework.VolumeServerPodStartupSleep) - - // create ceph secret - secret := &v1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: config.Prefix + "-secret", - }, - // from test/images/volumes-tester/rbd/keyring - Data: map[string][]byte{ - "key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="), - }, - Type: "kubernetes.io/cephfs", - } - - defer func() { - if err := cs.CoreV1().Secrets(namespace.Name).Delete(secret.Name, nil); err != nil { - framework.Failf("unable to delete secret %v: %v", secret.Name, err) - } - }() - - var err error - if secret, err = cs.CoreV1().Secrets(namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) - } + defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil) tests := []framework.VolumeTest{ { @@ -303,7 +242,7 @@ var _ = utils.SIGDescribe("Volumes", func() { CephFS: &v1.CephFSVolumeSource{ Monitors: []string{serverIP + ":6789"}, User: "kube", - SecretRef: &v1.LocalObjectReference{Name: config.Prefix + "-secret"}, + SecretRef: &v1.LocalObjectReference{Name: secret.Name}, ReadOnly: true, }, }, From f363f549c0a5b796bab603ade62471e0ed607bdd Mon Sep 17 00:00:00 2001 From: Shyam Jeedigunta Date: Thu, 24 May 2018 16:47:38 +0200 Subject: [PATCH 192/307] Measure scheduler throughput in density test --- test/e2e/framework/metrics_util.go | 31 +++++++++++++++-------------- test/e2e/scalability/density.go | 32 ++++++++++++++++++++---------- test/utils/runners.go | 5 +++++ 3 files changed, 43 insertions(+), 25 deletions(-) diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go index 3b6b95c434c..f9c53706461 100644 --- a/test/e2e/framework/metrics_util.go +++ b/test/e2e/framework/metrics_util.go @@ -206,21 +206,22 @@ func (l *PodStartupLatency) PrintJSON() string { return PrettyPrintJSON(PodStartupLatencyToPerfData(l)) } -type SchedulingLatency struct { - Scheduling LatencyMetric `json:"scheduling"` - Binding LatencyMetric `json:"binding"` - Total LatencyMetric `json:"total"` +type SchedulingMetrics struct { + SchedulingLatency LatencyMetric `json:"schedulingLatency"` + BindingLatency LatencyMetric `json:"bindingLatency"` + E2ELatency LatencyMetric `json:"e2eLatency"` + ThroughputSamples 
[]float64 `json:"throughputSamples"` } -func (l *SchedulingLatency) SummaryKind() string { - return "SchedulingLatency" +func (l *SchedulingMetrics) SummaryKind() string { + return "SchedulingMetrics" } -func (l *SchedulingLatency) PrintHumanReadable() string { +func (l *SchedulingMetrics) PrintHumanReadable() string { return PrettyPrintJSON(l) } -func (l *SchedulingLatency) PrintJSON() string { +func (l *SchedulingMetrics) PrintJSON() string { return PrettyPrintJSON(l) } @@ -438,9 +439,9 @@ func getMetrics(c clientset.Interface) (string, error) { return string(body), nil } -// Retrieves scheduler metrics information. -func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) { - result := SchedulingLatency{} +// Retrieves scheduler latency metrics. +func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) { + result := SchedulingMetrics{} // Check if master Node is registered nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) @@ -491,11 +492,11 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) { var metric *LatencyMetric = nil switch sample.Metric[model.MetricNameLabel] { case "scheduler_scheduling_algorithm_latency_microseconds": - metric = &result.Scheduling + metric = &result.SchedulingLatency case "scheduler_binding_latency_microseconds": - metric = &result.Binding + metric = &result.BindingLatency case "scheduler_e2e_scheduling_latency_microseconds": - metric = &result.Total + metric = &result.E2ELatency } if metric == nil { continue @@ -512,7 +513,7 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) { } // Verifies (currently just by logging them) the scheduling latencies. -func VerifySchedulerLatency(c clientset.Interface) (*SchedulingLatency, error) { +func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) { latency, err := getSchedulingLatency(c) if err != nil { return nil, err diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index 3476d071512..490a43a1f1a 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -224,32 +224,42 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC return constraints } -func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) { +func logPodStartupStatus( + c clientset.Interface, + expectedPods int, + observedLabels map[string]string, + period time.Duration, + scheduleThroughputs []float64, + stopCh chan struct{}) { + label := labels.SelectorFromSet(labels.Set(observedLabels)) podStore, err := testutils.NewPodStore(c, metav1.NamespaceAll, label, fields.Everything()) framework.ExpectNoError(err) defer podStore.Stop() ticker := time.NewTicker(period) + startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods) + lastScheduledCount := startupStatus.Scheduled defer ticker.Stop() for { select { case <-ticker.C: - pods := podStore.List() - startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods) - framework.Logf(startupStatus.String("Density")) case <-stopCh: - pods := podStore.List() - startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods) - framework.Logf(startupStatus.String("Density")) return } + // Log status of the pods. 
+ startupStatus := testutils.ComputeRCStartupStatus(podStore.List(), expectedPods) + framework.Logf(startupStatus.String("Density")) + // Compute scheduling throughput for the latest time period. + throughput := float64(startupStatus.Scheduled-lastScheduledCount) / float64(period/time.Second) + scheduleThroughputs = append(scheduleThroughputs, throughput) + lastScheduledCount = startupStatus.Scheduled } } // runDensityTest will perform a density test and return the time it took for // all pods to start -func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer) time.Duration { +func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer, scheduleThroughputs []float64) time.Duration { defer GinkgoRecover() // Create all secrets, configmaps and daemons. @@ -274,7 +284,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi }() } logStopCh := make(chan struct{}) - go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh) + go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, scheduleThroughputs, logStopCh) wg.Wait() startupTime := time.Since(startTime) close(logStopCh) @@ -355,6 +365,7 @@ var _ = SIGDescribe("Density", func() { var nodeCpuCapacity int64 var nodeMemCapacity int64 var nodes *v1.NodeList + var scheduleThroughputs []float64 testCaseBaseName := "density" missingMeasurements := 0 @@ -397,6 +408,7 @@ var _ = SIGDescribe("Density", func() { latency, err := framework.VerifySchedulerLatency(c) framework.ExpectNoError(err) if err == nil { + latency.ThroughputSamples = scheduleThroughputs summaries = append(summaries, latency) } summaries = append(summaries, testPhaseDurations) @@ -643,7 +655,7 @@ var _ = SIGDescribe("Density", func() { LogFunc: framework.Logf, }) } - e2eStartupTime = runDensityTest(dConfig, testPhaseDurations) + e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, scheduleThroughputs) if itArg.runLatencyTest { By("Scheduling additional Pods to measure startup latencies") diff --git a/test/utils/runners.go b/test/utils/runners.go index 0a614abb62e..aef0523297c 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -655,6 +655,7 @@ type RCStartupStatus struct { RunningButNotReady int Waiting int Pending int + Scheduled int Unknown int Inactive int FailedContainers int @@ -708,6 +709,10 @@ func ComputeRCStartupStatus(pods []*v1.Pod, expected int) RCStartupStatus { } else if p.Status.Phase == v1.PodUnknown { startupStatus.Unknown++ } + // Record count of scheduled pods (useful for computing scheduler throughput). + if p.Spec.NodeName != "" { + startupStatus.Scheduled++ + } } return startupStatus } From 2a9d491fb8de037003e9c2bd1f5f76eff0eb31e9 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 25 May 2018 09:05:38 -0400 Subject: [PATCH 193/307] Revert "Change default min-startup-pods value" This reverts commit de0bf05f4637f9f40c0648ef8b341b66726ee46f. --- test/e2e/framework/test_context.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index e3158e8ca4f..b07d02aa282 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -266,7 +266,7 @@ func RegisterClusterFlags() { flag.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. 
Only required if provider is aws.") flag.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure.") - flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 8, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.") + flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.") flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.") flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.") flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.") From 7b5f3a1dc58f9accc01c7e90ec5b8373334cff7f Mon Sep 17 00:00:00 2001 From: Daniel Gonzalez Date: Thu, 24 May 2018 14:19:20 +0200 Subject: [PATCH 194/307] Ensure that only IPs are used as node addresses in OpenStack LBs --- .../openstack/openstack_loadbalancer.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index e38d2d157bd..72bccb7e35a 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -487,21 +487,27 @@ func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, s // The LB needs to be configured with instance addresses on the same // subnet as the LB (aka opts.SubnetID). Currently we're just -// guessing that the node's InternalIP is the right address - and that -// should be sufficient for all "normal" cases. +// guessing that the node's InternalIP is the right address. +// In case no InternalIP can be found, ExternalIP is tried. +// If neither InternalIP nor ExternalIP can be found an error is +// returned. func nodeAddressForLB(node *v1.Node) (string, error) { addrs := node.Status.Addresses if len(addrs) == 0 { return "", ErrNoAddressFound } - for _, addr := range addrs { - if addr.Type == v1.NodeInternalIP { - return addr.Address, nil + allowedAddrTypes := []v1.NodeAddressType{v1.NodeInternalIP, v1.NodeExternalIP} + + for _, allowedAddrType := range allowedAddrTypes { + for _, addr := range addrs { + if addr.Type == allowedAddrType { + return addr.Address, nil + } } } - return addrs[0].Address, nil + return "", ErrNoAddressFound } //getStringFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's value or a specified defaultSetting From 28b6f34107ceffb9b71e600f238100d563eb12b5 Mon Sep 17 00:00:00 2001 From: Yecheng Fu Date: Sat, 26 May 2018 00:09:25 +0800 Subject: [PATCH 195/307] Should use `hostProcMountinfoPath` constant in nsenter_mount.go. 
--- pkg/util/mount/nsenter_mount.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index 9b0464c329a..a5ca17ff5f1 100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -333,7 +333,7 @@ func (mounter *NsenterMounter) GetMountRefs(pathname string) ([]string, error) { if err != nil { return nil, err } - return searchMountPoints(hostpath, procMountInfoPath) + return searchMountPoints(hostpath, hostProcMountinfoPath) } func (mounter *NsenterMounter) GetFSGroup(pathname string) (int64, error) { @@ -345,5 +345,5 @@ func (mounter *NsenterMounter) GetFSGroup(pathname string) (int64, error) { } func (mounter *NsenterMounter) GetSELinuxSupport(pathname string) (bool, error) { - return getSELinuxSupport(pathname, procMountInfoPath) + return getSELinuxSupport(pathname, hostProcMountsPath) } From d38afb136793bc719281797016a0cccd4d0efaa1 Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Fri, 25 May 2018 19:19:12 +0300 Subject: [PATCH 196/307] remove CrictlChecker from preflight checks CrictlChecker uses InPathCheck to check if crictl presents in the PATH. The same check is done in CRICheck function. CrictlChecker is also called unconditionally, producing messages that can confuse users. CRICheck is called only when needed, i.e. when user specifies CRI socket. --- cmd/kubeadm/app/preflight/checks.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 8b3c5cb6a98..3aa67e7bfb3 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -959,15 +959,6 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfigura // addCommonChecks is a helper function to deplicate checks that are common between both the // kubeadm init and join commands func addCommonChecks(execer utilsexec.Interface, cfg kubeadmapi.CommonConfiguration, checks []Checker) []Checker { - // check if we can use crictl to perform checks via the CRI - glog.V(1).Infoln("checking if we can use crictl to perform checks via the CRI") - criCtlChecker := InPathCheck{ - executable: "crictl", - mandatory: false, - exec: execer, - suggestion: fmt.Sprintf("go get %v", kubeadmconstants.CRICtlPackage), - } - // Check whether or not the CRI socket defined is the default if cfg.GetCRISocket() != kubeadmdefaults.DefaultCRISocket { checks = append(checks, CRICheck{socket: cfg.GetCRISocket(), exec: execer}) @@ -990,7 +981,6 @@ func addCommonChecks(execer utilsexec.Interface, cfg kubeadmapi.CommonConfigurat InPathCheck{executable: "socat", mandatory: false, exec: execer}, InPathCheck{executable: "tc", mandatory: false, exec: execer}, InPathCheck{executable: "touch", mandatory: false, exec: execer}, - criCtlChecker, ResolveCheck{}) } checks = append(checks, From 4c3fa4f9baa79ab08a4d39e5d40fb66617af111d Mon Sep 17 00:00:00 2001 From: andrewsykim Date: Fri, 25 May 2018 12:37:56 -0400 Subject: [PATCH 197/307] disable PersistentVolumeLabel admission controller by default --- pkg/kubeapiserver/options/plugins.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/kubeapiserver/options/plugins.go b/pkg/kubeapiserver/options/plugins.go index a17c3584aeb..35f1763027a 100644 --- a/pkg/kubeapiserver/options/plugins.go +++ b/pkg/kubeapiserver/options/plugins.go @@ -131,7 +131,6 @@ func DefaultOffAdmissionPlugins() sets.String { lifecycle.PluginName, //NamespaceLifecycle limitranger.PluginName, //LimitRanger 
serviceaccount.PluginName, //ServiceAccount - label.PluginName, //PersistentVolumeLabel setdefault.PluginName, //DefaultStorageClass defaulttolerationseconds.PluginName, //DefaultTolerationSeconds mutatingwebhook.PluginName, //MutatingAdmissionWebhook From 25436cdc6a86b9fed05aa100980a4b51243f2e5a Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Fri, 25 May 2018 18:55:35 +0300 Subject: [PATCH 198/307] fix parsing 'crictl pods -q' output Output of crictl pods -q is a list of running pod ids, one id per line. Current code splits this output incorrectly which makes next command 'crictl stopp' fail if there is more than one pod running. Should be fixed by using strings.Fields instead of strings.Split. --- cmd/kubeadm/app/cmd/reset.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kubeadm/app/cmd/reset.go b/cmd/kubeadm/app/cmd/reset.go index 81d2cc83cc0..1c6a84aa100 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -215,7 +215,7 @@ func resetWithCrictl(execer utilsexec.Interface, dockerCheck preflight.Checker, resetWithDocker(execer, dockerCheck) return } - sandboxes := strings.Split(string(output), " ") + sandboxes := strings.Fields(string(output)) glog.V(1).Infoln("[reset] Stopping and removing running containers using crictl") for _, s := range sandboxes { if strings.TrimSpace(s) == "" { From 2f2de31d3d76ea8659a96771eaababdd091c83c2 Mon Sep 17 00:00:00 2001 From: Chuck Ha Date: Mon, 21 May 2018 13:12:07 -0400 Subject: [PATCH 199/307] Prepulls images by default kubeadm now pulls container images before the init step if it cannot find them on the system * This commit also cleans up a dependency cycle Closes #825 --- cmd/kubeadm/app/cmd/config.go | 2 +- cmd/kubeadm/app/cmd/init.go | 3 + cmd/kubeadm/app/images/BUILD | 8 +- cmd/kubeadm/app/images/interface.go | 89 ++++++++ cmd/kubeadm/app/images/interface_test.go | 266 +++++++++++++++++++++++ cmd/kubeadm/app/images/puller.go | 57 ----- cmd/kubeadm/app/images/puller_test.go | 138 ------------ cmd/kubeadm/app/preflight/BUILD | 1 + cmd/kubeadm/app/preflight/checks.go | 44 ++++ cmd/kubeadm/app/preflight/checks_test.go | 30 +++ cmd/kubeadm/app/util/BUILD | 2 - cmd/kubeadm/app/util/error.go | 9 +- cmd/kubeadm/app/util/error_test.go | 8 +- 13 files changed, 450 insertions(+), 207 deletions(-) create mode 100644 cmd/kubeadm/app/images/interface.go create mode 100644 cmd/kubeadm/app/images/interface_test.go delete mode 100644 cmd/kubeadm/app/images/puller.go delete mode 100644 cmd/kubeadm/app/images/puller_test.go diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index a06cc315d37..62f89e92788 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -301,7 +301,7 @@ func NewCmdConfigImagesPull() *cobra.Command { kubeadmutil.CheckErr(err) internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, cfg) kubeadmutil.CheckErr(err) - puller, err := images.NewImagePuller(utilsexec.New(), internalcfg.GetCRISocket()) + puller, err := images.NewCRInterfacer(utilsexec.New(), internalcfg.GetCRISocket()) kubeadmutil.CheckErr(err) imagesPull := NewImagesPull(puller, images.GetAllImages(internalcfg)) kubeadmutil.CheckErr(imagesPull.PullAll()) diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index e763d8fd7c0..50d3beaa62a 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -258,6 +258,9 @@ func NewInit(cfgPath string, externalcfg *kubeadmapiv1alpha2.MasterConfiguration if err := 
preflight.RunInitMasterChecks(utilsexec.New(), cfg, ignorePreflightErrors); err != nil { return nil, err } + if err := preflight.RunPullImagesCheck(utilsexec.New(), cfg, ignorePreflightErrors); err != nil { + return nil, err + } return &Init{cfg: cfg, skipTokenPrint: skipTokenPrint, dryRun: dryRun, ignorePreflightErrors: ignorePreflightErrors}, nil } diff --git a/cmd/kubeadm/app/images/BUILD b/cmd/kubeadm/app/images/BUILD index 27e970e71d4..86aa6e0781e 100644 --- a/cmd/kubeadm/app/images/BUILD +++ b/cmd/kubeadm/app/images/BUILD @@ -10,12 +10,12 @@ go_library( name = "go_default_library", srcs = [ "images.go", - "puller.go", + "interface.go", ], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/images", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", + "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/features:go_default_library", "//cmd/kubeadm/app/phases/addons/dns:go_default_library", @@ -46,10 +46,10 @@ filegroup( go_test( name = "go_default_xtest", - srcs = ["puller_test.go"], + srcs = ["interface_test.go"], deps = [ ":go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", + "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cmd/kubeadm/app/images/interface.go b/cmd/kubeadm/app/images/interface.go new file mode 100644 index 00000000000..c9bd6b16fa8 --- /dev/null +++ b/cmd/kubeadm/app/images/interface.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package images + +import ( + "fmt" + + kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" + utilsexec "k8s.io/utils/exec" +) + +// Puller is an interface for pulling images +type Puller interface { + Pull(string) error +} + +// Existence is an interface to determine if an image exists on the system +// A nil error means the image was found +type Existence interface { + Exists(string) error +} + +// Images defines the set of behaviors needed for images relating to the CRI +type Images interface { + Puller + Existence +} + +// CRInterfacer is a struct that interfaces with the container runtime +type CRInterfacer struct { + criSocket string + exec utilsexec.Interface + crictlPath string + dockerPath string +} + +// NewCRInterfacer sets up and returns a CRInterfacer +func NewCRInterfacer(execer utilsexec.Interface, criSocket string) (*CRInterfacer, error) { + var crictlPath, dockerPath string + var err error + if criSocket != kubeadmapiv1alpha2.DefaultCRISocket { + if crictlPath, err = execer.LookPath("crictl"); err != nil { + return nil, fmt.Errorf("crictl is required for non docker container runtimes: %v", err) + } + } else { + // use the dockershim + if dockerPath, err = execer.LookPath("docker"); err != nil { + return nil, fmt.Errorf("`docker` is required when docker is the container runtime and the kubelet is not running: %v", err) + } + } + + return &CRInterfacer{ + exec: execer, + criSocket: criSocket, + crictlPath: crictlPath, + dockerPath: dockerPath, + }, nil +} + +// Pull pulls the actual image using either crictl or docker +func (cri *CRInterfacer) Pull(image string) error { + if cri.criSocket != kubeadmapiv1alpha2.DefaultCRISocket { + return cri.exec.Command(cri.crictlPath, "-r", cri.criSocket, "pull", image).Run() + } + return cri.exec.Command(cri.dockerPath, "pull", image).Run() +} + +// Exists checks to see if the image exists on the system already +// Returns an error if the image is not found. +func (cri *CRInterfacer) Exists(image string) error { + if cri.criSocket != kubeadmapiv1alpha2.DefaultCRISocket { + return cri.exec.Command(cri.crictlPath, "-r", cri.criSocket, "inspecti", image).Run() + } + return cri.exec.Command(cri.dockerPath, "inspect", image).Run() +} diff --git a/cmd/kubeadm/app/images/interface_test.go b/cmd/kubeadm/app/images/interface_test.go new file mode 100644 index 00000000000..54e176aaf6a --- /dev/null +++ b/cmd/kubeadm/app/images/interface_test.go @@ -0,0 +1,266 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package images_test + +import ( + "context" + "errors" + "io" + "testing" + + kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" + "k8s.io/kubernetes/cmd/kubeadm/app/images" + "k8s.io/utils/exec" +) + +type fakeCmd struct { + err error +} + +func (f *fakeCmd) Run() error { + return f.err +} +func (f *fakeCmd) CombinedOutput() ([]byte, error) { return nil, nil } +func (f *fakeCmd) Output() ([]byte, error) { return nil, nil } +func (f *fakeCmd) SetDir(dir string) {} +func (f *fakeCmd) SetStdin(in io.Reader) {} +func (f *fakeCmd) SetStdout(out io.Writer) {} +func (f *fakeCmd) SetStderr(out io.Writer) {} +func (f *fakeCmd) Stop() {} + +type fakeExecer struct { + cmd exec.Cmd + findCrictl bool + findDocker bool +} + +func (f *fakeExecer) Command(cmd string, args ...string) exec.Cmd { return f.cmd } +func (f *fakeExecer) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { + return f.cmd +} +func (f *fakeExecer) LookPath(file string) (string, error) { + if file == "crictl" { + if f.findCrictl { + return "/path", nil + } + return "", errors.New("no crictl for you") + } + if file == "docker" { + if f.findDocker { + return "/path", nil + } + return "", errors.New("no docker for you") + } + return "", errors.New("unknown binary") +} + +func TestNewCRInterfacer(t *testing.T) { + testcases := []struct { + name string + criSocket string + findCrictl bool + findDocker bool + expectError bool + }{ + { + name: "need crictl but can only find docker should return an error", + criSocket: "/not/docker", + findCrictl: false, + findDocker: true, + expectError: true, + }, + { + name: "need crictl and cannot find either should return an error", + criSocket: "/not/docker", + findCrictl: false, + findDocker: false, + expectError: true, + }, + { + name: "need crictl and cannot find docker should return no error", + criSocket: "/not/docker", + findCrictl: true, + findDocker: false, + expectError: false, + }, + { + name: "need crictl and can find both should return no error", + criSocket: "/not/docker", + findCrictl: true, + findDocker: true, + expectError: false, + }, + { + name: "need docker and cannot find crictl should return no error", + criSocket: kubeadmapiv1alpha2.DefaultCRISocket, + findCrictl: false, + findDocker: true, + expectError: false, + }, + { + name: "need docker and cannot find docker should return an error", + criSocket: kubeadmapiv1alpha2.DefaultCRISocket, + findCrictl: false, + findDocker: false, + expectError: true, + }, + { + name: "need docker and can find both should return no error", + criSocket: kubeadmapiv1alpha2.DefaultCRISocket, + findCrictl: true, + findDocker: true, + expectError: false, + }, + { + name: "need docker and can only find crictl should return an error", + criSocket: kubeadmapiv1alpha2.DefaultCRISocket, + findCrictl: true, + findDocker: false, + expectError: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + fe := &fakeExecer{ + findCrictl: tc.findCrictl, + findDocker: tc.findDocker, + } + _, err := images.NewCRInterfacer(fe, tc.criSocket) + if tc.expectError && err == nil { + t.Fatal("expected an error but did not get one") + } + if !tc.expectError && err != nil { + t.Fatalf("did not expedt an error but got an error: %v", err) + } + }) + } +} + +func TestImagePuller(t *testing.T) { + testcases := []struct { + name string + criSocket string + pullFails bool + errorExpected bool + }{ + { + name: "using docker and pull fails", + criSocket: kubeadmapiv1alpha2.DefaultCRISocket, + 
pullFails: true, + errorExpected: true, + }, + { + name: "using docker and pull succeeds", + criSocket: kubeadmapiv1alpha2.DefaultCRISocket, + pullFails: false, + errorExpected: false, + }, + { + name: "using crictl pull fails", + criSocket: "/not/default", + pullFails: true, + errorExpected: true, + }, + { + name: "using crictl and pull succeeds", + criSocket: "/not/default", + pullFails: false, + errorExpected: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + var err error + if tc.pullFails { + err = errors.New("error") + } + + fe := &fakeExecer{ + cmd: &fakeCmd{err}, + findCrictl: true, + findDocker: true, + } + ip, _ := images.NewCRInterfacer(fe, tc.criSocket) + + err = ip.Pull("imageName") + if tc.errorExpected && err == nil { + t.Fatal("expected an error and did not get one") + } + if !tc.errorExpected && err != nil { + t.Fatalf("expected no error but got one: %v", err) + } + }) + } +} + +func TestImageExists(t *testing.T) { + testcases := []struct { + name string + criSocket string + existFails bool + errorExpected bool + }{ + { + name: "using docker and exist fails", + criSocket: kubeadmapiv1alpha2.DefaultCRISocket, + existFails: true, + errorExpected: true, + }, + { + name: "using docker and exist succeeds", + criSocket: kubeadmapiv1alpha2.DefaultCRISocket, + existFails: false, + errorExpected: false, + }, + { + name: "using crictl exist fails", + criSocket: "/not/default", + existFails: true, + errorExpected: true, + }, + { + name: "using crictl and exist succeeds", + criSocket: "/not/default", + existFails: false, + errorExpected: false, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + var err error + if tc.existFails { + err = errors.New("error") + } + + fe := &fakeExecer{ + cmd: &fakeCmd{err}, + findCrictl: true, + findDocker: true, + } + ip, _ := images.NewCRInterfacer(fe, tc.criSocket) + + err = ip.Exists("imageName") + if tc.errorExpected && err == nil { + t.Fatal("expected an error and did not get one") + } + if !tc.errorExpected && err != nil { + t.Fatalf("expected no error but got one: %v", err) + } + }) + } +} diff --git a/cmd/kubeadm/app/images/puller.go b/cmd/kubeadm/app/images/puller.go deleted file mode 100644 index 71db11e481f..00000000000 --- a/cmd/kubeadm/app/images/puller.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package images - -import ( - "fmt" - - kubeadmapiv1alpha1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" - utilsexec "k8s.io/utils/exec" -) - -// Puller is an interface for pulling images -type Puller interface { - Pull(string) error -} - -// ImagePuller is a struct that can pull images and hides the implementation (crictl vs docker) -type ImagePuller struct { - criSocket string - exec utilsexec.Interface - crictlPath string -} - -// NewImagePuller returns a ready to go ImagePuller -func NewImagePuller(execer utilsexec.Interface, criSocket string) (*ImagePuller, error) { - crictlPath, err := execer.LookPath("crictl") - if err != nil && criSocket != kubeadmapiv1alpha1.DefaultCRISocket { - return nil, fmt.Errorf("crictl is required for non docker container runtimes: %v", err) - } - return &ImagePuller{ - exec: execer, - criSocket: criSocket, - crictlPath: crictlPath, - }, nil -} - -// Pull pulls the actual image using either crictl or docker -func (ip *ImagePuller) Pull(image string) error { - if ip.criSocket != kubeadmapiv1alpha1.DefaultCRISocket { - return ip.exec.Command(ip.crictlPath, "-r", ip.criSocket, "pull", image).Run() - } - return ip.exec.Command("sh", "-c", fmt.Sprintf("docker pull %v", image)).Run() -} diff --git a/cmd/kubeadm/app/images/puller_test.go b/cmd/kubeadm/app/images/puller_test.go deleted file mode 100644 index 6a27ec03276..00000000000 --- a/cmd/kubeadm/app/images/puller_test.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package images_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "strings" - "testing" - - kubeadmdefaults "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" - "k8s.io/kubernetes/cmd/kubeadm/app/images" - "k8s.io/utils/exec" -) - -type fakeCmd struct { - cmd string - args []string - out io.Writer -} - -func (f *fakeCmd) Run() error { - fmt.Fprintf(f.out, "%v %v", f.cmd, strings.Join(f.args, " ")) - return nil -} -func (f *fakeCmd) CombinedOutput() ([]byte, error) { return nil, nil } -func (f *fakeCmd) Output() ([]byte, error) { return nil, nil } -func (f *fakeCmd) SetDir(dir string) {} -func (f *fakeCmd) SetStdin(in io.Reader) {} -func (f *fakeCmd) SetStdout(out io.Writer) { - f.out = out -} -func (f *fakeCmd) SetStderr(out io.Writer) {} -func (f *fakeCmd) Stop() {} - -type fakeExecer struct { - cmd exec.Cmd - lookPathSucceeds bool -} - -func (f *fakeExecer) Command(cmd string, args ...string) exec.Cmd { return f.cmd } -func (f *fakeExecer) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { - return f.cmd -} -func (f *fakeExecer) LookPath(file string) (string, error) { - if f.lookPathSucceeds { - return file, nil - } - return "", &os.PathError{Err: errors.New("does not exist")} -} - -func TestImagePuller(t *testing.T) { - testcases := []struct { - name string - criSocket string - cmd exec.Cmd - findCrictl bool - expected string - errorExpected bool - }{ - { - name: "New succeeds even if crictl is not in path", - criSocket: kubeadmdefaults.DefaultCRISocket, - cmd: &fakeCmd{ - cmd: "hello", - args: []string{"world", "and", "friends"}, - }, - findCrictl: false, - expected: "hello world and friends", - }, - { - name: "New succeeds with crictl in path", - criSocket: "/not/default", - cmd: &fakeCmd{ - cmd: "crictl", - args: []string{"-r", "/some/socket", "imagename"}, - }, - findCrictl: true, - expected: "crictl -r /some/socket imagename", - }, - { - name: "New fails with crictl not in path but is required", - criSocket: "/not/docker", - cmd: &fakeCmd{ - cmd: "crictl", - args: []string{"-r", "/not/docker", "an image"}, - }, - findCrictl: false, - errorExpected: true, - }, - } - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - var b bytes.Buffer - tc.cmd.SetStdout(&b) - fe := &fakeExecer{ - cmd: tc.cmd, - lookPathSucceeds: tc.findCrictl, - } - ip, err := images.NewImagePuller(fe, tc.criSocket) - - if tc.errorExpected { - if err == nil { - t.Fatalf("expected an error but found nil: %v", fe) - } - return - } - - if err != nil { - t.Fatalf("expected nil but found an error: %v", err) - } - if err = ip.Pull("imageName"); err != nil { - t.Fatalf("expected nil pulling an image but found: %v", err) - } - if b.String() != tc.expected { - t.Fatalf("expected %v but got: %v", tc.expected, b.String()) - } - }) - } -} diff --git a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index d6b0d412cdf..a7a26a04e9e 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -52,6 +52,7 @@ go_library( "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", + "//cmd/kubeadm/app/images:go_default_library", "//pkg/apis/core/validation:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/util/initsystem:go_default_library", diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 8b3c5cb6a98..922ca00825d 100644 --- 
a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -46,6 +46,7 @@ import ( kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmdefaults "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/kubernetes/cmd/kubeadm/app/images" "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/util/initsystem" @@ -76,10 +77,16 @@ type Error struct { Msg string } +// Error implements the standard error interface func (e *Error) Error() string { return fmt.Sprintf("[preflight] Some fatal errors occurred:\n%s%s", e.Msg, "[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`") } +// Preflight identifies this error as a preflight error +func (e *Error) Preflight() bool { + return true +} + // Checker validates the state of the system to ensure kubeadm will be // successful as often as possible. type Checker interface { @@ -850,6 +857,30 @@ func (ResolveCheck) Check() (warnings, errors []error) { return warnings, errors } +// ImagePullCheck will pull container images used by kubeadm +type ImagePullCheck struct { + Images images.Images + ImageList []string +} + +// Name returns the label for ImagePullCheck +func (ImagePullCheck) Name() string { + return "ImagePull" +} + +// Check pulls images required by kubeadm. This is a mutating check +func (i ImagePullCheck) Check() (warnings, errors []error) { + for _, image := range i.ImageList { + if err := i.Images.Exists(image); err == nil { + continue + } + if err := i.Images.Pull(image); err != nil { + errors = append(errors, fmt.Errorf("failed to pull image [%s]: %v", image, err)) + } + } + return warnings, errors +} + // RunInitMasterChecks executes all individual, applicable to Master node checks. func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfiguration, ignorePreflightErrors sets.String) error { // First, check if we're root separately from the other preflight checks and fail fast @@ -1012,6 +1043,19 @@ func RunRootCheckOnly(ignorePreflightErrors sets.String) error { return RunChecks(checks, os.Stderr, ignorePreflightErrors) } +// RunPullImagesCheck will pull images kubeadm needs if the are not found on the system +func RunPullImagesCheck(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfiguration, ignorePreflightErrors sets.String) error { + criInterfacer, err := images.NewCRInterfacer(execer, cfg.GetCRISocket()) + if err != nil { + return err + } + + checks := []Checker{ + ImagePullCheck{Images: criInterfacer, ImageList: images.GetAllImages(cfg)}, + } + return RunChecks(checks, os.Stderr, ignorePreflightErrors) +} + // RunChecks runs each check, displays it's warnings/errors, and once all // are processed will exit if any errors occurred. 
func RunChecks(checks []Checker, ww io.Writer, ignorePreflightErrors sets.String) error { diff --git a/cmd/kubeadm/app/preflight/checks_test.go b/cmd/kubeadm/app/preflight/checks_test.go index cfea0abc450..f6908c7c3e0 100644 --- a/cmd/kubeadm/app/preflight/checks_test.go +++ b/cmd/kubeadm/app/preflight/checks_test.go @@ -18,6 +18,7 @@ package preflight import ( "bytes" + "errors" "fmt" "io/ioutil" "strings" @@ -696,3 +697,32 @@ func TestSetHasItemOrAll(t *testing.T) { } } } + +type imgs struct{} + +func (i *imgs) Pull(image string) error { + if image == "bad pull" { + return errors.New("pull error") + } + return nil +} +func (i *imgs) Exists(image string) error { + if image == "found" { + return nil + } + return errors.New("error") +} + +func TestImagePullCheck(t *testing.T) { + i := ImagePullCheck{ + Images: &imgs{}, + ImageList: []string{"found", "not found", "bad pull"}, + } + warnings, errors := i.Check() + if len(warnings) != 0 { + t.Fatalf("did not expect any warnings but got %q", warnings) + } + if len(errors) != 1 { + t.Fatalf("expected 1 errors but got %d: %q", len(errors), errors) + } +} diff --git a/cmd/kubeadm/app/util/BUILD b/cmd/kubeadm/app/util/BUILD index 3dfc790b59e..181784908cd 100644 --- a/cmd/kubeadm/app/util/BUILD +++ b/cmd/kubeadm/app/util/BUILD @@ -20,7 +20,6 @@ go_library( importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util", deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/preflight:go_default_library", "//vendor/gopkg.in/yaml.v2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -47,7 +46,6 @@ go_test( "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library", - "//cmd/kubeadm/app/preflight:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/cmd/kubeadm/app/util/error.go b/cmd/kubeadm/app/util/error.go index 61327cf5608..58a892dd191 100644 --- a/cmd/kubeadm/app/util/error.go +++ b/cmd/kubeadm/app/util/error.go @@ -22,7 +22,6 @@ import ( "strings" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/kubernetes/cmd/kubeadm/app/preflight" ) const ( @@ -60,13 +59,19 @@ func CheckErr(err error) { checkErr("", err, fatal) } +// preflightError allows us to know if the error is a preflight error or not +// defining the interface here avoids an import cycle of pulling in preflight into the util package +type preflightError interface { + Preflight() bool +} + // checkErr formats a given error as a string and calls the passed handleErr // func with that string and an kubectl exit code. 
func checkErr(prefix string, err error, handleErr func(string, int)) { switch err.(type) { case nil: return - case *preflight.Error: + case preflightError: handleErr(err.Error(), PreFlightExitCode) case utilerrors.Aggregate: handleErr(err.Error(), ValidationExitCode) diff --git a/cmd/kubeadm/app/util/error_test.go b/cmd/kubeadm/app/util/error_test.go index c28a6cc0566..94f131babae 100644 --- a/cmd/kubeadm/app/util/error_test.go +++ b/cmd/kubeadm/app/util/error_test.go @@ -19,10 +19,12 @@ package util import ( "fmt" "testing" - - "k8s.io/kubernetes/cmd/kubeadm/app/preflight" ) +type pferror struct{} + +func (p *pferror) Preflight() bool { return true } +func (p *pferror) Error() string { return "" } func TestCheckErr(t *testing.T) { var codeReturned int errHandle := func(err string, code int) { @@ -35,7 +37,7 @@ func TestCheckErr(t *testing.T) { }{ {nil, 0}, {fmt.Errorf(""), DefaultErrorExitCode}, - {&preflight.Error{}, PreFlightExitCode}, + {&pferror{}, PreFlightExitCode}, } for _, rt := range tokenTest { From 0dd6e75567ae6b540f480c08ae921f275e7e7c60 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 24 May 2018 15:06:04 -0400 Subject: [PATCH 200/307] Move volume resizing to beta Update bootstrap policies --- pkg/features/kube_features.go | 4 +- pkg/kubeapiserver/options/plugins.go | 1 + .../testdata/cluster-roles.yaml | 8 +++ .../testdata/controller-role-bindings.yaml | 17 +++++ .../testdata/controller-roles.yaml | 64 +++++++++++++++++++ 5 files changed, 92 insertions(+), 2 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index d773374ec43..69d090bf50c 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -94,7 +94,7 @@ const ( LocalStorageCapacityIsolation utilfeature.Feature = "LocalStorageCapacityIsolation" // owner: @gnufied - // alpha: v1.8 + // beta: v1.11 // Ability to Expand persistent volumes ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes" @@ -307,7 +307,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS TaintNodesByCondition: {Default: false, PreRelease: utilfeature.Alpha}, MountPropagation: {Default: true, PreRelease: utilfeature.Beta}, QOSReserved: {Default: false, PreRelease: utilfeature.Alpha}, - ExpandPersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, + ExpandPersistentVolumes: {Default: true, PreRelease: utilfeature.Beta}, CPUManager: {Default: true, PreRelease: utilfeature.Beta}, ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, diff --git a/pkg/kubeapiserver/options/plugins.go b/pkg/kubeapiserver/options/plugins.go index a17c3584aeb..2ba2783404f 100644 --- a/pkg/kubeapiserver/options/plugins.go +++ b/pkg/kubeapiserver/options/plugins.go @@ -133,6 +133,7 @@ func DefaultOffAdmissionPlugins() sets.String { serviceaccount.PluginName, //ServiceAccount label.PluginName, //PersistentVolumeLabel setdefault.PluginName, //DefaultStorageClass + resize.PluginName, //PersistentVolumeClaimResize defaulttolerationseconds.PluginName, //DefaultTolerationSeconds mutatingwebhook.PluginName, //MutatingAdmissionWebhook validatingwebhook.PluginName, //ValidatingAdmissionWebhook diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 5af5b9e0e83..300b66485ca 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ 
b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -1136,6 +1136,14 @@ items: - get - list - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - get + - patch + - update - apiGroups: - storage.k8s.io resources: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml index 4bb97a4d338..6d5cb73e50d 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-role-bindings.yaml @@ -136,6 +136,23 @@ items: - kind: ServiceAccount name: endpoint-controller namespace: kube-system +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:controller:expand-controller + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:controller:expand-controller + subjects: + - kind: ServiceAccount + name: expand-controller + namespace: kube-system - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml index d70cb4936d3..3344def7c28 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml @@ -425,6 +425,70 @@ items: - create - patch - update +- apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:controller:expand-controller + rules: + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - patch + - update + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - endpoints + - services + verbs: + - get + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update - apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: From 23a73283b383882e49bfedc5545b2db2fc9bd71b Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 24 May 2018 15:56:40 -0400 Subject: [PATCH 201/307] Fix breaking volume resize e2e tests --- test/e2e/framework/deployment_util.go | 3 +++ test/e2e/storage/mounted_volume_resize.go | 1 + 2 files changed, 4 insertions(+) diff --git a/test/e2e/framework/deployment_util.go b/test/e2e/framework/deployment_util.go index 2a48e68e628..ea863466d18 100644 --- a/test/e2e/framework/deployment_util.go +++ b/test/e2e/framework/deployment_util.go @@ -243,6 +243,9 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma }, Spec: apps.DeploymentSpec{ Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: podLabels, + }, Template: 
v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: podLabels, diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 040ca9b3ffe..2ca7bbe5525 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -113,6 +113,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolume By("Creating a deployment with the provisioned volume") deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "") + Expect(err).NotTo(HaveOccurred(), "Failed creating deployment %v", err) defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{}) By("Expanding current pvc") From 354cfcf618624a8a8e7c0170cc80d8b3646f2db5 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 24 May 2018 15:57:21 -0400 Subject: [PATCH 202/307] Remove feature tags from e2e test for resize --- test/e2e/storage/mounted_volume_resize.go | 2 +- test/e2e/storage/volume_expand.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 2ca7bbe5525..d7eedfc8fe4 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" ) -var _ = utils.SIGDescribe("Mounted volume expand [Feature:ExpandPersistentVolumes] [Slow]", func() { +var _ = utils.SIGDescribe("Mounted volume expand[Slow]", func() { var ( c clientset.Interface ns string diff --git a/test/e2e/storage/volume_expand.go b/test/e2e/storage/volume_expand.go index 231b50ca35a..8049d1eda0c 100644 --- a/test/e2e/storage/volume_expand.go +++ b/test/e2e/storage/volume_expand.go @@ -39,7 +39,7 @@ const ( totalResizeWaitPeriod = 20 * time.Minute ) -var _ = utils.SIGDescribe("Volume expand [Feature:ExpandPersistentVolumes] [Slow]", func() { +var _ = utils.SIGDescribe("Volume expand [Slow]", func() { var ( c clientset.Interface ns string From 383872615dfa973ea8965343424e3dc771bd818d Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Thu, 17 May 2018 17:27:44 +0200 Subject: [PATCH 203/307] Remove kubectl reapers --- hack/make-rules/test-cmd-util.sh | 5 +- pkg/kubectl/BUILD | 12 - pkg/kubectl/cmd/BUILD | 4 +- pkg/kubectl/cmd/apply.go | 52 +- pkg/kubectl/cmd/apply_test.go | 77 +- pkg/kubectl/cmd/delete.go | 103 +-- pkg/kubectl/cmd/delete_test.go | 37 +- pkg/kubectl/cmd/drain.go | 5 +- pkg/kubectl/cmd/replace.go | 19 +- pkg/kubectl/cmd/run.go | 10 +- pkg/kubectl/cmd/scale.go | 4 +- pkg/kubectl/cmd/util/factory.go | 2 - pkg/kubectl/cmd/util/factory_builder.go | 19 - pkg/kubectl/delete.go | 504 ----------- pkg/kubectl/delete_test.go | 837 ------------------ pkg/kubectl/scale_test.go | 55 ++ test/e2e/apps/BUILD | 3 - test/e2e/apps/cronjob.go | 7 +- test/e2e/apps/daemon_set.go | 8 +- test/e2e/apps/deployment.go | 15 +- test/e2e/apps/job.go | 7 +- .../cluster_autoscaler_scalability.go | 8 +- .../autoscaling/cluster_size_autoscaling.go | 40 +- test/e2e/common/autoscaling_utils.go | 4 +- test/e2e/framework/jobs_util.go | 11 + test/e2e/framework/rc_util.go | 5 - test/e2e/framework/service_util.go | 5 +- test/e2e/framework/test_context.go | 3 - test/e2e/framework/util.go | 44 - test/e2e/kubectl/kubectl.go | 5 +- test/e2e/network/proxy.go | 2 +- test/e2e/network/service.go | 16 +- test/e2e/node/kubelet.go | 2 +- test/e2e/node/kubelet_perf.go | 2 +- test/e2e/scalability/density.go | 20 +- 
test/e2e/scalability/load.go | 16 +- .../equivalence_cache_predicates.go | 8 +- test/e2e/scheduling/predicates.go | 2 +- test/e2e/scheduling/priorities.go | 2 +- test/e2e/scheduling/rescheduler.go | 2 +- test/e2e/scheduling/ubernetes_lite.go | 2 +- test/e2e/storage/empty_dir_wrapper.go | 2 +- test/utils/delete_resources.go | 21 - 43 files changed, 189 insertions(+), 1818 deletions(-) delete mode 100644 pkg/kubectl/delete.go delete mode 100644 pkg/kubectl/delete_test.go diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index 18ad041e2bd..82cab8a7ba3 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -1468,7 +1468,7 @@ __EOF__ # Test that we can list this new CustomResource kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" '' # Compare "old" output with experimental output and ensure both are the same - expected_output=$(kubectl get foos "${kube_flags[@]}") + expected_output=$(kubectl get foos "${kube_flags[@]}" | awk 'NF{NF--};1') actual_output=$(kubectl get foos --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1') kube::test::if_has_string "${actual_output}" "${expected_output}" @@ -1480,6 +1480,9 @@ __EOF__ kubectl delete rc frontend "${kube_flags[@]}" kubectl delete ds bind "${kube_flags[@]}" kubectl delete pod valid-pod "${kube_flags[@]}" + + set +o nounset + set +o errexit } run_kubectl_get_tests() { diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index 15a6901fc87..8cc818887be 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -12,7 +12,6 @@ go_test( "autoscale_test.go", "clusterrolebinding_test.go", "configmap_test.go", - "delete_test.go", "deployment_test.go", "env_file_test.go", "generate_test.go", @@ -40,12 +39,9 @@ go_test( "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", - "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", - "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/kubectl/util:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", @@ -71,8 +67,6 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/rest/fake:go_default_library", @@ -92,7 +86,6 @@ go_library( "clusterrolebinding.go", "conditions.go", "configmap.go", - "delete.go", "deployment.go", "doc.go", "env_file.go", @@ -123,19 +116,15 @@ go_library( "//pkg/api/pod:go_default_library", "//pkg/api/v1/pod:go_default_library", "//pkg/apis/apps:go_default_library", - "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", 
"//pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", "//pkg/controller/deployment/util:go_default_library", "//pkg/credentialprovider:go_default_library", "//pkg/kubectl/apps:go_default_library", - "//pkg/kubectl/cmd/scalejob:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/util:go_default_library", "//pkg/kubectl/util/hash:go_default_library", @@ -171,7 +160,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index ca7d1d13cc8..271c47b5cde 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -197,7 +197,6 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/kubectl:go_default_library", "//pkg/kubectl/cmd/create:go_default_library", "//pkg/kubectl/cmd/testing:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", @@ -214,7 +213,6 @@ go_test( "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/gopkg.in/yaml.v2:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -232,9 +230,9 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch/testing:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", + "//vendor/k8s.io/client-go/dynamic/fake:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/rest/fake:go_default_library", - "//vendor/k8s.io/client-go/scale/fake:go_default_library", "//vendor/k8s.io/client-go/testing:go_default_library", "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", "//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library", diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index 93fcd795cce..fc8f67626dc 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -43,7 +43,6 @@ import ( scaleclient "k8s.io/client-go/scale" oapi "k8s.io/kube-openapi/pkg/util/proto" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -82,7 +81,6 @@ type ApplyOptions struct { Mapper meta.RESTMapper Scaler scaleclient.ScalesGetter DynamicClient dynamic.Interface - ClientSetFunc func() 
(internalclientset.Interface, error) OpenAPISchema openapi.Resources Namespace string @@ -215,7 +213,6 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { o.ShouldIncludeUninitialized = cmdutil.ShouldIncludeUninitialized(cmd, o.Prune) o.OpenAPISchema, _ = f.OpenAPISchema() - o.ClientSetFunc = f.ClientSet o.Validator, err = f.Validator(cmdutil.GetFlagBool(cmd, "validate")) o.Builder = f.NewBuilder() o.Mapper, err = f.ToRESTMapper() @@ -406,7 +403,6 @@ func (o *ApplyOptions) Run() error { mapping: info.Mapping, helper: helper, dynamicClient: o.DynamicClient, - clientsetFunc: o.ClientSetFunc, overwrite: o.Overwrite, backOff: clockwork.NewRealClock(), force: o.DeleteOptions.ForceDeletion, @@ -414,7 +410,6 @@ func (o *ApplyOptions) Run() error { timeout: o.DeleteOptions.Timeout, gracePeriod: o.DeleteOptions.GracePeriod, openapiSchema: openapiSchema, - scaleClient: o.Scaler, } patchBytes, patchedObject, err := patcher.patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut) @@ -491,7 +486,6 @@ func (o *ApplyOptions) Run() error { p := pruner{ mapper: o.Mapper, dynamicClient: o.DynamicClient, - clientsetFunc: o.ClientSetFunc, labelSelector: o.Selector, visitedUids: visitedUids, @@ -580,7 +574,6 @@ func getRESTMappings(mapper meta.RESTMapper, pruneResources *[]pruneResource) (n type pruner struct { mapper meta.RESTMapper dynamicClient dynamic.Interface - clientsetFunc func() (internalclientset.Interface, error) visitedUids sets.String labelSelector string @@ -630,7 +623,7 @@ func (p *pruner) prune(namespace string, mapping *meta.RESTMapping, includeUnini } name := metadata.GetName() if !p.dryRun { - if err := p.delete(namespace, name, mapping, p.scaler); err != nil { + if err := p.delete(namespace, name, mapping); err != nil { return err } } @@ -644,44 +637,31 @@ func (p *pruner) prune(namespace string, mapping *meta.RESTMapping, includeUnini return nil } -func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping, scaleClient scaleclient.ScalesGetter) error { - return runDelete(namespace, name, mapping, p.dynamicClient, p.cascade, p.gracePeriod, p.clientsetFunc, scaleClient) +func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping) error { + return runDelete(namespace, name, mapping, p.dynamicClient, p.cascade, p.gracePeriod) } -func runDelete(namespace, name string, mapping *meta.RESTMapping, c dynamic.Interface, cascade bool, gracePeriod int, clientsetFunc func() (internalclientset.Interface, error), scaleClient scaleclient.ScalesGetter) error { - if !cascade { - return c.Resource(mapping.Resource).Namespace(namespace).Delete(name, nil) - } - cs, err := clientsetFunc() - if err != nil { - return err - } - r, err := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), cs, scaleClient) - if err != nil { - if _, ok := err.(*kubectl.NoSuchReaperError); !ok { - return err - } - return c.Resource(mapping.Resource).Namespace(namespace).Delete(name, nil) - } - var options *metav1.DeleteOptions +func runDelete(namespace, name string, mapping *meta.RESTMapping, c dynamic.Interface, cascade bool, gracePeriod int) error { + options := &metav1.DeleteOptions{} if gracePeriod >= 0 { options = metav1.NewDeleteOptions(int64(gracePeriod)) } - if err := r.Stop(namespace, name, 2*time.Minute, options); err != nil { - return err + policy := metav1.DeletePropagationForeground + if !cascade { + policy = metav1.DeletePropagationOrphan } - return nil + options.PropagationPolicy = &policy + return 
c.Resource(mapping.Resource).Namespace(namespace).Delete(name, options) } func (p *patcher) delete(namespace, name string) error { - return runDelete(namespace, name, p.mapping, p.dynamicClient, p.cascade, p.gracePeriod, p.clientsetFunc, p.scaleClient) + return runDelete(namespace, name, p.mapping, p.dynamicClient, p.cascade, p.gracePeriod) } type patcher struct { mapping *meta.RESTMapping helper *resource.Helper dynamicClient dynamic.Interface - clientsetFunc func() (internalclientset.Interface, error) overwrite bool backOff clockwork.Clock @@ -692,7 +672,6 @@ type patcher struct { gracePeriod int openapiSchema openapi.Resources - scaleClient scaleclient.ScalesGetter } func (p *patcher) patchSimple(obj runtime.Object, modified []byte, source, namespace, name string, errOut io.Writer) ([]byte, runtime.Object, error) { @@ -790,17 +769,16 @@ func (p *patcher) patch(current runtime.Object, modified []byte, source, namespa } func (p *patcher) deleteAndCreate(original runtime.Object, modified []byte, namespace, name string) ([]byte, runtime.Object, error) { - err := p.delete(namespace, name) - if err != nil { + if err := p.delete(namespace, name); err != nil { return modified, nil, err } - err = wait.PollImmediate(kubectl.Interval, p.timeout, func() (bool, error) { + // TODO: use wait + if err := wait.PollImmediate(1*time.Second, p.timeout, func() (bool, error) { if _, err := p.helper.Get(namespace, name, false); !errors.IsNotFound(err) { return false, err } return true, nil - }) - if err != nil { + }); err != nil { return modified, nil, err } versionedObject, _, err := unstructured.UnstructuredJSONScheme.Decode(modified, nil, nil) diff --git a/pkg/kubectl/cmd/apply_test.go b/pkg/kubectl/cmd/apply_test.go index 52593f2391b..abc4df764c0 100644 --- a/pkg/kubectl/cmd/apply_test.go +++ b/pkg/kubectl/cmd/apply_test.go @@ -31,19 +31,17 @@ import ( "github.com/spf13/cobra" - autoscalingv1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" kubeerr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" sptest "k8s.io/apimachinery/pkg/util/strategicpatch/testing" + dynamicfakeclient "k8s.io/client-go/dynamic/fake" restclient "k8s.io/client-go/rest" "k8s.io/client-go/rest/fake" - fakescale "k8s.io/client-go/scale/fake" - testcore "k8s.io/client-go/testing" + clienttesting "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" api "k8s.io/kubernetes/pkg/apis/core" @@ -1211,18 +1209,18 @@ func checkPatchString(t *testing.T, req *http.Request) { func TestForceApply(t *testing.T) { initTestErrorHandler(t) + scheme := runtime.NewScheme() nameRC, currentRC := readAndAnnotateReplicationController(t, filenameRC) pathRC := "/namespaces/test/replicationcontrollers/" + nameRC pathRCList := "/namespaces/test/replicationcontrollers" expected := map[string]int{ - "getOk": 7, + "getOk": 6, "getNotFound": 1, - "getList": 1, + "getList": 0, "patch": 6, "delete": 1, "post": 1, } - scaleClientExpected := []string{"get", "update", "get", "get"} for _, fn := range testingOpenAPISchemaFns { t.Run("test apply with --force", func(t *testing.T) { @@ -1282,10 +1280,6 @@ func TestForceApply(t *testing.T) { } t.Fatalf("unexpected request: %#v after %v tries\n%#v", req.URL, counts["patch"], req) return nil, nil - case strings.HasSuffix(p, pathRC) && m == "DELETE": - counts["delete"]++ - 
deleted = true - return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte{}))}, nil case strings.HasSuffix(p, pathRC) && m == "PUT": counts["put"]++ bodyRC := ioutil.NopCloser(bytes.NewReader(currentRC)) @@ -1303,43 +1297,18 @@ func TestForceApply(t *testing.T) { } }), } - newReplicas := int32(3) - scaleClient := &fakescale.FakeScaleClient{} - scaleClient.AddReactor("get", "replicationcontrollers", func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { - action := rawAction.(testcore.GetAction) - if action.GetName() != "test-rc" { - return true, nil, fmt.Errorf("expected = test-rc, got = %s", action.GetName()) + fakeDynamicClient := dynamicfakeclient.NewSimpleDynamicClient(scheme) + fakeDynamicClient.PrependReactor("delete", "replicationcontrollers", func(action clienttesting.Action) (bool, runtime.Object, error) { + if deleteAction, ok := action.(clienttesting.DeleteAction); ok { + if deleteAction.GetName() == nameRC { + counts["delete"]++ + deleted = true + return true, nil, nil + } } - obj := &autoscalingv1.Scale{ - ObjectMeta: metav1.ObjectMeta{ - Name: action.GetName(), - Namespace: action.GetNamespace(), - }, - Spec: autoscalingv1.ScaleSpec{ - Replicas: newReplicas, - }, - } - return true, obj, nil + return false, nil, nil }) - scaleClient.AddReactor("update", "replicationcontrollers", func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { - action := rawAction.(testcore.UpdateAction) - obj := action.GetObject().(*autoscalingv1.Scale) - if obj.Name != "test-rc" { - return true, nil, fmt.Errorf("expected = test-rc, got = %s", obj.Name) - } - newReplicas = obj.Spec.Replicas - return true, &autoscalingv1.Scale{ - ObjectMeta: metav1.ObjectMeta{ - Name: obj.Name, - Namespace: action.GetNamespace(), - }, - Spec: autoscalingv1.ScaleSpec{ - Replicas: newReplicas, - }, - }, nil - }) - - tf.ScaleGetter = scaleClient + tf.FakeDynamicClient = fakeDynamicClient tf.OpenAPISchemaFunc = fn tf.Client = tf.UnstructuredClient tf.ClientConfigVal = &restclient.Config{} @@ -1364,22 +1333,6 @@ func TestForceApply(t *testing.T) { if errBuf.String() != "" { t.Fatalf("unexpected error output: %s", errBuf.String()) } - - scale, err := scaleClient.Scales(tf.Namespace).Get(schema.GroupResource{Group: "", Resource: "replicationcontrollers"}, nameRC) - if err != nil { - t.Error(err) - } - if scale.Spec.Replicas != 0 { - t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) - } - if len(scaleClient.Actions()) != len(scaleClientExpected) { - t.Fatalf("a fake scale client has unexpected amout of API calls, wanted = %d, got = %d", len(scaleClientExpected), len(scaleClient.Actions())) - } - for index, action := range scaleClient.Actions() { - if scaleClientExpected[index] != action.GetVerb() { - t.Errorf("unexpected API method called on a fake scale client, wanted = %s, got = %s at index = %d", scaleClientExpected[index], action.GetVerb(), index) - } - } }) } } diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 043b6bc3a47..846f2e31761 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -27,9 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" 
kubectlwait "k8s.io/kubernetes/pkg/kubectl/cmd/wait" @@ -103,8 +101,6 @@ type DeleteOptions struct { ForceDeletion bool WaitForDeletion bool - Reaper func(mapping *meta.RESTMapping) (kubectl.Reaper, error) - GracePeriod int Timeout time.Duration @@ -128,15 +124,9 @@ func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra Example: delete_example, Run: func(cmd *cobra.Command, args []string) { o := deleteFlags.ToOptions(nil, streams) - if err := o.Complete(f, args, cmd); err != nil { - cmdutil.CheckErr(err) - } - if err := o.Validate(cmd); err != nil { - cmdutil.CheckErr(cmdutil.UsageErrorf(cmd, err.Error())) - } - if err := o.RunDelete(); err != nil { - cmdutil.CheckErr(err) - } + cmdutil.CheckErr(o.Complete(f, args, cmd)) + cmdutil.CheckErr(o.Validate(cmd)) + cmdutil.CheckErr(o.RunDelete()) }, SuggestFor: []string{"rm"}, } @@ -178,8 +168,6 @@ func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Co o.WaitForDeletion = b } - o.Reaper = f.Reaper - includeUninitialized := cmdutil.ShouldIncludeUninitialized(cmd, false) r := f.NewBuilder(). Unstructured(). @@ -234,62 +222,9 @@ func (o *DeleteOptions) Validate(cmd *cobra.Command) error { } func (o *DeleteOptions) RunDelete() error { - // By default use a reaper to delete all related resources. - if o.Cascade { - // TODO(juanvallejo): although o.Result can be accessed from the options - // it is also passed here so that callers of this method outside of the "delete" - // command do not have to tack it to the "delete" options as well. - // Find a cleaner way to approach this. - return o.ReapResult(o.Result, true, false) - } return o.DeleteResult(o.Result) } -func (o *DeleteOptions) ReapResult(r *resource.Result, isDefaultDelete, quiet bool) error { - found := 0 - if o.IgnoreNotFound { - r = r.IgnoreErrors(errors.IsNotFound) - } - err := r.Visit(func(info *resource.Info, err error) error { - if err != nil { - return err - } - found++ - reaper, err := o.Reaper(info.Mapping) - if err != nil { - // If there is no reaper for this resources and the user didn't explicitly ask for stop. - if kubectl.IsNoSuchReaperError(err) && isDefaultDelete { - // No client side reaper found. Let the server do cascading deletion. 
- return o.cascadingDeleteResource(info) - } - return cmdutil.AddSourceToErr("reaping", info.Source, err) - } - var options *metav1.DeleteOptions - if o.GracePeriod >= 0 { - options = metav1.NewDeleteOptions(int64(o.GracePeriod)) - } - if err := reaper.Stop(info.Namespace, info.Name, o.Timeout, options); err != nil { - return cmdutil.AddSourceToErr("stopping", info.Source, err) - } - if o.WaitForDeletion { - if err := waitForObjectDeletion(info, o.Timeout); err != nil { - return cmdutil.AddSourceToErr("stopping", info.Source, err) - } - } - if !quiet { - o.PrintObj(info) - } - return nil - }) - if err != nil { - return err - } - if found == 0 { - fmt.Fprintf(o.Out, "No resources found\n") - } - return nil -} - func (o *DeleteOptions) DeleteResult(r *resource.Result) error { found := 0 if o.IgnoreNotFound { @@ -301,12 +236,14 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { } found++ - // if we're here, it means that cascade=false (not the default), so we should orphan as requested options := &metav1.DeleteOptions{} if o.GracePeriod >= 0 { options = metav1.NewDeleteOptions(int64(o.GracePeriod)) } - policy := metav1.DeletePropagationOrphan + policy := metav1.DeletePropagationForeground + if !o.Cascade { + policy = metav1.DeletePropagationOrphan + } options.PropagationPolicy = &policy return o.deleteResource(info, options) }) @@ -349,11 +286,6 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { return err } -func (o *DeleteOptions) cascadingDeleteResource(info *resource.Info) error { - policy := metav1.DeletePropagationForeground - return o.deleteResource(info, &metav1.DeleteOptions{PropagationPolicy: &policy}) -} - func (o *DeleteOptions) deleteResource(info *resource.Info, deleteOptions *metav1.DeleteOptions) error { if err := resource.NewHelper(info.Client, info.Mapping).DeleteWithOptions(info.Namespace, info.Name, deleteOptions); err != nil { return cmdutil.AddSourceToErr("deleting", info.Source, err) @@ -386,24 +318,3 @@ func (o *DeleteOptions) PrintObj(info *resource.Info) { // understandable output by default fmt.Fprintf(o.Out, "%s \"%s\" %s\n", kindString, info.Name, operation) } - -// objectDeletionWaitInterval is the interval to wait between checks for deletion. -var objectDeletionWaitInterval = time.Second - -// waitForObjectDeletion refreshes the object, waiting until it is deleted, a timeout is reached, or -// an error is encountered. It checks once a second. -func waitForObjectDeletion(info *resource.Info, timeout time.Duration) error { - copied := *info - info = &copied - // TODO: refactor Reaper so that we can pass the "wait" option into it, and then check for UID change. 
- return wait.PollImmediate(objectDeletionWaitInterval, timeout, func() (bool, error) { - switch err := info.Get(); { - case err == nil: - return false, nil - case errors.IsNotFound(err): - return true, nil - default: - return false, err - } - }) -} diff --git a/pkg/kubectl/cmd/delete_test.go b/pkg/kubectl/cmd/delete_test.go index 55a4d915377..51fb4a887c7 100644 --- a/pkg/kubectl/cmd/delete_test.go +++ b/pkg/kubectl/cmd/delete_test.go @@ -23,19 +23,15 @@ import ( "net/http" "strings" "testing" - "time" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest/fake" "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" @@ -259,34 +255,10 @@ func TestDeleteObject(t *testing.T) { } } -type fakeReaper struct { - namespace, name string - timeout time.Duration - deleteOptions *metav1.DeleteOptions - err error -} - -func (r *fakeReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - r.namespace, r.name = namespace, name - r.timeout = timeout - r.deleteOptions = gracePeriod - return r.err -} - -type fakeReaperFactory struct { - cmdutil.Factory - reaper kubectl.Reaper -} - -func (f *fakeReaperFactory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { - return f.reaper, nil -} - func TestDeleteObjectGraceZero(t *testing.T) { initTestErrorHandler(t) pods, _, _ := testData() - objectDeletionWaitInterval = time.Millisecond count := 0 tf := cmdtesting.NewTestFactory() defer tf.Cleanup() @@ -318,10 +290,8 @@ func TestDeleteObjectGraceZero(t *testing.T) { } tf.Namespace = "test" - reaper := &fakeReaper{} - fake := &fakeReaperFactory{Factory: tf, reaper: reaper} streams, _, buf, errBuf := genericclioptions.NewTestIOStreams() - cmd := NewCmdDelete(fake, streams) + cmd := NewCmdDelete(tf, streams) cmd.Flags().Set("output", "name") cmd.Flags().Set("grace-period", "0") cmd.Run(cmd, []string{"pods/nginx"}) @@ -330,10 +300,7 @@ func TestDeleteObjectGraceZero(t *testing.T) { if buf.String() != "pod/nginx\n" { t.Errorf("unexpected output: %s\n---\n%s", buf.String(), errBuf.String()) } - if reaper.deleteOptions == nil || reaper.deleteOptions.GracePeriodSeconds == nil || *reaper.deleteOptions.GracePeriodSeconds != 1 { - t.Errorf("unexpected reaper options: %#v", reaper) - } - if count != 4 { + if count != 0 { t.Errorf("unexpected calls to GET: %d", count) } } diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 60c3442a486..8a6383f9a2e 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -43,7 +43,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" @@ -594,7 +593,7 @@ func (o *DrainOptions) evictPods(pods []corev1.Pod, policyGroupVersion string, g } } podArray := []corev1.Pod{pod} - _, err = o.waitForDelete(podArray, kubectl.Interval, time.Duration(math.MaxInt64), true, getPodFn) + _, err = o.waitForDelete(podArray, 1*time.Second, time.Duration(math.MaxInt64), true, getPodFn) if err == 
nil { doneCh <- true } else { @@ -640,7 +639,7 @@ func (o *DrainOptions) deletePods(pods []corev1.Pod, getPodFn func(namespace, na return err } } - _, err := o.waitForDelete(pods, kubectl.Interval, globalTimeout, false, getPodFn) + _, err := o.waitForDelete(pods, 1*time.Second, globalTimeout, false, getPodFn) return err } diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index 03fa62192c2..3f210c6a57a 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -21,6 +21,7 @@ import ( "io/ioutil" "os" "path/filepath" + "time" "github.com/spf13/cobra" @@ -158,7 +159,6 @@ func (o *ReplaceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] //Replace will create a resource if it doesn't exist already, so ignore not found error deleteOpts.IgnoreNotFound = true - deleteOpts.Reaper = f.Reaper if o.PrintFlags.OutputFormat != nil { deleteOpts.Output = *o.PrintFlags.OutputFormat } @@ -273,25 +273,20 @@ func (o *ReplaceOptions) forceReplace() error { return err } - var err error - - // By default use a reaper to delete all related resources. - if o.DeleteOptions.Cascade { - glog.Warningf("\"cascade\" is set, kubectl will delete and re-create all resources managed by this resource (e.g. Pods created by a ReplicationController). Consider using \"kubectl rolling-update\" if you want to update a ReplicationController together with its Pods.") - err = o.DeleteOptions.ReapResult(r, o.DeleteOptions.Cascade, false) - } else { - err = o.DeleteOptions.DeleteResult(r) + if err := o.DeleteOptions.DeleteResult(r); err != nil { + return err } + timeout := o.DeleteOptions.Timeout if timeout == 0 { - timeout = kubectl.Timeout + timeout = 5 * time.Minute } - err = r.Visit(func(info *resource.Info, err error) error { + err := r.Visit(func(info *resource.Info, err error) error { if err != nil { return err } - return wait.PollImmediate(kubectl.Interval, timeout, func() (bool, error) { + return wait.PollImmediate(1*time.Second, timeout, func() (bool, error) { if err := info.Get(); !errors.IsNotFound(err) { return false, err } diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index 5ffbc294dc9..de450080765 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -227,7 +227,6 @@ func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { deleteOpts.IgnoreNotFound = true deleteOpts.WaitForDeletion = false deleteOpts.GracePeriod = -1 - deleteOpts.Reaper = f.Reaper o.DeleteOptions = deleteOpts @@ -459,14 +458,7 @@ func (o *RunOptions) removeCreatedObjects(f cmdutil.Factory, createdObjects []*R ResourceNames(obj.Mapping.Resource.Resource+"."+obj.Mapping.Resource.Group, name). Flatten(). Do() - // Note: we pass in "true" for the "quiet" parameter because - // ReadResult will only print one thing based on the "quiet" - // flag, and that's the "pod xxx deleted" message. If they - // asked for us to remove the pod (via --rm) then telling them - // its been deleted is unnecessary since that's what they asked - // for. We should only print something if the "rm" fails. 
- err = o.DeleteOptions.ReapResult(r, true, true) - if err != nil { + if err := o.DeleteOptions.DeleteResult(r); err != nil { return err } } diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index aa5640e32dd..6b4ffb1b5ff 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -214,11 +214,11 @@ func (o *ScaleOptions) RunScale() error { } precondition := &kubectl.ScalePrecondition{Size: o.CurrentReplicas, ResourceVersion: o.ResourceVersion} - retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout) + retry := kubectl.NewRetryParams(1*time.Second, 5*time.Minute) var waitForReplicas *kubectl.RetryParams if o.Timeout != 0 { - waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout) + waitForReplicas = kubectl.NewRetryParams(1*time.Second, timeout) } counter := 0 diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index b9adb8be415..864e1b50ea8 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -105,8 +105,6 @@ type ObjectMappingFactory interface { type BuilderFactory interface { // ScaleClient gives you back scale getter ScaleClient() (scaleclient.ScalesGetter, error) - // Returns a Reaper for gracefully shutting down resources. - Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) } type factory struct { diff --git a/pkg/kubectl/cmd/util/factory_builder.go b/pkg/kubectl/cmd/util/factory_builder.go index 1bbd0ce5373..392e1d25932 100644 --- a/pkg/kubectl/cmd/util/factory_builder.go +++ b/pkg/kubectl/cmd/util/factory_builder.go @@ -19,10 +19,8 @@ limitations under the License. package util import ( - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/dynamic" scaleclient "k8s.io/client-go/scale" - "k8s.io/kubernetes/pkg/kubectl" ) type ring2Factory struct { @@ -56,20 +54,3 @@ func (f *ring2Factory) ScaleClient() (scaleclient.ScalesGetter, error) { return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil } - -func (f *ring2Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) { - clientset, clientsetErr := f.clientAccessFactory.ClientSet() - if clientsetErr != nil { - return nil, clientsetErr - } - scaler, err := f.ScaleClient() - if err != nil { - return nil, err - } - - reaper, reaperErr := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset, scaler) - if kubectl.IsNoSuchReaperError(reaperErr) { - return nil, reaperErr - } - return reaper, reaperErr -} diff --git a/pkg/kubectl/delete.go b/pkg/kubectl/delete.go deleted file mode 100644 index adeb829a14a..00000000000 --- a/pkg/kubectl/delete.go +++ /dev/null @@ -1,504 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strings" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/apimachinery/pkg/util/wait" - scaleclient "k8s.io/client-go/scale" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" - batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" - extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/kubernetes/pkg/kubectl/cmd/scalejob" -) - -const ( - Interval = time.Second * 1 - Timeout = time.Minute * 5 -) - -// A Reaper terminates an object as gracefully as possible. -type Reaper interface { - // Stop a given object within a namespace. timeout is how long we'll - // wait for the termination to be successful. gracePeriod is time given - // to an API object for it to delete itself cleanly (e.g., pod - // shutdown). It may or may not be supported by the API object. - Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error -} - -type NoSuchReaperError struct { - kind schema.GroupKind -} - -func (n *NoSuchReaperError) Error() string { - return fmt.Sprintf("no reaper has been implemented for %v", n.kind) -} - -func IsNoSuchReaperError(err error) bool { - _, ok := err.(*NoSuchReaperError) - return ok -} - -func ReaperFor(kind schema.GroupKind, c internalclientset.Interface, sc scaleclient.ScalesGetter) (Reaper, error) { - switch kind { - case api.Kind("ReplicationController"): - return &ReplicationControllerReaper{c.Core(), Interval, Timeout, sc}, nil - - case extensions.Kind("ReplicaSet"), apps.Kind("ReplicaSet"): - return &ReplicaSetReaper{c.Extensions(), Interval, Timeout, sc, schema.GroupResource{Group: kind.Group, Resource: "replicasets"}}, nil - - case extensions.Kind("DaemonSet"), apps.Kind("DaemonSet"): - return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil - - case api.Kind("Pod"): - return &PodReaper{c.Core()}, nil - - case batch.Kind("Job"): - return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil - - case apps.Kind("StatefulSet"): - return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout, sc}, nil - - case extensions.Kind("Deployment"), apps.Kind("Deployment"): - return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout, sc, schema.GroupResource{Group: kind.Group, Resource: "deployments"}}, nil - - } - return nil, &NoSuchReaperError{kind} -} - -func ReaperForReplicationController(rcClient coreclient.ReplicationControllersGetter, scaleClient scaleclient.ScalesGetter, timeout time.Duration) (Reaper, error) { - return &ReplicationControllerReaper{rcClient, Interval, timeout, scaleClient}, nil -} - -type ReplicationControllerReaper struct { - client coreclient.ReplicationControllersGetter - pollInterval, timeout time.Duration - scaleClient scaleclient.ScalesGetter -} -type 
ReplicaSetReaper struct { - client extensionsclient.ReplicaSetsGetter - pollInterval, timeout time.Duration - scaleClient scaleclient.ScalesGetter - gr schema.GroupResource -} -type DaemonSetReaper struct { - client extensionsclient.DaemonSetsGetter - pollInterval, timeout time.Duration -} -type JobReaper struct { - client batchclient.JobsGetter - podClient coreclient.PodsGetter - pollInterval, timeout time.Duration -} -type DeploymentReaper struct { - dClient extensionsclient.DeploymentsGetter - rsClient extensionsclient.ReplicaSetsGetter - pollInterval, timeout time.Duration - scaleClient scaleclient.ScalesGetter - gr schema.GroupResource -} -type PodReaper struct { - client coreclient.PodsGetter -} -type StatefulSetReaper struct { - client appsclient.StatefulSetsGetter - podClient coreclient.PodsGetter - pollInterval, timeout time.Duration - scaleClient scaleclient.ScalesGetter -} - -// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller. -func getOverlappingControllers(rcClient coreclient.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) { - rcs, err := rcClient.List(metav1.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("error getting replication controllers: %v", err) - } - var matchingRCs []api.ReplicationController - rcLabels := labels.Set(rc.Spec.Selector) - for _, controller := range rcs.Items { - newRCLabels := labels.Set(controller.Spec.Selector) - if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) { - matchingRCs = append(matchingRCs, controller) - } - } - return matchingRCs, nil -} - -func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - rc := reaper.client.ReplicationControllers(namespace) - scaler := NewScaler(reaper.scaleClient) - ctrl, err := rc.Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - if timeout == 0 { - timeout = Timeout + time.Duration(10*ctrl.Spec.Replicas)*time.Second - } - - // The rc manager will try and detect all matching rcs for a pod's labels, - // and only sync the oldest one. This means if we have a pod with labels - // [(k1: v1), (k2: v2)] and two rcs: rc1 with selector [(k1=v1)], and rc2 with selector [(k1=v1),(k2=v2)], - // the rc manager will sync the older of the two rcs. - // - // If there are rcs with a superset of labels, eg: - // deleting: (k1=v1), superset: (k2=v2, k1=v1) - // - It isn't safe to delete the rc because there could be a pod with labels - // (k1=v1) that isn't managed by the superset rc. We can't scale it down - // either, because there could be a pod (k2=v2, k1=v1) that it deletes - // causing a fight with the superset rc. - // If there are rcs with a subset of labels, eg: - // deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3) - // - Even if it's safe to delete this rc without a scale down because all it's pods - // are being controlled by the subset rc the code returns an error. - - // In theory, creating overlapping controllers is user error, so the loop below - // tries to account for this logic only in the common case, where we end up - // with multiple rcs that have an exact match on selectors. 
- - overlappingCtrls, err := getOverlappingControllers(rc, ctrl) - if err != nil { - return fmt.Errorf("error getting replication controllers: %v", err) - } - exactMatchRCs := []api.ReplicationController{} - overlapRCs := []string{} - for _, overlappingRC := range overlappingCtrls { - if len(overlappingRC.Spec.Selector) == len(ctrl.Spec.Selector) { - exactMatchRCs = append(exactMatchRCs, overlappingRC) - } else { - overlapRCs = append(overlapRCs, overlappingRC.Name) - } - } - if len(overlapRCs) > 0 { - return fmt.Errorf( - "Detected overlapping controllers for rc %v: %v, please manage deletion individually with --cascade=false.", - ctrl.Name, strings.Join(overlapRCs, ",")) - } - if len(exactMatchRCs) == 1 { - // No overlapping controllers. - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas, schema.GroupResource{Resource: "replicationcontrollers"}); err != nil && !errors.IsNotFound(err) { - return err - } - } - // Using a background deletion policy because the replication controller - // has already been scaled down. - policy := metav1.DeletePropagationBackground - deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} - return rc.Delete(name, deleteOptions) -} - -// TODO(madhusudancs): Implement it when controllerRef is implemented - https://github.com/kubernetes/kubernetes/issues/2210 -// getOverlappingReplicaSets finds ReplicaSets that this ReplicaSet overlaps, as well as ReplicaSets overlapping this ReplicaSet. -func getOverlappingReplicaSets(c extensionsclient.ReplicaSetInterface, rs *extensions.ReplicaSet) ([]extensions.ReplicaSet, []extensions.ReplicaSet, error) { - var overlappingRSs, exactMatchRSs []extensions.ReplicaSet - return overlappingRSs, exactMatchRSs, nil -} - -func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - rsc := reaper.client.ReplicaSets(namespace) - scaler := NewScaler(reaper.scaleClient) - rs, err := rsc.Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - if timeout == 0 { - timeout = Timeout + time.Duration(10*rs.Spec.Replicas)*time.Second - } - - // The ReplicaSet controller will try and detect all matching ReplicaSets - // for a pod's labels, and only sync the oldest one. This means if we have - // a pod with labels [(k1: v1), (k2: v2)] and two ReplicaSets: rs1 with - // selector [(k1=v1)], and rs2 with selector [(k1=v1),(k2=v2)], the - // ReplicaSet controller will sync the older of the two ReplicaSets. - // - // If there are ReplicaSets with a superset of labels, eg: - // deleting: (k1=v1), superset: (k2=v2, k1=v1) - // - It isn't safe to delete the ReplicaSet because there could be a pod - // with labels (k1=v1) that isn't managed by the superset ReplicaSet. - // We can't scale it down either, because there could be a pod - // (k2=v2, k1=v1) that it deletes causing a fight with the superset - // ReplicaSet. - // If there are ReplicaSets with a subset of labels, eg: - // deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3) - // - Even if it's safe to delete this ReplicaSet without a scale down because - // all it's pods are being controlled by the subset ReplicaSet the code - // returns an error. 
- - // In theory, creating overlapping ReplicaSets is user error, so the loop below - // tries to account for this logic only in the common case, where we end up - // with multiple ReplicaSets that have an exact match on selectors. - - // TODO(madhusudancs): Re-evaluate again when controllerRef is implemented - - // https://github.com/kubernetes/kubernetes/issues/2210 - overlappingRSs, exactMatchRSs, err := getOverlappingReplicaSets(rsc, rs) - if err != nil { - return fmt.Errorf("error getting ReplicaSets: %v", err) - } - - if len(overlappingRSs) > 0 { - var names []string - for _, overlappingRS := range overlappingRSs { - names = append(names, overlappingRS.Name) - } - return fmt.Errorf( - "Detected overlapping ReplicaSets for ReplicaSet %v: %v, please manage deletion individually with --cascade=false.", - rs.Name, strings.Join(names, ",")) - } - if len(exactMatchRSs) == 0 { - // No overlapping ReplicaSets. - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas, reaper.gr); err != nil && !errors.IsNotFound(err) { - return err - } - } - - // Using a background deletion policy because the replica set has already - // been scaled down. - policy := metav1.DeletePropagationBackground - deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} - return rsc.Delete(name, deleteOptions) -} - -func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - ds, err := reaper.client.DaemonSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - - // We set the nodeSelector to a random label. This label is nearly guaranteed - // to not be set on any node so the DameonSetController will start deleting - // daemon pods. Once it's done deleting the daemon pods, it's safe to delete - // the DaemonSet. - ds.Spec.Template.Spec.NodeSelector = map[string]string{ - string(uuid.NewUUID()): string(uuid.NewUUID()), - } - // force update to avoid version conflict - ds.ResourceVersion = "" - - if ds, err = reaper.client.DaemonSets(namespace).Update(ds); err != nil { - return err - } - - // Wait for the daemon set controller to kill all the daemon pods. - if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) { - updatedDS, err := reaper.client.DaemonSets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return false, nil - } - - return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil - }); err != nil { - return err - } - - // Using a background deletion policy because the daemon set has already - // been scaled down. 
- policy := metav1.DeletePropagationBackground - deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} - return reaper.client.DaemonSets(namespace).Delete(name, deleteOptions) -} - -func (reaper *StatefulSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - statefulsets := reaper.client.StatefulSets(namespace) - scaler := NewScaler(reaper.scaleClient) - ss, err := statefulsets.Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - - if timeout == 0 { - numReplicas := ss.Spec.Replicas - // See discussion of this behavior here: - // https://github.com/kubernetes/kubernetes/pull/46468#discussion_r118589512 - timeout = Timeout + time.Duration(10*numReplicas)*time.Second - } - - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForStatefulSet := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForStatefulSet, apps.Resource("statefulsets")); err != nil && !errors.IsNotFound(err) { - return err - } - - // TODO: Cleanup volumes? We don't want to accidentally delete volumes from - // stop, so just leave this up to the statefulset. - // Using a background deletion policy because the stateful set has already - // been scaled down. - policy := metav1.DeletePropagationBackground - deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} - return statefulsets.Delete(name, deleteOptions) -} - -func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - jobs := reaper.client.Jobs(namespace) - pods := reaper.podClient.Pods(namespace) - scaler := &scalejob.JobPsuedoScaler{ - JobsClient: reaper.client, - } - job, err := jobs.Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - if timeout == 0 { - // we will never have more active pods than job.Spec.Parallelism - parallelism := *job.Spec.Parallelism - timeout = Timeout + time.Duration(10*parallelism)*time.Second - } - - // TODO: handle overlapping jobs - retry := &scalejob.RetryParams{Interval: reaper.pollInterval, Timeout: reaper.timeout} - waitForJobs := &scalejob.RetryParams{Interval: reaper.pollInterval, Timeout: reaper.timeout} - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil && !errors.IsNotFound(err) { - return err - } - // at this point only dead pods are left, that should be removed - selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector) - options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := pods.List(options) - if err != nil { - return err - } - errList := []error{} - for _, pod := range podList.Items { - if err := pods.Delete(pod.Name, gracePeriod); err != nil { - // ignores the error when the pod isn't found - if !errors.IsNotFound(err) { - errList = append(errList, err) - } - } - } - if len(errList) > 0 { - return utilerrors.NewAggregate(errList) - } - // once we have all the pods removed we can safely remove the job itself. 
- policy := metav1.DeletePropagationBackground - deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} - return jobs.Delete(name, deleteOptions) -} - -func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - deployments := reaper.dClient.Deployments(namespace) - rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout, reaper.scaleClient, schema.GroupResource{Group: reaper.gr.Group, Resource: "replicasets"}} - - deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) { - // set deployment's history and scale to 0 - // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527 - rhl := int32(0) - d.Spec.RevisionHistoryLimit = &rhl - d.Spec.Replicas = 0 - d.Spec.Paused = true - }) - if err != nil { - return err - } - if deployment.Initializers != nil { - policy := metav1.DeletePropagationBackground - deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} - return deployments.Delete(name, deleteOptions) - } - - // Use observedGeneration to determine if the deployment controller noticed the pause. - if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) { - return deployments.Get(name, metav1.GetOptions{}) - }, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil { - return err - } - - // Stop all replica sets belonging to this Deployment. - rss, err := deploymentutil.ListReplicaSetsInternal(deployment, - func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { - rsList, err := reaper.rsClient.ReplicaSets(namespace).List(options) - if err != nil { - return nil, err - } - rss := make([]*extensions.ReplicaSet, 0, len(rsList.Items)) - for i := range rsList.Items { - rss = append(rss, &rsList.Items[i]) - } - return rss, nil - }) - if err != nil { - return err - } - - errList := []error{} - for _, rs := range rss { - if err := rsReaper.Stop(rs.Namespace, rs.Name, timeout, gracePeriod); err != nil { - if errors.IsNotFound(err) { - continue - } - errList = append(errList, err) - } - } - if len(errList) > 0 { - return utilerrors.NewAggregate(errList) - } - - // Delete deployment at the end. - // Note: We delete deployment at the end so that if removing RSs fails, we at least have the deployment to retry. - policy := metav1.DeletePropagationBackground - deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &policy} - return deployments.Delete(name, deleteOptions) -} - -type updateDeploymentFunc func(d *extensions.Deployment) - -func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) { - deployments := reaper.dClient.Deployments(namespace) - err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - if deployment, err = deployments.Get(name, metav1.GetOptions{}); err != nil { - return false, err - } - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(deployment) - if deployment, err = deployments.Update(deployment); err == nil { - return true, nil - } - // Retry only on update conflict. 
- if errors.IsConflict(err) { - return false, nil - } - return false, err - }) - return deployment, err -} - -func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *metav1.DeleteOptions) error { - pods := reaper.client.Pods(namespace) - _, err := pods.Get(name, metav1.GetOptions{}) - if err != nil { - return err - } - return pods.Delete(name, gracePeriod) -} diff --git a/pkg/kubectl/delete_test.go b/pkg/kubectl/delete_test.go deleted file mode 100644 index 6bc06d1404d..00000000000 --- a/pkg/kubectl/delete_test.go +++ /dev/null @@ -1,837 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "reflect" - "strings" - "testing" - "time" - - autoscalingv1 "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/api/errors" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/apimachinery/pkg/watch" - fakescale "k8s.io/client-go/scale/fake" - testcore "k8s.io/client-go/testing" - "k8s.io/kubernetes/pkg/apis/batch" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" -) - -func TestReplicationControllerStop(t *testing.T) { - name := "foo" - ns := "default" - tests := []struct { - Name string - Objs []runtime.Object - ScaledDown bool - StopError error - ExpectedActions []string - ScaleClientExpectedAction []string - }{ - { - Name: "OnlyOneRC", - Objs: []runtime.Object{ - &api.ReplicationControllerList{ // LIST - Items: []api.ReplicationController{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1"}}, - }, - }, - }, - }, - ScaledDown: true, - StopError: nil, - ExpectedActions: []string{"get", "list", "delete"}, - ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, - }, - { - Name: "NoOverlapping", - Objs: []runtime.Object{ - &api.ReplicationControllerList{ // LIST - Items: []api.ReplicationController{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "baz", - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k3": "v3"}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1"}}, - }, - }, - }, - }, - ScaledDown: true, - StopError: nil, - ExpectedActions: []string{"get", "list", "delete"}, - ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, - }, - { - Name: "OverlappingError", - Objs: []runtime.Object{ - - &api.ReplicationControllerList{ // LIST - Items: []api.ReplicationController{ - { - ObjectMeta: 
metav1.ObjectMeta{ - Name: "baz", - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1", "k2": "v2"}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1"}}, - }, - }, - }, - }, - ScaledDown: false, // scale resource was not scaled down due to overlapping controllers - StopError: fmt.Errorf("Detected overlapping controllers for rc foo: baz, please manage deletion individually with --cascade=false."), - ExpectedActions: []string{"get", "list"}, - }, - { - Name: "OverlappingButSafeDelete", - Objs: []runtime.Object{ - - &api.ReplicationControllerList{ // LIST - Items: []api.ReplicationController{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "baz", - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "zaz", - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1"}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1", "k2": "v2"}}, - }, - }, - }, - }, - ScaledDown: false, // scale resource was not scaled down due to overlapping controllers - StopError: fmt.Errorf("Detected overlapping controllers for rc foo: baz,zaz, please manage deletion individually with --cascade=false."), - ExpectedActions: []string{"get", "list"}, - }, - - { - Name: "TwoExactMatchRCs", - Objs: []runtime.Object{ - - &api.ReplicationControllerList{ // LIST - Items: []api.ReplicationController{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "zaz", - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1"}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: 0, - Selector: map[string]string{"k1": "v1"}}, - }, - }, - }, - }, - ScaledDown: false, // scale resource was not scaled down because there is still an additional replica - StopError: nil, - ExpectedActions: []string{"get", "list", "delete"}, - }, - } - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - copiedForWatch := test.Objs[0].DeepCopyObject() - scaleClient := createFakeScaleClient("replicationcontrollers", "foo", 3, nil) - fake := fake.NewSimpleClientset(test.Objs...) 
- fakeWatch := watch.NewFake() - fake.PrependWatchReactor("replicationcontrollers", testcore.DefaultWatchReactor(fakeWatch, nil)) - - go func() { - fakeWatch.Add(copiedForWatch) - }() - - reaper := ReplicationControllerReaper{fake.Core(), time.Millisecond, time.Millisecond, scaleClient} - err := reaper.Stop(ns, name, 0, nil) - if !reflect.DeepEqual(err, test.StopError) { - t.Fatalf("unexpected error: %v", err) - } - - actions := fake.Actions() - if len(actions) != len(test.ExpectedActions) { - t.Fatalf("unexpected actions: %v, expected %d actions got %d", actions, len(test.ExpectedActions), len(actions)) - } - for i, verb := range test.ExpectedActions { - if actions[i].GetResource().GroupResource() != api.Resource("replicationcontrollers") { - t.Errorf("unexpected action: %+v, expected %s-replicationController", actions[i], verb) - } - if actions[i].GetVerb() != verb { - t.Errorf("unexpected action: %+v, expected %s-replicationController", actions[i], verb) - } - } - if test.ScaledDown { - scale, err := scaleClient.Scales(ns).Get(schema.GroupResource{Group: "", Resource: "replicationcontrollers"}, name) - if err != nil { - t.Error(err) - } - if scale.Spec.Replicas != 0 { - t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) - } - actions := scaleClient.Actions() - if len(actions) != len(test.ScaleClientExpectedAction) { - t.Errorf("unexpected actions: %v, expected %d actions got %d", actions, len(test.ScaleClientExpectedAction), len(actions)) - } - for i, verb := range test.ScaleClientExpectedAction { - if actions[i].GetVerb() != verb { - t.Errorf("unexpected action: %+v, expected %s", actions[i].GetVerb(), verb) - } - } - } - }) - } -} - -func TestReplicaSetStop(t *testing.T) { - name := "foo" - ns := "default" - tests := []struct { - Name string - Objs []runtime.Object - DiscoveryResources []*metav1.APIResourceList - PathsResources map[string]runtime.Object - ScaledDown bool - StopError error - ExpectedActions []string - ScaleClientExpectedAction []string - }{ - { - Name: "OnlyOneRS", - Objs: []runtime.Object{ - &extensions.ReplicaSetList{ // LIST - TypeMeta: metav1.TypeMeta{ - APIVersion: extensions.SchemeGroupVersion.String(), - }, - Items: []extensions.ReplicaSet{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: 0, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, - }, - }, - }, - }, - }, - ScaledDown: true, - StopError: nil, - ExpectedActions: []string{"get", "delete"}, - ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, - }, - { - Name: "NoOverlapping", - Objs: []runtime.Object{ - &extensions.ReplicaSetList{ // LIST - Items: []extensions.ReplicaSet{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "baz", - Namespace: ns, - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: 0, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"k3": "v3"}}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: extensions.ReplicaSetSpec{ - Replicas: 0, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, - }, - }, - }, - }, - }, - ScaledDown: true, - StopError: nil, - ExpectedActions: []string{"get", "delete"}, - ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, - }, - // TODO: Implement tests for overlapping replica sets, similar to replication controllers, - // when the overlapping checks are implemented for replica sets. 
- } - - for _, test := range tests { - fake := fake.NewSimpleClientset(test.Objs...) - scaleClient := createFakeScaleClient("replicasets", "foo", 3, nil) - - reaper := ReplicaSetReaper{fake.Extensions(), time.Millisecond, time.Millisecond, scaleClient, schema.GroupResource{Group: "extensions", Resource: "replicasets"}} - err := reaper.Stop(ns, name, 0, nil) - if !reflect.DeepEqual(err, test.StopError) { - t.Errorf("%s unexpected error: %v", test.Name, err) - continue - } - - actions := fake.Actions() - if len(actions) != len(test.ExpectedActions) { - t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions)) - continue - } - for i, verb := range test.ExpectedActions { - if actions[i].GetResource().GroupResource() != extensions.Resource("replicasets") { - t.Errorf("%s unexpected action: %+v, expected %s-replicaSet", test.Name, actions[i], verb) - } - if actions[i].GetVerb() != verb { - t.Errorf("%s unexpected action: %+v, expected %s-replicaSet", test.Name, actions[i], verb) - } - } - if test.ScaledDown { - scale, err := scaleClient.Scales(ns).Get(schema.GroupResource{Group: "extensions", Resource: "replicasets"}, name) - if err != nil { - t.Error(err) - } - if scale.Spec.Replicas != 0 { - t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) - } - actions := scaleClient.Actions() - if len(actions) != len(test.ScaleClientExpectedAction) { - t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ScaleClientExpectedAction), len(actions)) - } - for i, verb := range test.ScaleClientExpectedAction { - if actions[i].GetVerb() != verb { - t.Errorf("%s unexpected action: %+v, expected %s", test.Name, actions[i].GetVerb(), verb) - } - } - } - } -} - -func TestJobStop(t *testing.T) { - name := "foo" - ns := "default" - zero := int32(0) - tests := []struct { - Name string - Objs []runtime.Object - StopError error - ExpectedActions []string - }{ - { - Name: "OnlyOneJob", - Objs: []runtime.Object{ - &batch.JobList{ // LIST - Items: []batch.Job{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: batch.JobSpec{ - Parallelism: &zero, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"k1": "v1"}, - }, - }, - }, - }, - }, - }, - StopError: nil, - ExpectedActions: []string{"get:jobs", "get:jobs", "update:jobs", - "get:jobs", "get:jobs", "list:pods", "delete:jobs"}, - }, - { - Name: "JobWithDeadPods", - Objs: []runtime.Object{ - &batch.JobList{ // LIST - Items: []batch.Job{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: batch.JobSpec{ - Parallelism: &zero, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"k1": "v1"}, - }, - }, - }, - }, - }, - &api.PodList{ // LIST - Items: []api.Pod{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "pod1", - Namespace: ns, - Labels: map[string]string{"k1": "v1"}, - }, - }, - }, - }, - }, - StopError: nil, - ExpectedActions: []string{"get:jobs", "get:jobs", "update:jobs", - "get:jobs", "get:jobs", "list:pods", "delete:pods", "delete:jobs"}, - }, - } - - for _, test := range tests { - fake := fake.NewSimpleClientset(test.Objs...) 
- reaper := JobReaper{fake.Batch(), fake.Core(), time.Millisecond, time.Millisecond} - err := reaper.Stop(ns, name, 0, nil) - if !reflect.DeepEqual(err, test.StopError) { - t.Errorf("%s unexpected error: %v", test.Name, err) - continue - } - - actions := fake.Actions() - if len(actions) != len(test.ExpectedActions) { - t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions)) - continue - } - for i, expAction := range test.ExpectedActions { - action := strings.Split(expAction, ":") - if actions[i].GetVerb() != action[0] { - t.Errorf("%s unexpected verb: %+v, expected %s", test.Name, actions[i], expAction) - } - if actions[i].GetResource().Resource != action[1] { - t.Errorf("%s unexpected resource: %+v, expected %s", test.Name, actions[i], expAction) - } - } - } -} - -func TestDeploymentStop(t *testing.T) { - name := "foo" - ns := "default" - deployment := extensions.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - UID: uuid.NewUUID(), - Namespace: ns, - }, - Spec: extensions.DeploymentSpec{ - Replicas: 0, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, - }, - Status: extensions.DeploymentStatus{ - Replicas: 0, - }, - } - trueVar := true - tests := []struct { - Name string - Objs []runtime.Object - ScaledDown bool - StopError error - ExpectedActions []string - ScaleClientExpectedAction []string - }{ - { - Name: "SimpleDeployment", - Objs: []runtime.Object{ - &extensions.Deployment{ // GET - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: extensions.DeploymentSpec{ - Replicas: 0, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, - }, - Status: extensions.DeploymentStatus{ - Replicas: 0, - }, - }, - }, - StopError: nil, - ExpectedActions: []string{"get:deployments", "update:deployments", - "get:deployments", "list:replicasets", "delete:deployments"}, - }, - { - Name: "Deployment with single replicaset", - Objs: []runtime.Object{ - &deployment, // GET - &extensions.ReplicaSetList{ // LIST - Items: []extensions.ReplicaSet{ - // ReplicaSet owned by this Deployment. - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - Labels: map[string]string{"k1": "v1"}, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: extensions.SchemeGroupVersion.String(), - Kind: "Deployment", - Name: deployment.Name, - UID: deployment.UID, - Controller: &trueVar, - }, - }, - }, - Spec: extensions.ReplicaSetSpec{}, - }, - // ReplicaSet owned by something else (should be ignored). - { - ObjectMeta: metav1.ObjectMeta{ - Name: "rs2", - Namespace: ns, - Labels: map[string]string{"k1": "v1"}, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: extensions.SchemeGroupVersion.String(), - Kind: "Deployment", - Name: "somethingelse", - UID: uuid.NewUUID(), - Controller: &trueVar, - }, - }, - }, - Spec: extensions.ReplicaSetSpec{}, - }, - }, - }, - }, - ScaledDown: true, - StopError: nil, - ExpectedActions: []string{"get:deployments", "update:deployments", - "get:deployments", "list:replicasets", "get:replicasets", - "delete:replicasets", "delete:deployments"}, - ScaleClientExpectedAction: []string{"get", "update", "get", "get"}, - }, - } - - for _, test := range tests { - scaleClient := createFakeScaleClient("deployments", "foo", 3, nil) - - fake := fake.NewSimpleClientset(test.Objs...) 
- reaper := DeploymentReaper{fake.Extensions(), fake.Extensions(), time.Millisecond, time.Millisecond, scaleClient, schema.GroupResource{Group: "extensions", Resource: "deployments"}} - err := reaper.Stop(ns, name, 0, nil) - if !reflect.DeepEqual(err, test.StopError) { - t.Errorf("%s unexpected error: %v", test.Name, err) - continue - } - - actions := fake.Actions() - if len(actions) != len(test.ExpectedActions) { - t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions)) - continue - } - for i, expAction := range test.ExpectedActions { - action := strings.Split(expAction, ":") - if actions[i].GetVerb() != action[0] { - t.Errorf("%s unexpected verb: %+v, expected %s", test.Name, actions[i], expAction) - } - if actions[i].GetResource().Resource != action[1] { - t.Errorf("%s unexpected resource: %+v, expected %s", test.Name, actions[i], expAction) - } - if len(action) == 3 && actions[i].GetSubresource() != action[2] { - t.Errorf("%s unexpected subresource: %+v, expected %s", test.Name, actions[i], expAction) - } - } - if test.ScaledDown { - scale, err := scaleClient.Scales(ns).Get(schema.GroupResource{Group: "extensions", Resource: "replicaset"}, name) - if err != nil { - t.Error(err) - } - if scale.Spec.Replicas != 0 { - t.Errorf("a scale subresource has unexpected number of replicas, got %d expected 0", scale.Spec.Replicas) - } - actions := scaleClient.Actions() - if len(actions) != len(test.ScaleClientExpectedAction) { - t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ScaleClientExpectedAction), len(actions)) - } - for i, verb := range test.ScaleClientExpectedAction { - if actions[i].GetVerb() != verb { - t.Errorf("%s unexpected action: %+v, expected %s", test.Name, actions[i].GetVerb(), verb) - } - } - } - } -} - -type noSuchPod struct { - coreclient.PodInterface -} - -func (c *noSuchPod) Get(name string, options metav1.GetOptions) (*api.Pod, error) { - return nil, fmt.Errorf("%s does not exist", name) -} - -type noDeletePod struct { - coreclient.PodInterface -} - -func (c *noDeletePod) Delete(name string, o *metav1.DeleteOptions) error { - return fmt.Errorf("I'm afraid I can't do that, Dave") -} - -type reaperFake struct { - *fake.Clientset - noSuchPod, noDeletePod bool -} - -func (c *reaperFake) Core() coreclient.CoreInterface { - return &reaperCoreFake{c.Clientset.Core(), c.noSuchPod, c.noDeletePod} -} - -type reaperCoreFake struct { - coreclient.CoreInterface - noSuchPod, noDeletePod bool -} - -func (c *reaperCoreFake) Pods(namespace string) coreclient.PodInterface { - pods := c.CoreInterface.Pods(namespace) - if c.noSuchPod { - return &noSuchPod{pods} - } - if c.noDeletePod { - return &noDeletePod{pods} - } - return pods -} - -func newPod() *api.Pod { - return &api.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "foo"}} -} - -func TestSimpleStop(t *testing.T) { - tests := []struct { - fake *reaperFake - kind schema.GroupKind - actions []testcore.Action - expectError bool - test string - }{ - { - fake: &reaperFake{ - Clientset: fake.NewSimpleClientset(newPod()), - }, - kind: api.Kind("Pod"), - actions: []testcore.Action{ - testcore.NewGetAction(api.Resource("pods").WithVersion(""), metav1.NamespaceDefault, "foo"), - testcore.NewDeleteAction(api.Resource("pods").WithVersion(""), metav1.NamespaceDefault, "foo"), - }, - expectError: false, - test: "stop pod succeeds", - }, - { - fake: &reaperFake{ - Clientset: fake.NewSimpleClientset(), - 
noSuchPod: true, - }, - kind: api.Kind("Pod"), - actions: []testcore.Action{}, - expectError: true, - test: "stop pod fails, no pod", - }, - { - fake: &reaperFake{ - Clientset: fake.NewSimpleClientset(newPod()), - noDeletePod: true, - }, - kind: api.Kind("Pod"), - actions: []testcore.Action{ - testcore.NewGetAction(api.Resource("pods").WithVersion(""), metav1.NamespaceDefault, "foo"), - }, - expectError: true, - test: "stop pod fails, can't delete", - }, - } - for _, test := range tests { - fake := test.fake - reaper, err := ReaperFor(test.kind, fake, nil) - if err != nil { - t.Errorf("unexpected error: %v (%s)", err, test.test) - } - err = reaper.Stop("default", "foo", 0, nil) - if err != nil && !test.expectError { - t.Errorf("unexpected error: %v (%s)", err, test.test) - } - if err == nil { - if test.expectError { - t.Errorf("unexpected non-error: %v (%s)", err, test.test) - } - } - actions := fake.Actions() - if len(test.actions) != len(actions) { - t.Errorf("unexpected actions: %v; expected %v (%s)", actions, test.actions, test.test) - } - for i, action := range actions { - testAction := test.actions[i] - if action != testAction { - t.Errorf("unexpected action: %#v; expected %v (%s)", action, testAction, test.test) - } - } - } -} - -func TestDeploymentNotFoundError(t *testing.T) { - name := "foo" - ns := "default" - deployment := &extensions.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: extensions.DeploymentSpec{ - Replicas: 0, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, - }, - Status: extensions.DeploymentStatus{ - Replicas: 0, - }, - } - - fake := fake.NewSimpleClientset( - deployment, - &extensions.ReplicaSetList{Items: []extensions.ReplicaSet{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: extensions.ReplicaSetSpec{}, - }, - }, - }, - ) - fake.AddReactor("get", "replicasets", func(action testcore.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.NewNotFound(api.Resource("replicaset"), "doesn't-matter") - }) - - reaper := DeploymentReaper{fake.Extensions(), fake.Extensions(), time.Millisecond, time.Millisecond, nil, schema.GroupResource{}} - if err := reaper.Stop(ns, name, 0, nil); err != nil { - t.Fatalf("unexpected error: %#v", err) - } -} - -func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*kerrors.StatusError) *fakescale.FakeScaleClient { - shouldReturnAnError := func(verb string) (*kerrors.StatusError, bool) { - if anError, anErrorExists := errorsOnVerb[verb]; anErrorExists { - return anError, true - } - return &kerrors.StatusError{}, false - } - newReplicas := int32(replicas) - scaleClient := &fakescale.FakeScaleClient{} - scaleClient.AddReactor("get", resource, func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { - action := rawAction.(testcore.GetAction) - if action.GetName() != resourceName { - return true, nil, fmt.Errorf("expected = %s, got = %s", resourceName, action.GetName()) - } - if anError, should := shouldReturnAnError("get"); should { - return true, nil, anError - } - obj := &autoscalingv1.Scale{ - ObjectMeta: metav1.ObjectMeta{ - Name: action.GetName(), - Namespace: action.GetNamespace(), - }, - Spec: autoscalingv1.ScaleSpec{ - Replicas: newReplicas, - }, - } - return true, obj, nil - }) - scaleClient.AddReactor("update", resource, func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { - action := 
rawAction.(testcore.UpdateAction) - obj := action.GetObject().(*autoscalingv1.Scale) - if obj.Name != resourceName { - return true, nil, fmt.Errorf("expected = %s, got = %s", resourceName, obj.Name) - } - if anError, should := shouldReturnAnError("update"); should { - return true, nil, anError - } - newReplicas = obj.Spec.Replicas - return true, &autoscalingv1.Scale{ - ObjectMeta: metav1.ObjectMeta{ - Name: obj.Name, - Namespace: action.GetNamespace(), - }, - Spec: autoscalingv1.ScaleSpec{ - Replicas: newReplicas, - }, - }, nil - }) - return scaleClient -} diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index e6a5106d56c..2e79ce7daa3 100644 --- a/pkg/kubectl/scale_test.go +++ b/pkg/kubectl/scale_test.go @@ -21,10 +21,14 @@ import ( "testing" "time" + autoscalingv1 "k8s.io/api/autoscaling/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/scale" + fakescale "k8s.io/client-go/scale/fake" + testcore "k8s.io/client-go/testing" api "k8s.io/kubernetes/pkg/apis/core" ) @@ -626,3 +630,54 @@ func TestGenericScale(t *testing.T) { }) } } + +func createFakeScaleClient(resource string, resourceName string, replicas int, errorsOnVerb map[string]*kerrors.StatusError) *fakescale.FakeScaleClient { + shouldReturnAnError := func(verb string) (*kerrors.StatusError, bool) { + if anError, anErrorExists := errorsOnVerb[verb]; anErrorExists { + return anError, true + } + return &kerrors.StatusError{}, false + } + newReplicas := int32(replicas) + scaleClient := &fakescale.FakeScaleClient{} + scaleClient.AddReactor("get", resource, func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { + action := rawAction.(testcore.GetAction) + if action.GetName() != resourceName { + return true, nil, fmt.Errorf("expected = %s, got = %s", resourceName, action.GetName()) + } + if anError, should := shouldReturnAnError("get"); should { + return true, nil, anError + } + obj := &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: action.GetName(), + Namespace: action.GetNamespace(), + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: newReplicas, + }, + } + return true, obj, nil + }) + scaleClient.AddReactor("update", resource, func(rawAction testcore.Action) (handled bool, ret runtime.Object, err error) { + action := rawAction.(testcore.UpdateAction) + obj := action.GetObject().(*autoscalingv1.Scale) + if obj.Name != resourceName { + return true, nil, fmt.Errorf("expected = %s, got = %s", resourceName, obj.Name) + } + if anError, should := shouldReturnAnError("update"); should { + return true, nil, anError + } + newReplicas = obj.Spec.Replicas + return true, &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: obj.Name, + Namespace: action.GetNamespace(), + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: newReplicas, + }, + }, nil + }) + return scaleClient +} diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index 593b05aae77..a0cf78546b2 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -29,14 +29,12 @@ go_library( "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/controller/daemon:go_default_library", "//pkg/controller/deployment/util:go_default_library", "//pkg/controller/job:go_default_library", "//pkg/controller/nodelifecycle:go_default_library", 
"//pkg/controller/replicaset:go_default_library", "//pkg/controller/replication:go_default_library", - "//pkg/kubectl:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/scheduler/schedulercache:go_default_library", "//pkg/util/pointer:go_default_library", @@ -67,7 +65,6 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/scale:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", ], ) diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index 7a31d5c0234..00c7e81d625 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -33,7 +33,6 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" batchinternal "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/controller/job" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/test/e2e/framework" ) @@ -207,11 +206,7 @@ var _ = SIGDescribe("CronJob", func() { By("Deleting the job") job := cronJob.Status.Active[0] - reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset, f.ScalesGetter) - Expect(err).NotTo(HaveOccurred()) - timeout := 1 * time.Minute - err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0)) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) By("Ensuring job was deleted") _, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index b77cc0ddcdf..c58b95e600a 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -36,7 +36,6 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/controller/daemon" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/scheduler/schedulercache" "k8s.io/kubernetes/test/e2e/framework" @@ -69,11 +68,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets") if daemonsets != nil && len(daemonsets.Items) > 0 { for _, ds := range daemonsets.Items { - By(fmt.Sprintf("Deleting DaemonSet %q with reaper", ds.Name)) - dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset, f.ScalesGetter) - Expect(err).NotTo(HaveOccurred()) - err = dsReaper.Stop(f.Namespace.Name, ds.Name, 0, nil) - Expect(err).NotTo(HaveOccurred()) + By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name)) + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name)) err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds)) Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped") } diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 8ffd6bf74a0..d8e89d5cb38 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -36,11 +36,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" - scaleclient "k8s.io/client-go/scale" appsinternal "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - 
"k8s.io/kubernetes/pkg/kubectl" utilpointer "k8s.io/kubernetes/pkg/util/pointer" "k8s.io/kubernetes/test/e2e/framework" testutil "k8s.io/kubernetes/test/utils" @@ -160,17 +157,12 @@ func newDeploymentRollback(name string, annotations map[string]string, revision } } -func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, scaleClient scaleclient.ScalesGetter, ns, deploymentName string) { +func stopDeployment(c clientset.Interface, ns, deploymentName string) { deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) framework.Logf("Deleting deployment %s", deploymentName) - reaper, err := kubectl.ReaperFor(appsinternal.Kind("Deployment"), internalClient, scaleClient) - Expect(err).NotTo(HaveOccurred()) - timeout := 1 * time.Minute - - err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0)) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)) framework.Logf("Ensuring deployment %s was deleted", deploymentName) _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{}) @@ -203,7 +195,6 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte func testDeleteDeployment(f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet - internalClient := f.InternalClientset deploymentName := "test-new-deployment" podLabels := map[string]string{"name": NginxImageName} @@ -226,7 +217,7 @@ func testDeleteDeployment(f *framework.Framework) { newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1()) Expect(err).NotTo(HaveOccurred()) Expect(newRS).NotTo(Equal(nilRs)) - stopDeployment(c, internalClient, f.ScalesGetter, ns, deploymentName) + stopDeployment(c, ns, deploymentName) } func testRollingUpdateDeployment(f *framework.Framework) { diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 8543c29d599..d10a444f07d 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" batchinternal "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" @@ -111,11 +110,7 @@ var _ = SIGDescribe("Job", func() { Expect(err).NotTo(HaveOccurred()) By("delete a job") - reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset, f.ScalesGetter) - Expect(err).NotTo(HaveOccurred()) - timeout := 1 * time.Minute - err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0)) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name)) By("Ensuring job was deleted") _, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name) diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index 5ebcb978367..f726f2ddf4a 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -348,7 +348,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun timeToWait := 5 * time.Minute podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait) framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, podsConfig.Name) + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name) // Ensure that no new nodes have been added so far. Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount)) @@ -418,7 +418,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC } timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes)) return func() error { - return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, config.extraPods.Name) + return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name) } } @@ -501,7 +501,7 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p err := framework.RunRC(*config) framework.ExpectNoError(err) return func() error { - return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id) + return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id) } } @@ -541,7 +541,7 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist framework.ExpectNoError(framework.RunRC(*rcConfig)) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet)) return func() error { - return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id) + return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id) } } diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 139b0c8aa81..51015b95239 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -169,7 +169,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() { By("Creating unschedulable pod") ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") 
+ defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") By("Waiting for scale up hoping it won't happen") // Verify that the appropriate event was generated @@ -196,7 +196,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { simpleScaleUpTest := func(unready int) { ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet, @@ -269,7 +269,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs") ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, @@ -296,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { defer disableAutoscaler(gpuPoolName, 0, 1) Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1)) - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc") + framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc") framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size == nodeCount }, scaleDownTimeout)) @@ -319,7 +319,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Schedule more pods than can fit and wait for cluster to scale-up") ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool { return s.status == caOngoingScaleUpStatus @@ -362,7 +362,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Reserving 0.1x more memory than the cluster holds to trigger scale up") totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout) // Verify, that cluster size is increased @@ -386,7 +386,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() { scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "host-port") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port") framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) 
bool { return size >= nodeCount+2 }, scaleUpTimeout)) @@ -401,12 +401,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } By("starting a pod with anti-affinity on each node") framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "some-pod") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod") framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) By("scheduling extra pods with anti-affinity to existing ones") framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "extra-pod") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod") framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) @@ -420,14 +420,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { "anti-affinity": "yes", } framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "some-pod") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod") By("waiting for all pods before triggering scale up") framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) By("creating a pod requesting EmptyDir") framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "extra-pod") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod") framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout)) @@ -484,7 +484,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) defer func() { - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "some-pod") + framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod") glog.Infof("RC and pods not using volume deleted") }() @@ -497,7 +497,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { volumes := buildVolumes(pv, pvc) framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes)) defer func() { - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, pvcPodName) + framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) }() @@ -602,7 +602,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { defer removeLabels(registeredNodes) framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) - framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "node-selector")) + framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector")) }) It("should scale up correct target pool 
[Feature:ClusterSizeAutoscalingScaleUp]", func() { @@ -620,7 +620,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { extraPods := extraNodes + 1 totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb)) By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout) // Apparently GKE master is restarted couple minutes after the node pool is added @@ -759,7 +759,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { By("Run a scale-up test") ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") // Verify that cluster size is increased framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, @@ -872,7 +872,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction) } else { ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, "memory-reservation") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") time.Sleep(scaleUpTimeout) currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount) @@ -1076,7 +1076,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str labelMap := map[string]string{"test_id": testID} framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0)) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, namespace, "reschedulable-pods") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods") By("Create a PodDisruptionBudget") minAvailable := intstr.FromInt(numPods - pdbSize) @@ -1523,7 +1523,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e framework.ExpectNoError(err) } return func() error { - return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, id) + return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id) } } framework.Failf("Failed to reserve memory within timeout") @@ -1929,7 +1929,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa func runReplicatedPodOnEachNodeWithCleanup(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) (func(), error) { err := runReplicatedPodOnEachNode(f, nodes, namespace, podsPerNode, id, labels, memRequest) return func() { - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, namespace, id) + framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, id) }, err } diff --git 
a/test/e2e/common/autoscaling_utils.go b/test/e2e/common/autoscaling_utils.go index 2b467946dbc..edd8a5370d4 100644 --- a/test/e2e/common/autoscaling_utils.go +++ b/test/e2e/common/autoscaling_utils.go @@ -414,9 +414,9 @@ func (rc *ResourceConsumer) CleanUp() { // Wait some time to ensure all child goroutines are finished. time.Sleep(10 * time.Second) kind := rc.kind.GroupKind() - framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, rc.scaleClient, kind, rc.nsName, rc.name)) + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name)) framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil)) - framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, rc.scaleClient, api.Kind("ReplicationController"), rc.nsName, rc.controllerName)) + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, api.Kind("ReplicationController"), rc.nsName, rc.controllerName)) framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil)) } diff --git a/test/e2e/framework/jobs_util.go b/test/e2e/framework/jobs_util.go index 42d475df537..c5c97f1c772 100644 --- a/test/e2e/framework/jobs_util.go +++ b/test/e2e/framework/jobs_util.go @@ -210,6 +210,17 @@ func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.D }) } +// WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed. +func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error { + return wait.Poll(Poll, timeout, func() (bool, error) { + _, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return true, nil + } + return false, err + }) +} + // CheckForAllJobPodsRunning uses c to check in the Job named jobName in ns is running. If the returned error is not // nil the returned bool is true if the Job is running. func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) { diff --git a/test/e2e/framework/rc_util.go b/test/e2e/framework/rc_util.go index d05e27ba253..ff2827e35c0 100644 --- a/test/e2e/framework/rc_util.go +++ b/test/e2e/framework/rc_util.go @@ -31,7 +31,6 @@ import ( clientset "k8s.io/client-go/kubernetes" scaleclient "k8s.io/client-go/scale" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" testutils "k8s.io/kubernetes/test/utils" ) @@ -153,10 +152,6 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error { return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name) } -func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, scaleClient scaleclient.ScalesGetter, ns, name string) error { - return DeleteResourceAndPods(clientset, internalClientset, scaleClient, api.Kind("ReplicationController"), ns, name) -} - func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error { return ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers")) } diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 1748c3f881b..012ce7630c4 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -47,7 +47,6 @@ import ( . 
"github.com/onsi/ginkgo" . "github.com/onsi/gomega" - scaleclient "k8s.io/client-go/scale" ) const ( @@ -1261,8 +1260,8 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli return podNames, serviceIP, nil } -func StopServeHostnameService(clientset clientset.Interface, internalClientset internalclientset.Interface, scaleClient scaleclient.ScalesGetter, ns, name string) error { - if err := DeleteRCAndPods(clientset, internalClientset, scaleClient, ns, name); err != nil { +func StopServeHostnameService(clientset clientset.Interface, ns, name string) error { + if err := DeleteRCAndWaitForGC(clientset, ns, name); err != nil { return err } if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil { diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index e3158e8ca4f..cb7109f6acc 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -109,8 +109,6 @@ type TestContextType struct { DisableLogDump bool // Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty. LogexporterGCSPath string - // If the garbage collector is enabled in the kube-apiserver and kube-controller-manager. - GarbageCollectorEnabled bool // featureGates is a map of feature names to bools that enable or disable alpha/experimental features. FeatureGates map[string]bool // Node e2e specific test context @@ -276,7 +274,6 @@ func RegisterClusterFlags() { flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.") flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.") flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.") - flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", true, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.") } // Register flags specific to the node e2e test suite. 
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 618a1c6520a..15a1e64a2bd 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3042,50 +3042,6 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) { } } -// DeleteResourceAndPods deletes a given resource and all pods it spawned -func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, scaleClient scaleclient.ScalesGetter, kind schema.GroupKind, ns, name string) error { - By(fmt.Sprintf("deleting %v %s in namespace %s", kind, name, ns)) - - rtObject, err := getRuntimeObjectForKind(clientset, kind, ns, name) - if err != nil { - if apierrs.IsNotFound(err) { - Logf("%v %s not found: %v", kind, name, err) - return nil - } - return err - } - selector, err := getSelectorFromRuntimeObject(rtObject) - if err != nil { - return err - } - ps, err := testutils.NewPodStore(clientset, ns, selector, fields.Everything()) - if err != nil { - return err - } - defer ps.Stop() - startTime := time.Now() - if err := testutils.DeleteResourceUsingReaperWithRetries(internalClientset, kind, ns, name, nil, scaleClient); err != nil { - return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err) - } - deleteTime := time.Since(startTime) - Logf("Deleting %v %s took: %v", kind, name, deleteTime) - err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute) - if err != nil { - return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err) - } - terminatePodTime := time.Since(startTime) - deleteTime - Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime) - // this is to relieve namespace controller's pressure when deleting the - // namespace after a test. - err = waitForPodsGone(ps, 100*time.Millisecond, 10*time.Minute) - if err != nil { - return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) - } - gcPodTime := time.Since(startTime) - terminatePodTime - Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime) - return nil -} - // DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods. 
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error { By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns)) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index b992ba78de1..254391188d5 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -1545,8 +1545,11 @@ metadata: Expect(runOutput).To(ContainSubstring("abcd1234")) Expect(runOutput).To(ContainSubstring("stdin closed")) + err := framework.WaitForJobGone(c, ns, jobName, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + By("verifying the job " + jobName + " was deleted") - _, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + _, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) Expect(err).To(HaveOccurred()) Expect(apierrs.IsNotFound(err)).To(BeTrue()) }) diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index b6be82183d9..33de281c543 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -161,7 +161,7 @@ var _ = SIGDescribe("Proxy", func() { CreatedPods: &pods, } Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, cfg.Name) + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name) Expect(framework.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred()) diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index ffbfe670a1d..09746b44072 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -339,7 +339,7 @@ var _ = SIGDescribe("Services", func() { // Stop service 1 and make sure it is gone. 
By("stopping service1") - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "service1")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1")) By("verifying service1 is not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) @@ -373,13 +373,13 @@ var _ = SIGDescribe("Services", func() { svc2 := "service2" defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, svc1)) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1)) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService(svc1), ns, numPods) Expect(err).NotTo(HaveOccurred()) defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, svc2)) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2)) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService(svc2), ns, numPods) Expect(err).NotTo(HaveOccurred()) @@ -426,7 +426,7 @@ var _ = SIGDescribe("Services", func() { numPods, servicePort := 3, 80 defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "service1")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1")) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service1"), ns, numPods) Expect(err).NotTo(HaveOccurred()) @@ -453,7 +453,7 @@ var _ = SIGDescribe("Services", func() { // Create a new service and check if it's not reusing IP. 
defer func() { - framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "service2")) + framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2")) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, internalClientset, getServeHostnameService("service2"), ns, numPods) Expect(err).NotTo(HaveOccurred()) @@ -1753,7 +1753,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess) Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(HaveOccurred()) } - framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, namespace, serviceName)) + framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName)) } }) @@ -1968,7 +1968,7 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf _, _, err := framework.StartServeHostnameService(cs, f.InternalClientset, svc, ns, numPods) Expect(err).NotTo(HaveOccurred()) defer func() { - framework.StopServeHostnameService(cs, f.InternalClientset, f.ScalesGetter, ns, serviceName) + framework.StopServeHostnameService(cs, ns, serviceName) }() jig := framework.NewServiceTestJig(cs, serviceName) svc, err = jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{}) @@ -2023,7 +2023,7 @@ func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) defer func() { - framework.StopServeHostnameService(cs, f.InternalClientset, f.ScalesGetter, ns, serviceName) + framework.StopServeHostnameService(cs, ns, serviceName) lb := cloudprovider.GetLoadBalancerName(svc) framework.Logf("cleaning gce resource for %s", lb) framework.CleanupServiceGCEResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone) diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index fb799e42eb8..1953681a6e5 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -334,7 +334,7 @@ var _ = SIGDescribe("kubelet", func() { } By("Deleting the RC") - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, rcName) + framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName) // Check that the pods really are gone by querying /runningpods on the // node. The /runningpods handler checks the container runtime (or its // cache) and returns a list of running pods. 
Some possible causes of diff --git a/test/e2e/node/kubelet_perf.go b/test/e2e/node/kubelet_perf.go index cef6610ff84..029155da6c5 100644 --- a/test/e2e/node/kubelet_perf.go +++ b/test/e2e/node/kubelet_perf.go @@ -118,7 +118,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames verifyCPULimits(expectedCPU, cpuSummary) By("Deleting the RC") - framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, rcName) + framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName) } func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index 3476d071512..58e792b09fb 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -116,10 +116,8 @@ func (dtc *DensityTestConfig) deleteConfigMaps(testPhase *timer.Phase) { func (dtc *DensityTestConfig) deleteDaemonSets(numberOfClients int, testPhase *timer.Phase) { defer testPhase.End() for i := range dtc.DaemonConfigs { - framework.ExpectNoError(framework.DeleteResourceAndPods( + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC( dtc.ClientSets[i%numberOfClients], - dtc.InternalClientsets[i%numberOfClients], - dtc.ScaleClients[i%numberOfClients], extensions.Kind("DaemonSet"), dtc.DaemonConfigs[i].Namespace, dtc.DaemonConfigs[i].Name, @@ -320,15 +318,9 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha name := dtc.Configs[i].GetName() namespace := dtc.Configs[i].GetNamespace() kind := dtc.Configs[i].GetKind() - if framework.TestContext.GarbageCollectorEnabled && kindSupportsGarbageCollector(kind) { - By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind)) - err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name) - framework.ExpectNoError(err) - } else { - By(fmt.Sprintf("Cleaning up the %v and pods", kind)) - err := framework.DeleteResourceAndPods(dtc.ClientSets[i%numberOfClients], dtc.InternalClientsets[i%numberOfClients], dtc.ScaleClients[i%numberOfClients], kind, namespace, name) - framework.ExpectNoError(err) - } + By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind)) + err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name) + framework.ExpectNoError(err) } podCleanupPhase.End() @@ -922,7 +914,3 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController"))) framework.Logf("Found pod '%s' running", name) } - -func kindSupportsGarbageCollector(kind schema.GroupKind) bool { - return kind != extensions.Kind("Deployment") && kind != batch.Kind("Job") -} diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index 31442b94d88..5dadeed360f 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -286,10 +286,8 @@ var _ = SIGDescribe("Load capacity", func() { } daemonConfig.Run() defer func(config *testutils.DaemonConfig) { - framework.ExpectNoError(framework.DeleteResourceAndPods( + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC( f.ClientSet, - f.InternalClientset, - f.ScalesGetter, extensions.Kind("DaemonSet"), config.Namespace, config.Name, @@ -694,15 +692,9 @@ func deleteResource(wg *sync.WaitGroup, config 
testutils.RunObjectConfig, deleti defer wg.Done() sleepUpTo(deletingTime) - if framework.TestContext.GarbageCollectorEnabled && config.GetKind() != extensions.Kind("Deployment") { - framework.ExpectNoError(framework.DeleteResourceAndWaitForGC( - config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()), - fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName())) - } else { - framework.ExpectNoError(framework.DeleteResourceAndPods( - config.GetClient(), config.GetInternalClient(), config.GetScalesGetter(), config.GetKind(), config.GetNamespace(), config.GetName()), - fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName())) - } + framework.ExpectNoError(framework.DeleteResourceAndWaitForGC( + config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()), + fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName())) } func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string, testPhase *timer.Phase) ([]*v1.Namespace, error) { diff --git a/test/e2e/scheduling/equivalence_cache_predicates.go b/test/e2e/scheduling/equivalence_cache_predicates.go index 7c7e9345780..d55cabc03b5 100644 --- a/test/e2e/scheduling/equivalence_cache_predicates.go +++ b/test/e2e/scheduling/equivalence_cache_predicates.go @@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false) return err }, ns, rcName, false) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName) + defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, rcName) // the first replica pod is scheduled, and the second pod will be rejected. verifyResult(cs, 1, 1, ns) }) @@ -141,7 +141,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { }, } rc := getRCWithInterPodAffinity(affinityRCName, labelsMap, replica, affinity, imageutils.GetPauseImageName()) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, affinityRCName) + defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, affinityRCName) // RC should be running successfully // TODO: WaitForSchedulerAfterAction() can on be used to wait for failure event, @@ -167,7 +167,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { It("validates pod anti-affinity works properly when new replica pod is scheduled", func() { By("Launching two pods on two distinct nodes to get two node names") CreateHostPortPods(f, "host-port", 2, true) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "host-port") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port") podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{}) framework.ExpectNoError(err) Expect(len(podList.Items)).To(Equal(2)) @@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { } rc := getRCWithInterPodAffinityNodeSelector(labelRCName, labelsMap, replica, affinity, imageutils.GetPauseImageName(), map[string]string{k: v}) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, labelRCName) + defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, labelRCName) WaitForSchedulerAfterAction(f, func() error { _, err := cs.CoreV1().ReplicationControllers(ns).Create(rc) diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index db1049897fa..bd784f506c8 100644 --- a/test/e2e/scheduling/predicates.go +++ 
b/test/e2e/scheduling/predicates.go @@ -71,7 +71,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{}) if err == nil && *(rc.Spec.Replicas) != 0 { By("Cleaning up the replication controller") - err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, RCName) + err := framework.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName) framework.ExpectNoError(err) } }) diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 7a552adbedc..484745460d4 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -153,7 +153,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. - if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, rc.Name); err != nil { + if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil { framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err) } }() diff --git a/test/e2e/scheduling/rescheduler.go b/test/e2e/scheduling/rescheduler.go index 5452f119244..8595b97606a 100644 --- a/test/e2e/scheduling/rescheduler.go +++ b/test/e2e/scheduling/rescheduler.go @@ -56,7 +56,7 @@ var _ = SIGDescribe("Rescheduler [Serial]", func() { It("should ensure that critical pod is scheduled in case there is no resources available", func() { By("reserving all available cpu") err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores) - defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "reserve-all-cpu") + defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "reserve-all-cpu") framework.ExpectNoError(err) By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled") diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index 329871ff4c5..2834e3925b5 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -224,7 +224,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. 
- if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, controller.Name); err != nil { + if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil { framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } }() diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 0e6d8a6dc61..d6bfca35d51 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -374,7 +374,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume Expect(err).NotTo(HaveOccurred(), "error creating replication controller") defer func() { - err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.ScalesGetter, f.Namespace.Name, rcName) + err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName) framework.ExpectNoError(err) }() diff --git a/test/utils/delete_resources.go b/test/utils/delete_resources.go index acc42c3a3c2..5c67afda201 100644 --- a/test/utils/delete_resources.go +++ b/test/utils/delete_resources.go @@ -25,13 +25,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" clientset "k8s.io/client-go/kubernetes" - scaleclient "k8s.io/client-go/scale" appsinternal "k8s.io/kubernetes/pkg/apis/apps" batchinternal "k8s.io/kubernetes/pkg/apis/batch" api "k8s.io/kubernetes/pkg/apis/core" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/kubectl" ) func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions) error { @@ -72,21 +69,3 @@ func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, nam } return RetryWithExponentialBackOff(deleteFunc) } - -func DeleteResourceUsingReaperWithRetries(c internalclientset.Interface, kind schema.GroupKind, namespace, name string, options *metav1.DeleteOptions, scaleClient scaleclient.ScalesGetter) error { - reaper, err := kubectl.ReaperFor(kind, c, scaleClient) - if err != nil { - return err - } - deleteFunc := func() (bool, error) { - err := reaper.Stop(namespace, name, 0, options) - if err == nil || apierrs.IsNotFound(err) { - return true, nil - } - if IsRetryableAPIError(err) { - return false, nil - } - return false, fmt.Errorf("Failed to delete object with non-retriable error: %v", err) - } - return RetryWithExponentialBackOff(deleteFunc) -} From b8b4c7c81bfbdcdc9637f27f15ea16dce96636ae Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 25 May 2018 16:11:40 +0200 Subject: [PATCH 204/307] Handle DaemonSet removal the old way --- pkg/kubectl/cmd/BUILD | 2 ++ pkg/kubectl/cmd/delete.go | 63 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index 271c47b5cde..82074478ae1 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -105,6 +105,7 @@ go_library( "//vendor/github.com/renstrom/dedent:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/policy/v1beta1:go_default_library", @@ -126,6 +127,7 @@ go_library( 
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 846f2e31761..2f2bd91508a 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -24,9 +24,15 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -287,6 +293,16 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { } func (o *DeleteOptions) deleteResource(info *resource.Info, deleteOptions *metav1.DeleteOptions) error { + // TODO: this should be removed as soon as DaemonSet controller properly handles object deletion + // see https://github.com/kubernetes/kubernetes/issues/64313 for details + mapping := info.ResourceMapping() + if mapping.Resource.GroupResource() == (schema.GroupResource{Group: "extensions", Resource: "daemonsets"}) || + mapping.Resource.GroupResource() == (schema.GroupResource{Group: "apps", Resource: "daemonsets"}) { + if err := updateDaemonSet(info.Namespace, info.Name, o.DynamicClient); err != nil { + return err + } + } + if err := resource.NewHelper(info.Client, info.Mapping).DeleteWithOptions(info.Namespace, info.Name, deleteOptions); err != nil { return cmdutil.AddSourceToErr("deleting", info.Source, err) } @@ -295,6 +311,53 @@ func (o *DeleteOptions) deleteResource(info *resource.Info, deleteOptions *metav return nil } +func updateDaemonSet(namespace, name string, dynamicClient dynamic.Interface) error { + dsClient := dynamicClient.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}).Namespace(namespace) + obj, err := dsClient.Get(name, metav1.GetOptions{}) + if err != nil { + return err + } + ds := &appsv1.DaemonSet{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, ds); err != nil { + return err + } + + // We set the nodeSelector to a random label. This label is nearly guaranteed + // to not be set on any node so the DameonSetController will start deleting + // daemon pods. Once it's done deleting the daemon pods, it's safe to delete + // the DaemonSet. + ds.Spec.Template.Spec.NodeSelector = map[string]string{ + string(uuid.NewUUID()): string(uuid.NewUUID()), + } + // force update to avoid version conflict + ds.ResourceVersion = "" + + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ds) + if err != nil { + return err + } + if _, err = dsClient.Update(&unstructured.Unstructured{Object: out}); err != nil { + return err + } + + // Wait for the daemon set controller to kill all the daemon pods. 
+ if err := wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) { + updatedObj, err := dsClient.Get(name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + updatedDS := &appsv1.DaemonSet{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updatedObj.Object, updatedDS); err != nil { + return false, nil + } + + return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil + }); err != nil { + return err + } + return nil +} + // deletion printing is special because we do not have an object to print. // This mirrors name printer behavior func (o *DeleteOptions) PrintObj(info *resource.Info) { From 2a76535018046e5510d2dcb5279977766d626f83 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 25 May 2018 20:47:09 +0200 Subject: [PATCH 205/307] Remove initializer test Initializers are alpha, broken and a subject for removal. They don't work well with finalizers and the previous hack present in deployment and replicaset reapers was just hiding this problem. --- hack/make-rules/test-cmd-util.sh | 24 ------------------ .../testdata/deployment-with-initializer.yaml | 25 ------------------- .../testdata/replicaset-with-initializer.yaml | 23 ----------------- 3 files changed, 72 deletions(-) delete mode 100644 hack/testdata/deployment-with-initializer.yaml delete mode 100644 hack/testdata/replicaset-with-initializer.yaml diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh index 82cab8a7ba3..732bdb64d65 100755 --- a/hack/make-rules/test-cmd-util.sh +++ b/hack/make-rules/test-cmd-util.sh @@ -3209,18 +3209,6 @@ run_deployment_tests() { kubectl delete configmap test-set-env-config "${kube_flags[@]}" kubectl delete secret test-set-env-secret "${kube_flags[@]}" - ### Delete a deployment with initializer - # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' - # Create a deployment - kubectl create --request-timeout=1 -f hack/testdata/deployment-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true - kube::test::get_object_assert 'deployment web' "{{$id_field}}" 'web' - # Delete a deployment - kubectl delete deployment web "${kube_flags[@]}" - # Check Deployment web doesn't exist - output_message=$(! kubectl get deployment web 2>&1 "${kube_flags[@]}") - kube::test::if_has_string "${output_message}" '"web" not found' - set +o nounset set +o errexit } @@ -3362,18 +3350,6 @@ run_rs_tests() { # Post-condition: no replica set exists kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' - ### Delete a rs with initializer - # Pre-condition: no rs exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' - # Create a rs - kubectl create --request-timeout=1 -f hack/testdata/replicaset-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true - kube::test::get_object_assert 'rs nginx' "{{$id_field}}" 'nginx' - # Delete a rs - kubectl delete rs nginx "${kube_flags[@]}" - # check rs nginx doesn't exist - output_message=$(!
kubectl get rs nginx 2>&1 "${kube_flags[@]}") - kube::test::if_has_string "${output_message}" '"nginx" not found' - if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then ### Auto scale replica set # Pre-condition: no replica set exists diff --git a/hack/testdata/deployment-with-initializer.yaml b/hack/testdata/deployment-with-initializer.yaml deleted file mode 100644 index 2fb81498cc5..00000000000 --- a/hack/testdata/deployment-with-initializer.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: web - labels: - run: web - initializers: - pending: - - name: podimage.initializer.com -spec: - replicas: 5 - selector: - matchLabels: - run: web - template: - metadata: - labels: - run: web - spec: - containers: - - image: nginx:1.10 - name: web - ports: - - containerPort: 80 - protocol: TCP diff --git a/hack/testdata/replicaset-with-initializer.yaml b/hack/testdata/replicaset-with-initializer.yaml deleted file mode 100644 index 8530faf90ff..00000000000 --- a/hack/testdata/replicaset-with-initializer.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: ReplicaSet -metadata: - name: nginx - initializers: - pending: - - name: podimage.initializer.com -spec: - replicas: 3 - selector: - matchLabels: - app: nginx - template: - metadata: - name: nginx - labels: - app: nginx - spec: - containers: - - name: nginx - image: nginx:1.10 - ports: - - containerPort: 80 From 069062365aa79880621058065c882ae0dbf90e0d Mon Sep 17 00:00:00 2001 From: Ashley Gau Date: Fri, 25 May 2018 14:19:00 -0700 Subject: [PATCH 206/307] use fakeGCECloud instead of gce address fakes --- .../providers/gce/cloud/mock/mock.go | 2 +- .../providers/gce/gce_address_manager_test.go | 67 ++--- .../providers/gce/gce_addresses_fakes.go | 239 ------------------ 3 files changed, 39 insertions(+), 269 deletions(-) delete mode 100644 pkg/cloudprovider/providers/gce/gce_addresses_fakes.go diff --git a/pkg/cloudprovider/providers/gce/cloud/mock/mock.go b/pkg/cloudprovider/providers/gce/cloud/mock/mock.go index f0e87cf0ce7..523691c635e 100644 --- a/pkg/cloudprovider/providers/gce/cloud/mock/mock.go +++ b/pkg/cloudprovider/providers/gce/cloud/mock/mock.go @@ -204,7 +204,7 @@ func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta. 
errorCode = http.StatusBadRequest } - return false, &googleapi.Error{Code: errorCode, Message: msg} + return true, &googleapi.Error{Code: errorCode, Message: msg} } } diff --git a/pkg/cloudprovider/providers/gce/gce_address_manager_test.go b/pkg/cloudprovider/providers/gce/gce_address_manager_test.go index 3c7d60dc564..3b7409c6c19 100644 --- a/pkg/cloudprovider/providers/gce/gce_address_manager_test.go +++ b/pkg/cloudprovider/providers/gce/gce_address_manager_test.go @@ -26,95 +26,104 @@ import ( ) const testSvcName = "my-service" -const testRegion = "us-central1" const testSubnet = "/projects/x/testRegions/us-central1/testSubnetworks/customsub" const testLBName = "a111111111111111" +var vals = DefaultTestClusterValues() + // TestAddressManagerNoRequestedIP tests the typical case of passing in no requested IP func TestAddressManagerNoRequestedIP(t *testing.T) { - svc := NewFakeCloudAddressService() + svc, err := fakeGCECloud(vals) + require.NoError(t, err) targetIP := "" - mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal) - testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal)) - testReleaseAddress(t, mgr, svc, testLBName, testRegion) + mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal) + testHoldAddress(t, mgr, svc, testLBName, vals.Region, targetIP, string(cloud.SchemeInternal)) + testReleaseAddress(t, mgr, svc, testLBName, vals.Region) } // TestAddressManagerBasic tests the typical case of reserving and unreserving an address. func TestAddressManagerBasic(t *testing.T) { - svc := NewFakeCloudAddressService() + svc, err := fakeGCECloud(vals) + require.NoError(t, err) targetIP := "1.1.1.1" - mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal) - testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal)) - testReleaseAddress(t, mgr, svc, testLBName, testRegion) + mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal) + testHoldAddress(t, mgr, svc, testLBName, vals.Region, targetIP, string(cloud.SchemeInternal)) + testReleaseAddress(t, mgr, svc, testLBName, vals.Region) } // TestAddressManagerOrphaned tests the case where the address exists with the IP being equal // to the requested address (forwarding rule or loadbalancer IP). 
func TestAddressManagerOrphaned(t *testing.T) { - svc := NewFakeCloudAddressService() + svc, err := fakeGCECloud(vals) + require.NoError(t, err) targetIP := "1.1.1.1" addr := &compute.Address{Name: testLBName, Address: targetIP, AddressType: string(cloud.SchemeInternal)} - err := svc.ReserveRegionAddress(addr, testRegion) + err = svc.ReserveRegionAddress(addr, vals.Region) require.NoError(t, err) - mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal) - testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal)) - testReleaseAddress(t, mgr, svc, testLBName, testRegion) + mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal) + testHoldAddress(t, mgr, svc, testLBName, vals.Region, targetIP, string(cloud.SchemeInternal)) + testReleaseAddress(t, mgr, svc, testLBName, vals.Region) } // TestAddressManagerOutdatedOrphan tests the case where an address exists but points to // an IP other than the forwarding rule or loadbalancer IP. func TestAddressManagerOutdatedOrphan(t *testing.T) { - svc := NewFakeCloudAddressService() + svc, err := fakeGCECloud(vals) + require.NoError(t, err) previousAddress := "1.1.0.0" targetIP := "1.1.1.1" addr := &compute.Address{Name: testLBName, Address: previousAddress, AddressType: string(cloud.SchemeExternal)} - err := svc.ReserveRegionAddress(addr, testRegion) + err = svc.ReserveRegionAddress(addr, vals.Region) require.NoError(t, err) - mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal) - testHoldAddress(t, mgr, svc, testLBName, testRegion, targetIP, string(cloud.SchemeInternal)) - testReleaseAddress(t, mgr, svc, testLBName, testRegion) + mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal) + testHoldAddress(t, mgr, svc, testLBName, vals.Region, targetIP, string(cloud.SchemeInternal)) + testReleaseAddress(t, mgr, svc, testLBName, vals.Region) } // TestAddressManagerExternallyOwned tests the case where the address exists but isn't // owned by the controller. func TestAddressManagerExternallyOwned(t *testing.T) { - svc := NewFakeCloudAddressService() + svc, err := fakeGCECloud(vals) + require.NoError(t, err) targetIP := "1.1.1.1" addr := &compute.Address{Name: "my-important-address", Address: targetIP, AddressType: string(cloud.SchemeInternal)} - err := svc.ReserveRegionAddress(addr, testRegion) + err = svc.ReserveRegionAddress(addr, vals.Region) require.NoError(t, err) - mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal) + mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal) ipToUse, err := mgr.HoldAddress() require.NoError(t, err) assert.NotEmpty(t, ipToUse) - _, err = svc.GetRegionAddress(testLBName, testRegion) + ad, err := svc.GetRegionAddress(testLBName, vals.Region) assert.True(t, isNotFound(err)) + require.Nil(t, ad) - testReleaseAddress(t, mgr, svc, testLBName, testRegion) + testReleaseAddress(t, mgr, svc, testLBName, vals.Region) } // TestAddressManagerExternallyOwned tests the case where the address exists but isn't // owned by the controller. However, this address has the wrong type. 
func TestAddressManagerBadExternallyOwned(t *testing.T) { - svc := NewFakeCloudAddressService() + svc, err := fakeGCECloud(vals) + require.NoError(t, err) targetIP := "1.1.1.1" addr := &compute.Address{Name: "my-important-address", Address: targetIP, AddressType: string(cloud.SchemeExternal)} - err := svc.ReserveRegionAddress(addr, testRegion) + err = svc.ReserveRegionAddress(addr, vals.Region) require.NoError(t, err) - mgr := newAddressManager(svc, testSvcName, testRegion, testSubnet, testLBName, targetIP, cloud.SchemeInternal) - _, err = mgr.HoldAddress() - assert.NotNil(t, err) + mgr := newAddressManager(svc, testSvcName, vals.Region, testSubnet, testLBName, targetIP, cloud.SchemeInternal) + ad, err := mgr.HoldAddress() + assert.NotNil(t, err) // FIXME + require.Equal(t, ad, "") } func testHoldAddress(t *testing.T, mgr *addressManager, svc CloudAddressService, name, region, targetIP, scheme string) { diff --git a/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go b/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go deleted file mode 100644 index 75dfa571c9c..00000000000 --- a/pkg/cloudprovider/providers/gce/gce_addresses_fakes.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gce - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - - computealpha "google.golang.org/api/compute/v0.alpha" - computebeta "google.golang.org/api/compute/v0.beta" - compute "google.golang.org/api/compute/v1" - - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" -) - -// test - -type FakeCloudAddressService struct { - count int - // reservedAddrs tracks usage of IP addresses - // Key is the IP address as a string - reservedAddrs map[string]bool - // addrsByRegionAndName - // Outer key is for region string; inner key is for address name. - addrsByRegionAndName map[string]map[string]*computealpha.Address -} - -// FakeCloudAddressService Implements CloudAddressService -var _ CloudAddressService = &FakeCloudAddressService{} - -func NewFakeCloudAddressService() *FakeCloudAddressService { - return &FakeCloudAddressService{ - reservedAddrs: make(map[string]bool), - addrsByRegionAndName: make(map[string]map[string]*computealpha.Address), - } -} - -// SetRegionalAddresses sets the addresses of there region. This is used for -// setting the test environment. -func (cas *FakeCloudAddressService) SetRegionalAddresses(region string, addrs []*computealpha.Address) { - // Reset addresses in the region. 
- cas.addrsByRegionAndName[region] = make(map[string]*computealpha.Address) - - for _, addr := range addrs { - cas.reservedAddrs[addr.Address] = true - cas.addrsByRegionAndName[region][addr.Name] = addr - } -} - -func (cas *FakeCloudAddressService) ReserveAlphaRegionAddress(addr *computealpha.Address, region string) error { - if addr.Address == "" { - addr.Address = fmt.Sprintf("1.2.3.%d", cas.count) - cas.count++ - } - - if addr.AddressType == "" { - addr.AddressType = string(cloud.SchemeExternal) - } - - if cas.reservedAddrs[addr.Address] { - msg := "IP in use" - // When the IP is already in use, this call returns an error code based - // on the type (internal vs external) of the address. This is to be - // consistent with actual GCE API. - switch cloud.LbScheme(addr.AddressType) { - case cloud.SchemeExternal: - return makeGoogleAPIError(http.StatusBadRequest, msg) - default: - return makeGoogleAPIError(http.StatusConflict, msg) - } - } - - if _, exists := cas.addrsByRegionAndName[region]; !exists { - cas.addrsByRegionAndName[region] = make(map[string]*computealpha.Address) - } - - if _, exists := cas.addrsByRegionAndName[region][addr.Name]; exists { - return makeGoogleAPIError(http.StatusConflict, "name in use") - } - - cas.addrsByRegionAndName[region][addr.Name] = addr - cas.reservedAddrs[addr.Address] = true - return nil -} - -func (cas *FakeCloudAddressService) ReserveBetaRegionAddress(addr *computebeta.Address, region string) error { - alphaAddr := convertToAlphaAddress(addr) - return cas.ReserveAlphaRegionAddress(alphaAddr, region) -} - -func (cas *FakeCloudAddressService) ReserveRegionAddress(addr *compute.Address, region string) error { - alphaAddr := convertToAlphaAddress(addr) - return cas.ReserveAlphaRegionAddress(alphaAddr, region) -} - -func (cas *FakeCloudAddressService) GetAlphaRegionAddress(name, region string) (*computealpha.Address, error) { - if _, exists := cas.addrsByRegionAndName[region]; !exists { - return nil, makeGoogleAPINotFoundError("") - } - - if addr, exists := cas.addrsByRegionAndName[region][name]; !exists { - return nil, makeGoogleAPINotFoundError("") - } else { - return addr, nil - } -} - -func (cas *FakeCloudAddressService) GetBetaRegionAddress(name, region string) (*computebeta.Address, error) { - addr, err := cas.GetAlphaRegionAddress(name, region) - if addr != nil { - return convertToBetaAddress(addr), err - } - return nil, err -} - -func (cas *FakeCloudAddressService) GetRegionAddress(name, region string) (*compute.Address, error) { - addr, err := cas.GetAlphaRegionAddress(name, region) - if addr != nil { - return convertToV1Address(addr), err - } - return nil, err -} - -func (cas *FakeCloudAddressService) DeleteRegionAddress(name, region string) error { - if _, exists := cas.addrsByRegionAndName[region]; !exists { - return makeGoogleAPINotFoundError("") - } - - addr, exists := cas.addrsByRegionAndName[region][name] - if !exists { - return makeGoogleAPINotFoundError("") - } - - delete(cas.reservedAddrs, addr.Address) - delete(cas.addrsByRegionAndName[region], name) - return nil -} - -func (cas *FakeCloudAddressService) GetAlphaRegionAddressByIP(region, ipAddress string) (*computealpha.Address, error) { - if _, exists := cas.addrsByRegionAndName[region]; !exists { - return nil, makeGoogleAPINotFoundError("") - } - - for _, addr := range cas.addrsByRegionAndName[region] { - if addr.Address == ipAddress { - return addr, nil - } - } - return nil, makeGoogleAPINotFoundError("") -} - -func (cas *FakeCloudAddressService) GetBetaRegionAddressByIP(name, 
region string) (*computebeta.Address, error) { - addr, err := cas.GetAlphaRegionAddressByIP(name, region) - if addr != nil { - return convertToBetaAddress(addr), nil - } - return nil, err -} - -func (cas *FakeCloudAddressService) GetRegionAddressByIP(name, region string) (*compute.Address, error) { - addr, err := cas.GetAlphaRegionAddressByIP(name, region) - if addr != nil { - return convertToV1Address(addr), nil - } - return nil, err -} - -func (cas *FakeCloudAddressService) getNetworkTierFromAddress(name, region string) (string, error) { - addr, err := cas.GetAlphaRegionAddress(name, region) - if err != nil { - return "", err - } - return addr.NetworkTier, nil -} - -func convertToV1Address(object gceObject) *compute.Address { - enc, err := object.MarshalJSON() - if err != nil { - panic(fmt.Sprintf("Failed to encode to json: %v", err)) - } - var addr compute.Address - if err := json.Unmarshal(enc, &addr); err != nil { - panic(fmt.Sprintf("Failed to convert GCE apiObject %v to v1 address: %v", object, err)) - } - return &addr -} - -func convertToAlphaAddress(object gceObject) *computealpha.Address { - enc, err := object.MarshalJSON() - if err != nil { - panic(fmt.Sprintf("Failed to encode to json: %v", err)) - } - var addr computealpha.Address - if err := json.Unmarshal(enc, &addr); err != nil { - panic(fmt.Sprintf("Failed to convert GCE apiObject %v to alpha address: %v", object, err)) - } - // Set the default values for the Alpha fields. - addr.NetworkTier = cloud.NetworkTierDefault.ToGCEValue() - return &addr -} - -func convertToBetaAddress(object gceObject) *computebeta.Address { - enc, err := object.MarshalJSON() - if err != nil { - panic(fmt.Sprintf("Failed to encode to json: %v", err)) - } - var addr computebeta.Address - if err := json.Unmarshal(enc, &addr); err != nil { - panic(fmt.Sprintf("Failed to convert GCE apiObject %v to beta address: %v", object, err)) - } - return &addr -} - -func (cas *FakeCloudAddressService) String() string { - var b bytes.Buffer - for region, regAddresses := range cas.addrsByRegionAndName { - b.WriteString(fmt.Sprintf("%v:\n", region)) - for name, addr := range regAddresses { - b.WriteString(fmt.Sprintf(" %v: %v\n", name, addr.Address)) - } - } - return b.String() -} From a96c5f2884739176a545313f670cfb6d57ff1e24 Mon Sep 17 00:00:00 2001 From: Ashley Gau Date: Fri, 25 May 2018 14:33:33 -0700 Subject: [PATCH 207/307] mocks must return true in order to trigger err --- .../providers/gce/cloud/mock/mock.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/mock/mock.go b/pkg/cloudprovider/providers/gce/cloud/mock/mock.go index 523691c635e..6a28c9f8e4b 100644 --- a/pkg/cloudprovider/providers/gce/cloud/mock/mock.go +++ b/pkg/cloudprovider/providers/gce/cloud/mock/mock.go @@ -94,7 +94,7 @@ func RemoveInstanceHook(ctx context.Context, key *meta.Key, req *ga.TargetPoolsR func convertAndInsertAlphaForwardingRule(key *meta.Key, obj gceObject, mRules map[meta.Key]*cloud.MockForwardingRulesObj, version meta.Version, projectID string) (bool, error) { if !key.Valid() { - return false, fmt.Errorf("invalid GCE key (%+v)", key) + return true, fmt.Errorf("invalid GCE key (%+v)", key) } if _, ok := mRules[*key]; ok { @@ -102,16 +102,16 @@ func convertAndInsertAlphaForwardingRule(key *meta.Key, obj gceObject, mRules ma Code: http.StatusConflict, Message: fmt.Sprintf("MockForwardingRule %v exists", key), } - return false, err + return true, err } enc, err := obj.MarshalJSON() if err != nil { - return 
false, err + return true, err } var fwdRule alpha.ForwardingRule if err := json.Unmarshal(enc, &fwdRule); err != nil { - return false, err + return true, err } // Set the default values for the Alpha fields. if fwdRule.NetworkTier == "" { @@ -162,7 +162,7 @@ type AddressAttributes struct { func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta.Key]*cloud.MockAddressesObj, version meta.Version, projectID string, addressAttrs AddressAttributes) (bool, error) { if !key.Valid() { - return false, fmt.Errorf("invalid GCE key (%+v)", key) + return true, fmt.Errorf("invalid GCE key (%+v)", key) } if _, ok := mAddrs[*key]; ok { @@ -170,16 +170,16 @@ func convertAndInsertAlphaAddress(key *meta.Key, obj gceObject, mAddrs map[meta. Code: http.StatusConflict, Message: fmt.Sprintf("MockAddresses %v exists", key), } - return false, err + return true, err } enc, err := obj.MarshalJSON() if err != nil { - return false, err + return true, err } var addr alpha.Address if err := json.Unmarshal(enc, &addr); err != nil { - return false, err + return true, err } // Set default address type if not present. From ffeca161018fd6218532786876070a5fcfe96542 Mon Sep 17 00:00:00 2001 From: liz Date: Fri, 25 May 2018 17:48:17 -0400 Subject: [PATCH 208/307] Remove some unnecessarily gendered pronouns in comments --- cmd/kubeadm/app/cmd/reset.go | 2 +- pkg/scheduler/algorithmprovider/defaults/defaults.go | 2 +- pkg/scheduler/core/extender_test.go | 2 +- staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/kubeadm/app/cmd/reset.go b/cmd/kubeadm/app/cmd/reset.go index 81d2cc83cc0..33754e51f4c 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -156,7 +156,7 @@ func (r *Reset) Run(out io.Writer) error { dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d", "/var/lib/dockershim", "/var/run/kubernetes"} // Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user - // provided external etcd endpoints. In that case, it is his own responsibility to reset etcd + // provided external etcd endpoints. In that case, it is their own responsibility to reset etcd etcdManifestPath := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName, "etcd.yaml") glog.V(1).Infof("[reset] checking for etcd manifest") if _, err := os.Stat(etcdManifestPath); err == nil { diff --git a/pkg/scheduler/algorithmprovider/defaults/defaults.go b/pkg/scheduler/algorithmprovider/defaults/defaults.go index 267e5d6ba5f..2995aa9dd28 100644 --- a/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -56,7 +56,7 @@ func init() { // For example: // https://github.com/kubernetes/kubernetes/blob/36a218e/plugin/pkg/scheduler/factory/factory.go#L422 - // Registers predicates and priorities that are not enabled by default, but user can pick when creating his + // Registers predicates and priorities that are not enabled by default, but user can pick when creating their // own set of priorities/predicates. // PodFitsPorts has been replaced by PodFitsHostPorts for better user understanding. 
diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index b0e7489312d..31bd82017b8 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -135,7 +135,7 @@ func (f *FakeExtender) ProcessPreemption( nodeToVictimsCopy := map[*v1.Node]*schedulerapi.Victims{} // We don't want to change the original nodeToVictims for k, v := range nodeToVictims { - // In real world implementation, extender's user should have his own way to get node object + // In real world implementation, extender's user should have their own way to get node object // by name if needed (e.g. query kube-apiserver etc). // // For test purpose, we just use node from parameters directly. diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go index 56905c36277..78700c33a88 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -167,7 +167,7 @@ func WithMaxInFlightLimit( metrics.DroppedRequests.WithLabelValues(metrics.ReadOnlyKind).Inc() } // at this point we're about to return a 429, BUT not all actors should be rate limited. A system:master is so powerful - // that he should always get an answer. It's a super-admin or a loopback connection. + // that they should always get an answer. It's a super-admin or a loopback connection. if currUser, ok := apirequest.UserFrom(ctx); ok { for _, group := range currUser.GetGroups() { if group == user.SystemPrivilegedGroup { From cf393d7a7bc68338a072cc93353c356ed7462966 Mon Sep 17 00:00:00 2001 From: Ashley Gau Date: Fri, 25 May 2018 14:39:27 -0700 Subject: [PATCH 209/307] remove gce_address_fakes.go from BUILD file --- pkg/cloudprovider/providers/gce/BUILD | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 86b2c9e6c67..0185082ebca 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -13,7 +13,6 @@ go_library( "gce.go", "gce_address_manager.go", "gce_addresses.go", - "gce_addresses_fakes.go", "gce_alpha.go", "gce_annotations.go", "gce_backendservice.go", From 6e0badc0d1d8ef4f52778a1f9c4505754fc24c12 Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Fri, 25 May 2018 14:46:05 -0700 Subject: [PATCH 210/307] Fix DsFromManifest() after we switch from extensions/v1beta1 to apps/v1 in cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml. --- test/e2e/framework/util.go | 4 ++-- test/e2e/scheduling/BUILD | 2 +- test/e2e/scheduling/nvidia-gpus.go | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 618a1c6520a..e20855b0e22 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -5194,8 +5194,8 @@ func DumpDebugInfo(c clientset.Interface, ns string) { } // DsFromManifest reads a .json/yaml file and returns the daemonset in it. 
-func DsFromManifest(url string) (*extensions.DaemonSet, error) { - var controller extensions.DaemonSet +func DsFromManifest(url string) (*apps.DaemonSet, error) { + var controller apps.DaemonSet Logf("Parsing ds from %v", url) var response *http.Response diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 1e1cda3a2c4..33a4eda78ed 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -20,8 +20,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", - "//pkg/apis/extensions:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/quota/evaluator/core:go_default_library", diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 48fa541351e..5e08e339c5f 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" - extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" @@ -133,14 +133,14 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra ds, err := framework.DsFromManifest(dsYamlUrl) Expect(err).NotTo(HaveOccurred()) ds.Namespace = f.Namespace.Name - _, err = f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Create(ds) + _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds) framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") framework.Logf("Successfully created daemonset to install Nvidia drivers.") - pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet")) + pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, apps.Kind("DaemonSet")) framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset") - devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet")) + devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", apps.Kind("DaemonSet")) if err == nil { framework.Logf("Adding deviceplugin addon pod.") pods.Items = append(pods.Items, devicepluginPods.Items...) 
From c05e89d0e551c753407941abad4c479883f0629b Mon Sep 17 00:00:00 2001 From: Nick Sardo Date: Fri, 25 May 2018 16:09:16 -0700 Subject: [PATCH 211/307] Fix nodeport repair for ESIPP services --- .../core/service/portallocator/controller/repair.go | 5 +++++ .../service/portallocator/controller/repair_test.go | 10 ++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/pkg/registry/core/service/portallocator/controller/repair.go b/pkg/registry/core/service/portallocator/controller/repair.go index 1e4dc685c3d..c1a4f0dadbd 100644 --- a/pkg/registry/core/service/portallocator/controller/repair.go +++ b/pkg/registry/core/service/portallocator/controller/repair.go @@ -204,5 +204,10 @@ func collectServiceNodePorts(service *api.Service) []int { servicePorts = append(servicePorts, int(servicePort.NodePort)) } } + + if service.Spec.HealthCheckNodePort != 0 { + servicePorts = append(servicePorts, int(service.Spec.HealthCheckNodePort)) + } + return servicePorts } diff --git a/pkg/registry/core/service/portallocator/controller/repair_test.go b/pkg/registry/core/service/portallocator/controller/repair_test.go index 151c791cc39..48c41aa989a 100644 --- a/pkg/registry/core/service/portallocator/controller/repair_test.go +++ b/pkg/registry/core/service/portallocator/controller/repair_test.go @@ -164,6 +164,12 @@ func TestRepairWithExisting(t *testing.T) { Ports: []api.ServicePort{{NodePort: 111}}, }, }, + &api.Service{ + ObjectMeta: metav1.ObjectMeta{Namespace: "six", Name: "six"}, + Spec: api.ServiceSpec{ + HealthCheckNodePort: 144, + }, + }, ) registry := &mockRangeRegistry{ @@ -183,10 +189,10 @@ func TestRepairWithExisting(t *testing.T) { if err != nil { t.Fatal(err) } - if !after.Has(111) || !after.Has(122) || !after.Has(133) { + if !after.Has(111) || !after.Has(122) || !after.Has(133) || !after.Has(144) { t.Errorf("unexpected portallocator state: %#v", after) } - if free := after.Free(); free != 98 { + if free := after.Free(); free != 97 { t.Errorf("unexpected portallocator state: %d free", free) } } From 9475292cd8f60df59db2cb365f20f805efefe00c Mon Sep 17 00:00:00 2001 From: Vishnu kannan Date: Thu, 24 May 2018 17:05:22 -0700 Subject: [PATCH 212/307] Adding a shutdown script that would enable handling preemptible VM terminations gracefully in GCP environment Signed-off-by: Vishnu kannan --- cluster/gce/gci/BUILD | 1 + cluster/gce/gci/node-helper.sh | 1 + cluster/gce/gci/shutdown.sh | 23 +++++++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100755 cluster/gce/gci/shutdown.sh diff --git a/cluster/gce/gci/BUILD b/cluster/gce/gci/BUILD index 0a16226b9b6..7494cf12ff0 100644 --- a/cluster/gce/gci/BUILD +++ b/cluster/gce/gci/BUILD @@ -28,6 +28,7 @@ release_filegroup( "configure.sh", "master.yaml", "node.yaml", + "shutdown.sh", ], visibility = ["//visibility:public"], ) diff --git a/cluster/gce/gci/node-helper.sh b/cluster/gce/gci/node-helper.sh index fc782c55b51..fd4ca7378ad 100755 --- a/cluster/gce/gci/node-helper.sh +++ b/cluster/gce/gci/node-helper.sh @@ -28,6 +28,7 @@ function get-node-instance-metadata { metadata+="gci-update-strategy=${KUBE_TEMP}/gci-update.txt," metadata+="gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt," metadata+="gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt," + metadata+="shutdown-script=${KUBE_ROOT}/cluster/gce/gci/shutdown.sh," metadata+="${NODE_EXTRA_METADATA}" echo "${metadata}" } diff --git a/cluster/gce/gci/shutdown.sh b/cluster/gce/gci/shutdown.sh new file mode 100755 index 00000000000..dab0d34c62f --- /dev/null +++ 
b/cluster/gce/gci/shutdown.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A script that lets gci preemptible nodes gracefully terminate in the event of a VM shutdown. +preemptible=$(curl "http://metadata.google.internal/computeMetadata/v1/instance/scheduling/preemptible" -H "Metadata-Flavor: Google") +if [ ${preemptible} == "TRUE" ]; then + echo "Shutting down! Sleeping for 30 seconds to let the node gracefully terminate" + # https://cloud.google.com/compute/docs/instances/stopping-or-deleting-an-instance#delete_timeout + sleep 30 +fi From 332a3e846784bb46003c2bf148c991162a32692a Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Fri, 25 May 2018 15:27:24 -0700 Subject: [PATCH 213/307] [gce provider] Add more wrappers for security policy --- .../providers/gce/cloud/meta/meta.go | 7 ++ .../providers/gce/gce_securitypolicy.go | 90 +++++++++++++++++++ 2 files changed, 97 insertions(+) create mode 100644 pkg/cloudprovider/providers/gce/gce_securitypolicy.go diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go index 3fe5e393a4c..7c1139b9400 100644 --- a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go +++ b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go @@ -345,6 +345,13 @@ var AllServices = []*ServiceInfo{ version: VersionBeta, keyType: Global, serviceType: reflect.TypeOf(&beta.SecurityPoliciesService{}), + additionalMethods: []string{ + "AddRule", + "GetRule", + "Patch", + "PatchRule", + "RemoveRule", + }, }, { Object: "SslCertificate", diff --git a/pkg/cloudprovider/providers/gce/gce_securitypolicy.go b/pkg/cloudprovider/providers/gce/gce_securitypolicy.go new file mode 100644 index 00000000000..bec23a644f6 --- /dev/null +++ b/pkg/cloudprovider/providers/gce/gce_securitypolicy.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "context" + + computebeta "google.golang.org/api/compute/v0.beta" + + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" +) + +func newSecurityPolicyMetricContextWithVersion(request, version string) *metricContext { + return newGenericMetricContext("securitypolicy", request, "", unusedMetricLabel, version) +} + +// GetBetaSecurityPolicy retrieves a security policy. 
+func (gce *GCECloud) GetBetaSecurityPolicy(name string) (*computebeta.SecurityPolicy, error) { + mc := newSecurityPolicyMetricContextWithVersion("get", computeBetaVersion) + v, err := gce.c.BetaSecurityPolicies().Get(context.Background(), meta.GlobalKey(name)) + return v, mc.Observe(err) +} + +// ListBetaSecurityPolicy lists all security policies in the project. +func (gce *GCECloud) ListBetaSecurityPolicy() ([]*computebeta.SecurityPolicy, error) { + mc := newSecurityPolicyMetricContextWithVersion("list", computeBetaVersion) + v, err := gce.c.BetaSecurityPolicies().List(context.Background(), filter.None) + return v, mc.Observe(err) +} + +// CreateBetaSecurityPolicy creates the given security policy. +func (gce *GCECloud) CreateBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { + mc := newSecurityPolicyMetricContextWithVersion("create", computeBetaVersion) + return mc.Observe(gce.c.BetaSecurityPolicies().Insert(context.Background(), meta.GlobalKey(sp.Name), sp)) +} + +// DeleteBetaSecurityPolicy deletes the given security policy. +func (gce *GCECloud) DeleteBetaSecurityPolicy(name string) error { + mc := newSecurityPolicyMetricContextWithVersion("delete", computeBetaVersion) + return mc.Observe(gce.c.BetaSecurityPolicies().Delete(context.Background(), meta.GlobalKey(name))) +} + +// PatchBetaSecurityPolicy applies the given security policy as a +// patch to an existing security policy. +func (gce *GCECloud) PatchBetaSecurityPolicy(sp *computebeta.SecurityPolicy) error { + mc := newSecurityPolicyMetricContextWithVersion("patch", computeBetaVersion) + return mc.Observe(gce.c.BetaSecurityPolicies().Patch(context.Background(), meta.GlobalKey(sp.Name), sp)) +} + +// GetRuleForBetaSecurityPolicy gets rule from a security policy. +func (gce *GCECloud) GetRuleForBetaSecurityPolicy(name string) (*computebeta.SecurityPolicyRule, error) { + mc := newSecurityPolicyMetricContextWithVersion("get_rule", computeBetaVersion) + v, err := gce.c.BetaSecurityPolicies().GetRule(context.Background(), meta.GlobalKey(name)) + return v, mc.Observe(err) +} + +// AddRuletoBetaSecurityPolicy adds the given security policy rule to +// a security policy. +func (gce *GCECloud) AddRuletoBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { + mc := newSecurityPolicyMetricContextWithVersion("add_rule", computeBetaVersion) + return mc.Observe(gce.c.BetaSecurityPolicies().AddRule(context.Background(), meta.GlobalKey(name), spr)) +} + +// PatchRuleForBetaSecurityPolicy patches the given security policy +// rule to a security policy. +func (gce *GCECloud) PatchRuleForBetaSecurityPolicy(name string, spr *computebeta.SecurityPolicyRule) error { + mc := newSecurityPolicyMetricContextWithVersion("patch_rule", computeBetaVersion) + return mc.Observe(gce.c.BetaSecurityPolicies().PatchRule(context.Background(), meta.GlobalKey(name), spr)) +} + +// RemoveRuleFromBetaSecurityPolicy removes rule from a security policy. 
+func (gce *GCECloud) RemoveRuleFromBetaSecurityPolicy(name string) error { + mc := newSecurityPolicyMetricContextWithVersion("remove_rule", computeBetaVersion) + return mc.Observe(gce.c.BetaSecurityPolicies().RemoveRule(context.Background(), meta.GlobalKey(name))) +} From 19d7006d6b384f7cea7342f626a5be2cd66c71e7 Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Fri, 25 May 2018 15:28:05 -0700 Subject: [PATCH 214/307] [gce provider] Update auto-generated codes --- pkg/cloudprovider/providers/gce/BUILD | 1 + pkg/cloudprovider/providers/gce/cloud/gen.go | 218 ++++++++++++++++++- 2 files changed, 215 insertions(+), 4 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 86b2c9e6c67..05a1f345e57 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -34,6 +34,7 @@ go_library( "gce_networkendpointgroup.go", "gce_op.go", "gce_routes.go", + "gce_securitypolicy.go", "gce_targetpool.go", "gce_targetproxy.go", "gce_tpu.go", diff --git a/pkg/cloudprovider/providers/gce/cloud/gen.go b/pkg/cloudprovider/providers/gce/cloud/gen.go index 2dff8a8f4b2..9b08d3c8d3a 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -11535,6 +11535,11 @@ type BetaSecurityPolicies interface { List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error Delete(ctx context.Context, key *meta.Key) error + AddRule(context.Context, *meta.Key, *beta.SecurityPolicyRule) error + GetRule(context.Context, *meta.Key) (*beta.SecurityPolicyRule, error) + Patch(context.Context, *meta.Key, *beta.SecurityPolicy) error + PatchRule(context.Context, *meta.Key, *beta.SecurityPolicyRule) error + RemoveRule(context.Context, *meta.Key) error } // NewMockBetaSecurityPolicies returns a new mock for SecurityPolicies. @@ -11570,10 +11575,15 @@ type MockBetaSecurityPolicies struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. 
- GetHook func(ctx context.Context, key *meta.Key, m *MockBetaSecurityPolicies) (bool, *beta.SecurityPolicy, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockBetaSecurityPolicies) (bool, []*beta.SecurityPolicy, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy, m *MockBetaSecurityPolicies) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaSecurityPolicies) (bool, error) + GetHook func(ctx context.Context, key *meta.Key, m *MockBetaSecurityPolicies) (bool, *beta.SecurityPolicy, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockBetaSecurityPolicies) (bool, []*beta.SecurityPolicy, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy, m *MockBetaSecurityPolicies) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockBetaSecurityPolicies) (bool, error) + AddRuleHook func(context.Context, *meta.Key, *beta.SecurityPolicyRule, *MockBetaSecurityPolicies) error + GetRuleHook func(context.Context, *meta.Key, *MockBetaSecurityPolicies) (*beta.SecurityPolicyRule, error) + PatchHook func(context.Context, *meta.Key, *beta.SecurityPolicy, *MockBetaSecurityPolicies) error + PatchRuleHook func(context.Context, *meta.Key, *beta.SecurityPolicyRule, *MockBetaSecurityPolicies) error + RemoveRuleHook func(context.Context, *meta.Key, *MockBetaSecurityPolicies) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -11719,6 +11729,46 @@ func (m *MockBetaSecurityPolicies) Obj(o *beta.SecurityPolicy) *MockSecurityPoli return &MockSecurityPoliciesObj{o} } +// AddRule is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { + if m.AddRuleHook != nil { + return m.AddRuleHook(ctx, key, arg0, m) + } + return nil +} + +// GetRule is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { + if m.GetRuleHook != nil { + return m.GetRuleHook(ctx, key, m) + } + return nil, fmt.Errorf("GetRuleHook must be set") +} + +// Patch is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { + if m.PatchHook != nil { + return m.PatchHook(ctx, key, arg0, m) + } + return nil +} + +// PatchRule is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { + if m.PatchRuleHook != nil { + return m.PatchRuleHook(ctx, key, arg0, m) + } + return nil +} + +// RemoveRule is a mock for the corresponding method. +func (m *MockBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { + if m.RemoveRuleHook != nil { + return m.RemoveRuleHook(ctx, key, m) + } + return nil +} + // GCEBetaSecurityPolicies is a simplifying adapter for the GCE SecurityPolicies. type GCEBetaSecurityPolicies struct { s *Service @@ -11861,6 +11911,166 @@ func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) err return err } +// AddRule is a method on GCEBetaSecurityPolicies. 
+func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { + glog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + glog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "AddRule", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + glog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.AddRule(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// GetRule is a method on GCEBetaSecurityPolicies. +func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { + glog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + glog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return nil, fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "GetRule", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + glog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return nil, err + } + call := g.s.Beta.SecurityPolicies.GetRule(projectID, key.Name) + call.Context(ctx) + v, err := call.Do() + glog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) + return v, err +} + +// Patch is a method on GCEBetaSecurityPolicies. 
+func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { + glog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + glog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "Patch", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + glog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.Patch(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// PatchRule is a method on GCEBetaSecurityPolicies. +func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { + glog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + glog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "PatchRule", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + glog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.PatchRule(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + return err +} + +// RemoveRule is a method on GCEBetaSecurityPolicies. 
+func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { + glog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + glog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "RemoveRule", + Version: meta.Version("beta"), + Service: "SecurityPolicies", + } + glog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Beta.SecurityPolicies.RemoveRule(projectID, key.Name) + call.Context(ctx) + op, err := call.Do() + if err != nil { + glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + return err +} + // SslCertificates is an interface that allows for mocking of SslCertificates. type SslCertificates interface { Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) From 7c17ee25ecf9ae05afe0e201044214198d2626d4 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Fri, 25 May 2018 16:55:08 -0700 Subject: [PATCH 215/307] Remove unused limit writer. Signed-off-by: Lantao Liu --- pkg/kubelet/server/BUILD | 1 - pkg/kubelet/server/server.go | 11 +---------- pkg/kubelet/server/server_test.go | 27 --------------------------- 3 files changed, 1 insertion(+), 38 deletions(-) diff --git a/pkg/kubelet/server/BUILD b/pkg/kubelet/server/BUILD index 0d8138acf2d..c2e338a1ad8 100644 --- a/pkg/kubelet/server/BUILD +++ b/pkg/kubelet/server/BUILD @@ -26,7 +26,6 @@ go_library( "//pkg/kubelet/server/streaming:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/util/configz:go_default_library", - "//pkg/util/limitwriter:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index b472015e148..fdde1fee4e8 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -61,7 +61,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/server/streaming" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/configz" - "k8s.io/kubernetes/pkg/util/limitwriter" ) const ( @@ -532,17 +531,9 @@ func (s *Server) getContainerLogs(request *restful.Request, response *restful.Re return } fw := flushwriter.Wrap(response.ResponseWriter) - // Byte limit logic is already implemented in kuberuntime. However, we still need this for - // old runtime integration. - // TODO(random-liu): Remove this once we switch to CRI integration. 
- if logOptions.LimitBytes != nil { - fw = limitwriter.New(fw, *logOptions.LimitBytes) - } response.Header().Set("Transfer-Encoding", "chunked") if err := s.host.GetKubeletContainerLogs(kubecontainer.GetPodFullName(pod), containerName, logOptions, fw, fw); err != nil { - if err != limitwriter.ErrMaximumWrite { - response.WriteError(http.StatusBadRequest, err) - } + response.WriteError(http.StatusBadRequest, err) return } } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index b5169de1a7c..cdb978078cf 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -943,33 +943,6 @@ func TestContainerLogs(t *testing.T) { } } -func TestContainerLogsWithLimitBytes(t *testing.T) { - fw := newServerTest() - defer fw.testHTTPServer.Close() - output := "foo bar" - podNamespace := "other" - podName := "foo" - expectedPodName := getPodName(podName, podNamespace) - expectedContainerName := "baz" - bytes := int64(3) - setPodByNameFunc(fw, podNamespace, podName, expectedContainerName) - setGetContainerLogsFunc(fw, t, expectedPodName, expectedContainerName, &v1.PodLogOptions{LimitBytes: &bytes}, output) - resp, err := http.Get(fw.testHTTPServer.URL + "/containerLogs/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?limitBytes=3") - if err != nil { - t.Errorf("Got error GETing: %v", err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Errorf("Error reading container logs: %v", err) - } - result := string(body) - if result != output[:bytes] { - t.Errorf("Expected: '%v', got: '%v'", output[:bytes], result) - } -} - func TestContainerLogsWithTail(t *testing.T) { fw := newServerTest() defer fw.testHTTPServer.Close() From 2d28e0d6a322b3c61a82967c67f33b1675d44bf1 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 25 May 2018 22:29:07 -0400 Subject: [PATCH 216/307] bump(github.com/evanphx/json-patch): 94e38aa1586e8a6c8a75770bddf5ff84c48a106b --- Godeps/Godeps.json | 2 +- .../Godeps/Godeps.json | 2 +- .../k8s.io/apimachinery/Godeps/Godeps.json | 2 +- .../src/k8s.io/apiserver/Godeps/Godeps.json | 2 +- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 2 +- .../sample-apiserver/Godeps/Godeps.json | 2 +- .../github.com/evanphx/json-patch/README.md | 289 ++++++++++++++++-- vendor/github.com/evanphx/json-patch/merge.go | 102 ++++++- vendor/github.com/evanphx/json-patch/patch.go | 4 +- 9 files changed, 371 insertions(+), 36 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 6327cde93d6..ca14a53b477 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1240,7 +1240,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "ed7cfbae1fffc071f71e068df27bf4f0521402d8" + "Rev": "94e38aa1586e8a6c8a75770bddf5ff84c48a106b" }, { "ImportPath": "github.com/exponent-io/jsonpath", diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index a90c044928e..fba71ace596 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -372,7 +372,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "ed7cfbae1fffc071f71e068df27bf4f0521402d8" + "Rev": "94e38aa1586e8a6c8a75770bddf5ff84c48a106b" }, { "ImportPath": "github.com/ghodss/yaml", diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index 02e897967ad..2de98ffda14 100644 --- 
a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -24,7 +24,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "ed7cfbae1fffc071f71e068df27bf4f0521402d8" + "Rev": "94e38aa1586e8a6c8a75770bddf5ff84c48a106b" }, { "ImportPath": "github.com/ghodss/yaml", diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 32519a909e2..0bdf0001024 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -372,7 +372,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "ed7cfbae1fffc071f71e068df27bf4f0521402d8" + "Rev": "94e38aa1586e8a6c8a75770bddf5ff84c48a106b" }, { "ImportPath": "github.com/ghodss/yaml", diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index f6f37f73e81..9a0970403aa 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -112,7 +112,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "ed7cfbae1fffc071f71e068df27bf4f0521402d8" + "Rev": "94e38aa1586e8a6c8a75770bddf5ff84c48a106b" }, { "ImportPath": "github.com/ghodss/yaml", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index b3a5c9f4daf..f0635220789 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -104,7 +104,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "ed7cfbae1fffc071f71e068df27bf4f0521402d8" + "Rev": "94e38aa1586e8a6c8a75770bddf5ff84c48a106b" }, { "ImportPath": "github.com/ghodss/yaml", diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index d0d826bacd2..078629004d7 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -1,29 +1,284 @@ -## JSON-Patch - -Provides the ability to modify and test a JSON according to a -[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7396 JSON Merge Patch](https://tools.ietf.org/html/rfc7396). - -*Version*: **1.0** +# JSON-Patch +`jsonpatch` is a library which provides functionallity for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). [![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) - [![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) -### API Usage +# Get It! -* Given a `[]byte`, obtain a Patch object +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch +``` - `obj, err := jsonpatch.DecodePatch(patch)` +**Stable Versions**: +* Version 3: `go get -u gopkg.in/evanphx/json-patch.v3` -* Apply the patch and get a new document back +(previous versions below `v3` are unavailable) - `out, err := obj.Apply(doc)` +# Use It! 
+* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) -* Create a JSON Merge Patch document based on two json documents (a to b): +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - `mergeDoc, err := jsonpatch.CreateMergePatch(a, b)` - -* Bonus API: compare documents for structural equality +It can describe the changes needed to convert from the original to the +modified JSON document. - `jsonpatch.Equal(doca, docb)` +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now lets apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated tina doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. + +The following is an example of creating a patch from two operations, and +applying it against a JSON document. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences, and key-value ordering. 
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "similar"`) + } +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "similar" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both set of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally similar as merging each merge patch to the document +in succession. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... + combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individual against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! + if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same! +combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for comparing JSON documents +You can install the commandline program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). 
+ + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go index b9af252fec3..6806c4c200b 100644 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -1,6 +1,7 @@ package jsonpatch import ( + "bytes" "encoding/json" "fmt" "reflect" @@ -89,6 +90,7 @@ func pruneAryNulls(ary *partialArray) *partialArray { var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") // MergeMergePatches merges two merge patches together, such that // applying this resulting merged merge patch to a document yields the same @@ -160,30 +162,106 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { return json.Marshal(doc) } -// CreateMergePatch creates a merge patch as specified in http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -// -// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. -// The function will return a mergeable json document with differences from a to b. -// -// An error will be returned if any of the two documents are invalid. -func CreateMergePatch(a, b []byte) ([]byte, error) { - aI := map[string]interface{}{} - bI := map[string]interface{}{} - err := json.Unmarshal(a, &aI) +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) if err != nil { return nil, errBadJSONDoc } - err = json.Unmarshal(b, &bI) + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) if err != nil { return nil, errBadJSONDoc } - dest, err := getDiff(aI, bI) + + dest, err := getDiff(originalDoc, modifiedDoc) if err != nil { return nil, err } + return json.Marshal(dest) } +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + // Returns true if the array matches (must be json types). // As is idiomatic for go, an empty array is not the same as a nil array. func matchesArray(a, b []interface{}) bool { diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index 755d8ba3b1b..1a3aa387eef 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -397,7 +397,9 @@ func (d *partialArray) add(key string, val *lazyNode) error { } idx = len(ary) - idx } - + if idx < 0 || idx >= len(ary) || idx > len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } copy(ary[0:idx], cur[0:idx]) ary[idx] = val copy(ary[idx+1:], cur[idx:]) From 350d2c2402c9596d67a262c9c30b5c30c0c84bd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Thu, 24 May 2018 13:24:58 +0200 Subject: [PATCH 217/307] Run cluster-autoscaler+GPU e2e tests for all gpu types --- .../autoscaling/cluster_size_autoscaling.go | 143 +++++++++--------- 1 file changed, 74 insertions(+), 69 deletions(-) diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 51015b95239..c08a9fa4e7b 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -207,101 +207,106 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() { simpleScaleUpTest(0) }) - It("Should scale up GPU pool from 0 [Feature:ClusterSizeAutoscalingGpu]", func() { - framework.SkipUnlessProviderIs("gke") + supportedGpuTypes := []string{"nvidia-tesla-k80", "nvidia-tesla-v100", "nvidia-tesla-p100"} + for _, gpuType := range supportedGpuTypes { - const gpuPoolName = "gpu-pool" - addGpuNodePool(gpuPoolName, "nvidia-tesla-k80", 1, 0) - defer deleteNodePool(gpuPoolName) + 
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { + framework.SkipUnlessProviderIs("gke") - installNvidiaDriversDaemonSet() + const gpuPoolName = "gpu-pool" + addGpuNodePool(gpuPoolName, gpuType, 1, 0) + defer deleteNodePool(gpuPoolName) - By("Enable autoscaler") - framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) - defer disableAutoscaler(gpuPoolName, 0, 1) - Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0)) + installNvidiaDriversDaemonSet() - By("Schedule a pod which requires GPU") - framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc")) + By("Enable autoscaler") + framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) + defer disableAutoscaler(gpuPoolName, 0, 1) + Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0)) - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, - func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout)) - Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1)) - }) + By("Schedule a pod which requires GPU") + framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc")) - It("Should scale up GPU pool from 1 [Feature:ClusterSizeAutoscalingGpu]", func() { - framework.SkipUnlessProviderIs("gke") + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout)) + Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1)) + }) - const gpuPoolName = "gpu-pool" - addGpuNodePool(gpuPoolName, "nvidia-tesla-k80", 1, 1) - defer deleteNodePool(gpuPoolName) + It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { + framework.SkipUnlessProviderIs("gke") - installNvidiaDriversDaemonSet() + const gpuPoolName = "gpu-pool" + addGpuNodePool(gpuPoolName, gpuType, 1, 1) + defer deleteNodePool(gpuPoolName) - By("Schedule a single pod which requires GPU") - framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc")) + installNvidiaDriversDaemonSet() - By("Enable autoscaler") - framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2)) - defer disableAutoscaler(gpuPoolName, 0, 2) - Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1)) + By("Schedule a single pod which requires GPU") + framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc")) - framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, false) + By("Enable autoscaler") + framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2)) + defer disableAutoscaler(gpuPoolName, 0, 2) + Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1)) - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, - func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout)) - Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2)) - }) + By("Scale GPU deployment") + framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, false) - It("Should not scale GPU pool up if pod does not require GPUs [Feature:ClusterSizeAutoscalingGpu]", func() { - framework.SkipUnlessProviderIs("gke") + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout)) + Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2)) + }) - const gpuPoolName = "gpu-pool" - addGpuNodePool(gpuPoolName, "nvidia-tesla-k80", 1, 0) - defer deleteNodePool(gpuPoolName) + It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), 
func() { + framework.SkipUnlessProviderIs("gke") - installNvidiaDriversDaemonSet() + const gpuPoolName = "gpu-pool" + addGpuNodePool(gpuPoolName, gpuType, 1, 0) + defer deleteNodePool(gpuPoolName) - By("Enable autoscaler") - framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) - defer disableAutoscaler(gpuPoolName, 0, 1) - Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0)) + installNvidiaDriversDaemonSet() - By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs") - ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) - defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") + By("Enable autoscaler") + framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) + defer disableAutoscaler(gpuPoolName, 0, 1) + Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0)) - // Verify that cluster size is increased - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, - func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) + By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs") + ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second) + defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation") + // Verify that cluster size is increased + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout)) - // Expect gpu pool to stay intact - Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0)) - }) + // Expect gpu pool to stay intact + Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0)) + }) - It("Should scale down GPU pool from 1 [Feature:ClusterSizeAutoscalingGpu]", func() { - framework.SkipUnlessProviderIs("gke") + It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { + framework.SkipUnlessProviderIs("gke") - const gpuPoolName = "gpu-pool" - addGpuNodePool(gpuPoolName, "nvidia-tesla-k80", 1, 1) - defer deleteNodePool(gpuPoolName) + const gpuPoolName = "gpu-pool" + addGpuNodePool(gpuPoolName, gpuType, 1, 1) + defer deleteNodePool(gpuPoolName) - installNvidiaDriversDaemonSet() + installNvidiaDriversDaemonSet() - By("Schedule a single pod which requires GPU") - framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc")) + By("Schedule a single pod which requires GPU") + framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc")) - By("Enable autoscaler") - framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) - defer disableAutoscaler(gpuPoolName, 0, 1) - Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1)) + By("Enable autoscaler") + framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1)) + defer disableAutoscaler(gpuPoolName, 0, 1) + Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1)) - framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc") + By("Remove the only POD requiring GPU") + framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc") - framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, - func(size int) bool { return size == nodeCount }, scaleDownTimeout)) - Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0)) - }) + framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, + func(size int) bool { return size == nodeCount }, scaleDownTimeout)) + Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0)) + }) + } 
It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]", func() { From 3c8bd9ae24296b0cf9625416330bed47fd8ae027 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Fri, 25 May 2018 17:05:46 +0200 Subject: [PATCH 218/307] Wait for PODs ready after scale up --- test/e2e/autoscaling/cluster_size_autoscaling.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index c08a9fa4e7b..5a35f0ee441 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -250,7 +250,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1)) By("Scale GPU deployment") - framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, false) + framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true) framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout)) From 6d95bb3fa00b91962d1d21c756f5cd5490df94ca Mon Sep 17 00:00:00 2001 From: Xianglin Gao Date: Sun, 27 May 2018 00:25:10 +0800 Subject: [PATCH 219/307] Improve the help of kubeadm completion --- cmd/kubeadm/app/cmd/completion.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/kubeadm/app/cmd/completion.go b/cmd/kubeadm/app/cmd/completion.go index 33d5526f9a8..9e7722b7fbb 100644 --- a/cmd/kubeadm/app/cmd/completion.go +++ b/cmd/kubeadm/app/cmd/completion.go @@ -51,17 +51,18 @@ var ( The shell code must be evaluated to provide interactive completion of kubeadm commands. This can be done by sourcing it from the .bash_profile. + + Note: this requires the bash-completion framework. - Note: this requires the bash-completion framework, which is not installed - by default on Mac. This can be installed by using homebrew: - + To install it on Mac use homebrew: $ brew install bash-completion - Once installed, bash_completion must be evaluated. This can be done by adding the following line to the .bash_profile - $ source $(brew --prefix)/etc/bash_completion + If bash-completion is not installed on Linux, please install the 'bash-completion' package + via your distribution's package manager. + Note for zsh users: [1] zsh completions are only supported in versions of zsh >= 5.2`) completionExample = dedent.Dedent(` From 8d84ef63adb3d5f7eca0414a4c074be6992c953c Mon Sep 17 00:00:00 2001 From: "Lubomir I. Ivanov" Date: Tue, 22 May 2018 22:41:28 +0300 Subject: [PATCH 220/307] kubeadm: do not use --admission-control for the API server The API server argument --admission-control is deprecated. Use the following arguments instead: --enable-admission-plugins=NodeRestriction --disable-admission-plugins=PersistentVolumeLabel Add comment that PersistentVolumeLabel should be removed at some point in 1.11. 
--- .../app/phases/controlplane/manifests.go | 13 +++++--- .../app/phases/controlplane/manifests_test.go | 33 ++++++++++++------- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go index 17ff9de4390..0a604f5a330 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -39,8 +39,6 @@ import ( "k8s.io/kubernetes/pkg/util/version" ) -const defaultAdmissionControl = "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" - // CreateInitStaticPodManifestFiles will write all static pod manifest files needed to bring up the control plane. func CreateInitStaticPodManifestFiles(manifestDir string, cfg *kubeadmapi.MasterConfiguration) error { glog.V(1).Infoln("[controlplane] creating static pod files") @@ -141,9 +139,14 @@ func createStaticPodFiles(manifestDir string, cfg *kubeadmapi.MasterConfiguratio // getAPIServerCommand builds the right API server command from the given config object and version func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string { defaultArguments := map[string]string{ - "advertise-address": cfg.API.AdvertiseAddress, - "insecure-port": "0", - "admission-control": defaultAdmissionControl, + "advertise-address": cfg.API.AdvertiseAddress, + "insecure-port": "0", + "enable-admission-plugins": "NodeRestriction", + // TODO: remove `PersistentVolumeLabel` in kubeadm v1.11, as it's automatically disabled in v1.11. + // ref: https://github.com/kubernetes/kubernetes/pull/64326 + // we can't skip it now as we support v1.10 clusters still. + // remove it from the unit tests too. 
+ "disable-admission-plugins": "PersistentVolumeLabel", "service-cluster-ip-range": cfg.Networking.ServiceSubnet, "service-account-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.ServiceAccountPublicKeyName), "client-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName), diff --git a/cmd/kubeadm/app/phases/controlplane/manifests_test.go b/cmd/kubeadm/app/phases/controlplane/manifests_test.go index 3e1a7325d33..6da80ae160d 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests_test.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests_test.go @@ -154,7 +154,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -196,7 +197,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -233,7 +235,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -279,7 +282,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -322,7 +326,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -360,7 +365,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - 
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -402,7 +408,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -447,7 +454,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=baz", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -491,7 +499,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -531,7 +540,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=1234", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", @@ -571,7 +581,8 @@ func TestGetAPIServerCommand(t *testing.T) { expected: []string{ "kube-apiserver", "--insecure-port=0", - "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota", + "--enable-admission-plugins=NodeRestriction", + "--disable-admission-plugins=PersistentVolumeLabel", "--service-cluster-ip-range=bar", "--service-account-key-file=" + testCertsDir + "/sa.pub", "--client-ca-file=" + testCertsDir + "/ca.crt", From 4815ec409f8ad06a7a2e422be1c2efbeba43097a Mon Sep 17 00:00:00 2001 From: David Eads Date: Fri, 25 May 2018 13:20:30 -0400 Subject: [PATCH 221/307] collapse into one factory --- pkg/kubectl/cmd/util/BUILD | 2 - pkg/kubectl/cmd/util/factory.go | 37 ----- 
pkg/kubectl/cmd/util/factory_builder.go | 56 -------- pkg/kubectl/cmd/util/factory_client_access.go | 131 ++++++++++++++++-- .../cmd/util/factory_object_mapping.go | 121 ---------------- 5 files changed, 117 insertions(+), 230 deletions(-) delete mode 100644 pkg/kubectl/cmd/util/factory_builder.go delete mode 100644 pkg/kubectl/cmd/util/factory_object_mapping.go diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 0fc82b7eaa2..fa44ccc6917 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -5,9 +5,7 @@ go_library( srcs = [ "conversion.go", "factory.go", - "factory_builder.go", "factory_client_access.go", - "factory_object_mapping.go", "helpers.go", "kubectl_match_version.go", "printing.go", diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 5720af7451b..0e7f0724797 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -46,15 +46,6 @@ import ( // TODO: pass the various interfaces on the factory directly into the command constructors (so the // commands are decoupled from the factory). type Factory interface { - ClientAccessFactory - ObjectMappingFactory - BuilderFactory -} - -// ClientAccessFactory holds the first level of factory methods. -// Generally provides discovery, negotiation, and no-dep calls. -// TODO The polymorphic calls probably deserve their own interface. -type ClientAccessFactory interface { genericclioptions.RESTClientGetter // ClientSet gives you back an internal, generated clientset @@ -79,11 +70,7 @@ type ClientAccessFactory interface { DefaultNamespace() (string, bool, error) // Generators returns the generators for the provided command Generators(cmdName string) map[string]kubectl.Generator -} -// ObjectMappingFactory holds the second level of factory methods. These functions depend upon ClientAccessFactory methods. -// Generally they provide object typing and functions that build requests based on the negotiated clients. -type ObjectMappingFactory interface { // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) @@ -94,35 +81,11 @@ type ObjectMappingFactory interface { Validator(validate bool) (validation.Schema, error) // OpenAPISchema returns the schema openapi schema definition OpenAPISchema() (openapi.Resources, error) -} -// BuilderFactory holds the third level of factory methods. These functions depend upon ObjectMappingFactory and ClientAccessFactory methods. -// Generally they depend upon client mapper functions -type BuilderFactory interface { // ScaleClient gives you back scale getter ScaleClient() (scaleclient.ScalesGetter, error) } -type factory struct { - ClientAccessFactory - ObjectMappingFactory - BuilderFactory -} - -// NewFactory creates a factory with the default Kubernetes resources defined -// Receives a clientGetter capable of providing a discovery client and a REST client configuration. 
-func NewFactory(clientGetter genericclioptions.RESTClientGetter) Factory { - clientAccessFactory := NewClientAccessFactory(clientGetter) - objectMappingFactory := NewObjectMappingFactory(clientAccessFactory) - builderFactory := NewBuilderFactory(clientAccessFactory, objectMappingFactory) - - return &factory{ - ClientAccessFactory: clientAccessFactory, - ObjectMappingFactory: objectMappingFactory, - BuilderFactory: builderFactory, - } -} - func makePortsString(ports []api.ServicePort, useNodePort bool) string { pieces := make([]string, len(ports)) for ix := range ports { diff --git a/pkg/kubectl/cmd/util/factory_builder.go b/pkg/kubectl/cmd/util/factory_builder.go deleted file mode 100644 index 392e1d25932..00000000000 --- a/pkg/kubectl/cmd/util/factory_builder.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// this file contains factories with no other dependencies - -package util - -import ( - "k8s.io/client-go/dynamic" - scaleclient "k8s.io/client-go/scale" -) - -type ring2Factory struct { - clientAccessFactory ClientAccessFactory - objectMappingFactory ObjectMappingFactory -} - -func NewBuilderFactory(clientAccessFactory ClientAccessFactory, objectMappingFactory ObjectMappingFactory) BuilderFactory { - f := &ring2Factory{ - clientAccessFactory: clientAccessFactory, - objectMappingFactory: objectMappingFactory, - } - - return f -} - -func (f *ring2Factory) ScaleClient() (scaleclient.ScalesGetter, error) { - discoClient, err := f.clientAccessFactory.ToDiscoveryClient() - if err != nil { - return nil, err - } - restClient, err := f.clientAccessFactory.RESTClient() - if err != nil { - return nil, err - } - resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient) - mapper, err := f.clientAccessFactory.ToRESTMapper() - if err != nil { - return nil, err - } - - return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil -} diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index 320aec8ac35..c9cbda6bdde 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -21,6 +21,7 @@ package util import ( "fmt" "io" + "sync" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" @@ -38,47 +39,60 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" + "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" + openapivalidation "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" + "k8s.io/kubernetes/pkg/kubectl/validation" ) -type ring0Factory struct { +type factoryImpl 
struct { clientGetter genericclioptions.RESTClientGetter + + // openAPIGetter loads and caches openapi specs + openAPIGetter openAPIGetter } -func NewClientAccessFactory(clientGetter genericclioptions.RESTClientGetter) ClientAccessFactory { +type openAPIGetter struct { + once sync.Once + getter openapi.Getter +} + +func NewFactory(clientGetter genericclioptions.RESTClientGetter) Factory { if clientGetter == nil { panic("attempt to instantiate client_access_factory with nil clientGetter") } - f := &ring0Factory{ + f := &factoryImpl{ clientGetter: clientGetter, } return f } -func (f *ring0Factory) ToRESTConfig() (*restclient.Config, error) { +func (f *factoryImpl) ToRESTConfig() (*restclient.Config, error) { return f.clientGetter.ToRESTConfig() } -func (f *ring0Factory) ToRESTMapper() (meta.RESTMapper, error) { +func (f *factoryImpl) ToRESTMapper() (meta.RESTMapper, error) { return f.clientGetter.ToRESTMapper() } -func (f *ring0Factory) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { +func (f *factoryImpl) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { return f.clientGetter.ToDiscoveryClient() } -func (f *ring0Factory) ToRawKubeConfigLoader() clientcmd.ClientConfig { +func (f *factoryImpl) ToRawKubeConfigLoader() clientcmd.ClientConfig { return f.clientGetter.ToRawKubeConfigLoader() } -func (f *ring0Factory) KubernetesClientSet() (*kubernetes.Clientset, error) { +func (f *factoryImpl) KubernetesClientSet() (*kubernetes.Clientset, error) { clientConfig, err := f.ToRESTConfig() if err != nil { return nil, err @@ -86,7 +100,7 @@ func (f *ring0Factory) KubernetesClientSet() (*kubernetes.Clientset, error) { return kubernetes.NewForConfig(clientConfig) } -func (f *ring0Factory) ClientSet() (internalclientset.Interface, error) { +func (f *factoryImpl) ClientSet() (internalclientset.Interface, error) { clientConfig, err := f.ToRESTConfig() if err != nil { return nil, err @@ -94,7 +108,7 @@ func (f *ring0Factory) ClientSet() (internalclientset.Interface, error) { return internalclientset.NewForConfig(clientConfig) } -func (f *ring0Factory) DynamicClient() (dynamic.Interface, error) { +func (f *factoryImpl) DynamicClient() (dynamic.Interface, error) { clientConfig, err := f.ToRESTConfig() if err != nil { return nil, err @@ -103,11 +117,11 @@ func (f *ring0Factory) DynamicClient() (dynamic.Interface, error) { } // NewBuilder returns a new resource builder for structured api objects. 
-func (f *ring0Factory) NewBuilder() *resource.Builder { +func (f *factoryImpl) NewBuilder() *resource.Builder { return resource.NewBuilder(f.clientGetter) } -func (f *ring0Factory) RESTClient() (*restclient.RESTClient, error) { +func (f *factoryImpl) RESTClient() (*restclient.RESTClient, error) { clientConfig, err := f.ToRESTConfig() if err != nil { return nil, err @@ -116,10 +130,99 @@ func (f *ring0Factory) RESTClient() (*restclient.RESTClient, error) { return restclient.RESTClientFor(clientConfig) } -func (f *ring0Factory) DefaultNamespace() (string, bool, error) { +func (f *factoryImpl) DefaultNamespace() (string, bool, error) { return f.clientGetter.ToRawKubeConfigLoader().Namespace() } +func (f *factoryImpl) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { + cfg, err := f.clientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + if err := setKubernetesDefaults(cfg); err != nil { + return nil, err + } + gvk := mapping.GroupVersionKind + switch gvk.Group { + case api.GroupName: + cfg.APIPath = "/api" + default: + cfg.APIPath = "/apis" + } + gv := gvk.GroupVersion() + cfg.GroupVersion = &gv + return restclient.RESTClientFor(cfg) +} + +func (f *factoryImpl) UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { + cfg, err := f.clientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + if err := restclient.SetKubernetesDefaults(cfg); err != nil { + return nil, err + } + cfg.APIPath = "/apis" + if mapping.GroupVersionKind.Group == api.GroupName { + cfg.APIPath = "/api" + } + gv := mapping.GroupVersionKind.GroupVersion() + cfg.ContentConfig = resource.UnstructuredPlusDefaultContentConfig() + cfg.GroupVersion = &gv + return restclient.RESTClientFor(cfg) +} + +func (f *factoryImpl) Validator(validate bool) (validation.Schema, error) { + if !validate { + return validation.NullSchema{}, nil + } + + resources, err := f.OpenAPISchema() + if err != nil { + return nil, err + } + + return validation.ConjunctiveSchema{ + openapivalidation.NewSchemaValidation(resources), + validation.NoDoubleKeySchema{}, + }, nil +} + +// OpenAPISchema returns metadata and structural information about Kubernetes object definitions. +func (f *factoryImpl) OpenAPISchema() (openapi.Resources, error) { + discovery, err := f.clientGetter.ToDiscoveryClient() + if err != nil { + return nil, err + } + + // Lazily initialize the OpenAPIGetter once + f.openAPIGetter.once.Do(func() { + // Create the caching OpenAPIGetter + f.openAPIGetter.getter = openapi.NewOpenAPIGetter(discovery) + }) + + // Delegate to the OpenAPIGetter + return f.openAPIGetter.getter.Get() +} + +func (f *factoryImpl) ScaleClient() (scaleclient.ScalesGetter, error) { + discoClient, err := f.clientGetter.ToDiscoveryClient() + if err != nil { + return nil, err + } + restClient, err := f.RESTClient() + if err != nil { + return nil, err + } + resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient) + mapper, err := f.clientGetter.ToRESTMapper() + if err != nil { + return nil, err + } + + return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil +} + const ( // TODO(sig-cli): Enforce consistent naming for generators here. 
// See discussion in https://github.com/kubernetes/kubernetes/issues/46237 @@ -326,7 +429,7 @@ func Contains(resourcesList []*metav1.APIResourceList, resource schema.GroupVers return len(resources) != 0 } -func (f *ring0Factory) Generators(cmdName string) map[string]kubectl.Generator { +func (f *factoryImpl) Generators(cmdName string) map[string]kubectl.Generator { return DefaultGenerators(cmdName) } diff --git a/pkg/kubectl/cmd/util/factory_object_mapping.go b/pkg/kubectl/cmd/util/factory_object_mapping.go deleted file mode 100644 index a4769978215..00000000000 --- a/pkg/kubectl/cmd/util/factory_object_mapping.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// this file contains factories with no other dependencies - -package util - -import ( - "sync" - - "k8s.io/apimachinery/pkg/api/meta" - restclient "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" - openapivalidation "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation" - "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" - "k8s.io/kubernetes/pkg/kubectl/validation" -) - -type ring1Factory struct { - clientAccessFactory ClientAccessFactory - - // openAPIGetter loads and caches openapi specs - openAPIGetter openAPIGetter -} - -type openAPIGetter struct { - once sync.Once - getter openapi.Getter -} - -func NewObjectMappingFactory(clientAccessFactory ClientAccessFactory) ObjectMappingFactory { - f := &ring1Factory{ - clientAccessFactory: clientAccessFactory, - } - return f -} - -func (f *ring1Factory) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { - cfg, err := f.clientAccessFactory.ToRESTConfig() - if err != nil { - return nil, err - } - if err := setKubernetesDefaults(cfg); err != nil { - return nil, err - } - gvk := mapping.GroupVersionKind - switch gvk.Group { - case api.GroupName: - cfg.APIPath = "/api" - default: - cfg.APIPath = "/apis" - } - gv := gvk.GroupVersion() - cfg.GroupVersion = &gv - return restclient.RESTClientFor(cfg) -} - -func (f *ring1Factory) UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { - cfg, err := f.clientAccessFactory.ToRESTConfig() - if err != nil { - return nil, err - } - if err := restclient.SetKubernetesDefaults(cfg); err != nil { - return nil, err - } - cfg.APIPath = "/apis" - if mapping.GroupVersionKind.Group == api.GroupName { - cfg.APIPath = "/api" - } - gv := mapping.GroupVersionKind.GroupVersion() - cfg.ContentConfig = resource.UnstructuredPlusDefaultContentConfig() - cfg.GroupVersion = &gv - return restclient.RESTClientFor(cfg) -} - -func (f *ring1Factory) Validator(validate bool) (validation.Schema, error) { - if !validate { - return validation.NullSchema{}, nil - } - - resources, err := f.OpenAPISchema() - if err != nil { - return nil, err - } - - return validation.ConjunctiveSchema{ - openapivalidation.NewSchemaValidation(resources), - validation.NoDoubleKeySchema{}, - }, nil -} - -// 
OpenAPISchema returns metadata and structural information about Kubernetes object definitions. -func (f *ring1Factory) OpenAPISchema() (openapi.Resources, error) { - discovery, err := f.clientAccessFactory.ToDiscoveryClient() - if err != nil { - return nil, err - } - - // Lazily initialize the OpenAPIGetter once - f.openAPIGetter.once.Do(func() { - // Create the caching OpenAPIGetter - f.openAPIGetter.getter = openapi.NewOpenAPIGetter(discovery) - }) - - // Delegate to the OpenAPIGetter - return f.openAPIGetter.getter.Get() -} From 3988331c6ccdcdd0ab385c41906f4871a23fab49 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Sat, 26 May 2018 21:54:15 -0700 Subject: [PATCH 222/307] Restore InstanceNotFound comment & logic Otherwise node registration is broken on AWS. --- pkg/cloudprovider/cloud.go | 1 + pkg/cloudprovider/providers/aws/aws.go | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/pkg/cloudprovider/cloud.go b/pkg/cloudprovider/cloud.go index 530475799df..0358890bcd8 100644 --- a/pkg/cloudprovider/cloud.go +++ b/pkg/cloudprovider/cloud.go @@ -131,6 +131,7 @@ type Instances interface { // services cannot be used in this method to obtain nodeaddresses NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) // InstanceID returns the cloud provider ID of the node with the specified NodeName. + // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) // InstanceType returns the type of the specified instance. InstanceType(ctx context.Context, name types.NodeName) (string, error) diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index 6ea14ef58ef..e95ad50d892 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -1363,6 +1363,10 @@ func (c *Cloud) InstanceID(ctx context.Context, nodeName types.NodeName) (string } inst, err := c.getInstanceByNodeName(nodeName) if err != nil { + if err == cloudprovider.InstanceNotFound { + // The Instances interface requires that we return InstanceNotFound (without wrapping) + return "", err + } return "", fmt.Errorf("getInstanceByNodeName failed for %q with %q", nodeName, err) } return "/" + aws.StringValue(inst.Placement.AvailabilityZone) + "/" + aws.StringValue(inst.InstanceId), nil From 3895887f5e92bb83f8be33e785a53dd2840f554e Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Thu, 24 May 2018 15:57:19 -0400 Subject: [PATCH 223/307] move scaleClient from factory --- pkg/kubectl/cmd/apply.go | 2 +- pkg/kubectl/cmd/rollingupdate.go | 3 +-- pkg/kubectl/cmd/scale.go | 2 +- pkg/kubectl/cmd/util/factory.go | 4 ---- pkg/kubectl/cmd/util/helpers.go | 34 ++++++++++++++++++++++++++++++++ 5 files changed, 37 insertions(+), 8 deletions(-) diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index fc8f67626dc..18819353753 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -220,7 +220,7 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } - o.Scaler, err = f.ScaleClient() + o.Scaler, err = cmdutil.ScaleClientFn(f) if err != nil { return err } diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index 24cde12b467..a82ef567ef1 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -214,7 +214,7 @@ func (o *RollingUpdateOptions) Complete(f cmdutil.Factory, cmd 
*cobra.Command, a return err } - o.ScaleClient, err = f.ScaleClient() + o.ScaleClient, err = cmdutil.ScaleClientFn(f) if err != nil { return err } @@ -242,7 +242,6 @@ func (o *RollingUpdateOptions) Validate(cmd *cobra.Command, args []string) error } func (o *RollingUpdateOptions) Run() error { - filename := "" if len(o.FilenameOptions.Filenames) > 0 { filename = o.FilenameOptions.Filenames[0] diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index 6b4ffb1b5ff..d873a1ae2a7 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -289,7 +289,7 @@ func ScaleJob(info *resource.Info, jobsClient batchclient.JobsGetter, count uint } func scaler(f cmdutil.Factory) (kubectl.Scaler, error) { - scalesGetter, err := f.ScaleClient() + scalesGetter, err := cmdutil.ScaleClientFn(f) if err != nil { return nil, err } diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 0e7f0724797..2718111ae5c 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -25,7 +25,6 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - scaleclient "k8s.io/client-go/scale" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" @@ -81,9 +80,6 @@ type Factory interface { Validator(validate bool) (validation.Schema, error) // OpenAPISchema returns the schema openapi schema definition OpenAPISchema() (openapi.Resources, error) - - // ScaleClient gives you back scale getter - ScaleClient() (scaleclient.ScalesGetter, error) } func makePortsString(ports []api.ServicePort, useNodePort bool) string { diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index b4640f67b25..e942b682b14 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -38,6 +38,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl" @@ -673,3 +675,35 @@ func genericDescriber(restClientGetter genericclioptions.RESTClientGetter, mappi eventsClient := clientSet.Core() return printersinternal.GenericDescriberFor(mapping, dynamicClient, eventsClient), nil } + +// ScaleClientFunc provides a ScalesGetter +type ScaleClientFunc func(genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) + +// ScaleClientFn gives a way to easily override the function for unit testing if needed. 
+var ScaleClientFn ScaleClientFunc = scaleClient + +// scaleClient gives you back scale getter +func scaleClient(restClientGetter genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) { + discoveryClient, err := restClientGetter.ToDiscoveryClient() + if err != nil { + return nil, err + } + + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + + setKubernetesDefaults(clientConfig) + restClient, err := rest.RESTClientFor(clientConfig) + if err != nil { + return nil, err + } + resolver := scale.NewDiscoveryScaleKindResolver(discoveryClient) + mapper, err := restClientGetter.ToRESTMapper() + if err != nil { + return nil, err + } + + return scale.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil +} From 7495ab52293423dcc6f4d83a86c21b4e440dce8e Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 25 May 2018 17:33:22 +0200 Subject: [PATCH 224/307] Remove Generators from Factory --- pkg/kubectl/cmd/expose.go | 2 +- pkg/kubectl/cmd/run.go | 4 +- pkg/kubectl/cmd/util/BUILD | 1 + pkg/kubectl/cmd/util/factory.go | 3 - pkg/kubectl/cmd/util/factory_client_access.go | 222 ---------------- pkg/kubectl/cmd/util/generator.go | 238 ++++++++++++++++++ 6 files changed, 242 insertions(+), 228 deletions(-) create mode 100644 pkg/kubectl/cmd/util/generator.go diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index d36d47d736d..944af7f770e 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -188,7 +188,7 @@ func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) e return err } - o.Generators = f.Generators + o.Generators = cmdutil.GeneratorFn o.Builder = f.NewBuilder() o.CanBeExposed = polymorphichelpers.CanBeExposedFn o.ClientForMapping = f.ClientForMapping diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index de450080765..a239b34e5a7 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -321,7 +321,7 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e } } - generators := f.Generators("run") + generators := cmdutil.GeneratorFn("run") generator, found := generators[generatorName] if !found { return cmdutil.UsageErrorf(cmd, "generator %q not found", generatorName) @@ -573,7 +573,7 @@ func verifyImagePullPolicy(cmd *cobra.Command) error { } func (o *RunOptions) generateService(f cmdutil.Factory, cmd *cobra.Command, serviceGenerator string, paramsIn map[string]interface{}, namespace string) (*RunObject, error) { - generators := f.Generators("expose") + generators := cmdutil.GeneratorFn("expose") generator, found := generators[serviceGenerator] if !found { return nil, fmt.Errorf("missing service generator: %s", serviceGenerator) diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index fa44ccc6917..026f03db50c 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -6,6 +6,7 @@ go_library( "conversion.go", "factory.go", "factory_client_access.go", + "generator.go", "helpers.go", "kubectl_match_version.go", "printing.go", diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 0e7f0724797..c3303c6727c 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -28,7 +28,6 @@ import ( scaleclient "k8s.io/client-go/scale" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" 
"k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" @@ -68,8 +67,6 @@ type Factory interface { // other namespace is specified and whether the namespace was // overridden. DefaultNamespace() (string, bool, error) - // Generators returns the generators for the provided command - Generators(cmdName string) map[string]kubectl.Generator // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index c9cbda6bdde..8b806097680 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -19,22 +19,11 @@ limitations under the License. package util import ( - "fmt" - "io" "sync" - appsv1 "k8s.io/api/apps/v1" - appsv1beta1 "k8s.io/api/apps/v1beta1" - batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - batchv2alpha1 "k8s.io/api/batch/v2alpha1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" @@ -44,7 +33,6 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" openapivalidation "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" @@ -223,216 +211,6 @@ func (f *factoryImpl) ScaleClient() (scaleclient.ScalesGetter, error) { return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil } -const ( - // TODO(sig-cli): Enforce consistent naming for generators here. - // See discussion in https://github.com/kubernetes/kubernetes/issues/46237 - // before you add any more. 
- RunV1GeneratorName = "run/v1" - RunPodV1GeneratorName = "run-pod/v1" - ServiceV1GeneratorName = "service/v1" - ServiceV2GeneratorName = "service/v2" - ServiceNodePortGeneratorV1Name = "service-nodeport/v1" - ServiceClusterIPGeneratorV1Name = "service-clusterip/v1" - ServiceLoadBalancerGeneratorV1Name = "service-loadbalancer/v1" - ServiceExternalNameGeneratorV1Name = "service-externalname/v1" - ServiceAccountV1GeneratorName = "serviceaccount/v1" - HorizontalPodAutoscalerV1GeneratorName = "horizontalpodautoscaler/v1" - DeploymentV1Beta1GeneratorName = "deployment/v1beta1" - DeploymentAppsV1Beta1GeneratorName = "deployment/apps.v1beta1" - DeploymentBasicV1Beta1GeneratorName = "deployment-basic/v1beta1" - DeploymentBasicAppsV1Beta1GeneratorName = "deployment-basic/apps.v1beta1" - DeploymentBasicAppsV1GeneratorName = "deployment-basic/apps.v1" - JobV1GeneratorName = "job/v1" - CronJobV2Alpha1GeneratorName = "cronjob/v2alpha1" - CronJobV1Beta1GeneratorName = "cronjob/v1beta1" - NamespaceV1GeneratorName = "namespace/v1" - ResourceQuotaV1GeneratorName = "resourcequotas/v1" - SecretV1GeneratorName = "secret/v1" - SecretForDockerRegistryV1GeneratorName = "secret-for-docker-registry/v1" - SecretForTLSV1GeneratorName = "secret-for-tls/v1" - ConfigMapV1GeneratorName = "configmap/v1" - ClusterRoleBindingV1GeneratorName = "clusterrolebinding.rbac.authorization.k8s.io/v1alpha1" - RoleBindingV1GeneratorName = "rolebinding.rbac.authorization.k8s.io/v1alpha1" - ClusterV1Beta1GeneratorName = "cluster/v1beta1" - PodDisruptionBudgetV1GeneratorName = "poddisruptionbudget/v1beta1" - PodDisruptionBudgetV2GeneratorName = "poddisruptionbudget/v1beta1/v2" - PriorityClassV1Alpha1GeneratorName = "priorityclass/v1alpha1" -) - -// DefaultGenerators returns the set of default generators for use in Factory instances -func DefaultGenerators(cmdName string) map[string]kubectl.Generator { - var generator map[string]kubectl.Generator - switch cmdName { - case "expose": - generator = map[string]kubectl.Generator{ - ServiceV1GeneratorName: kubectl.ServiceGeneratorV1{}, - ServiceV2GeneratorName: kubectl.ServiceGeneratorV2{}, - } - case "service-clusterip": - generator = map[string]kubectl.Generator{ - ServiceClusterIPGeneratorV1Name: kubectl.ServiceClusterIPGeneratorV1{}, - } - case "service-nodeport": - generator = map[string]kubectl.Generator{ - ServiceNodePortGeneratorV1Name: kubectl.ServiceNodePortGeneratorV1{}, - } - case "service-loadbalancer": - generator = map[string]kubectl.Generator{ - ServiceLoadBalancerGeneratorV1Name: kubectl.ServiceLoadBalancerGeneratorV1{}, - } - case "deployment": - // Create Deployment has only StructuredGenerators and no - // param-based Generators. 
- // The StructuredGenerators are as follows (as of 2018-03-16): - // DeploymentBasicV1Beta1GeneratorName -> kubectl.DeploymentBasicGeneratorV1 - // DeploymentBasicAppsV1Beta1GeneratorName -> kubectl.DeploymentBasicAppsGeneratorV1Beta1 - // DeploymentBasicAppsV1GeneratorName -> kubectl.DeploymentBasicAppsGeneratorV1 - generator = map[string]kubectl.Generator{} - case "run": - generator = map[string]kubectl.Generator{ - RunV1GeneratorName: kubectl.BasicReplicationController{}, - RunPodV1GeneratorName: kubectl.BasicPod{}, - DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{}, - DeploymentAppsV1Beta1GeneratorName: kubectl.DeploymentAppsV1Beta1{}, - JobV1GeneratorName: kubectl.JobV1{}, - CronJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{}, - CronJobV1Beta1GeneratorName: kubectl.CronJobV1Beta1{}, - } - case "namespace": - generator = map[string]kubectl.Generator{ - NamespaceV1GeneratorName: kubectl.NamespaceGeneratorV1{}, - } - case "quota": - generator = map[string]kubectl.Generator{ - ResourceQuotaV1GeneratorName: kubectl.ResourceQuotaGeneratorV1{}, - } - case "secret": - generator = map[string]kubectl.Generator{ - SecretV1GeneratorName: kubectl.SecretGeneratorV1{}, - } - case "secret-for-docker-registry": - generator = map[string]kubectl.Generator{ - SecretForDockerRegistryV1GeneratorName: kubectl.SecretForDockerRegistryGeneratorV1{}, - } - case "secret-for-tls": - generator = map[string]kubectl.Generator{ - SecretForTLSV1GeneratorName: kubectl.SecretForTLSGeneratorV1{}, - } - } - - return generator -} - -// fallbackGeneratorNameIfNecessary returns the name of the old generator -// if server does not support new generator. Otherwise, the -// generator string is returned unchanged. -// -// If the generator name is changed, print a warning message to let the user -// know. 
-func FallbackGeneratorNameIfNecessary( - generatorName string, - discoveryClient discovery.DiscoveryInterface, - cmdErr io.Writer, -) (string, error) { - switch generatorName { - case DeploymentAppsV1Beta1GeneratorName: - hasResource, err := HasResource(discoveryClient, appsv1beta1.SchemeGroupVersion.WithResource("deployments")) - if err != nil { - return "", err - } - if !hasResource { - return FallbackGeneratorNameIfNecessary(DeploymentV1Beta1GeneratorName, discoveryClient, cmdErr) - } - case DeploymentV1Beta1GeneratorName: - hasResource, err := HasResource(discoveryClient, extensionsv1beta1.SchemeGroupVersion.WithResource("deployments")) - if err != nil { - return "", err - } - if !hasResource { - return RunV1GeneratorName, nil - } - case DeploymentBasicAppsV1GeneratorName: - hasResource, err := HasResource(discoveryClient, appsv1.SchemeGroupVersion.WithResource("deployments")) - if err != nil { - return "", err - } - if !hasResource { - return FallbackGeneratorNameIfNecessary(DeploymentBasicAppsV1Beta1GeneratorName, discoveryClient, cmdErr) - } - case DeploymentBasicAppsV1Beta1GeneratorName: - hasResource, err := HasResource(discoveryClient, appsv1beta1.SchemeGroupVersion.WithResource("deployments")) - if err != nil { - return "", err - } - if !hasResource { - return DeploymentBasicV1Beta1GeneratorName, nil - } - case JobV1GeneratorName: - hasResource, err := HasResource(discoveryClient, batchv1.SchemeGroupVersion.WithResource("jobs")) - if err != nil { - return "", err - } - if !hasResource { - return RunPodV1GeneratorName, nil - } - case CronJobV1Beta1GeneratorName: - hasResource, err := HasResource(discoveryClient, batchv1beta1.SchemeGroupVersion.WithResource("cronjobs")) - if err != nil { - return "", err - } - if !hasResource { - return FallbackGeneratorNameIfNecessary(CronJobV2Alpha1GeneratorName, discoveryClient, cmdErr) - } - case CronJobV2Alpha1GeneratorName: - hasResource, err := HasResource(discoveryClient, batchv2alpha1.SchemeGroupVersion.WithResource("cronjobs")) - if err != nil { - return "", err - } - if !hasResource { - return JobV1GeneratorName, nil - } - } - return generatorName, nil -} - -func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) { - fmt.Fprintf(cmdErr, "WARNING: New generator %q specified, "+ - "but it isn't available. "+ - "Falling back to %q.\n", - newGeneratorName, - oldGeneratorName, - ) -} - -func HasResource(client discovery.DiscoveryInterface, resource schema.GroupVersionResource) (bool, error) { - resources, err := client.ServerResourcesForGroupVersion(resource.GroupVersion().String()) - if apierrors.IsNotFound(err) { - // entire group is missing - return false, nil - } - if err != nil { - // other errors error - return false, fmt.Errorf("failed to discover supported resources: %v", err) - } - for _, serverResource := range resources.APIResources { - if serverResource.Name == resource.Resource { - return true, nil - } - } - return false, nil -} - -func Contains(resourcesList []*metav1.APIResourceList, resource schema.GroupVersionResource) bool { - resources := discovery.FilteredBy(discovery.ResourcePredicateFunc(func(gv string, r *metav1.APIResource) bool { - return resource.GroupVersion().String() == gv && resource.Resource == r.Name - }), resourcesList) - return len(resources) != 0 -} - -func (f *factoryImpl) Generators(cmdName string) map[string]kubectl.Generator { - return DefaultGenerators(cmdName) -} - // this method exists to help us find the points still relying on internal types. 
func InternalVersionDecoder() runtime.Decoder { return legacyscheme.Codecs.UniversalDecoder() diff --git a/pkg/kubectl/cmd/util/generator.go b/pkg/kubectl/cmd/util/generator.go new file mode 100644 index 00000000000..551f9e94c10 --- /dev/null +++ b/pkg/kubectl/cmd/util/generator.go @@ -0,0 +1,238 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "io" + + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/kubernetes/pkg/kubectl" +) + +const ( + // TODO(sig-cli): Enforce consistent naming for generators here. + // See discussion in https://github.com/kubernetes/kubernetes/issues/46237 + // before you add any more. + RunV1GeneratorName = "run/v1" + RunPodV1GeneratorName = "run-pod/v1" + ServiceV1GeneratorName = "service/v1" + ServiceV2GeneratorName = "service/v2" + ServiceNodePortGeneratorV1Name = "service-nodeport/v1" + ServiceClusterIPGeneratorV1Name = "service-clusterip/v1" + ServiceLoadBalancerGeneratorV1Name = "service-loadbalancer/v1" + ServiceExternalNameGeneratorV1Name = "service-externalname/v1" + ServiceAccountV1GeneratorName = "serviceaccount/v1" + HorizontalPodAutoscalerV1GeneratorName = "horizontalpodautoscaler/v1" + DeploymentV1Beta1GeneratorName = "deployment/v1beta1" + DeploymentAppsV1Beta1GeneratorName = "deployment/apps.v1beta1" + DeploymentBasicV1Beta1GeneratorName = "deployment-basic/v1beta1" + DeploymentBasicAppsV1Beta1GeneratorName = "deployment-basic/apps.v1beta1" + DeploymentBasicAppsV1GeneratorName = "deployment-basic/apps.v1" + JobV1GeneratorName = "job/v1" + CronJobV2Alpha1GeneratorName = "cronjob/v2alpha1" + CronJobV1Beta1GeneratorName = "cronjob/v1beta1" + NamespaceV1GeneratorName = "namespace/v1" + ResourceQuotaV1GeneratorName = "resourcequotas/v1" + SecretV1GeneratorName = "secret/v1" + SecretForDockerRegistryV1GeneratorName = "secret-for-docker-registry/v1" + SecretForTLSV1GeneratorName = "secret-for-tls/v1" + ConfigMapV1GeneratorName = "configmap/v1" + ClusterRoleBindingV1GeneratorName = "clusterrolebinding.rbac.authorization.k8s.io/v1alpha1" + RoleBindingV1GeneratorName = "rolebinding.rbac.authorization.k8s.io/v1alpha1" + ClusterV1Beta1GeneratorName = "cluster/v1beta1" + PodDisruptionBudgetV1GeneratorName = "poddisruptionbudget/v1beta1" + PodDisruptionBudgetV2GeneratorName = "poddisruptionbudget/v1beta1/v2" + PriorityClassV1Alpha1GeneratorName = "priorityclass/v1alpha1" +) + +// GeneratorFunc returns the generators for the provided command +type GeneratorFunc func(cmdName string) map[string]kubectl.Generator + +// GeneratorFn gives a way to easily override the function for unit testing if needed +var GeneratorFn GeneratorFunc = defaultGenerators + +// defaultGenerators returns the 
set of default generators for use in Factory instances +func defaultGenerators(cmdName string) map[string]kubectl.Generator { + var generator map[string]kubectl.Generator + switch cmdName { + case "expose": + generator = map[string]kubectl.Generator{ + ServiceV1GeneratorName: kubectl.ServiceGeneratorV1{}, + ServiceV2GeneratorName: kubectl.ServiceGeneratorV2{}, + } + case "service-clusterip": + generator = map[string]kubectl.Generator{ + ServiceClusterIPGeneratorV1Name: kubectl.ServiceClusterIPGeneratorV1{}, + } + case "service-nodeport": + generator = map[string]kubectl.Generator{ + ServiceNodePortGeneratorV1Name: kubectl.ServiceNodePortGeneratorV1{}, + } + case "service-loadbalancer": + generator = map[string]kubectl.Generator{ + ServiceLoadBalancerGeneratorV1Name: kubectl.ServiceLoadBalancerGeneratorV1{}, + } + case "deployment": + // Create Deployment has only StructuredGenerators and no + // param-based Generators. + // The StructuredGenerators are as follows (as of 2018-03-16): + // DeploymentBasicV1Beta1GeneratorName -> kubectl.DeploymentBasicGeneratorV1 + // DeploymentBasicAppsV1Beta1GeneratorName -> kubectl.DeploymentBasicAppsGeneratorV1Beta1 + // DeploymentBasicAppsV1GeneratorName -> kubectl.DeploymentBasicAppsGeneratorV1 + generator = map[string]kubectl.Generator{} + case "run": + generator = map[string]kubectl.Generator{ + RunV1GeneratorName: kubectl.BasicReplicationController{}, + RunPodV1GeneratorName: kubectl.BasicPod{}, + DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{}, + DeploymentAppsV1Beta1GeneratorName: kubectl.DeploymentAppsV1Beta1{}, + JobV1GeneratorName: kubectl.JobV1{}, + CronJobV2Alpha1GeneratorName: kubectl.CronJobV2Alpha1{}, + CronJobV1Beta1GeneratorName: kubectl.CronJobV1Beta1{}, + } + case "namespace": + generator = map[string]kubectl.Generator{ + NamespaceV1GeneratorName: kubectl.NamespaceGeneratorV1{}, + } + case "quota": + generator = map[string]kubectl.Generator{ + ResourceQuotaV1GeneratorName: kubectl.ResourceQuotaGeneratorV1{}, + } + case "secret": + generator = map[string]kubectl.Generator{ + SecretV1GeneratorName: kubectl.SecretGeneratorV1{}, + } + case "secret-for-docker-registry": + generator = map[string]kubectl.Generator{ + SecretForDockerRegistryV1GeneratorName: kubectl.SecretForDockerRegistryGeneratorV1{}, + } + case "secret-for-tls": + generator = map[string]kubectl.Generator{ + SecretForTLSV1GeneratorName: kubectl.SecretForTLSGeneratorV1{}, + } + } + + return generator +} + +// FallbackGeneratorNameIfNecessary returns the name of the old generator +// if server does not support new generator. Otherwise, the +// generator string is returned unchanged. +// +// If the generator name is changed, print a warning message to let the user +// know. 
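A rough usage sketch (not taken from the patch): a command would typically pair this fallback with GeneratorFn as shown below; the helper function and its package placement are hypothetical, and the discovery client and error writer are assumed to come from the command's factory.

package cmd // hypothetical placement alongside the run/expose commands

import (
	"fmt"
	"io"

	"k8s.io/client-go/discovery"
	"k8s.io/kubernetes/pkg/kubectl"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
)

// resolveRunGenerator picks a "run" generator the server can actually satisfy,
// warning when it has to fall back to an older generator name.
func resolveRunGenerator(discoveryClient discovery.DiscoveryInterface, errOut io.Writer) (kubectl.Generator, error) {
	requested := cmdutil.DeploymentAppsV1Beta1GeneratorName
	name, err := cmdutil.FallbackGeneratorNameIfNecessary(requested, discoveryClient, errOut)
	if err != nil {
		return nil, err
	}
	if name != requested {
		cmdutil.Warning(errOut, requested, name)
	}
	generator, found := cmdutil.GeneratorFn("run")[name]
	if !found {
		return nil, fmt.Errorf("generator %q not found", name)
	}
	return generator, nil
}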
+func FallbackGeneratorNameIfNecessary( + generatorName string, + discoveryClient discovery.DiscoveryInterface, + cmdErr io.Writer, +) (string, error) { + switch generatorName { + case DeploymentAppsV1Beta1GeneratorName: + hasResource, err := HasResource(discoveryClient, appsv1beta1.SchemeGroupVersion.WithResource("deployments")) + if err != nil { + return "", err + } + if !hasResource { + return FallbackGeneratorNameIfNecessary(DeploymentV1Beta1GeneratorName, discoveryClient, cmdErr) + } + case DeploymentV1Beta1GeneratorName: + hasResource, err := HasResource(discoveryClient, extensionsv1beta1.SchemeGroupVersion.WithResource("deployments")) + if err != nil { + return "", err + } + if !hasResource { + return RunV1GeneratorName, nil + } + case DeploymentBasicAppsV1GeneratorName: + hasResource, err := HasResource(discoveryClient, appsv1.SchemeGroupVersion.WithResource("deployments")) + if err != nil { + return "", err + } + if !hasResource { + return FallbackGeneratorNameIfNecessary(DeploymentBasicAppsV1Beta1GeneratorName, discoveryClient, cmdErr) + } + case DeploymentBasicAppsV1Beta1GeneratorName: + hasResource, err := HasResource(discoveryClient, appsv1beta1.SchemeGroupVersion.WithResource("deployments")) + if err != nil { + return "", err + } + if !hasResource { + return DeploymentBasicV1Beta1GeneratorName, nil + } + case JobV1GeneratorName: + hasResource, err := HasResource(discoveryClient, batchv1.SchemeGroupVersion.WithResource("jobs")) + if err != nil { + return "", err + } + if !hasResource { + return RunPodV1GeneratorName, nil + } + case CronJobV1Beta1GeneratorName: + hasResource, err := HasResource(discoveryClient, batchv1beta1.SchemeGroupVersion.WithResource("cronjobs")) + if err != nil { + return "", err + } + if !hasResource { + return FallbackGeneratorNameIfNecessary(CronJobV2Alpha1GeneratorName, discoveryClient, cmdErr) + } + case CronJobV2Alpha1GeneratorName: + hasResource, err := HasResource(discoveryClient, batchv2alpha1.SchemeGroupVersion.WithResource("cronjobs")) + if err != nil { + return "", err + } + if !hasResource { + return JobV1GeneratorName, nil + } + } + return generatorName, nil +} + +func HasResource(client discovery.DiscoveryInterface, resource schema.GroupVersionResource) (bool, error) { + resources, err := client.ServerResourcesForGroupVersion(resource.GroupVersion().String()) + if apierrors.IsNotFound(err) { + // entire group is missing + return false, nil + } + if err != nil { + // other errors error + return false, fmt.Errorf("failed to discover supported resources: %v", err) + } + for _, serverResource := range resources.APIResources { + if serverResource.Name == resource.Resource { + return true, nil + } + } + return false, nil +} + +func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) { + fmt.Fprintf(cmdErr, "WARNING: New generator %q specified, "+ + "but it isn't available. 
"+ + "Falling back to %q.\n", + newGeneratorName, + oldGeneratorName, + ) +} From e330741d6d72e97346f3c989960829c247950546 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 28 May 2018 02:46:18 +0000 Subject: [PATCH 225/307] fix azure file size grow issue --- pkg/volume/azure_file/azure_file.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/volume/azure_file/azure_file.go b/pkg/volume/azure_file/azure_file.go index b7efb940d0b..f36b47cb3ee 100644 --- a/pkg/volume/azure_file/azure_file.go +++ b/pkg/volume/azure_file/azure_file.go @@ -150,7 +150,7 @@ func (plugin *azureFilePlugin) ExpandVolumeDevice( newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { - if spec.PersistentVolume != nil || spec.PersistentVolume.Spec.AzureFile == nil { + if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AzureFile == nil { return oldSize, fmt.Errorf("invalid PV spec") } shareName := spec.PersistentVolume.Spec.AzureFile.ShareName From ecdc1638f6557d8d10d72ebc821e182ead2f0cdc Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 9 Mar 2018 18:47:53 +0100 Subject: [PATCH 226/307] apiextensions-apiserver: add columns to CRD spec --- .../pkg/apis/apiextensions/fuzzer/fuzzer.go | 8 + .../pkg/apis/apiextensions/types.go | 24 +++ .../apis/apiextensions/v1beta1/defaults.go | 8 + .../pkg/apis/apiextensions/v1beta1/types.go | 24 +++ .../apiextensions/validation/validation.go | 39 ++++ .../pkg/apiserver/customresource_handler.go | 5 +- .../pkg/registry/customresource/etcd_test.go | 102 +++++++++- .../tableconvertor/tableconvertor.go | 136 +++++++++---- .../tableconvertor/tableconvertor_test.go | 68 +++++++ .../test/integration/table_test.go | 185 ++++++++++++++++++ .../apimachinery/pkg/api/meta/table/table.go | 6 +- .../pkg/registry/rest/resttest/resttest.go | 5 +- 12 files changed, 556 insertions(+), 54 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor_test.go create mode 100644 staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go index 0fe919ab64e..ff8cc033469 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/fuzzer.go @@ -23,9 +23,12 @@ import ( "github.com/google/gofuzz" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" ) +var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() + // Funcs returns the fuzzer functions for the apiextensions apis. 
func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { return []interface{}{ @@ -53,6 +56,11 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { } else if len(obj.Versions) != 0 { obj.Version = obj.Versions[0].Name } + if len(obj.AdditionalPrinterColumns) == 0 { + obj.AdditionalPrinterColumns = []apiextensions.CustomResourceColumnDefinition{ + {Name: "Age", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"], JSONPath: ".metadata.creationTimestamp"}, + } + } }, func(obj *apiextensions.CustomResourceDefinition, c fuzz.Continue) { c.FuzzNoCustom(obj) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go index debe74a5b0c..6fc75154fac 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go @@ -49,6 +49,8 @@ type CustomResourceDefinitionSpec struct { // major version, then minor version. An example sorted list of versions: // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. Versions []CustomResourceDefinitionVersion + // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + AdditionalPrinterColumns []CustomResourceColumnDefinition } type CustomResourceDefinitionVersion struct { @@ -61,6 +63,28 @@ type CustomResourceDefinitionVersion struct { Storage bool } +// CustomResourceColumnDefinition specifies a column for server side printing. +type CustomResourceColumnDefinition struct { + // name is a human readable name for the column. + Name string + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string + // description is a human readable description of this column. + Description string + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 + + // JSONPath is a simple JSON path, i.e. without array notation. + JSONPath string +} + // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition type CustomResourceDefinitionNames struct { // Plural is the plural name of the resource to serve. 
It must match the name of the CustomResourceDefinition-registration diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go index 1984e229778..e3235e8702c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go @@ -19,9 +19,12 @@ package v1beta1 import ( "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) +var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() + func addDefaultingFuncs(scheme *runtime.Scheme) error { scheme.AddTypeDefaultingFunc(&CustomResourceDefinition{}, func(obj interface{}) { SetDefaults_CustomResourceDefinition(obj.(*CustomResourceDefinition)) }) // TODO figure out why I can't seem to get my defaulter generated @@ -63,4 +66,9 @@ func SetDefaults_CustomResourceDefinitionSpec(obj *CustomResourceDefinitionSpec) if len(obj.Version) == 0 && len(obj.Versions) != 0 { obj.Version = obj.Versions[0].Name } + if len(obj.AdditionalPrinterColumns) == 0 { + obj.AdditionalPrinterColumns = []CustomResourceColumnDefinition{ + {Name: "Age", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"], JSONPath: ".metadata.creationTimestamp"}, + } + } } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go index 9d8d1cd80d1..2080cc8217e 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go @@ -52,6 +52,8 @@ type CustomResourceDefinitionSpec struct { // major version, then minor version. An example sorted list of versions: // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. Versions []CustomResourceDefinitionVersion `json:"versions,omitempty" protobuf:"bytes,7,rep,name=versions"` + // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + AdditionalPrinterColumns []CustomResourceColumnDefinition `json:"additionalPrinterColumns,omitempty" protobuf:"bytes,8,rep,name=additionalPrinterColumns"` } type CustomResourceDefinitionVersion struct { @@ -64,6 +66,28 @@ type CustomResourceDefinitionVersion struct { Storage bool `json:"storage" protobuf:"varint,3,opt,name=storage"` } +// CustomResourceColumnDefinition specifies a column for server side printing. +type CustomResourceColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format,omitempty" protobuf:"bytes,3,opt,name=format"` + // description is a human readable description of this column. 
+ Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority,omitempty" protobuf:"bytes,5,opt,name=priority"` + + // JSONPath is a simple JSON path, i.e. with array notation. + JSONPath string `json:"JSONPath" protobuf:"bytes,6,opt,name=JSONPath"` +} + // CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition type CustomResourceDefinitionNames struct { // Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go index eb9acc79e8e..40baf42351a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go @@ -22,6 +22,7 @@ import ( "strings" genericvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/sets" validationutil "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -31,6 +32,11 @@ import ( apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features" ) +var ( + printerColumnDatatypes = sets.NewString("integer", "number", "string", "boolean", "date") + customResourceColumnDefinitionFormats = sets.NewString("int32", "int64", "float", "double", "byte", "date", "date-time", "password") +) + // ValidateCustomResourceDefinition statically validates func ValidateCustomResourceDefinition(obj *apiextensions.CustomResourceDefinition) field.ErrorList { nameValidationFn := func(name string, prefix bool) []string { @@ -175,6 +181,12 @@ func ValidateCustomResourceDefinitionSpec(spec *apiextensions.CustomResourceDefi allErrs = append(allErrs, field.Forbidden(fldPath.Child("subresources"), "disabled by feature-gate CustomResourceSubresources")) } + for i := range spec.AdditionalPrinterColumns { + if errs := ValidateCustomResourceColumnDefinition(&spec.AdditionalPrinterColumns[i], fldPath.Child("columns").Index(i)); len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + } + return allErrs } @@ -238,6 +250,33 @@ func ValidateCustomResourceDefinitionNames(names *apiextensions.CustomResourceDe return allErrs } +// ValidateCustomResourceColumnDefinition statically validates a printer column. 
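A minimal sketch of exercising this validator from a test (the test scaffolding below is hypothetical and not part of the patch):

package validation_test

import (
	"testing"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func TestColumnDefinitionValidationSketch(t *testing.T) {
	col := apiextensions.CustomResourceColumnDefinition{
		Name:     "Replicas",
		Type:     "integer",
		JSONPath: ".spec.replicas",
	}
	fldPath := field.NewPath("spec", "additionalPrinterColumns").Index(0)
	// An unknown Type (e.g. "string[]"), an unsupported Format, or an empty
	// JSONPath would be reported as field errors under the given path.
	if errs := validation.ValidateCustomResourceColumnDefinition(&col, fldPath); len(errs) > 0 {
		t.Errorf("unexpected validation errors: %v", errs)
	}
}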
+func ValidateCustomResourceColumnDefinition(col *apiextensions.CustomResourceColumnDefinition, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(col.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("header"), "")) + } + + if len(col.Type) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("type"), fmt.Sprintf("must be one of %s", strings.Join(printerColumnDatatypes.List(), ",")))) + } else if !printerColumnDatatypes.Has(col.Type) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), col.Type, fmt.Sprintf("must be one of %s", strings.Join(printerColumnDatatypes.List(), ",")))) + } + + if len(col.Format) > 0 && !customResourceColumnDefinitionFormats.Has(col.Format) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("format"), col.Format, fmt.Sprintf("must be one of %s", strings.Join(customResourceColumnDefinitionFormats.List(), ",")))) + } + + if len(col.JSONPath) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } else if errs := validateSimpleJSONPath(col.JSONPath, fldPath.Child("path")); len(errs) > 0 { + allErrs = append(allErrs, errs...) + } + + return allErrs +} + // specStandardValidator applies validations for different OpenAPI specification versions. type specStandardValidator interface { validate(spec *apiextensions.JSONSchemaProps, fldPath *field.Path) field.ErrorList diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 20231bdf866..231bffdbf6d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -370,6 +370,8 @@ func (r *crdHandler) GetCustomResourceListerCollectionDeleter(crd *apiextensions return info.storages[info.storageVersion].CustomResource, nil } +var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() + func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResourceDefinition) (*crdInfo, error) { storageMap := r.customStorage.Load().(crdStorageMap) if ret, ok := storageMap[crd.UID]; ok { @@ -439,8 +441,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResource scaleSpec = crd.Spec.Subresources.Scale } - // TODO: identify how to pass printer specification from the CRD - table, err := tableconvertor.New(nil) + table, err := tableconvertor.New(crd.Spec.AdditionalPrinterColumns) if err != nil { glog.V(2).Infof("The CRD for %v has an invalid printer specification, falling back to default printing: %v", kind, err) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go index 9a2c8320505..15a242e4493 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd_test.go @@ -21,12 +21,15 @@ import ( "reflect" "strings" "testing" + "time" autoscalingv1 "k8s.io/api/autoscaling/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" + metainternal "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/diff" @@ -72,8 +75,19 @@ func newStorage(t *testing.T) (customresource.CustomResourceStorage, *etcdtestin status := &apiextensions.CustomResourceSubresourceStatus{} - // TODO: identify how to pass printer specification from the CRD - table, _ := tableconvertor.New(nil) + headers := []apiextensions.CustomResourceColumnDefinition{ + {Name: "Age", Type: "date", JSONPath: ".metadata.creationTimestamp"}, + {Name: "Replicas", Type: "integer", JSONPath: ".spec.replicas"}, + {Name: "Missing", Type: "string", JSONPath: ".spec.missing"}, + {Name: "Invalid", Type: "integer", JSONPath: ".spec.string"}, + {Name: "String", Type: "string", JSONPath: ".spec.string"}, + {Name: "StringFloat64", Type: "string", JSONPath: ".spec.float64"}, + {Name: "StringInt64", Type: "string", JSONPath: ".spec.replicas"}, + {Name: "StringBool", Type: "string", JSONPath: ".spec.bool"}, + {Name: "Float64", Type: "number", JSONPath: ".spec.float64"}, + {Name: "Bool", Type: "boolean", JSONPath: ".spec.bool"}, + } + table, _ := tableconvertor.New(headers) storage := customresource.NewStorage( schema.GroupResource{Group: "mygroup.example.com", Resource: "noxus"}, @@ -112,11 +126,18 @@ func validNewCustomResource() *unstructured.Unstructured { "apiVersion": "mygroup.example.com/v1beta1", "kind": "Noxu", "metadata": map[string]interface{}{ - "namespace": "default", - "name": "foo", + "namespace": "default", + "name": "foo", + "creationTimestamp": time.Now().Add(-time.Hour*12 - 30*time.Minute).UTC().Format(time.RFC3339), }, "spec": map[string]interface{}{ - "replicas": int64(7), + "replicas": int64(7), + "string": "string", + "float64": float64(3.1415926), + "bool": true, + "stringList": []interface{}{"foo", "bar"}, + "mixedList": []interface{}{"foo", int64(42)}, + "nonPrimitiveList": []interface{}{"foo", []interface{}{int64(1), int64(2)}}, }, }, } @@ -225,6 +246,77 @@ func TestCategories(t *testing.T) { } } +func TestColumns(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + defer storage.CustomResource.Store.DestroyFunc() + + ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault) + key := "/noxus/" + metav1.NamespaceDefault + "/foo" + validCustomResource := validNewCustomResource() + if err := storage.CustomResource.Storage.Create(ctx, key, validCustomResource, nil, 0); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + gottenList, err := storage.CustomResource.List(ctx, &metainternal.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + tbl, err := storage.CustomResource.ConvertToTable(ctx, gottenList, &metav1beta1.TableOptions{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expectedColumns := []struct { + Name, Type string + }{ + {"Name", "string"}, + {"Age", "date"}, + {"Replicas", "integer"}, + {"Missing", "string"}, + {"Invalid", "integer"}, + {"String", "string"}, + {"StringFloat64", "string"}, + {"StringInt64", "string"}, + {"StringBool", "string"}, + {"Float64", "number"}, + {"Bool", "boolean"}, + } + if len(tbl.ColumnDefinitions) != len(expectedColumns) { + t.Fatalf("got %d columns, expected %d. 
Got: %+v", len(tbl.ColumnDefinitions), len(expectedColumns), tbl.ColumnDefinitions) + } + for i, d := range tbl.ColumnDefinitions { + if d.Name != expectedColumns[i].Name { + t.Errorf("got column %d name %q, expected %q", i, d.Name, expectedColumns[i].Name) + } + if d.Type != expectedColumns[i].Type { + t.Errorf("got column %d type %q, expected %q", i, d.Type, expectedColumns[i].Type) + } + } + + expectedRows := [][]interface{}{ + { + "foo", + "12h", + int64(7), + nil, + nil, + "string", + "3.1415926", + "7", + "true", + float64(3.1415926), + true, + }, + } + for i, r := range tbl.Rows { + if !reflect.DeepEqual(r.Cells, expectedRows[i]) { + t.Errorf("got row %d with cells %#v, expected %#v", i, r.Cells, expectedRows[i]) + } + } +} + func TestStatusUpdate(t *testing.T) { storage, server := newStorage(t) defer server.Terminate(t) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go index 8dbc0e77265..e1bed809d99 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor.go @@ -19,11 +19,11 @@ package tableconvertor import ( "bytes" "context" + "encoding/json" "fmt" - "strings" - - "github.com/go-openapi/spec" + "reflect" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" "k8s.io/apimachinery/pkg/api/meta" metatable "k8s.io/apimachinery/pkg/api/meta/table" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,56 +33,46 @@ import ( "k8s.io/client-go/util/jsonpath" ) -const printColumnsKey = "x-kubernetes-print-columns" - var swaggerMetadataDescriptions = metav1.ObjectMeta{}.SwaggerDoc() -// New creates a new table convertor for the provided OpenAPI schema. If the printer definition cannot be parsed, +// New creates a new table convertor for the provided CRD column definition. If the printer definition cannot be parsed, // error will be returned along with a default table convertor. 
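As a usage sketch (essentially what the customresource handler and the storage tests wire together; the function name and package placement here are hypothetical):

package customresource // hypothetical placement

import (
	"context"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
	"k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor"
	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

// tableForList renders an already-fetched list of custom resources using the
// CRD's additionalPrinterColumns. On a bad column definition, New still returns
// a usable default convertor together with the error, so a caller may choose to
// log the error and keep the returned convertor instead of failing outright.
func tableForList(list runtime.Object, cols []apiextensions.CustomResourceColumnDefinition) (*metav1beta1.Table, error) {
	convertor, err := tableconvertor.New(cols)
	if err != nil {
		return nil, err
	}
	return convertor.ConvertToTable(context.TODO(), list, &metav1beta1.TableOptions{})
}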
-func New(extensions spec.Extensions) (rest.TableConvertor, error) { +func New(crdColumns []apiextensions.CustomResourceColumnDefinition) (rest.TableConvertor, error) { headers := []metav1beta1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: swaggerMetadataDescriptions["name"]}, - {Name: "Age", Type: "date", Description: swaggerMetadataDescriptions["creationTimestamp"]}, } c := &convertor{ headers: headers, } - format, ok := extensions.GetString(printColumnsKey) - if !ok { - return c, nil - } - // "x-kubernetes-print-columns": "custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion" - parts := strings.SplitN(format, "=", 2) - if len(parts) != 2 || parts[0] != "custom-columns" { - return c, fmt.Errorf("unrecognized column definition in 'x-kubernetes-print-columns', only support 'custom-columns=NAME=JSONPATH[,NAME=JSONPATH]'") - } - columnSpecs := strings.Split(parts[1], ",") - var columns []*jsonpath.JSONPath - for _, spec := range columnSpecs { - parts := strings.SplitN(spec, ":", 2) - if len(parts) != 2 || len(parts[0]) == 0 || len(parts[1]) == 0 { - return c, fmt.Errorf("unrecognized column definition in 'x-kubernetes-print-columns', must specify NAME=JSONPATH: %s", spec) - } - path := jsonpath.New(parts[0]) - if err := path.Parse(parts[1]); err != nil { - return c, fmt.Errorf("unrecognized column definition in 'x-kubernetes-print-columns': %v", spec) + + for _, col := range crdColumns { + path := jsonpath.New(col.Name) + if err := path.Parse(fmt.Sprintf("{%s}", col.JSONPath)); err != nil { + return c, fmt.Errorf("unrecognized column definition %q", col.JSONPath) } path.AllowMissingKeys(true) - columns = append(columns, path) - headers = append(headers, metav1beta1.TableColumnDefinition{ - Name: parts[0], - Type: "string", - Description: fmt.Sprintf("Custom resource definition column from OpenAPI (in JSONPath format): %s", parts[1]), + + desc := fmt.Sprintf("Custom resource definition column (in JSONPath format): %s", col.JSONPath) + if len(col.Description) > 0 { + desc = col.Description + } + + c.additionalColumns = append(c.additionalColumns, path) + c.headers = append(c.headers, metav1beta1.TableColumnDefinition{ + Name: col.Name, + Type: col.Type, + Format: col.Format, + Description: desc, + Priority: col.Priority, }) } - c.columns = columns - c.headers = headers + return c, nil } type convertor struct { - headers []metav1beta1.TableColumnDefinition - columns []*jsonpath.JSONPath + headers []metav1beta1.TableColumnDefinition + additionalColumns []*jsonpath.JSONPath } func (c *convertor) ConvertToTable(ctx context.Context, obj runtime.Object, tableOptions runtime.Object) (*metav1beta1.Table, error) { @@ -103,18 +93,80 @@ func (c *convertor) ConvertToTable(ctx context.Context, obj runtime.Object, tabl var err error buf := &bytes.Buffer{} table.Rows, err = metatable.MetaToTableRow(obj, func(obj runtime.Object, m metav1.Object, name, age string) ([]interface{}, error) { - cells := make([]interface{}, 2, 2+len(c.columns)) + cells := make([]interface{}, 1, 1+len(c.additionalColumns)) cells[0] = name - cells[1] = age - for _, column := range c.columns { - if err := column.Execute(buf, obj); err != nil { + customHeaders := c.headers[1:] + for i, column := range c.additionalColumns { + results, err := column.FindResults(obj.(runtime.Unstructured).UnstructuredContent()) + if err != nil || len(results) == 0 || len(results[0]) == 0 { cells = append(cells, nil) continue } - cells = append(cells, buf.String()) - buf.Reset() + + // as we only support 
simple JSON path, we can assume to have only one result (or none, filtered out above) + value := results[0][0].Interface() + if customHeaders[i].Type == "string" { + if err := column.PrintResults(buf, []reflect.Value{reflect.ValueOf(value)}); err == nil { + cells = append(cells, buf.String()) + buf.Reset() + } else { + cells = append(cells, nil) + } + } else { + cells = append(cells, cellForJSONValue(customHeaders[i].Type, value)) + } } return cells, nil }) return table, err } + +func cellForJSONValue(headerType string, value interface{}) interface{} { + if value == nil { + return nil + } + + switch headerType { + case "integer": + switch typed := value.(type) { + case int64: + return typed + case float64: + return int64(typed) + case json.Number: + if i64, err := typed.Int64(); err == nil { + return i64 + } + } + case "number": + switch typed := value.(type) { + case int64: + return float64(typed) + case float64: + return typed + case json.Number: + if f, err := typed.Float64(); err == nil { + return f + } + } + case "boolean": + if b, ok := value.(bool); ok { + return b + } + case "string": + if s, ok := value.(string); ok { + return s + } + case "date": + if typed, ok := value.(string); ok { + var timestamp metav1.Time + err := timestamp.UnmarshalQueryParameter(typed) + if err != nil { + return "" + } + return metatable.ConvertToHumanReadableDateType(timestamp) + } + } + + return nil +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor_test.go new file mode 100644 index 00000000000..179aabb8ab7 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/tableconvertor_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tableconvertor + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func Test_cellForJSONValue(t *testing.T) { + tests := []struct { + headerType string + value interface{} + want interface{} + }{ + {"integer", int64(42), int64(42)}, + {"integer", float64(3.14), int64(3)}, + {"integer", true, nil}, + {"integer", "foo", nil}, + + {"number", int64(42), float64(42)}, + {"number", float64(3.14), float64(3.14)}, + {"number", true, nil}, + {"number", "foo", nil}, + + {"boolean", int64(42), nil}, + {"boolean", float64(3.14), nil}, + {"boolean", true, true}, + {"boolean", "foo", nil}, + + {"string", int64(42), nil}, + {"string", float64(3.14), nil}, + {"string", true, nil}, + {"string", "foo", "foo"}, + + {"date", int64(42), nil}, + {"date", float64(3.14), nil}, + {"date", true, nil}, + {"date", time.Now().Add(-time.Hour*12 - 30*time.Minute).UTC().Format(time.RFC3339), "12h"}, + {"date", time.Now().Add(+time.Hour*12 + 30*time.Minute).UTC().Format(time.RFC3339), ""}, + {"date", "", ""}, + + {"unknown", "foo", nil}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%#v of type %s", tt.value, tt.headerType), func(t *testing.T) { + if got := cellForJSONValue(tt.headerType, tt.value); !reflect.DeepEqual(got, tt.want) { + t.Errorf("cellForJSONValue() = %#v, want %#v", got, tt.want) + } + }) + } +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go b/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go new file mode 100644 index 00000000000..0c3ac609a9c --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/table_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "testing" + + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apiextensions-apiserver/test/integration/testserver" +) + +func newTableCRD() *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: "tables.mygroup.example.com"}, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: "mygroup.example.com", + Version: "v1beta1", + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: "tables", + Singular: "table", + Kind: "Table", + ListKind: "TablemList", + }, + Scope: apiextensionsv1beta1.ClusterScoped, + AdditionalPrinterColumns: []apiextensionsv1beta1.CustomResourceColumnDefinition{ + {Name: "Age", Type: "date", JSONPath: ".metadata.creationTimestamp"}, + {Name: "Alpha", Type: "string", JSONPath: ".spec.alpha"}, + {Name: "Beta", Type: "integer", Description: "the beta field", Format: "int64", Priority: 42, JSONPath: ".spec.beta"}, + {Name: "Gamma", Type: "integer", Description: "a column with wrongly typed values", JSONPath: ".spec.gamma"}, + }, + }, + } +} + +func newTableInstance(name string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "mygroup.example.com/v1beta1", + "kind": "Table", + "metadata": map[string]interface{}{ + "name": name, + }, + "spec": map[string]interface{}{ + "alpha": "foo_123", + "beta": 10, + "gamma": "bar", + "delta": "hello", + }, + }, + } +} + +func TestTableGet(t *testing.T) { + stopCh, config, err := testserver.StartDefaultServer() + if err != nil { + t.Fatal(err) + } + defer close(stopCh) + + apiExtensionClient, err := clientset.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + + crd := newTableCRD() + crd, err = testserver.CreateNewCustomResourceDefinition(crd, apiExtensionClient, dynamicClient) + if err != nil { + t.Fatal(err) + } + + crd, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + t.Logf("table crd created: %#v", crd) + + crClient := newNamespacedCustomResourceClient("", dynamicClient, crd) + foo, err := crClient.Create(newTableInstance("foo")) + if err != nil { + t.Fatalf("unable to create noxu instance: %v", err) + } + t.Logf("foo created: %#v", foo.UnstructuredContent()) + + gv := schema.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Version} + gvk := gv.WithKind(crd.Spec.Names.Kind) + + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + parameterCodec := runtime.NewParameterCodec(scheme) + metav1.AddToGroupVersion(scheme, gv) + scheme.AddKnownTypes(gv, &metav1beta1.Table{}, &metav1beta1.TableOptions{}) + scheme.AddKnownTypes(metav1beta1.SchemeGroupVersion, &metav1beta1.Table{}, &metav1beta1.TableOptions{}) + + crConfig := *config + crConfig.GroupVersion = &gv + crConfig.APIPath = "/apis" + crConfig.NegotiatedSerializer = 
serializer.DirectCodecFactory{CodecFactory: codecs} + crRestClient, err := rest.RESTClientFor(&crConfig) + if err != nil { + t.Fatal(err) + } + + ret, err := crRestClient.Get(). + Resource(crd.Spec.Names.Plural). + SetHeader("Accept", fmt.Sprintf("application/json;as=Table;v=%s;g=%s, application/json", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName)). + VersionedParams(&metav1beta1.TableOptions{}, parameterCodec). + Do(). + Get() + if err != nil { + t.Fatalf("failed to list %v resources: %v", gvk, err) + } + + tbl, ok := ret.(*metav1beta1.Table) + if !ok { + t.Fatalf("expected metav1beta1.Table, got %T", ret) + } + t.Logf("%v table list: %#v", gvk, tbl) + + if got, expected := len(tbl.ColumnDefinitions), 5; got != expected { + t.Errorf("expected %d headers, got %d", expected, got) + } else { + alpha := metav1beta1.TableColumnDefinition{Name: "Alpha", Type: "string", Format: "", Description: "Custom resource definition column (in JSONPath format): .spec.alpha", Priority: 0} + if got, expected := tbl.ColumnDefinitions[2], alpha; got != expected { + t.Errorf("expected column definition %#v, got %#v", expected, got) + } + + beta := metav1beta1.TableColumnDefinition{Name: "Beta", Type: "integer", Format: "int64", Description: "the beta field", Priority: 42} + if got, expected := tbl.ColumnDefinitions[3], beta; got != expected { + t.Errorf("expected column definition %#v, got %#v", expected, got) + } + + gamma := metav1beta1.TableColumnDefinition{Name: "Gamma", Type: "integer", Description: "a column with wrongly typed values"} + if got, expected := tbl.ColumnDefinitions[4], gamma; got != expected { + t.Errorf("expected column definition %#v, got %#v", expected, got) + } + } + if got, expected := len(tbl.Rows), 1; got != expected { + t.Errorf("expected %d rows, got %d", expected, got) + } else if got, expected := len(tbl.Rows[0].Cells), 5; got != expected { + t.Errorf("expected %d cells, got %d", expected, got) + } else { + if got, expected := tbl.Rows[0].Cells[0], "foo"; got != expected { + t.Errorf("expected cell[0] to equal %q, got %q", expected, got) + } + if got, expected := tbl.Rows[0].Cells[2], "foo_123"; got != expected { + t.Errorf("expected cell[2] to equal %q, got %q", expected, got) + } + if got, expected := tbl.Rows[0].Cells[3], int64(10); got != expected { + t.Errorf("expected cell[3] to equal %#v, got %#v", expected, got) + } + if got, expected := tbl.Rows[0].Cells[4], interface{}(nil); got != expected { + t.Errorf("expected cell[3] to equal %#v although the type does not match the column, got %#v", expected, got) + } + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/table/table.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/table/table.go index a0097a4e26d..2144a77cb19 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/table/table.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/table/table.go @@ -53,7 +53,7 @@ func MetaToTableRow(obj runtime.Object, rowFn func(obj runtime.Object, m metav1. row := metav1beta1.TableRow{ Object: runtime.RawExtension{Object: obj}, } - row.Cells, err = rowFn(obj, m, m.GetName(), translateTimestamp(m.GetCreationTimestamp())) + row.Cells, err = rowFn(obj, m, m.GetName(), ConvertToHumanReadableDateType(m.GetCreationTimestamp())) if err != nil { return nil, err } @@ -61,9 +61,9 @@ func MetaToTableRow(obj runtime.Object, rowFn func(obj runtime.Object, m metav1. 
return rows, nil } -// translateTimestamp returns the elapsed time since timestamp in +// ConvertToHumanReadableDateType returns the elapsed time since timestamp in // human-readable approximation. -func translateTimestamp(timestamp metav1.Time) string { +func ConvertToHumanReadableDateType(timestamp metav1.Time) string { if timestamp.IsZero() { return "" } diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go b/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go index 64d792a607a..647b9401b1e 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go @@ -1320,7 +1320,7 @@ func (t *Tester) testListTableConversion(obj runtime.Object, assignFn AssignFunc t.Errorf("column %d has no name", j) } switch column.Type { - case "string", "date", "integer": + case "string", "date", "integer", "number", "boolean": default: t.Errorf("column %d has unexpected type: %q", j, column.Type) } @@ -1342,13 +1342,14 @@ func (t *Tester) testListTableConversion(obj runtime.Object, assignFn AssignFunc } for i, row := range table.Rows { if len(row.Cells) != len(table.ColumnDefinitions) { - t.Errorf("row %d did not have the correct number of cells: %d in %v", i, len(table.ColumnDefinitions), row.Cells) + t.Errorf("row %d did not have the correct number of cells: %d in %v, expected %d", i, len(row.Cells), row.Cells, len(table.ColumnDefinitions)) } for j, cell := range row.Cells { // do not add to this test without discussion - may break clients switch cell.(type) { case float64, int64, int32, int, string, bool: case []interface{}: + case nil: default: t.Errorf("row %d, cell %d has an unrecognized type, only JSON serialization safe types are allowed: %T ", i, j, cell) } From 96475ce20988df126d016baa8ffaa937682933bb Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Fri, 9 Mar 2018 18:50:55 +0100 Subject: [PATCH 227/307] Update generated files --- api/openapi-spec/swagger.json | 42 ++ .../pkg/apis/apiextensions/fuzzer/BUILD | 1 + .../apiextensions/v1beta1/generated.pb.go | 664 ++++++++++++++---- .../apiextensions/v1beta1/generated.proto | 29 + .../v1beta1/zz_generated.conversion.go | 34 + .../v1beta1/zz_generated.deepcopy.go | 21 + .../pkg/apis/apiextensions/validation/BUILD | 1 + .../apiextensions/zz_generated.deepcopy.go | 21 + .../pkg/registry/customresource/BUILD | 2 + .../customresource/tableconvertor/BUILD | 10 +- .../test/integration/BUILD | 4 + 11 files changed, 671 insertions(+), 158 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 390902a04e3..a62df308c16 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -85067,6 +85067,41 @@ } } }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceColumnDefinition": { + "description": "CustomResourceColumnDefinition specifies a column for server side printing.", + "required": [ + "name", + "type", + "JSONPath" + ], + "properties": { + "JSONPath": { + "description": "JSONPath is a simple JSON path, i.e. with array notation.", + "type": "string" + }, + "description": { + "description": "description is a human readable description of this column.", + "type": "string" + }, + "format": { + "description": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. 
See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "type": "string" + }, + "name": { + "description": "name is a human readable name for the column.", + "type": "string" + }, + "priority": { + "description": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", + "type": "integer", + "format": "int32" + }, + "type": { + "description": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "type": "string" + } + } + }, "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition": { "description": "CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format \u003c.spec.name\u003e.\u003c.spec.group\u003e.", "properties": { @@ -85207,6 +85242,13 @@ "scope" ], "properties": { + "additionalPrinterColumns": { + "description": "AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceColumnDefinition" + } + }, "group": { "description": "Group is the group this resource belongs in", "type": "string" diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/BUILD index a6e5ed9df8c..983bb2bbcb6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/fuzzer/BUILD @@ -12,6 +12,7 @@ go_library( deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go index b5a8c0e0002..8e30403c8e3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto It has these top-level messages: + CustomResourceColumnDefinition CustomResourceDefinition CustomResourceDefinitionCondition CustomResourceDefinitionList @@ -67,99 +68,106 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *CustomResourceColumnDefinition) Reset() { *m = CustomResourceColumnDefinition{} } +func (*CustomResourceColumnDefinition) ProtoMessage() {} +func (*CustomResourceColumnDefinition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} } func (*CustomResourceDefinition) ProtoMessage() {} func (*CustomResourceDefinition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptorGenerated, []int{1} } func (m *CustomResourceDefinitionCondition) Reset() { *m = CustomResourceDefinitionCondition{} } func (*CustomResourceDefinitionCondition) ProtoMessage() {} func (*CustomResourceDefinitionCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} + return fileDescriptorGenerated, []int{2} } func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} } func (*CustomResourceDefinitionList) ProtoMessage() {} func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptorGenerated, []int{3} } func (m *CustomResourceDefinitionNames) Reset() { *m = CustomResourceDefinitionNames{} } func (*CustomResourceDefinitionNames) ProtoMessage() {} func (*CustomResourceDefinitionNames) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} + return fileDescriptorGenerated, []int{4} } func (m *CustomResourceDefinitionSpec) Reset() { *m = CustomResourceDefinitionSpec{} } func (*CustomResourceDefinitionSpec) ProtoMessage() {} func (*CustomResourceDefinitionSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} + return fileDescriptorGenerated, []int{5} } func (m *CustomResourceDefinitionStatus) Reset() { *m = CustomResourceDefinitionStatus{} } func (*CustomResourceDefinitionStatus) ProtoMessage() {} func (*CustomResourceDefinitionStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptorGenerated, []int{6} } func (m *CustomResourceDefinitionVersion) Reset() { *m = CustomResourceDefinitionVersion{} } func (*CustomResourceDefinitionVersion) ProtoMessage() {} func (*CustomResourceDefinitionVersion) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptorGenerated, []int{7} } func (m *CustomResourceSubresourceScale) Reset() { *m = CustomResourceSubresourceScale{} } func (*CustomResourceSubresourceScale) ProtoMessage() {} func (*CustomResourceSubresourceScale) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptorGenerated, []int{8} } func (m *CustomResourceSubresourceStatus) Reset() { *m = CustomResourceSubresourceStatus{} } func (*CustomResourceSubresourceStatus) ProtoMessage() {} func (*CustomResourceSubresourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} + return fileDescriptorGenerated, []int{9} } func (m *CustomResourceSubresources) Reset() { *m = CustomResourceSubresources{} } func (*CustomResourceSubresources) ProtoMessage() {} func (*CustomResourceSubresources) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptorGenerated, []int{10} } func (m *CustomResourceValidation) Reset() { *m = CustomResourceValidation{} } func (*CustomResourceValidation) ProtoMessage() {} func (*CustomResourceValidation) Descriptor() ([]byte, 
[]int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptorGenerated, []int{11} } func (m *ExternalDocumentation) Reset() { *m = ExternalDocumentation{} } func (*ExternalDocumentation) ProtoMessage() {} -func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*ExternalDocumentation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *JSON) Reset() { *m = JSON{} } func (*JSON) ProtoMessage() {} -func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*JSON) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *JSONSchemaProps) Reset() { *m = JSONSchemaProps{} } func (*JSONSchemaProps) ProtoMessage() {} -func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*JSONSchemaProps) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *JSONSchemaPropsOrArray) Reset() { *m = JSONSchemaPropsOrArray{} } func (*JSONSchemaPropsOrArray) ProtoMessage() {} -func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*JSONSchemaPropsOrArray) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *JSONSchemaPropsOrBool) Reset() { *m = JSONSchemaPropsOrBool{} } func (*JSONSchemaPropsOrBool) ProtoMessage() {} -func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*JSONSchemaPropsOrBool) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *JSONSchemaPropsOrStringArray) Reset() { *m = JSONSchemaPropsOrStringArray{} } func (*JSONSchemaPropsOrStringArray) ProtoMessage() {} func (*JSONSchemaPropsOrStringArray) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{16} + return fileDescriptorGenerated, []int{17} } func init() { + proto.RegisterType((*CustomResourceColumnDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceColumnDefinition") proto.RegisterType((*CustomResourceDefinition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinition") proto.RegisterType((*CustomResourceDefinitionCondition)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionCondition") proto.RegisterType((*CustomResourceDefinitionList)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceDefinitionList") @@ -178,6 +186,47 @@ func init() { proto.RegisterType((*JSONSchemaPropsOrBool)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrBool") proto.RegisterType((*JSONSchemaPropsOrStringArray)(nil), "k8s.io.apiextensions_apiserver.pkg.apis.apiextensions.v1beta1.JSONSchemaPropsOrStringArray") } +func (m *CustomResourceColumnDefinition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomResourceColumnDefinition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + 
i += copy(dAtA[i:], m.Format) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Priority)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i += copy(dAtA[i:], m.JSONPath) + return i, nil +} + func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -431,6 +480,18 @@ func (m *CustomResourceDefinitionSpec) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.AdditionalPrinterColumns) > 0 { + for _, msg := range m.AdditionalPrinterColumns { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1257,6 +1318,23 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *CustomResourceColumnDefinition) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Priority)) + l = len(m.JSONPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *CustomResourceDefinition) Size() (n int) { var l int _ = l @@ -1350,6 +1428,12 @@ func (m *CustomResourceDefinitionSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.AdditionalPrinterColumns) > 0 { + for _, e := range m.AdditionalPrinterColumns { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1651,6 +1735,21 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *CustomResourceColumnDefinition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomResourceColumnDefinition{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Priority:` + fmt.Sprintf("%v", this.Priority) + `,`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `}`, + }, "") + return s +} func (this *CustomResourceDefinition) String() string { if this == nil { return "nil" @@ -1715,6 +1814,7 @@ func (this *CustomResourceDefinitionSpec) String() string { `Validation:` + strings.Replace(fmt.Sprintf("%v", this.Validation), "CustomResourceValidation", "CustomResourceValidation", 1) + `,`, `Subresources:` + strings.Replace(fmt.Sprintf("%v", this.Subresources), "CustomResourceSubresources", "CustomResourceSubresources", 1) + `,`, `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "CustomResourceDefinitionVersion", "CustomResourceDefinitionVersion", 1), `&`, ``, 1) + `,`, + `AdditionalPrinterColumns:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AdditionalPrinterColumns), "CustomResourceColumnDefinition", "CustomResourceColumnDefinition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1932,6 +2032,220 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *CustomResourceColumnDefinition) Unmarshal(dAtA []byte) error { + 
l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomResourceColumnDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= (int32(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2846,6 +3160,37 @@ func (m *CustomResourceDefinitionSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalPrinterColumns", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalPrinterColumns = append(m.AdditionalPrinterColumns, CustomResourceColumnDefinition{}) + if err := m.AdditionalPrinterColumns[len(m.AdditionalPrinterColumns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5529,143 +5874,150 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2200 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0x1c, 0x49, - 0x15, 0x77, 0xcf, 0x78, 0xfc, 0x51, 0xb6, 0x63, 0xbb, 0x12, 0x87, 0x8e, 0x49, 0x66, 0xec, 0x59, - 0x76, 0x65, 0x60, 0x33, 0x43, 0xf6, 0x83, 0x5d, 0x56, 0xe2, 0xe0, 0xb1, 0x0d, 0xca, 0x62, 0xc7, - 0x56, 0x4d, 0x12, 0x04, 0xfb, 0x59, 0xee, 0xae, 0x19, 0x77, 0xdc, 0x5f, 0xe9, 0xaa, 0x9e, 0xd8, - 0x12, 0x20, 0x3e, 0xb4, 0x42, 0x42, 0xc0, 0x22, 0x88, 0x90, 0x90, 0xb8, 0x80, 0xc4, 0x05, 0x21, - 0x38, 0xc0, 0x91, 0x3f, 0x20, 0xc7, 0x95, 0xb8, 0xec, 0x69, 0x44, 0x86, 0x7f, 0x01, 0x09, 0xc9, - 0x27, 0x54, 0x1f, 0x5d, 0xdd, 0x3d, 0xe3, 0xd9, 0x44, 0xda, 0x99, 0xcd, 0xcd, 0xfd, 0xde, 0xab, - 0xf7, 0xfb, 0xd5, 0xab, 0x57, 0xaf, 0xde, 0x1b, 0x83, 0xd6, 0xf1, 0xeb, 0xb4, 0xe6, 0x04, 0xf5, - 0xe3, 0xf8, 0x90, 0x44, 0x3e, 0x61, 0x84, 0xd6, 0x3b, 0xc4, 0xb7, 0x83, 0xa8, 0xae, 0x14, 0x38, - 0x74, 0xc8, 0x09, 0x23, 0x3e, 0x75, 0x02, 0x9f, 0x5e, 0xc7, 0xa1, 0x43, 0x49, 0xd4, 0x21, 0x51, - 0x3d, 0x3c, 0x6e, 0x73, 0x1d, 0xcd, 0x1b, 0xd4, 0x3b, 0x37, 0x0e, 0x09, 0xc3, 0x37, 0xea, 0x6d, - 0xe2, 0x93, 0x08, 0x33, 0x62, 0xd7, 0xc2, 0x28, 0x60, 0x01, 0xfc, 0xba, 0x74, 0x57, 0xcb, 0x59, - 0xbf, 0xa7, 0xdd, 0xd5, 0xc2, 0xe3, 0x36, 0xd7, 0xd1, 0xbc, 0x41, 0x4d, 0xb9, 0x5b, 0xbd, 0xde, - 0x76, 0xd8, 0x51, 0x7c, 0x58, 0xb3, 0x02, 0xaf, 0xde, 0x0e, 0xda, 0x41, 0x5d, 0x78, 
0x3d, 0x8c, - 0x5b, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x24, 0xda, 0xea, 0x2b, 0x29, 0x79, 0x0f, 0x5b, 0x47, 0x8e, - 0x4f, 0xa2, 0xd3, 0x94, 0xb1, 0x47, 0x18, 0xae, 0x77, 0x06, 0x38, 0xae, 0xd6, 0x87, 0xad, 0x8a, - 0x62, 0x9f, 0x39, 0x1e, 0x19, 0x58, 0xf0, 0xd5, 0x27, 0x2d, 0xa0, 0xd6, 0x11, 0xf1, 0xf0, 0xc0, - 0xba, 0x97, 0x87, 0xad, 0x8b, 0x99, 0xe3, 0xd6, 0x1d, 0x9f, 0x51, 0x16, 0xf5, 0x2f, 0xaa, 0xfe, - 0xa4, 0x08, 0xcc, 0xad, 0x98, 0xb2, 0xc0, 0x43, 0x84, 0x06, 0x71, 0x64, 0x91, 0x6d, 0xd2, 0x72, - 0x7c, 0x87, 0x39, 0x81, 0x0f, 0xdf, 0x07, 0x33, 0x7c, 0x57, 0x36, 0x66, 0xd8, 0x34, 0xd6, 0x8c, - 0x8d, 0xb9, 0x97, 0xbe, 0x52, 0x4b, 0x23, 0xae, 0x41, 0xd2, 0x30, 0x73, 0xeb, 0x5a, 0xe7, 0x46, - 0x6d, 0xff, 0xf0, 0x1e, 0xb1, 0xd8, 0x1e, 0x61, 0xb8, 0x01, 0x1f, 0x75, 0x2b, 0x13, 0xbd, 0x6e, - 0x05, 0xa4, 0x32, 0xa4, 0xbd, 0xc2, 0xef, 0x83, 0x49, 0x1a, 0x12, 0xcb, 0x2c, 0x08, 0xef, 0x6f, - 0xd5, 0x3e, 0xd5, 0x79, 0xd6, 0x86, 0x6d, 0xa4, 0x19, 0x12, 0xab, 0x31, 0xaf, 0x88, 0x4c, 0xf2, - 0x2f, 0x24, 0x60, 0xe1, 0x07, 0x06, 0x98, 0xa2, 0x0c, 0xb3, 0x98, 0x9a, 0x45, 0xc1, 0xe0, 0x9d, - 0x71, 0x31, 0x10, 0x20, 0x8d, 0x0b, 0x8a, 0xc3, 0x94, 0xfc, 0x46, 0x0a, 0xbc, 0xfa, 0xdf, 0x02, - 0x58, 0x1f, 0xb6, 0x74, 0x2b, 0xf0, 0x6d, 0x79, 0x1c, 0x37, 0xc1, 0x24, 0x3b, 0x0d, 0x89, 0x38, - 0x8a, 0xd9, 0xc6, 0xab, 0xc9, 0x7e, 0x6e, 0x9f, 0x86, 0xe4, 0xac, 0x5b, 0x79, 0xfe, 0x89, 0x0e, - 0xb8, 0x21, 0x12, 0x2e, 0xe0, 0xd7, 0xf4, 0xbe, 0x0b, 0xc2, 0xd9, 0x7a, 0x9e, 0xd8, 0x59, 0xb7, - 0xb2, 0xa8, 0x97, 0xe5, 0xb9, 0xc2, 0x0e, 0x80, 0x2e, 0xa6, 0xec, 0x76, 0x84, 0x7d, 0x2a, 0xdd, - 0x3a, 0x1e, 0x51, 0xe1, 0xfb, 0xd2, 0xd3, 0xa5, 0x07, 0x5f, 0xd1, 0x58, 0x55, 0x90, 0x70, 0x77, - 0xc0, 0x1b, 0x3a, 0x07, 0x01, 0xbe, 0x00, 0xa6, 0x22, 0x82, 0x69, 0xe0, 0x9b, 0x93, 0x82, 0xb2, - 0x8e, 0x25, 0x12, 0x52, 0xa4, 0xb4, 0xf0, 0x8b, 0x60, 0xda, 0x23, 0x94, 0xe2, 0x36, 0x31, 0x4b, - 0xc2, 0x70, 0x51, 0x19, 0x4e, 0xef, 0x49, 0x31, 0x4a, 0xf4, 0xd5, 0x33, 0x03, 0x5c, 0x1d, 0x16, - 0xb5, 0x5d, 0x87, 0x32, 0xf8, 0xf6, 0xc0, 0x05, 0xa8, 0x3d, 0xdd, 0x0e, 0xf9, 0x6a, 0x91, 0xfe, - 0x4b, 0x0a, 0x7c, 0x26, 0x91, 0x64, 0x92, 0xff, 0x7b, 0xa0, 0xe4, 0x30, 0xe2, 0xf1, 0x33, 0x28, - 0x6e, 0xcc, 0xbd, 0xf4, 0xed, 0x31, 0xe5, 0x5e, 0x63, 0x41, 0x71, 0x28, 0xdd, 0xe4, 0x68, 0x48, - 0x82, 0x56, 0xff, 0x54, 0x00, 0xd7, 0x86, 0x2d, 0xb9, 0x85, 0x3d, 0x42, 0x79, 0xc4, 0x43, 0x37, - 0x8e, 0xb0, 0xab, 0x32, 0x4e, 0x47, 0xfc, 0x40, 0x48, 0x91, 0xd2, 0xc2, 0x17, 0xc1, 0x0c, 0x75, - 0xfc, 0x76, 0xec, 0xe2, 0x48, 0xa5, 0x93, 0xde, 0x75, 0x53, 0xc9, 0x91, 0xb6, 0x80, 0x35, 0x00, - 0xe8, 0x51, 0x10, 0x31, 0x81, 0x61, 0x16, 0xd7, 0x8a, 0xdc, 0x33, 0x2f, 0x10, 0x4d, 0x2d, 0x45, - 0x19, 0x0b, 0xb8, 0x06, 0x26, 0x8f, 0x1d, 0xdf, 0x56, 0xa7, 0xae, 0x6f, 0xf1, 0xb7, 0x1c, 0xdf, - 0x46, 0x42, 0xc3, 0xf1, 0x5d, 0x87, 0x32, 0x2e, 0x51, 0x47, 0x9e, 0x8b, 0xba, 0xb0, 0xd4, 0x16, - 0x1c, 0xdf, 0xc2, 0x8c, 0xb4, 0x83, 0xc8, 0x21, 0xd4, 0x9c, 0x4a, 0xf1, 0xb7, 0xb4, 0x14, 0x65, - 0x2c, 0xaa, 0xff, 0x2a, 0x0d, 0x4f, 0x12, 0x5e, 0x4a, 0xe0, 0x73, 0xa0, 0xd4, 0x8e, 0x82, 0x38, - 0x54, 0x51, 0xd2, 0xd1, 0xfe, 0x26, 0x17, 0x22, 0xa9, 0xe3, 0x59, 0xd9, 0x21, 0x11, 0x3f, 0x30, - 0x15, 0x22, 0x9d, 0x95, 0x77, 0xa5, 0x18, 0x25, 0x7a, 0xf8, 0x23, 0x03, 0x94, 0x7c, 0x15, 0x1c, - 0x9e, 0x72, 0x6f, 0x8f, 0x29, 0x2f, 0x44, 0x78, 0x53, 0xba, 0x32, 0xf2, 0x12, 0x19, 0xbe, 0x02, - 0x4a, 0xd4, 0x0a, 0x42, 0xa2, 0xa2, 0x5e, 0x4e, 0x8c, 0x9a, 0x5c, 0x78, 0xd6, 0xad, 0x2c, 0x24, - 0xee, 0x84, 0x00, 0x49, 0x63, 0xf8, 0x53, 0x03, 0x80, 0x0e, 0x76, 0x1d, 0x1b, 0x73, 0xff, 0xe2, - 0x2c, 0x46, 
0x9d, 0xd6, 0x77, 0xb5, 0x7b, 0x79, 0x68, 0xe9, 0x37, 0xca, 0x40, 0xc3, 0x0f, 0x0d, - 0x30, 0x4f, 0xe3, 0xc3, 0x48, 0xad, 0xe2, 0xe7, 0xcc, 0xb9, 0x7c, 0x67, 0xa4, 0x5c, 0x9a, 0x19, - 0x80, 0xc6, 0x52, 0xaf, 0x5b, 0x99, 0xcf, 0x4a, 0x50, 0x8e, 0x00, 0xfc, 0xb9, 0x01, 0x66, 0xd4, - 0x09, 0x53, 0x73, 0x5a, 0x5c, 0xf8, 0x77, 0xc7, 0x74, 0xb0, 0x2a, 0xa3, 0xd2, 0x5b, 0xa0, 0x04, - 0x14, 0x69, 0x06, 0xd5, 0x0f, 0x8b, 0xa0, 0xfc, 0xc9, 0x8f, 0x15, 0x7c, 0x68, 0x00, 0x60, 0x25, - 0x8f, 0x00, 0x35, 0x0d, 0xc1, 0xf9, 0xfd, 0x31, 0x71, 0xd6, 0xaf, 0x4d, 0xda, 0x30, 0x68, 0x11, - 0xbf, 0x8f, 0xfa, 0x6f, 0xf8, 0x3b, 0x03, 0x2c, 0x60, 0xcb, 0x22, 0x21, 0x23, 0xb6, 0xac, 0x21, - 0x85, 0xcf, 0xe0, 0x9a, 0xac, 0x28, 0x56, 0x0b, 0x9b, 0x59, 0x68, 0x94, 0x67, 0x02, 0xdf, 0x00, - 0x17, 0x28, 0x0b, 0x22, 0x62, 0x27, 0x11, 0x57, 0xf5, 0x0d, 0xf6, 0xba, 0x95, 0x0b, 0xcd, 0x9c, - 0x06, 0xf5, 0x59, 0x56, 0x7f, 0x6b, 0x80, 0xca, 0x13, 0x4e, 0x94, 0xd7, 0x42, 0x7e, 0x3f, 0x55, - 0xa5, 0xd1, 0xb5, 0x90, 0x83, 0x23, 0xa1, 0xe1, 0x35, 0x5b, 0x6c, 0xd7, 0x16, 0x51, 0x99, 0xc9, - 0x74, 0x1c, 0x42, 0x8a, 0x94, 0x96, 0xd7, 0x23, 0x8e, 0xcf, 0x5f, 0xc9, 0xa2, 0x30, 0xd4, 0xf5, - 0xa8, 0x29, 0xc5, 0x28, 0xd1, 0x57, 0xff, 0x67, 0xf4, 0xa7, 0x4a, 0x26, 0xcd, 0x9b, 0x16, 0x76, - 0x09, 0xdc, 0x06, 0x4b, 0xbc, 0x9f, 0x42, 0x24, 0x74, 0x1d, 0x0b, 0xd3, 0x03, 0xcc, 0x8e, 0x14, - 0x47, 0x53, 0xb9, 0x5d, 0x6a, 0xf6, 0xe9, 0xd1, 0xc0, 0x0a, 0xf8, 0x26, 0x80, 0xb2, 0xc7, 0xc8, - 0xf9, 0x91, 0xe5, 0x52, 0x77, 0x0b, 0xcd, 0x01, 0x0b, 0x74, 0xce, 0x2a, 0xb8, 0x05, 0x96, 0x5d, - 0x7c, 0x48, 0xdc, 0x26, 0x71, 0x89, 0xc5, 0x82, 0x48, 0xb8, 0x2a, 0x0a, 0x57, 0x2b, 0xbd, 0x6e, - 0x65, 0x79, 0xb7, 0x5f, 0x89, 0x06, 0xed, 0xab, 0xeb, 0xfd, 0x27, 0x92, 0xdd, 0xb8, 0xec, 0xdc, - 0xfe, 0x50, 0x00, 0xab, 0xc3, 0xab, 0x02, 0xfc, 0x71, 0xda, 0x60, 0xca, 0xfe, 0xe1, 0xdd, 0x71, - 0x55, 0x20, 0xd5, 0x61, 0x82, 0xc1, 0xee, 0x12, 0xfe, 0x80, 0x17, 0x73, 0xec, 0x12, 0x75, 0x51, - 0xde, 0x19, 0x1b, 0x05, 0x0e, 0xd2, 0x98, 0x95, 0xef, 0x04, 0x76, 0xc5, 0xb3, 0x80, 0x5d, 0x52, - 0xfd, 0xb3, 0xd1, 0x3f, 0x63, 0xa4, 0x55, 0x1b, 0xfe, 0xc2, 0x00, 0x8b, 0x41, 0x48, 0xfc, 0xcd, - 0x83, 0x9b, 0x77, 0x5f, 0x6e, 0x8a, 0xc9, 0x46, 0x85, 0xea, 0xd6, 0xa7, 0xe4, 0xf9, 0x66, 0x73, - 0xff, 0x96, 0x74, 0x78, 0x10, 0x05, 0x21, 0x6d, 0x5c, 0xec, 0x75, 0x2b, 0x8b, 0xfb, 0x79, 0x28, - 0xd4, 0x8f, 0x5d, 0xf5, 0xc0, 0xca, 0xce, 0x09, 0x23, 0x91, 0x8f, 0xdd, 0xed, 0xc0, 0x8a, 0x3d, - 0xe2, 0x33, 0x49, 0xf4, 0x55, 0x30, 0x67, 0x13, 0x6a, 0x45, 0x4e, 0x28, 0x1e, 0x37, 0x99, 0xde, - 0x17, 0x55, 0x5a, 0xce, 0x6d, 0xa7, 0x2a, 0x94, 0xb5, 0x83, 0xd7, 0x40, 0x31, 0x8e, 0x5c, 0x95, - 0xc5, 0x73, 0xca, 0xbc, 0x78, 0x07, 0xed, 0x22, 0x2e, 0xaf, 0xae, 0x83, 0x49, 0xce, 0x13, 0x5e, - 0x01, 0xc5, 0x08, 0x3f, 0x10, 0x5e, 0xe7, 0x1b, 0xd3, 0xdc, 0x04, 0xe1, 0x07, 0x88, 0xcb, 0xaa, - 0x7f, 0xb9, 0x0a, 0x16, 0xfb, 0xf6, 0x02, 0x57, 0x41, 0xc1, 0xb1, 0x15, 0x07, 0xa0, 0x9c, 0x16, - 0x6e, 0x6e, 0xa3, 0x82, 0x63, 0xc3, 0xd7, 0xc0, 0x94, 0x9c, 0x10, 0x15, 0x68, 0x45, 0x97, 0x00, - 0x21, 0xe5, 0xaf, 0x77, 0xea, 0x8e, 0x13, 0x51, 0xe6, 0x82, 0x03, 0x69, 0xa9, 0x5b, 0x22, 0x39, - 0x90, 0x16, 0xe2, 0xb2, 0xfe, 0xcd, 0x4f, 0x3e, 0xe5, 0xe6, 0xd7, 0xd4, 0xc4, 0x52, 0xca, 0xd7, - 0xab, 0xcc, 0x20, 0xf2, 0x02, 0x98, 0x6a, 0x05, 0x91, 0x87, 0x99, 0x78, 0xa1, 0x33, 0x3d, 0xe6, - 0x37, 0x84, 0x14, 0x29, 0x2d, 0x6f, 0xb2, 0x98, 0xc3, 0x5c, 0x62, 0x4e, 0xe7, 0x9b, 0xac, 0xdb, - 0x5c, 0x88, 0xa4, 0x0e, 0xde, 0x03, 0xd3, 0x36, 0x69, 0xe1, 0xd8, 0x65, 0xe6, 0x8c, 0x48, 0xa1, - 0xad, 0x11, 0xa4, 0x50, 0x63, 0x8e, 
0x57, 0xc5, 0x6d, 0xe9, 0x17, 0x25, 0x00, 0xf0, 0x79, 0x30, - 0xed, 0xe1, 0x13, 0xc7, 0x8b, 0x3d, 0x73, 0x76, 0xcd, 0xd8, 0x30, 0xa4, 0xd9, 0x9e, 0x14, 0xa1, - 0x44, 0xc7, 0x2b, 0x23, 0x39, 0xb1, 0xdc, 0x98, 0x3a, 0x1d, 0xa2, 0x94, 0x26, 0x10, 0x05, 0x57, - 0x57, 0xc6, 0x9d, 0x3e, 0x3d, 0x1a, 0x58, 0x21, 0xc0, 0x1c, 0x5f, 0x2c, 0x9e, 0xcb, 0x80, 0x49, - 0x11, 0x4a, 0x74, 0x79, 0x30, 0x65, 0x3f, 0x3f, 0x0c, 0x4c, 0x2d, 0x1e, 0x58, 0x01, 0xbf, 0x0c, - 0x66, 0x3d, 0x7c, 0xb2, 0x4b, 0xfc, 0x36, 0x3b, 0x32, 0x17, 0xd6, 0x8c, 0x8d, 0x62, 0x63, 0xa1, - 0xd7, 0xad, 0xcc, 0xee, 0x25, 0x42, 0x94, 0xea, 0x85, 0xb1, 0xe3, 0x2b, 0xe3, 0x0b, 0x19, 0xe3, - 0x44, 0x88, 0x52, 0x3d, 0x7f, 0x74, 0x42, 0xcc, 0xf8, 0xe5, 0x32, 0x17, 0xf3, 0x4d, 0xf0, 0x81, - 0x14, 0xa3, 0x44, 0x0f, 0x37, 0xc0, 0x8c, 0x87, 0x4f, 0xc4, 0xc0, 0x62, 0x2e, 0x09, 0xb7, 0xf3, - 0xbc, 0x93, 0xd9, 0x53, 0x32, 0xa4, 0xb5, 0xc2, 0xd2, 0xf1, 0xa5, 0xe5, 0x72, 0xc6, 0x52, 0xc9, - 0x90, 0xd6, 0xf2, 0x24, 0x8e, 0x7d, 0xe7, 0x7e, 0x4c, 0xa4, 0x31, 0x14, 0x91, 0xd1, 0x49, 0x7c, - 0x27, 0x55, 0xa1, 0xac, 0x1d, 0x1f, 0x18, 0xbc, 0xd8, 0x65, 0x4e, 0xe8, 0x92, 0xfd, 0x96, 0x79, - 0x51, 0xc4, 0x5f, 0xf4, 0x9e, 0x7b, 0x5a, 0x8a, 0x32, 0x16, 0x90, 0x80, 0x49, 0xe2, 0xc7, 0x9e, - 0x79, 0x49, 0x34, 0x4c, 0x23, 0x49, 0x41, 0x7d, 0x73, 0x76, 0xfc, 0xd8, 0x43, 0xc2, 0x3d, 0x7c, - 0x0d, 0x2c, 0x78, 0xf8, 0x84, 0x97, 0x03, 0x12, 0x31, 0x3e, 0xca, 0xac, 0x88, 0xcd, 0x2f, 0xf3, - 0x26, 0x65, 0x2f, 0xab, 0x40, 0x79, 0x3b, 0xb1, 0xd0, 0xf1, 0x33, 0x0b, 0x2f, 0x67, 0x16, 0x66, - 0x15, 0x28, 0x6f, 0xc7, 0x23, 0x1d, 0x91, 0xfb, 0xb1, 0x13, 0x11, 0xdb, 0xfc, 0x9c, 0xe8, 0x6b, - 0x44, 0xa4, 0x91, 0x92, 0x21, 0xad, 0x85, 0x9d, 0x64, 0xb2, 0x35, 0xc5, 0x35, 0xbc, 0x33, 0xda, - 0x4a, 0xbe, 0x1f, 0x6d, 0x46, 0x11, 0x3e, 0x95, 0x2f, 0x4d, 0x76, 0xa6, 0x85, 0x14, 0x94, 0xb0, - 0xeb, 0xee, 0xb7, 0xcc, 0x2b, 0x22, 0xf6, 0xa3, 0x7e, 0x41, 0x74, 0xd5, 0xd9, 0xe4, 0x20, 0x48, - 0x62, 0x71, 0xd0, 0xc0, 0xe7, 0xa9, 0xb1, 0x3a, 0x5e, 0xd0, 0x7d, 0x0e, 0x82, 0x24, 0x96, 0xd8, - 0xa9, 0x7f, 0xba, 0xdf, 0x32, 0x3f, 0x3f, 0xe6, 0x9d, 0x72, 0x10, 0x24, 0xb1, 0xa0, 0x03, 0x8a, - 0x7e, 0xc0, 0xcc, 0xab, 0x63, 0x79, 0x9e, 0xc5, 0x83, 0x73, 0x2b, 0x60, 0x88, 0x63, 0xc0, 0x5f, - 0x1b, 0x00, 0x84, 0x69, 0x8a, 0x5e, 0x1b, 0xc9, 0xc0, 0xd4, 0x07, 0x59, 0x4b, 0x73, 0x7b, 0xc7, - 0x67, 0xd1, 0x69, 0x3a, 0x7a, 0x64, 0xee, 0x40, 0x86, 0x05, 0xfc, 0xa3, 0x01, 0x2e, 0x61, 0x5b, - 0x0e, 0x22, 0xd8, 0xcd, 0xdc, 0xa0, 0xb2, 0x88, 0xc8, 0xed, 0x51, 0xa7, 0x79, 0x23, 0x08, 0xdc, - 0x86, 0xd9, 0xeb, 0x56, 0x2e, 0x6d, 0x9e, 0x83, 0x8a, 0xce, 0xe5, 0x02, 0xff, 0x6a, 0x80, 0x65, - 0x55, 0x45, 0x33, 0x0c, 0x2b, 0x22, 0x80, 0x64, 0xd4, 0x01, 0xec, 0xc7, 0x91, 0x71, 0xbc, 0xa2, - 0xe2, 0xb8, 0x3c, 0xa0, 0x47, 0x83, 0xd4, 0xe0, 0x3f, 0x0c, 0x30, 0x6f, 0x93, 0x90, 0xf8, 0x36, - 0xf1, 0x2d, 0xce, 0x75, 0x6d, 0x24, 0x93, 0x66, 0x3f, 0xd7, 0xed, 0x0c, 0x84, 0xa4, 0x59, 0x53, - 0x34, 0xe7, 0xb3, 0xaa, 0xb3, 0x6e, 0xe5, 0x72, 0xba, 0x34, 0xab, 0x41, 0x39, 0x96, 0xf0, 0x37, - 0x06, 0x58, 0x4c, 0x0f, 0x40, 0x3e, 0x29, 0xeb, 0x63, 0xcc, 0x03, 0xd1, 0xbe, 0x6e, 0xe6, 0x01, - 0x51, 0x3f, 0x03, 0xf8, 0x37, 0x83, 0x77, 0x6a, 0xc9, 0xdc, 0x48, 0xcd, 0xaa, 0x88, 0xe5, 0x7b, - 0x23, 0x8f, 0xa5, 0x46, 0x90, 0xa1, 0x7c, 0x31, 0x6d, 0x05, 0xb5, 0xe6, 0xac, 0x5b, 0x59, 0xc9, - 0x46, 0x52, 0x2b, 0x50, 0x96, 0x21, 0xfc, 0x99, 0x01, 0xe6, 0x49, 0xda, 0x71, 0x53, 0xf3, 0xb9, - 0x91, 0x04, 0xf1, 0xdc, 0x26, 0x5e, 0xfe, 0x4a, 0x93, 0x51, 0x51, 0x94, 0xc3, 0xe6, 0x1d, 0x24, - 0x39, 0xc1, 0x5e, 0xe8, 0x12, 0xf3, 0x0b, 0x23, 0xee, 0x20, 
0x77, 0xa4, 0x5f, 0x94, 0x00, 0xac, - 0xf2, 0xc9, 0xa7, 0xef, 0xe6, 0xc0, 0x25, 0x50, 0x3c, 0x26, 0xa7, 0xb2, 0xb1, 0x47, 0xfc, 0x4f, - 0x68, 0x83, 0x52, 0x07, 0xbb, 0x71, 0x32, 0xbc, 0x8d, 0xb8, 0xea, 0x22, 0xe9, 0xfc, 0x8d, 0xc2, - 0xeb, 0xc6, 0xea, 0x43, 0x03, 0x5c, 0x3e, 0xff, 0x42, 0x3f, 0x53, 0x5a, 0xbf, 0x37, 0xc0, 0xf2, - 0xc0, 0xdd, 0x3d, 0x87, 0xd1, 0xfd, 0x3c, 0xa3, 0xb7, 0x46, 0x7d, 0x09, 0x9b, 0x2c, 0x72, 0xfc, - 0xb6, 0xe8, 0x3c, 0xb2, 0xf4, 0x7e, 0x69, 0x80, 0xa5, 0xfe, 0xeb, 0xf0, 0x2c, 0xe3, 0x55, 0x7d, - 0x58, 0x00, 0x97, 0xcf, 0x6f, 0x98, 0x60, 0xa4, 0x27, 0xc3, 0xf1, 0x4c, 0xd8, 0x20, 0x9d, 0x32, - 0xf5, 0x50, 0xf9, 0x81, 0x01, 0xe6, 0xee, 0x69, 0xbb, 0xe4, 0x7f, 0x1d, 0x23, 0x9f, 0xed, 0x93, - 0xfa, 0x93, 0x2a, 0x28, 0xca, 0xe2, 0x56, 0xff, 0x6e, 0x80, 0x95, 0x73, 0x0b, 0x2b, 0x1f, 0x41, - 0xb1, 0xeb, 0x06, 0x0f, 0xe4, 0x4f, 0x34, 0x99, 0x9f, 0xcc, 0x36, 0x85, 0x14, 0x29, 0x6d, 0x26, - 0x7a, 0x85, 0xcf, 0x2a, 0x7a, 0xd5, 0x7f, 0x1a, 0xe0, 0xea, 0x27, 0x65, 0xe2, 0x33, 0x39, 0xd2, - 0x0d, 0x30, 0xa3, 0x9a, 0xa2, 0x53, 0x71, 0x9c, 0x6a, 0x0e, 0x50, 0x45, 0xe3, 0x14, 0x69, 0x6d, - 0xe3, 0xfa, 0xa3, 0xc7, 0xe5, 0x89, 0x8f, 0x1e, 0x97, 0x27, 0x3e, 0x7e, 0x5c, 0x9e, 0xf8, 0x61, - 0xaf, 0x6c, 0x3c, 0xea, 0x95, 0x8d, 0x8f, 0x7a, 0x65, 0xe3, 0xe3, 0x5e, 0xd9, 0xf8, 0x77, 0xaf, - 0x6c, 0xfc, 0xea, 0x3f, 0xe5, 0x89, 0xef, 0x4e, 0x2b, 0xf0, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, - 0x8c, 0x3a, 0x89, 0x32, 0x37, 0x20, 0x00, 0x00, + // 2306 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6f, 0x5b, 0x49, + 0x15, 0xef, 0xb5, 0xe3, 0xc4, 0x19, 0x27, 0x4d, 0x32, 0x6d, 0xca, 0x6d, 0x68, 0xed, 0xd4, 0x65, + 0x57, 0x01, 0xb6, 0x36, 0xed, 0xee, 0xb2, 0xcb, 0x4a, 0x3c, 0xc4, 0x49, 0x41, 0x5d, 0x9a, 0x26, + 0x1a, 0xb7, 0x45, 0xb0, 0x9f, 0x13, 0x7b, 0xe2, 0xdc, 0xe6, 0x7e, 0x75, 0x66, 0xae, 0x9b, 0x48, + 0x80, 0xf8, 0xd0, 0x0a, 0x09, 0x01, 0x0b, 0x6c, 0x85, 0x84, 0xc4, 0x0b, 0x48, 0xbc, 0x20, 0x04, + 0x0f, 0xf0, 0x06, 0x7f, 0x40, 0x1f, 0xf7, 0x71, 0x9f, 0x2c, 0x6a, 0xfe, 0x05, 0x24, 0xa4, 0x3c, + 0xa1, 0xf9, 0xb8, 0x73, 0xef, 0xb5, 0xe3, 0x6d, 0xb5, 0x6b, 0x6f, 0xdf, 0x7c, 0xcf, 0x39, 0x73, + 0x7e, 0xbf, 0x39, 0x73, 0xe6, 0xcc, 0x39, 0x09, 0xd8, 0x3b, 0x78, 0x95, 0xd5, 0x9c, 0xa0, 0x7e, + 0x10, 0xed, 0x12, 0xea, 0x13, 0x4e, 0x58, 0xbd, 0x4b, 0xfc, 0x76, 0x40, 0xeb, 0x5a, 0x81, 0x43, + 0x87, 0x1c, 0x72, 0xe2, 0x33, 0x27, 0xf0, 0xd9, 0x15, 0x1c, 0x3a, 0x8c, 0xd0, 0x2e, 0xa1, 0xf5, + 0xf0, 0xa0, 0x23, 0x74, 0x2c, 0x6b, 0x50, 0xef, 0x5e, 0xdd, 0x25, 0x1c, 0x5f, 0xad, 0x77, 0x88, + 0x4f, 0x28, 0xe6, 0xa4, 0x5d, 0x0b, 0x69, 0xc0, 0x03, 0xf8, 0x75, 0xe5, 0xae, 0x96, 0xb1, 0x7e, + 0xc7, 0xb8, 0xab, 0x85, 0x07, 0x1d, 0xa1, 0x63, 0x59, 0x83, 0x9a, 0x76, 0xb7, 0x72, 0xa5, 0xe3, + 0xf0, 0xfd, 0x68, 0xb7, 0xd6, 0x0a, 0xbc, 0x7a, 0x27, 0xe8, 0x04, 0x75, 0xe9, 0x75, 0x37, 0xda, + 0x93, 0x5f, 0xf2, 0x43, 0xfe, 0x52, 0x68, 0x2b, 0x2f, 0x25, 0xe4, 0x3d, 0xdc, 0xda, 0x77, 0x7c, + 0x42, 0x8f, 0x12, 0xc6, 0x1e, 0xe1, 0xb8, 0xde, 0x1d, 0xe2, 0xb8, 0x52, 0x1f, 0xb5, 0x8a, 0x46, + 0x3e, 0x77, 0x3c, 0x32, 0xb4, 0xe0, 0xab, 0x4f, 0x5a, 0xc0, 0x5a, 0xfb, 0xc4, 0xc3, 0x43, 0xeb, + 0x5e, 0x1c, 0xb5, 0x2e, 0xe2, 0x8e, 0x5b, 0x77, 0x7c, 0xce, 0x38, 0x1d, 0x5c, 0x54, 0xfd, 0x20, + 0x07, 0xca, 0x1b, 0x11, 0xe3, 0x81, 0x87, 0x08, 0x0b, 0x22, 0xda, 0x22, 0x1b, 0x81, 0x1b, 0x79, + 0xfe, 0x26, 0xd9, 0x73, 0x7c, 0x87, 0x3b, 0x81, 0x0f, 0x57, 0xc1, 0x94, 0x8f, 0x3d, 0x62, 0x5b, + 0xab, 0xd6, 0xda, 0x6c, 0x63, 0xee, 0x51, 0xaf, 0x72, 0xaa, 0xdf, 0xab, 0x4c, 0xdd, 
0xc2, 0x1e, + 0x41, 0x52, 0x23, 0x2c, 0xf8, 0x51, 0x48, 0xec, 0x5c, 0xd6, 0xe2, 0xf6, 0x51, 0x48, 0x90, 0xd4, + 0xc0, 0xe7, 0xc1, 0xf4, 0x5e, 0x40, 0x3d, 0xcc, 0xed, 0xbc, 0xb4, 0x39, 0xad, 0x6d, 0xa6, 0xbf, + 0x21, 0xa5, 0x48, 0x6b, 0xe1, 0xcb, 0xa0, 0xd4, 0x26, 0xac, 0x45, 0x9d, 0x50, 0x40, 0xdb, 0x53, + 0xd2, 0xf8, 0x8c, 0x36, 0x2e, 0x6d, 0x26, 0x2a, 0x94, 0xb6, 0x83, 0x2f, 0x80, 0x62, 0x48, 0x9d, + 0x80, 0x3a, 0xfc, 0xc8, 0x2e, 0xac, 0x5a, 0x6b, 0x85, 0xc6, 0xa2, 0x5e, 0x53, 0xdc, 0xd1, 0x72, + 0x64, 0x2c, 0xe0, 0x2a, 0x28, 0xbe, 0xde, 0xdc, 0xbe, 0xb5, 0x83, 0xf9, 0xbe, 0x3d, 0x2d, 0x11, + 0xa6, 0x84, 0x35, 0x2a, 0xde, 0xd3, 0xd2, 0xea, 0x4f, 0xf2, 0xc0, 0xce, 0x46, 0x25, 0x15, 0x8f, + 0x77, 0x41, 0x51, 0x9c, 0x75, 0x1b, 0x73, 0x2c, 0x63, 0x52, 0xba, 0xf6, 0x95, 0x5a, 0x92, 0x87, + 0x26, 0xf4, 0x49, 0xf2, 0x09, 0xeb, 0x5a, 0xf7, 0x6a, 0x6d, 0x7b, 0xf7, 0x1e, 0x69, 0xf1, 0x2d, + 0xc2, 0x71, 0x03, 0x6a, 0x7a, 0x20, 0x91, 0x21, 0xe3, 0x15, 0x7e, 0x1f, 0x4c, 0xb1, 0x90, 0xb4, + 0x64, 0x3c, 0x4b, 0xd7, 0xde, 0xa8, 0x7d, 0xaa, 0x2c, 0xaf, 0x8d, 0xda, 0x48, 0x33, 0x24, 0xad, + 0xe4, 0xb0, 0xc4, 0x17, 0x92, 0xb0, 0xf0, 0x3d, 0x0b, 0x4c, 0x33, 0x8e, 0x79, 0xc4, 0xe4, 0x69, + 0x95, 0xae, 0xbd, 0x35, 0x29, 0x06, 0x12, 0x24, 0x49, 0x06, 0xf5, 0x8d, 0x34, 0x78, 0xf5, 0xbf, + 0x39, 0x70, 0x69, 0xd4, 0xd2, 0x8d, 0xc0, 0x6f, 0xab, 0xe3, 0xb8, 0xa1, 0x93, 0x4f, 0xa5, 0xe7, + 0xcb, 0xe9, 0xe4, 0x3b, 0xee, 0x55, 0x9e, 0x7b, 0xa2, 0x83, 0x54, 0x96, 0x7e, 0xcd, 0xec, 0x5b, + 0x65, 0xf2, 0xa5, 0x2c, 0xb1, 0xe3, 0x5e, 0x65, 0xc1, 0x2c, 0xcb, 0x72, 0x85, 0x5d, 0x00, 0x5d, + 0xcc, 0xf8, 0x6d, 0x8a, 0x7d, 0xa6, 0xdc, 0x3a, 0x1e, 0xd1, 0xe1, 0xfb, 0xd2, 0xd3, 0xa5, 0x87, + 0x58, 0xd1, 0x58, 0xd1, 0x90, 0xf0, 0xe6, 0x90, 0x37, 0x74, 0x02, 0x82, 0xb8, 0x58, 0x94, 0x60, + 0x66, 0xee, 0x8a, 0x89, 0x25, 0x92, 0x52, 0xa4, 0xb5, 0xf0, 0x8b, 0x60, 0xc6, 0x23, 0x8c, 0xe1, + 0x0e, 0x91, 0x17, 0x64, 0xb6, 0xb1, 0xa0, 0x0d, 0x67, 0xb6, 0x94, 0x18, 0xc5, 0xfa, 0xea, 0xb1, + 0x05, 0x2e, 0x8c, 0x8a, 0xda, 0x4d, 0x87, 0x71, 0xf8, 0xe6, 0xd0, 0x05, 0xa8, 0x3d, 0xdd, 0x0e, + 0xc5, 0x6a, 0x99, 0xfe, 0xe6, 0x76, 0xc6, 0x92, 0x54, 0xf2, 0x7f, 0x0f, 0x14, 0x1c, 0x4e, 0x3c, + 0x71, 0x06, 0xf9, 0xb5, 0xd2, 0xb5, 0x6f, 0x4f, 0x28, 0xf7, 0x1a, 0xf3, 0x9a, 0x43, 0xe1, 0x86, + 0x40, 0x43, 0x0a, 0xb4, 0xfa, 0xa7, 0x1c, 0xb8, 0x38, 0x6a, 0x89, 0xa8, 0x78, 0x4c, 0x44, 0x3c, + 0x74, 0x23, 0x8a, 0x5d, 0x9d, 0x71, 0x26, 0xe2, 0x3b, 0x52, 0x8a, 0xb4, 0x56, 0xd4, 0x24, 0xe6, + 0xf8, 0x9d, 0xc8, 0xc5, 0x54, 0xa7, 0x93, 0xd9, 0x75, 0x53, 0xcb, 0x91, 0xb1, 0x80, 0x35, 0x00, + 0xd8, 0x7e, 0x40, 0xb9, 0xc4, 0xb0, 0xf3, 0xab, 0x79, 0xe1, 0x59, 0x14, 0x88, 0xa6, 0x91, 0xa2, + 0x94, 0x85, 0x28, 0xb9, 0x07, 0x8e, 0xdf, 0xd6, 0xa7, 0x6e, 0x6e, 0xf1, 0xb7, 0x1c, 0xbf, 0x8d, + 0xa4, 0x46, 0xe0, 0xbb, 0x0e, 0xe3, 0x42, 0xa2, 0x8f, 0x3c, 0x13, 0x75, 0x69, 0x69, 0x2c, 0x04, + 0x7e, 0x0b, 0x73, 0xd2, 0x09, 0xa8, 0x43, 0x98, 0x3d, 0x9d, 0xe0, 0x6f, 0x18, 0x29, 0x4a, 0x59, + 0x54, 0x7f, 0x3d, 0x33, 0x3a, 0x49, 0x44, 0x29, 0x81, 0x97, 0x41, 0xa1, 0x43, 0x83, 0x28, 0xd4, + 0x51, 0x32, 0xd1, 0xfe, 0xa6, 0x10, 0x22, 0xa5, 0x13, 0x59, 0xd9, 0x25, 0x54, 0x1c, 0x98, 0x0e, + 0x91, 0xc9, 0xca, 0xbb, 0x4a, 0x8c, 0x62, 0x3d, 0xfc, 0x91, 0x05, 0x0a, 0xbe, 0x0e, 0x8e, 0x48, + 0xb9, 0x37, 0x27, 0x94, 0x17, 0x32, 0xbc, 0x09, 0x5d, 0x15, 0x79, 0x85, 0x0c, 0x5f, 0x02, 0x05, + 0xd6, 0x0a, 0x42, 0xa2, 0xa3, 0x5e, 0x8e, 0x8d, 0x9a, 0x42, 0x78, 0xdc, 0xab, 0xcc, 0xc7, 0xee, + 0xa4, 0x00, 0x29, 0x63, 0xf8, 0x53, 0x0b, 0x80, 0x2e, 0x76, 0x9d, 0x36, 0x96, 0x6f, 0x5a, 0x41, + 0xd2, 0x1f, 
0x6f, 0x5a, 0xdf, 0x35, 0xee, 0xd5, 0xa1, 0x25, 0xdf, 0x28, 0x05, 0x0d, 0xdf, 0xb7, + 0xc0, 0x1c, 0x8b, 0x76, 0xa9, 0x5e, 0xc5, 0xe4, 0xeb, 0x57, 0xba, 0xf6, 0x9d, 0xb1, 0x72, 0x69, + 0xa6, 0x00, 0x1a, 0x8b, 0xfd, 0x5e, 0x65, 0x2e, 0x2d, 0x41, 0x19, 0x02, 0xf0, 0xe7, 0x16, 0x28, + 0xea, 0x13, 0x66, 0xf6, 0x8c, 0xbc, 0xf0, 0x6f, 0x4f, 0xe8, 0x60, 0x75, 0x46, 0x25, 0xb7, 0x40, + 0x0b, 0x18, 0x32, 0x0c, 0xe0, 0x3f, 0x2d, 0x60, 0xe3, 0xb6, 0x2a, 0xf0, 0xd8, 0xdd, 0xa1, 0x8e, + 0xcf, 0x09, 0x55, 0x0d, 0x11, 0xb3, 0x8b, 0x92, 0xde, 0x78, 0xdf, 0xc2, 0xc1, 0x66, 0xab, 0xb1, + 0xaa, 0xd9, 0xd9, 0xeb, 0x23, 0x68, 0xa0, 0x91, 0x04, 0xab, 0xef, 0xe7, 0x07, 0x7b, 0xb9, 0xc1, + 0xa7, 0x16, 0x3e, 0xb4, 0x00, 0x68, 0xc5, 0x4f, 0x18, 0xb3, 0x2d, 0xb9, 0xa5, 0x77, 0x27, 0x14, + 0x71, 0xf3, 0x56, 0x26, 0xed, 0x8e, 0x11, 0x89, 0x6a, 0x62, 0x7e, 0xc3, 0xdf, 0x59, 0x60, 0x1e, + 0xb7, 0x5a, 0x24, 0xe4, 0xa4, 0xad, 0x2a, 0x60, 0xee, 0x33, 0xb8, 0xe4, 0xcb, 0x9a, 0xd5, 0xfc, + 0x7a, 0x1a, 0x1a, 0x65, 0x99, 0xc0, 0xd7, 0xc0, 0x69, 0xc6, 0x03, 0x4a, 0xda, 0x71, 0xbe, 0xe8, + 0xea, 0x0c, 0xfb, 0xbd, 0xca, 0xe9, 0x66, 0x46, 0x83, 0x06, 0x2c, 0xab, 0xbf, 0xb5, 0x40, 0xe5, + 0x09, 0xf9, 0xf8, 0x14, 0xed, 0xf5, 0xf3, 0x60, 0x5a, 0x6e, 0xb7, 0x2d, 0xa3, 0x52, 0x4c, 0xf5, + 0x4b, 0x52, 0x8a, 0xb4, 0x56, 0x54, 0x53, 0x81, 0x2f, 0xde, 0xf8, 0xbc, 0x34, 0x34, 0xd5, 0xb4, + 0xa9, 0xc4, 0x28, 0xd6, 0x57, 0xff, 0x67, 0x0d, 0xa6, 0x4a, 0xea, 0x92, 0x36, 0x5b, 0xd8, 0x25, + 0x70, 0x13, 0x2c, 0x8a, 0x6e, 0x10, 0x91, 0xd0, 0x75, 0x5a, 0x98, 0xc9, 0x6e, 0x59, 0x71, 0xb4, + 0xb5, 0xdb, 0xc5, 0xe6, 0x80, 0x1e, 0x0d, 0xad, 0x80, 0xaf, 0x03, 0xa8, 0x3a, 0xa4, 0x8c, 0x1f, + 0x55, 0xec, 0x4d, 0xaf, 0xd3, 0x1c, 0xb2, 0x40, 0x27, 0xac, 0x82, 0x1b, 0x60, 0xc9, 0xc5, 0xbb, + 0xc4, 0x6d, 0x12, 0x97, 0xb4, 0x78, 0x40, 0xa5, 0x2b, 0x35, 0x4f, 0x2c, 0xf7, 0x7b, 0x95, 0xa5, + 0x9b, 0x83, 0x4a, 0x34, 0x6c, 0x5f, 0xbd, 0x34, 0x78, 0x22, 0xe9, 0x8d, 0xab, 0xbe, 0xf3, 0x0f, + 0x39, 0xb0, 0x32, 0xba, 0xa6, 0xc1, 0x1f, 0x27, 0xed, 0xb1, 0xea, 0x7e, 0xde, 0x9e, 0x54, 0xfd, + 0xd4, 0xfd, 0x31, 0x18, 0xee, 0x8d, 0xe1, 0x0f, 0xc4, 0x53, 0x84, 0x5d, 0xa2, 0x2f, 0xca, 0x5b, + 0x13, 0xa3, 0x20, 0x40, 0x1a, 0xb3, 0xea, 0x95, 0xc3, 0xae, 0x7c, 0xd4, 0xb0, 0x4b, 0xaa, 0x7f, + 0xb6, 0x06, 0x27, 0xa4, 0xe4, 0xcd, 0x81, 0xbf, 0xb0, 0xc0, 0x42, 0x10, 0x12, 0x7f, 0x7d, 0xe7, + 0xc6, 0xdd, 0x17, 0x9b, 0x72, 0x5a, 0xd5, 0xa1, 0xba, 0xf5, 0x29, 0x79, 0x8a, 0xb9, 0x4d, 0x39, + 0xdc, 0xa1, 0x41, 0xc8, 0x1a, 0x67, 0xfa, 0xbd, 0xca, 0xc2, 0x76, 0x16, 0x0a, 0x0d, 0x62, 0x57, + 0x3d, 0xb0, 0x7c, 0xfd, 0x90, 0x13, 0xea, 0x63, 0x77, 0x33, 0x68, 0x45, 0x1e, 0xf1, 0xb9, 0x22, + 0x3a, 0x30, 0x6e, 0x5a, 0x4f, 0x39, 0x6e, 0x5e, 0x04, 0xf9, 0x88, 0xba, 0x3a, 0x8b, 0x4b, 0xda, + 0x3c, 0x7f, 0x07, 0xdd, 0x44, 0x42, 0x5e, 0xbd, 0x04, 0xa6, 0x04, 0x4f, 0x78, 0x1e, 0xe4, 0x29, + 0x7e, 0x20, 0xbd, 0xce, 0x35, 0x66, 0x84, 0x09, 0xc2, 0x0f, 0x90, 0x90, 0x55, 0xff, 0x72, 0x01, + 0x2c, 0x0c, 0xec, 0x05, 0xae, 0x80, 0x9c, 0xd3, 0xd6, 0x1c, 0x80, 0x76, 0x9a, 0xbb, 0xb1, 0x89, + 0x72, 0x4e, 0x1b, 0xbe, 0x02, 0xa6, 0xd5, 0xd4, 0xaf, 0x41, 0x2b, 0xa6, 0x04, 0x48, 0xa9, 0xe8, + 0x3d, 0x12, 0x77, 0x82, 0x88, 0x36, 0x97, 0x1c, 0xc8, 0x9e, 0xbe, 0x25, 0x8a, 0x03, 0xd9, 0x43, + 0x42, 0xf6, 0x49, 0x67, 0xed, 0x78, 0xd8, 0x2f, 0x3c, 0xc5, 0xb0, 0x3f, 0xfd, 0xb1, 0xc3, 0xfe, + 0x65, 0x50, 0xe0, 0x0e, 0x77, 0x89, 0x3d, 0x93, 0x6d, 0x11, 0x6f, 0x0b, 0x21, 0x52, 0x3a, 0x78, + 0x0f, 0xcc, 0xb4, 0xc9, 0x1e, 0x8e, 0x5c, 0x6e, 0x17, 0x65, 0x0a, 0x6d, 0x8c, 0x21, 0x85, 0x1a, + 0x25, 0x51, 0x15, 0x37, 0x95, 0x5f, 
0x14, 0x03, 0xc0, 0xe7, 0xc0, 0x8c, 0x87, 0x0f, 0x1d, 0x2f, + 0xf2, 0xec, 0xd9, 0x55, 0x6b, 0xcd, 0x52, 0x66, 0x5b, 0x4a, 0x84, 0x62, 0x9d, 0xa8, 0x8c, 0xe4, + 0xb0, 0xe5, 0x46, 0xcc, 0xe9, 0x12, 0xad, 0xb4, 0x81, 0x2c, 0xb8, 0xa6, 0x32, 0x5e, 0x1f, 0xd0, + 0xa3, 0xa1, 0x15, 0x12, 0xcc, 0xf1, 0xe5, 0xe2, 0x52, 0x0a, 0x4c, 0x89, 0x50, 0xac, 0xcb, 0x82, + 0x69, 0xfb, 0xb9, 0x51, 0x60, 0x7a, 0xf1, 0xd0, 0x0a, 0xf8, 0x65, 0x30, 0xeb, 0xe1, 0xc3, 0x9b, + 0xc4, 0xef, 0xf0, 0x7d, 0x7b, 0x7e, 0xd5, 0x5a, 0xcb, 0x37, 0xe6, 0xfb, 0xbd, 0xca, 0xec, 0x56, + 0x2c, 0x44, 0x89, 0x5e, 0x1a, 0x3b, 0xbe, 0x36, 0x3e, 0x9d, 0x32, 0x8e, 0x85, 0x28, 0xd1, 0x8b, + 0x47, 0x27, 0xc4, 0x5c, 0x5c, 0x2e, 0x7b, 0x21, 0xdb, 0xc2, 0xef, 0x28, 0x31, 0x8a, 0xf5, 0x70, + 0x0d, 0x14, 0x3d, 0x7c, 0x28, 0xc7, 0x2d, 0x7b, 0x51, 0xba, 0x9d, 0x13, 0x7d, 0xd8, 0x96, 0x96, + 0x21, 0xa3, 0x95, 0x96, 0x8e, 0xaf, 0x2c, 0x97, 0x52, 0x96, 0x5a, 0x86, 0x8c, 0x56, 0x24, 0x71, + 0xe4, 0x3b, 0xf7, 0x23, 0xa2, 0x8c, 0xa1, 0x8c, 0x8c, 0x49, 0xe2, 0x3b, 0x89, 0x0a, 0xa5, 0xed, + 0xc4, 0xb8, 0xe3, 0x45, 0x2e, 0x77, 0x42, 0x97, 0x6c, 0xef, 0xd9, 0x67, 0x64, 0xfc, 0x65, 0xe7, + 0xbc, 0x65, 0xa4, 0x28, 0x65, 0x01, 0x09, 0x98, 0x22, 0x7e, 0xe4, 0xd9, 0x67, 0x65, 0xc3, 0x34, + 0x96, 0x14, 0x34, 0x37, 0xe7, 0xba, 0x1f, 0x79, 0x48, 0xba, 0x87, 0xaf, 0x80, 0x79, 0x0f, 0x1f, + 0x8a, 0x72, 0x40, 0x28, 0x17, 0x83, 0xd8, 0xb2, 0xdc, 0xfc, 0x92, 0x68, 0x52, 0xb6, 0xd2, 0x0a, + 0x94, 0xb5, 0x93, 0x0b, 0x1d, 0x3f, 0xb5, 0xf0, 0x5c, 0x6a, 0x61, 0x5a, 0x81, 0xb2, 0x76, 0x22, + 0xd2, 0x94, 0xdc, 0x8f, 0x1c, 0x4a, 0xda, 0xf6, 0xe7, 0x64, 0x5f, 0x23, 0x23, 0x8d, 0xb4, 0x0c, + 0x19, 0x2d, 0xec, 0xc6, 0x73, 0xb9, 0x2d, 0xaf, 0xe1, 0x9d, 0xf1, 0x56, 0xf2, 0x6d, 0xba, 0x4e, + 0x29, 0x3e, 0x52, 0x2f, 0x4d, 0x7a, 0x22, 0x87, 0x0c, 0x14, 0xb0, 0xeb, 0x6e, 0xef, 0xd9, 0xe7, + 0x65, 0xec, 0xc7, 0xfd, 0x82, 0x98, 0xaa, 0xb3, 0x2e, 0x40, 0x90, 0xc2, 0x12, 0xa0, 0x81, 0x2f, + 0x52, 0x63, 0x65, 0xb2, 0xa0, 0xdb, 0x02, 0x04, 0x29, 0x2c, 0xb9, 0x53, 0xff, 0x68, 0x7b, 0xcf, + 0xfe, 0xfc, 0x84, 0x77, 0x2a, 0x40, 0x90, 0xc2, 0x82, 0x0e, 0xc8, 0xfb, 0x01, 0xb7, 0x2f, 0x4c, + 0xe4, 0x79, 0x96, 0x0f, 0xce, 0xad, 0x80, 0x23, 0x81, 0x01, 0x7f, 0x63, 0x01, 0x10, 0x26, 0x29, + 0x7a, 0x71, 0x2c, 0xe3, 0xde, 0x00, 0x64, 0x2d, 0xc9, 0xed, 0xeb, 0x3e, 0xa7, 0x47, 0xc9, 0xe8, + 0x91, 0xba, 0x03, 0x29, 0x16, 0xf0, 0x8f, 0x16, 0x38, 0x9b, 0x9e, 0xa8, 0x0c, 0xbd, 0xb2, 0x8c, + 0xc8, 0xed, 0x71, 0xa7, 0x79, 0x23, 0x08, 0xdc, 0x86, 0xdd, 0xef, 0x55, 0xce, 0xae, 0x9f, 0x80, + 0x8a, 0x4e, 0xe4, 0x02, 0xff, 0x6a, 0x81, 0x25, 0x5d, 0x45, 0x53, 0x0c, 0x2b, 0x32, 0x80, 0x64, + 0xdc, 0x01, 0x1c, 0xc4, 0x51, 0x71, 0x3c, 0xaf, 0xe3, 0xb8, 0x34, 0xa4, 0x47, 0xc3, 0xd4, 0xe0, + 0x3f, 0x2c, 0x30, 0xd7, 0x26, 0x21, 0xf1, 0xdb, 0xc4, 0x6f, 0x09, 0xae, 0xab, 0x63, 0x99, 0x34, + 0x07, 0xb9, 0x6e, 0xa6, 0x20, 0x14, 0xcd, 0x9a, 0xa6, 0x39, 0x97, 0x56, 0x1d, 0xf7, 0x2a, 0xe7, + 0x92, 0xa5, 0x69, 0x0d, 0xca, 0xb0, 0x84, 0x1f, 0x58, 0x60, 0x21, 0x39, 0x00, 0xf5, 0xa4, 0x5c, + 0x9a, 0x60, 0x1e, 0xc8, 0xf6, 0x75, 0x3d, 0x0b, 0x88, 0x06, 0x19, 0xc0, 0xbf, 0x59, 0xa2, 0x53, + 0x8b, 0xe7, 0x46, 0x66, 0x57, 0x65, 0x2c, 0xdf, 0x19, 0x7b, 0x2c, 0x0d, 0x82, 0x0a, 0xe5, 0x0b, + 0x49, 0x2b, 0x68, 0x34, 0xc7, 0xbd, 0xca, 0x72, 0x3a, 0x92, 0x46, 0x81, 0xd2, 0x0c, 0xe1, 0xcf, + 0x2c, 0x30, 0x47, 0x92, 0x8e, 0x9b, 0xd9, 0x97, 0xc7, 0x12, 0xc4, 0x13, 0x9b, 0x78, 0xf5, 0x37, + 0xa6, 0x94, 0x8a, 0xa1, 0x0c, 0xb6, 0xe8, 0x20, 0xc9, 0x21, 0xf6, 0x42, 0x97, 0xd8, 0x5f, 0x18, + 0x73, 0x07, 0x79, 0x5d, 0xf9, 0x45, 0x31, 0xc0, 0x8a, 0x98, 
0x7c, 0x06, 0x6e, 0x0e, 0x5c, 0x04, + 0xf9, 0x03, 0x72, 0xa4, 0x1a, 0x7b, 0x24, 0x7e, 0xc2, 0x36, 0x28, 0x74, 0xb1, 0x1b, 0xc5, 0xc3, + 0xdb, 0x98, 0xab, 0x2e, 0x52, 0xce, 0x5f, 0xcb, 0xbd, 0x6a, 0xad, 0x3c, 0xb4, 0xc0, 0xb9, 0x93, + 0x2f, 0xf4, 0x33, 0xa5, 0xf5, 0x7b, 0x0b, 0x2c, 0x0d, 0xdd, 0xdd, 0x13, 0x18, 0xdd, 0xcf, 0x32, + 0x7a, 0x63, 0xdc, 0x97, 0xb0, 0xc9, 0xa9, 0xe3, 0x77, 0x64, 0xe7, 0x91, 0xa6, 0xf7, 0x4b, 0x0b, + 0x2c, 0x0e, 0x5e, 0x87, 0x67, 0x19, 0xaf, 0xea, 0xc3, 0x1c, 0x38, 0x77, 0x72, 0xc3, 0x04, 0xa9, + 0x99, 0x0c, 0x27, 0x33, 0x61, 0x83, 0x64, 0xca, 0x34, 0x43, 0xe5, 0x7b, 0x16, 0x28, 0xdd, 0x33, + 0x76, 0xf1, 0x7f, 0x6a, 0xc6, 0x3e, 0xdb, 0xc7, 0xf5, 0x27, 0x51, 0x30, 0x94, 0xc6, 0xad, 0xfe, + 0xdd, 0x02, 0xcb, 0x27, 0x16, 0x56, 0x31, 0x82, 0x62, 0xd7, 0x0d, 0x1e, 0xa8, 0x3f, 0xd1, 0xa4, + 0xfe, 0x64, 0xb6, 0x2e, 0xa5, 0x48, 0x6b, 0x53, 0xd1, 0xcb, 0x7d, 0x56, 0xd1, 0xab, 0xfe, 0xcb, + 0x02, 0x17, 0x3e, 0x2e, 0x13, 0x9f, 0xc9, 0x91, 0xae, 0x81, 0xa2, 0x6e, 0x8a, 0x8e, 0xe4, 0x71, + 0xea, 0x39, 0x40, 0x17, 0x0d, 0xf9, 0xdf, 0x73, 0xf5, 0xab, 0x71, 0xe5, 0xd1, 0xe3, 0xf2, 0xa9, + 0x0f, 0x1f, 0x97, 0x4f, 0x7d, 0xf4, 0xb8, 0x7c, 0xea, 0x87, 0xfd, 0xb2, 0xf5, 0xa8, 0x5f, 0xb6, + 0x3e, 0xec, 0x97, 0xad, 0x8f, 0xfa, 0x65, 0xeb, 0xdf, 0xfd, 0xb2, 0xf5, 0xab, 0xff, 0x94, 0x4f, + 0x7d, 0x77, 0x46, 0x83, 0xff, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x11, 0xe8, 0x41, 0x0b, 0x22, + 0x00, 0x00, } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto index 0d494317838..2a75484ffd9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto @@ -29,6 +29,32 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// CustomResourceColumnDefinition specifies a column for server side printing. +message CustomResourceColumnDefinition { + // name is a human readable name for the column. + optional string name = 1; + + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + optional string type = 2; + + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + optional string format = 3; + + // description is a human readable description of this column. + optional string description = 4; + + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + optional int32 priority = 5; + + // JSONPath is a simple JSON path, i.e. with array notation. + optional string JSONPath = 6; +} + // CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format // <.spec.name>.<.spec.group>. message CustomResourceDefinition { @@ -132,6 +158,9 @@ message CustomResourceDefinitionSpec { // major version, then minor version. 
An example sorted list of versions: // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10. repeated CustomResourceDefinitionVersion versions = 7; + + // AdditionalPrinterColumns are additional columns shown e.g. in kubectl next to the name. Defaults to a created-at column. + repeated CustomResourceColumnDefinition additionalPrinterColumns = 8; } // CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go index db2340e7333..bcb2527c81b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go @@ -36,6 +36,8 @@ func init() { // Public to allow building arbitrary schemes. func RegisterConversions(scheme *runtime.Scheme) error { return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition, + Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition, Convert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition, Convert_apiextensions_CustomResourceDefinition_To_v1beta1_CustomResourceDefinition, Convert_v1beta1_CustomResourceDefinitionCondition_To_apiextensions_CustomResourceDefinitionCondition, @@ -73,6 +75,36 @@ func RegisterConversions(scheme *runtime.Scheme) error { ) } +func autoConvert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition is an autogenerated conversion function. +func Convert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in *CustomResourceColumnDefinition, out *apiextensions.CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_v1beta1_CustomResourceColumnDefinition_To_apiextensions_CustomResourceColumnDefinition(in, out, s) +} + +func autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + out.Name = in.Name + out.Type = in.Type + out.Format = in.Format + out.Description = in.Description + out.Priority = in.Priority + out.JSONPath = in.JSONPath + return nil +} + +// Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition is an autogenerated conversion function. 
+func Convert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in *apiextensions.CustomResourceColumnDefinition, out *CustomResourceColumnDefinition, s conversion.Scope) error { + return autoConvert_apiextensions_CustomResourceColumnDefinition_To_v1beta1_CustomResourceColumnDefinition(in, out, s) +} + func autoConvert_v1beta1_CustomResourceDefinition_To_apiextensions_CustomResourceDefinition(in *CustomResourceDefinition, out *apiextensions.CustomResourceDefinition, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(&in.Spec, &out.Spec, s); err != nil { @@ -223,6 +255,7 @@ func autoConvert_v1beta1_CustomResourceDefinitionSpec_To_apiextensions_CustomRes } out.Subresources = (*apiextensions.CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) out.Versions = *(*[]apiextensions.CustomResourceDefinitionVersion)(unsafe.Pointer(&in.Versions)) + out.AdditionalPrinterColumns = *(*[]apiextensions.CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) return nil } @@ -249,6 +282,7 @@ func autoConvert_apiextensions_CustomResourceDefinitionSpec_To_v1beta1_CustomRes } out.Subresources = (*CustomResourceSubresources)(unsafe.Pointer(in.Subresources)) out.Versions = *(*[]CustomResourceDefinitionVersion)(unsafe.Pointer(&in.Versions)) + out.AdditionalPrinterColumns = *(*[]CustomResourceColumnDefinition)(unsafe.Pointer(&in.AdditionalPrinterColumns)) return nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go index c990055d03a..d2c1cebf232 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go @@ -24,6 +24,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. +func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { + if in == nil { + return nil + } + out := new(CustomResourceColumnDefinition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { *out = *in @@ -155,6 +171,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti *out = make([]CustomResourceDefinitionVersion, len(*in)) copy(*out, *in) } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } return } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD index b641b609370..c43365fe032 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/BUILD @@ -15,6 +15,7 @@ go_library( "//vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/features:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go index 9284bbc81c4..145b3c5d715 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go @@ -24,6 +24,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomResourceColumnDefinition) DeepCopyInto(out *CustomResourceColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceColumnDefinition. +func (in *CustomResourceColumnDefinition) DeepCopy() *CustomResourceColumnDefinition { + if in == nil { + return nil + } + out := new(CustomResourceColumnDefinition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) { *out = *in @@ -155,6 +171,11 @@ func (in *CustomResourceDefinitionSpec) DeepCopyInto(out *CustomResourceDefiniti *out = make([]CustomResourceDefinitionVersion, len(*in)) copy(*out, *in) } + if in.AdditionalPrinterColumns != nil { + in, out := &in.AdditionalPrinterColumns, &out.AdditionalPrinterColumns + *out = make([]CustomResourceColumnDefinition, len(*in)) + copy(*out, *in) + } return } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/BUILD index bbef70ca780..cebbd87bfa4 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/BUILD @@ -71,8 +71,10 @@ go_test( "//vendor/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/BUILD index c9c5e70b772..9f8a352d3d9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor/BUILD @@ -1,4 +1,4 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -6,7 +6,7 @@ go_library( importpath = "k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor", visibility = ["//visibility:public"], deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta/table:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -30,3 +30,9 @@ filegroup( tags = ["automanaged"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_test", + srcs = ["tableconvertor_test.go"], + embed = [":go_default_library"], +) diff --git a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD index 6c5adbefbb4..867dffe6089 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/test/integration/BUILD @@ -13,6 +13,7 @@ go_test( "finalization_test.go", "registration_test.go", "subresources_test.go", + "table_test.go", "validation_test.go", "versioning_test.go", "yaml_test.go", @@ -32,12 +33,15 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", ], ) From d463bbddb1e53627a50b8e00a1f5a5b71703eebe Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Thu, 24 May 2018 13:36:30 -0400 Subject: [PATCH 228/307] move resource builder flags to genericclioptions --- hack/import-restrictions.yaml | 1 + pkg/kubectl/cmd/delete.go | 2 +- pkg/kubectl/cmd/wait/BUILD | 7 +- pkg/kubectl/cmd/wait/wait.go | 6 +- pkg/kubectl/cmd/wait/wait_test.go | 4 +- pkg/kubectl/genericclioptions/BUILD | 3 + .../builder_flags.go} | 65 ++++++++++++++----- .../builder_flags_fake.go} | 2 +- 8 files changed, 62 insertions(+), 28 deletions(-) rename pkg/kubectl/{cmd/wait/flags.go => genericclioptions/builder_flags.go} (66%) rename pkg/kubectl/{cmd/wait/fakeresourcefinder.go => genericclioptions/builder_flags_fake.go} (98%) diff --git a/hack/import-restrictions.yaml b/hack/import-restrictions.yaml index c053eb45251..19e12ac1e94 100644 --- a/hack/import-restrictions.yaml +++ b/hack/import-restrictions.yaml @@ -24,6 +24,7 @@ # TODO this one should be tightened. We depend on it for testing, but we should instead create our own scheme - k8s.io/api/core/v1 - k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers + - k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource - baseImportPath: "./vendor/k8s.io/apimachinery/" allowedImports: diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 2f2bd91508a..e98f0451a98 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -275,7 +275,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { effectiveTimeout = 168 * time.Hour } waitOptions := kubectlwait.WaitOptions{ - ResourceFinder: kubectlwait.ResourceFinderForResult(o.Result), + ResourceFinder: genericclioptions.ResourceFinderForResult(o.Result), DynamicClient: o.DynamicClient, Timeout: effectiveTimeout, diff --git a/pkg/kubectl/cmd/wait/BUILD b/pkg/kubectl/cmd/wait/BUILD index ce1253f61eb..0f13739b86c 100644 --- a/pkg/kubectl/cmd/wait/BUILD +++ b/pkg/kubectl/cmd/wait/BUILD @@ -2,11 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", - srcs = [ - "fakeresourcefinder.go", - "flags.go", - "wait.go", - ], + srcs = ["wait.go"], importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/wait", visibility = ["//visibility:public"], deps = [ @@ -15,7 +11,6 @@ go_library( "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", diff --git a/pkg/kubectl/cmd/wait/wait.go 
b/pkg/kubectl/cmd/wait/wait.go index e75d5f418dc..9ccf3cb735e 100644 --- a/pkg/kubectl/cmd/wait/wait.go +++ b/pkg/kubectl/cmd/wait/wait.go @@ -42,7 +42,7 @@ import ( type WaitFlags struct { RESTClientGetter genericclioptions.RESTClientGetter PrintFlags *genericclioptions.PrintFlags - ResourceBuilderFlags *ResourceBuilderFlags + ResourceBuilderFlags *genericclioptions.ResourceBuilderFlags Timeout time.Duration ForCondition string @@ -55,7 +55,7 @@ func NewWaitFlags(restClientGetter genericclioptions.RESTClientGetter, streams g return &WaitFlags{ RESTClientGetter: restClientGetter, PrintFlags: genericclioptions.NewPrintFlags("condition met"), - ResourceBuilderFlags: NewResourceBuilderFlags(), + ResourceBuilderFlags: genericclioptions.NewResourceBuilderFlags(), Timeout: 30 * time.Second, @@ -151,7 +151,7 @@ func conditionFuncFor(condition string) (ConditionFunc, error) { // WaitOptions is a set of options that allows you to wait. This is the object reflects the runtime needs of a wait // command, making the logic itself easy to unit test with our existing mocks. type WaitOptions struct { - ResourceFinder ResourceFinder + ResourceFinder genericclioptions.ResourceFinder DynamicClient dynamic.Interface Timeout time.Duration diff --git a/pkg/kubectl/cmd/wait/wait_test.go b/pkg/kubectl/cmd/wait/wait_test.go index 6ef63357bd5..77d98e8d459 100644 --- a/pkg/kubectl/cmd/wait/wait_test.go +++ b/pkg/kubectl/cmd/wait/wait_test.go @@ -219,7 +219,7 @@ func TestWaitForDeletion(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeClient := test.fakeClient() o := &WaitOptions{ - ResourceFinder: NewSimpleResourceFinder(test.info), + ResourceFinder: genericclioptions.NewSimpleResourceFinder(test.info), DynamicClient: fakeClient, Timeout: test.timeout, @@ -451,7 +451,7 @@ func TestWaitForCondition(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeClient := test.fakeClient() o := &WaitOptions{ - ResourceFinder: NewSimpleResourceFinder(test.info), + ResourceFinder: genericclioptions.NewSimpleResourceFinder(test.info), DynamicClient: fakeClient, Timeout: test.timeout, diff --git a/pkg/kubectl/genericclioptions/BUILD b/pkg/kubectl/genericclioptions/BUILD index 66867c87ec5..24f95b3373c 100644 --- a/pkg/kubectl/genericclioptions/BUILD +++ b/pkg/kubectl/genericclioptions/BUILD @@ -3,6 +3,8 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "builder_flags.go", + "builder_flags_fake.go", "config_flags.go", "config_flags_fake.go", "doc.go", @@ -16,6 +18,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kubectl/genericclioptions/printers:go_default_library", + "//pkg/kubectl/genericclioptions/resource:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", diff --git a/pkg/kubectl/cmd/wait/flags.go b/pkg/kubectl/genericclioptions/builder_flags.go similarity index 66% rename from pkg/kubectl/cmd/wait/flags.go rename to pkg/kubectl/genericclioptions/builder_flags.go index 824a8af8db6..2fc1dcf5fae 100644 --- a/pkg/kubectl/cmd/wait/flags.go +++ b/pkg/kubectl/genericclioptions/builder_flags.go @@ -14,14 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package wait +package genericclioptions import ( "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" ) @@ -29,15 +28,15 @@ import ( type ResourceBuilderFlags struct { FilenameOptions resource.FilenameOptions - LabelSelector string - FieldSelector string - AllNamespaces bool Namespace string ExplicitNamespace bool - // TODO add conditional support. These are false for now. - All bool - Local bool + LabelSelector *string + FieldSelector *string + AllNamespaces *bool + + All *bool + Local *bool } // NewResourceBuilderFlags returns a default ResourceBuilderFlags @@ -46,6 +45,13 @@ func NewResourceBuilderFlags() *ResourceBuilderFlags { FilenameOptions: resource.FilenameOptions{ Recursive: true, }, + + LabelSelector: str_ptr(""), + FieldSelector: str_ptr(""), + AllNamespaces: bool_ptr(false), + + All: bool_ptr(false), + Local: bool_ptr(false), } } @@ -59,23 +65,44 @@ func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { flagset.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) flagset.BoolVar(&o.FilenameOptions.Recursive, "recursive", o.FilenameOptions.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") - flagset.StringVarP(&o.LabelSelector, "selector", "l", o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") - flagset.StringVar(&o.FieldSelector, "field-selector", o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") - flagset.BoolVar(&o.AllNamespaces, "all-namespaces", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.") + if o.LabelSelector != nil { + flagset.StringVarP(o.LabelSelector, "selector", "l", *o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") + } + if o.FieldSelector != nil { + flagset.StringVar(o.FieldSelector, "field-selector", *o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") + } + if o.AllNamespaces != nil { + flagset.BoolVar(o.AllNamespaces, "all-namespaces", *o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.") + } } // ToBuilder gives you back a resource finder to visit resources that are located -func (o *ResourceBuilderFlags) ToBuilder(restClientGetter genericclioptions.RESTClientGetter, resources []string) ResourceFinder { +func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, resources []string) ResourceFinder { namespace, enforceNamespace, namespaceErr := restClientGetter.ToRawKubeConfigLoader().Namespace() + labelSelector := "" + if o.LabelSelector != nil { + labelSelector = *o.LabelSelector + } + + fieldSelector := "" + if o.FieldSelector != nil { + fieldSelector = *o.FieldSelector + } + + allResources := false + if o.All != nil { + allResources = *o.All + } + return &ResourceFindBuilderWrapper{ builder: resource.NewBuilder(restClientGetter). 
Unstructured(). NamespaceParam(namespace).DefaultNamespace(). FilenameParam(enforceNamespace, &o.FilenameOptions). - LabelSelectorParam(o.LabelSelector). - FieldSelectorParam(o.FieldSelector). - ResourceTypeOrNameArgs(o.All, resources...). + LabelSelectorParam(labelSelector). + FieldSelectorParam(fieldSelector). + ResourceTypeOrNameArgs(allResources, resources...). Latest(). Flatten(). AddError(namespaceErr), @@ -112,3 +139,11 @@ func ResourceFinderForResult(result resource.Visitor) ResourceFinder { return result }) } + +func str_ptr(val string) *string { + return &val +} + +func bool_ptr(val bool) *bool { + return &val +} diff --git a/pkg/kubectl/cmd/wait/fakeresourcefinder.go b/pkg/kubectl/genericclioptions/builder_flags_fake.go similarity index 98% rename from pkg/kubectl/cmd/wait/fakeresourcefinder.go rename to pkg/kubectl/genericclioptions/builder_flags_fake.go index 591dea27ef4..15137c9e797 100644 --- a/pkg/kubectl/cmd/wait/fakeresourcefinder.go +++ b/pkg/kubectl/genericclioptions/builder_flags_fake.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package wait +package genericclioptions import ( "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" From b4af3a4ffb2273fdbf2a260b3a87452700009279 Mon Sep 17 00:00:00 2001 From: juanvallejo Date: Thu, 24 May 2018 15:05:16 -0400 Subject: [PATCH 229/307] move filename flags to genericclioptions --- pkg/kubectl/BUILD | 2 - pkg/kubectl/bash_comp_utils.go | 36 --------- pkg/kubectl/cmd/apply_set_last_applied.go | 2 +- pkg/kubectl/cmd/delete_flags.go | 39 +--------- pkg/kubectl/cmd/rollingupdate.go | 2 +- pkg/kubectl/cmd/util/helpers.go | 13 +++- pkg/kubectl/genericclioptions/BUILD | 1 + .../genericclioptions/builder_flags.go | 73 ++++++++----------- .../genericclioptions/filename_flags.go | 71 ++++++++++++++++++ 9 files changed, 119 insertions(+), 120 deletions(-) delete mode 100644 pkg/kubectl/bash_comp_utils.go create mode 100644 pkg/kubectl/genericclioptions/filename_flags.go diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index 8cc818887be..9701a1ad2f9 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -82,7 +82,6 @@ go_library( srcs = [ "apply.go", "autoscale.go", - "bash_comp_utils.go", "clusterrolebinding.go", "conditions.go", "configmap.go", @@ -125,7 +124,6 @@ go_library( "//pkg/controller/deployment/util:go_default_library", "//pkg/credentialprovider:go_default_library", "//pkg/kubectl/apps:go_default_library", - "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/util:go_default_library", "//pkg/kubectl/util/hash:go_default_library", "//pkg/kubectl/util/slice:go_default_library", diff --git a/pkg/kubectl/bash_comp_utils.go b/pkg/kubectl/bash_comp_utils.go deleted file mode 100644 index 94df450a4a9..00000000000 --- a/pkg/kubectl/bash_comp_utils.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// A set of common functions needed by cmd/kubectl and pkg/kubectl packages. - -package kubectl - -import ( - "strings" - - "github.com/spf13/cobra" - - "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" -) - -func AddJsonFilenameFlag(cmd *cobra.Command, value *[]string, usage string) { - cmd.Flags().StringSliceVarP(value, "filename", "f", *value, usage) - annotations := make([]string, 0, len(resource.FileExtensions)) - for _, ext := range resource.FileExtensions { - annotations = append(annotations, strings.TrimLeft(ext, ".")) - } - cmd.Flags().SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) -} diff --git a/pkg/kubectl/cmd/apply_set_last_applied.go b/pkg/kubectl/cmd/apply_set_last_applied.go index 8b39bfd2b6f..3d92e6b8bce 100644 --- a/pkg/kubectl/cmd/apply_set_last_applied.go +++ b/pkg/kubectl/cmd/apply_set_last_applied.go @@ -107,7 +107,7 @@ func NewCmdApplySetLastApplied(f cmdutil.Factory, ioStreams genericclioptions.IO cmdutil.AddDryRunFlag(cmd) cmd.Flags().BoolVar(&o.CreateAnnotation, "create-annotation", o.CreateAnnotation, "Will create 'last-applied-configuration' annotations if current objects doesn't have one") - kubectl.AddJsonFilenameFlag(cmd, &o.FilenameOptions.Filenames, "Filename, directory, or URL to files that contains the last-applied-configuration annotations") + cmdutil.AddJsonFilenameFlag(cmd.Flags(), &o.FilenameOptions.Filenames, "Filename, directory, or URL to files that contains the last-applied-configuration annotations") return cmd } diff --git a/pkg/kubectl/cmd/delete_flags.go b/pkg/kubectl/cmd/delete_flags.go index 1a5a6db3e2c..fc0580c688d 100644 --- a/pkg/kubectl/cmd/delete_flags.go +++ b/pkg/kubectl/cmd/delete_flags.go @@ -22,44 +22,13 @@ import ( "github.com/spf13/cobra" "k8s.io/client-go/dynamic" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" ) -type FileNameFlags struct { - Usage string - - Filenames *[]string - Recursive *bool -} - -func (o *FileNameFlags) ToOptions() resource.FilenameOptions { - options := resource.FilenameOptions{} - - if o.Recursive != nil { - options.Recursive = *o.Recursive - } - if o.Filenames != nil { - options.Filenames = *o.Filenames - } - - return options -} - -func (o *FileNameFlags) AddFlags(cmd *cobra.Command) { - if o.Recursive != nil { - cmd.Flags().BoolVarP(o.Recursive, "recursive", "R", *o.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") - } - if o.Filenames != nil { - kubectl.AddJsonFilenameFlag(cmd, o.Filenames, "Filename, directory, or URL to files "+o.Usage) - } -} - // PrintFlags composes common printer flag structs // used for commands requiring deletion logic. 
type DeleteFlags struct { - FileNameFlags *FileNameFlags + FileNameFlags *genericclioptions.FileNameFlags LabelSelector *string FieldSelector *string @@ -121,7 +90,7 @@ func (f *DeleteFlags) ToOptions(dynamicClient dynamic.Interface, streams generic } func (f *DeleteFlags) AddFlags(cmd *cobra.Command) { - f.FileNameFlags.AddFlags(cmd) + f.FileNameFlags.AddFlags(cmd.Flags()) if f.LabelSelector != nil { cmd.Flags().StringVarP(f.LabelSelector, "selector", "l", *f.LabelSelector, "Selector (label query) to filter on, not including uninitialized ones.") } @@ -175,7 +144,7 @@ func NewDeleteCommandFlags(usage string) *DeleteFlags { recursive := false return &DeleteFlags{ - FileNameFlags: &FileNameFlags{Usage: usage, Filenames: &filenames, Recursive: &recursive}, + FileNameFlags: &genericclioptions.FileNameFlags{Usage: usage, Filenames: &filenames, Recursive: &recursive}, LabelSelector: &labelSelector, FieldSelector: &fieldSelector, @@ -203,7 +172,7 @@ func NewDeleteFlags(usage string) *DeleteFlags { recursive := false return &DeleteFlags{ - FileNameFlags: &FileNameFlags{Usage: usage, Filenames: &filenames, Recursive: &recursive}, + FileNameFlags: &genericclioptions.FileNameFlags{Usage: usage, Filenames: &filenames, Recursive: &recursive}, Cascade: &cascade, GracePeriod: &gracePeriod, diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index a82ef567ef1..de3e289f4a4 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -150,7 +150,7 @@ func NewCmdRollingUpdate(f cmdutil.Factory, ioStreams genericclioptions.IOStream cmd.Flags().DurationVar(&o.Interval, "poll-interval", o.Interval, `Time delay between polling for replication controller status after the update. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".`) cmd.Flags().DurationVar(&o.Timeout, "timeout", o.Timeout, `Max time to wait for a replication controller to update before giving up. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".`) usage := "Filename or URL to file to use to create the new replication controller." - kubectl.AddJsonFilenameFlag(cmd, &o.FilenameOptions.Filenames, usage) + cmdutil.AddJsonFilenameFlag(cmd.Flags(), &o.FilenameOptions.Filenames, usage) cmd.Flags().StringVar(&o.Image, "image", o.Image, i18n.T("Image to use for upgrading the replication controller. Must be distinct from the existing image (either new image or new image tag). Can not be used with --filename/-f")) cmd.Flags().StringVar(&o.DeploymentKey, "deployment-label-key", o.DeploymentKey, i18n.T("The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise")) cmd.Flags().StringVar(&o.Container, "container", o.Container, i18n.T("Container name which will have its image upgraded. Only relevant when --image is specified, ignored otherwise. 
Required when using --image on a multi-container pod")) diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index e942b682b14..d9b16d19152 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -29,6 +29,7 @@ import ( "github.com/evanphx/json-patch" "github.com/golang/glog" "github.com/spf13/cobra" + "github.com/spf13/pflag" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -42,7 +43,6 @@ import ( "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/printers" @@ -405,10 +405,19 @@ func AddValidateOptionFlags(cmd *cobra.Command, options *ValidateOptions) { } func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) { - kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, "Filename, directory, or URL to files "+usage) + AddJsonFilenameFlag(cmd.Flags(), &options.Filenames, "Filename, directory, or URL to files "+usage) cmd.Flags().BoolVarP(&options.Recursive, "recursive", "R", options.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") } +func AddJsonFilenameFlag(flags *pflag.FlagSet, value *[]string, usage string) { + flags.StringSliceVarP(value, "filename", "f", *value, usage) + annotations := make([]string, 0, len(resource.FileExtensions)) + for _, ext := range resource.FileExtensions { + annotations = append(annotations, strings.TrimLeft(ext, ".")) + } + flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) +} + // AddDryRunFlag adds dry-run flag to a command. Usually used by mutations. func AddDryRunFlag(cmd *cobra.Command) { cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") diff --git a/pkg/kubectl/genericclioptions/BUILD b/pkg/kubectl/genericclioptions/BUILD index 24f95b3373c..88c5662b716 100644 --- a/pkg/kubectl/genericclioptions/BUILD +++ b/pkg/kubectl/genericclioptions/BUILD @@ -8,6 +8,7 @@ go_library( "config_flags.go", "config_flags_fake.go", "doc.go", + "filename_flags.go", "io_options.go", "json_yaml_flags.go", "name_flags.go", diff --git a/pkg/kubectl/genericclioptions/builder_flags.go b/pkg/kubectl/genericclioptions/builder_flags.go index 2fc1dcf5fae..4648751c315 100644 --- a/pkg/kubectl/genericclioptions/builder_flags.go +++ b/pkg/kubectl/genericclioptions/builder_flags.go @@ -17,53 +17,46 @@ limitations under the License. 
package genericclioptions import ( - "strings" - - "github.com/spf13/cobra" "github.com/spf13/pflag" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" ) // ResourceBuilderFlags are flags for finding resources +// TODO(juanvallejo): wire --local flag from commands through type ResourceBuilderFlags struct { - FilenameOptions resource.FilenameOptions - - Namespace string - ExplicitNamespace bool + FileNameFlags *FileNameFlags LabelSelector *string FieldSelector *string AllNamespaces *bool - All *bool - Local *bool + All bool } // NewResourceBuilderFlags returns a default ResourceBuilderFlags func NewResourceBuilderFlags() *ResourceBuilderFlags { + filenames := []string{} + return &ResourceBuilderFlags{ - FilenameOptions: resource.FilenameOptions{ - Recursive: true, + FileNameFlags: &FileNameFlags{ + Usage: "identifying the resource.", + Filenames: &filenames, + Recursive: boolPtr(true), }, - LabelSelector: str_ptr(""), - FieldSelector: str_ptr(""), - AllNamespaces: bool_ptr(false), - - All: bool_ptr(false), - Local: bool_ptr(false), + LabelSelector: strPtr(""), + AllNamespaces: boolPtr(false), } } +func (o *ResourceBuilderFlags) WithFieldSelector(selector string) *ResourceBuilderFlags { + o.FieldSelector = &selector + return o +} + // AddFlags registers flags for finding resources func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { - flagset.StringSliceVarP(&o.FilenameOptions.Filenames, "filename", "f", o.FilenameOptions.Filenames, "Filename, directory, or URL to files identifying the resource.") - annotations := make([]string, 0, len(resource.FileExtensions)) - for _, ext := range resource.FileExtensions { - annotations = append(annotations, strings.TrimLeft(ext, ".")) - } - flagset.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) - flagset.BoolVar(&o.FilenameOptions.Recursive, "recursive", o.FilenameOptions.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") + o.FileNameFlags.AddFlags(flagset) if o.LabelSelector != nil { flagset.StringVarP(o.LabelSelector, "selector", "l", *o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") @@ -80,29 +73,23 @@ func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, resources []string) ResourceFinder { namespace, enforceNamespace, namespaceErr := restClientGetter.ToRawKubeConfigLoader().Namespace() - labelSelector := "" + builder := resource.NewBuilder(restClientGetter). + Unstructured(). + NamespaceParam(namespace).DefaultNamespace(). + ResourceTypeOrNameArgs(o.All, resources...) + if o.FileNameFlags != nil { + opts := o.FileNameFlags.ToOptions() + builder = builder.FilenameParam(enforceNamespace, &opts) + } if o.LabelSelector != nil { - labelSelector = *o.LabelSelector + builder = builder.LabelSelectorParam(*o.LabelSelector) } - - fieldSelector := "" if o.FieldSelector != nil { - fieldSelector = *o.FieldSelector - } - - allResources := false - if o.All != nil { - allResources = *o.All + builder = builder.FieldSelectorParam(*o.FieldSelector) } return &ResourceFindBuilderWrapper{ - builder: resource.NewBuilder(restClientGetter). - Unstructured(). - NamespaceParam(namespace).DefaultNamespace(). - FilenameParam(enforceNamespace, &o.FilenameOptions). - LabelSelectorParam(labelSelector). - FieldSelectorParam(fieldSelector). 
- ResourceTypeOrNameArgs(allResources, resources...). + builder: builder. Latest(). Flatten(). AddError(namespaceErr), @@ -140,10 +127,10 @@ func ResourceFinderForResult(result resource.Visitor) ResourceFinder { }) } -func str_ptr(val string) *string { +func strPtr(val string) *string { return &val } -func bool_ptr(val bool) *bool { +func boolPtr(val bool) *bool { return &val } diff --git a/pkg/kubectl/genericclioptions/filename_flags.go b/pkg/kubectl/genericclioptions/filename_flags.go new file mode 100644 index 00000000000..9fc0b60709c --- /dev/null +++ b/pkg/kubectl/genericclioptions/filename_flags.go @@ -0,0 +1,71 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" +) + +// Usage of this struct by itself is discouraged. +// These flags are composed by ResourceBuilderFlags +// which should be used instead. +type FileNameFlags struct { + Usage string + + Filenames *[]string + Recursive *bool +} + +func (o *FileNameFlags) ToOptions() resource.FilenameOptions { + options := resource.FilenameOptions{} + + if o == nil { + return options + } + + if o.Recursive != nil { + options.Recursive = *o.Recursive + } + if o.Filenames != nil { + options.Filenames = *o.Filenames + } + + return options +} + +func (o *FileNameFlags) AddFlags(flags *pflag.FlagSet) { + if o == nil { + return + } + + if o.Recursive != nil { + flags.BoolVarP(o.Recursive, "recursive", "R", *o.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") + } + if o.Filenames != nil { + flags.StringSliceVarP(o.Filenames, "filename", "f", *o.Filenames, o.Usage) + annotations := make([]string, 0, len(resource.FileExtensions)) + for _, ext := range resource.FileExtensions { + annotations = append(annotations, strings.TrimLeft(ext, ".")) + } + flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) + } +} From 9ea33c604c49c8ab03615d7ad85544614673911f Mon Sep 17 00:00:00 2001 From: Shyam Jeedigunta Date: Mon, 28 May 2018 13:23:37 +0200 Subject: [PATCH 230/307] Fix bug with scheduler throughput variable pass-by-value --- test/e2e/scalability/density.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index 1ef125d8744..b461e458882 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -227,7 +227,7 @@ func logPodStartupStatus( expectedPods int, observedLabels map[string]string, period time.Duration, - scheduleThroughputs []float64, + scheduleThroughputs *[]float64, stopCh chan struct{}) { label := labels.SelectorFromSet(labels.Set(observedLabels)) @@ -250,14 +250,14 @@ func logPodStartupStatus( framework.Logf(startupStatus.String("Density")) // Compute scheduling throughput for the latest time period. 
throughput := float64(startupStatus.Scheduled-lastScheduledCount) / float64(period/time.Second) - scheduleThroughputs = append(scheduleThroughputs, throughput) + *scheduleThroughputs = append(*scheduleThroughputs, throughput) lastScheduledCount = startupStatus.Scheduled } } // runDensityTest will perform a density test and return the time it took for // all pods to start -func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer, scheduleThroughputs []float64) time.Duration { +func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTimer, scheduleThroughputs *[]float64) time.Duration { defer GinkgoRecover() // Create all secrets, configmaps and daemons. @@ -647,7 +647,7 @@ var _ = SIGDescribe("Density", func() { LogFunc: framework.Logf, }) } - e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, scheduleThroughputs) + e2eStartupTime = runDensityTest(dConfig, testPhaseDurations, &scheduleThroughputs) if itArg.runLatencyTest { By("Scheduling additional Pods to measure startup latencies") From c798dfc88df9c015f6c274399413c5cab1315e94 Mon Sep 17 00:00:00 2001 From: Alexander Kanevskiy Date: Thu, 24 May 2018 18:33:40 +0300 Subject: [PATCH 231/307] UX improvement for preflight check for external etcd client certificates By using the same preflight check label for all external etcd client certificates, the user can refer to them with a single identifier, which results in shorter command-line arguments to kubeadm when multiple issues are found with those files. Fixes: kubernetes/kubeadm#834 --- cmd/kubeadm/app/preflight/checks.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 8b3c5cb6a98..1b107e36649 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -892,13 +892,13 @@ func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfi if cfg.Etcd.External != nil { // Only check etcd version when external endpoints are specified if cfg.Etcd.External.CAFile != "" { - checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.CAFile}) + checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.CAFile, Label: "ExternalEtcdClientCertificates"}) } if cfg.Etcd.External.CertFile != "" { - checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.CertFile}) + checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.CertFile, Label: "ExternalEtcdClientCertificates"}) } if cfg.Etcd.External.KeyFile != "" { - checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.KeyFile}) + checks = append(checks, FileExistingCheck{Path: cfg.Etcd.External.KeyFile, Label: "ExternalEtcdClientCertificates"}) } checks = append(checks, ExternalEtcdVersionCheck{Etcd: cfg.Etcd}) } From a4e0659815ad69da425a66f362919112568d93d4 Mon Sep 17 00:00:00 2001 From: "Rostislav M. Georgiev" Date: Mon, 28 May 2018 15:20:03 +0300 Subject: [PATCH 232/307] kubeadm: Use loadPodSpecFromFile instead of LoadPodFromFile Implement and use loadPodSpecFromFile, which loads and returns a PodSpec object from a YAML or JSON file. Use this function in the places where LoadPodFromFile was used to load a Pod object and then return the PodSpec portion of it. This also removes the dependency on //pkg/volume/util and its dependencies (thus making kubeadm leaner). Signed-off-by: Rostislav M.
Georgiev --- cmd/kubeadm/app/phases/selfhosting/BUILD | 4 +-- .../app/phases/selfhosting/selfhosting.go | 29 ++++++++++++++++--- .../phases/selfhosting/selfhosting_test.go | 18 ++++++++---- 3 files changed, 40 insertions(+), 11 deletions(-) diff --git a/cmd/kubeadm/app/phases/selfhosting/BUILD b/cmd/kubeadm/app/phases/selfhosting/BUILD index 8ca10bd1bb3..4503f0c71cc 100644 --- a/cmd/kubeadm/app/phases/selfhosting/BUILD +++ b/cmd/kubeadm/app/phases/selfhosting/BUILD @@ -17,7 +17,6 @@ go_test( deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", - "//pkg/volume/util:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", ], @@ -37,12 +36,13 @@ go_library( "//cmd/kubeadm/app/features:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", - "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go index 4993ba0a74c..3d20c959dd8 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go @@ -18,6 +18,7 @@ package selfhosting import ( "fmt" + "io/ioutil" "os" "time" @@ -26,12 +27,13 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" clientset "k8s.io/client-go/kubernetes" + clientscheme "k8s.io/client-go/kubernetes/scheme" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/features" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - volumeutil "k8s.io/kubernetes/pkg/volume/util" ) const ( @@ -85,12 +87,11 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea continue } - // Load the Static Pod file in order to be able to create a self-hosted variant of that file - pod, err := volumeutil.LoadPodFromFile(manifestPath) + // Load the Static Pod spec in order to be able to create a self-hosted variant of that file + podSpec, err := loadPodSpecFromFile(manifestPath) if err != nil { return err } - podSpec := &pod.Spec // Build a DaemonSet object from the loaded PodSpec ds := BuildDaemonSet(componentName, podSpec, mutators) @@ -174,3 +175,23 @@ func BuildSelfhostedComponentLabels(component string) map[string]string { func BuildSelfHostedComponentLabelQuery(componentName string) string { return fmt.Sprintf("k8s-app=%s", kubeadmconstants.AddSelfHostedPrefix(componentName)) } + +func loadPodSpecFromFile(filePath string) (*v1.PodSpec, error) { + podDef, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to read file path %s: %+v", filePath, err) + } + + if len(podDef) == 0 { + return nil, fmt.Errorf("file was empty: %s", filePath) + } + + codec := clientscheme.Codecs.UniversalDecoder() + pod := &v1.Pod{} + + if err = runtime.DecodeInto(codec, podDef, pod); err != nil { + return nil, fmt.Errorf("failed 
decoding pod: %v", err) + } + + return &pod.Spec, nil +} diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go index 5b30899d77b..8d7e757f675 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting_test.go @@ -26,7 +26,6 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util" - volumeutil "k8s.io/kubernetes/pkg/volume/util" ) const ( @@ -494,11 +493,10 @@ func TestBuildDaemonSet(t *testing.T) { } defer os.Remove(tempFile) - pod, err := volumeutil.LoadPodFromFile(tempFile) + podSpec, err := loadPodSpecFromFile(tempFile) if err != nil { - t.Fatalf("couldn't load the specified Pod") + t.Fatalf("couldn't load the specified Pod Spec") } - podSpec := &pod.Spec ds := BuildDaemonSet(rt.component, podSpec, GetDefaultMutators()) dsBytes, err := util.MarshalToYaml(ds, apps.SchemeGroupVersion) @@ -517,6 +515,11 @@ func TestLoadPodSpecFromFile(t *testing.T) { content string expectError bool }{ + { + // No content + content: "", + expectError: true, + }, { // Good YAML content: ` @@ -570,11 +573,16 @@ spec: } defer os.Remove(tempFile) - _, err = volumeutil.LoadPodFromFile(tempFile) + _, err = loadPodSpecFromFile(tempFile) if (err != nil) != rt.expectError { t.Errorf("failed TestLoadPodSpecFromFile:\nexpected error:\n%t\nsaw:\n%v", rt.expectError, err) } } + + _, err := loadPodSpecFromFile("") + if err == nil { + t.Error("unexpected success: loadPodSpecFromFile should return error when no file is given") + } } func createTempFileWithContent(content []byte) (string, error) { From 23bf2246fe0b356fea5650da4c056153bd4a4486 Mon Sep 17 00:00:00 2001 From: wojtekt Date: Mon, 28 May 2018 14:27:31 +0200 Subject: [PATCH 233/307] Fix GKE Regional Clusters upgrade tests --- test/e2e/framework/nodes_util.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index 4212899e74f..a58a6e62859 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -205,16 +205,7 @@ func NodeUpgrade(f *Framework, v string, img string) error { if err != nil { return err } - - // Wait for it to complete and validate nodes are healthy. - // - // TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in - // GKE; the operation shouldn't return until they all are. - Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout) - if _, err := CheckNodesReady(f.ClientSet, TestContext.CloudConfig.NumNodes, RestartNodeReadyAgainTimeout); err != nil { - return err - } - return nil + return waitForNodesReadyAfterUpgrade(f) } // TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default. @@ -223,9 +214,20 @@ func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, en if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil { return err } + return waitForNodesReadyAfterUpgrade(f) +} + +func waitForNodesReadyAfterUpgrade(f *Framework) error { // Wait for it to complete and validate nodes are healthy. 
- Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout) - if _, err := CheckNodesReady(f.ClientSet, TestContext.CloudConfig.NumNodes, RestartNodeReadyAgainTimeout); err != nil { + // + // TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in + // GKE; the operation shouldn't return until they all are. + numNodes, err := NumberOfRegisteredNodes(f.ClientSet) + if err != nil { + return fmt.Errorf("couldn't detect number of nodes") + } + Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes) + if _, err := CheckNodesReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil { return err } return nil From c85e69aeb96801a16ee8b60dc75b8ca31054204f Mon Sep 17 00:00:00 2001 From: David Eads Date: Thu, 24 May 2018 09:33:36 -0400 Subject: [PATCH 234/307] remove unnecessary factory delegation for RESTClientGetter method --- pkg/kubectl/cmd/annotate.go | 2 +- pkg/kubectl/cmd/annotate_test.go | 15 ++-- pkg/kubectl/cmd/apply.go | 2 +- pkg/kubectl/cmd/apply_set_last_applied.go | 2 +- pkg/kubectl/cmd/apply_test.go | 42 ++++------- pkg/kubectl/cmd/apply_view_last_applied.go | 2 +- pkg/kubectl/cmd/attach.go | 2 +- pkg/kubectl/cmd/attach_test.go | 9 +-- pkg/kubectl/cmd/auth/cani.go | 2 +- pkg/kubectl/cmd/auth/cani_test.go | 3 +- pkg/kubectl/cmd/auth/reconcile.go | 2 +- pkg/kubectl/cmd/autoscale.go | 2 +- pkg/kubectl/cmd/clusterinfo_dump.go | 2 +- pkg/kubectl/cmd/convert.go | 2 +- pkg/kubectl/cmd/convert_test.go | 3 +- pkg/kubectl/cmd/cp.go | 2 +- pkg/kubectl/cmd/cp_test.go | 3 +- pkg/kubectl/cmd/create/create.go | 4 +- .../cmd/create/create_clusterrole_test.go | 7 +- .../create/create_clusterrolebinding_test.go | 3 +- .../cmd/create/create_configmap_test.go | 3 +- .../cmd/create/create_deployment_test.go | 6 +- pkg/kubectl/cmd/create/create_job.go | 2 +- pkg/kubectl/cmd/create/create_pdb_test.go | 3 +- pkg/kubectl/cmd/create/create_quota_test.go | 4 +- pkg/kubectl/cmd/create/create_role.go | 2 +- pkg/kubectl/cmd/create/create_role_test.go | 10 +-- .../cmd/create/create_rolebinding_test.go | 3 +- pkg/kubectl/cmd/create/create_secret_test.go | 6 +- pkg/kubectl/cmd/create/create_service_test.go | 9 +-- .../cmd/create/create_serviceaccount_test.go | 3 +- pkg/kubectl/cmd/create/create_test.go | 9 +-- pkg/kubectl/cmd/delete.go | 2 +- pkg/kubectl/cmd/delete_test.go | 45 ++++-------- pkg/kubectl/cmd/describe.go | 2 +- pkg/kubectl/cmd/describe_test.go | 17 ++--- pkg/kubectl/cmd/diff.go | 4 +- pkg/kubectl/cmd/drain.go | 2 +- pkg/kubectl/cmd/edit_test.go | 6 +- pkg/kubectl/cmd/exec.go | 2 +- pkg/kubectl/cmd/exec_test.go | 6 +- pkg/kubectl/cmd/expose.go | 2 +- pkg/kubectl/cmd/expose_test.go | 3 +- pkg/kubectl/cmd/get/get.go | 2 +- pkg/kubectl/cmd/get/get_test.go | 69 +++++++------------ pkg/kubectl/cmd/label.go | 2 +- pkg/kubectl/cmd/label_test.go | 12 ++-- pkg/kubectl/cmd/logs.go | 2 +- pkg/kubectl/cmd/logs_test.go | 4 +- pkg/kubectl/cmd/patch.go | 2 +- pkg/kubectl/cmd/patch_test.go | 12 ++-- pkg/kubectl/cmd/plugin.go | 2 +- pkg/kubectl/cmd/portforward.go | 2 +- pkg/kubectl/cmd/portforward_test.go | 3 +- pkg/kubectl/cmd/replace.go | 2 +- pkg/kubectl/cmd/replace_test.go | 12 ++-- pkg/kubectl/cmd/rollingupdate.go | 2 +- pkg/kubectl/cmd/rollout/rollout_history.go | 2 +- pkg/kubectl/cmd/rollout/rollout_pause.go | 2 +- pkg/kubectl/cmd/rollout/rollout_pause_test.go | 3 +- pkg/kubectl/cmd/rollout/rollout_resume.go | 2 +- pkg/kubectl/cmd/rollout/rollout_status.go | 2 +- 
pkg/kubectl/cmd/rollout/rollout_undo.go | 2 +- pkg/kubectl/cmd/run.go | 2 +- pkg/kubectl/cmd/run_test.go | 6 +- pkg/kubectl/cmd/scale.go | 2 +- pkg/kubectl/cmd/set/set_env.go | 2 +- pkg/kubectl/cmd/set/set_env_test.go | 14 ++-- pkg/kubectl/cmd/set/set_image.go | 2 +- pkg/kubectl/cmd/set/set_image_test.go | 11 ++- pkg/kubectl/cmd/set/set_resources.go | 2 +- pkg/kubectl/cmd/set/set_resources_test.go | 11 ++- pkg/kubectl/cmd/set/set_selector.go | 2 +- pkg/kubectl/cmd/set/set_selector_test.go | 3 +- pkg/kubectl/cmd/set/set_serviceaccount.go | 2 +- .../cmd/set/set_serviceaccount_test.go | 14 ++-- pkg/kubectl/cmd/set/set_subject.go | 2 +- pkg/kubectl/cmd/set/set_subject_test.go | 4 +- pkg/kubectl/cmd/taint.go | 2 +- pkg/kubectl/cmd/testing/BUILD | 1 - pkg/kubectl/cmd/testing/fake.go | 28 +++----- pkg/kubectl/cmd/top_node_test.go | 21 ++---- pkg/kubectl/cmd/top_pod.go | 2 +- pkg/kubectl/cmd/top_pod_test.go | 9 +-- pkg/kubectl/cmd/util/editor/editoptions.go | 2 +- pkg/kubectl/cmd/util/factory.go | 5 -- pkg/kubectl/cmd/util/factory_client_access.go | 4 -- pkg/kubectl/genericclioptions/BUILD | 1 + .../genericclioptions/config_flags_fake.go | 31 +++++++++ 89 files changed, 228 insertions(+), 360 deletions(-) diff --git a/pkg/kubectl/cmd/annotate.go b/pkg/kubectl/cmd/annotate.go index 20e12bd2b3f..7df91f9b74a 100644 --- a/pkg/kubectl/cmd/annotate.go +++ b/pkg/kubectl/cmd/annotate.go @@ -177,7 +177,7 @@ func (o *AnnotateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ return printer.PrintObj(obj, out) } - o.namespace, o.enforceNamespace, err = f.DefaultNamespace() + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/annotate_test.go b/pkg/kubectl/cmd/annotate_test.go index f54fcf0ff11..172eb23309c 100644 --- a/pkg/kubectl/cmd/annotate_test.go +++ b/pkg/kubectl/cmd/annotate_test.go @@ -418,10 +418,9 @@ func TestAnnotateErrors(t *testing.T) { for k, testCase := range testCases { t.Run(k, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() iostreams, _, bufOut, bufErr := genericclioptions.NewTestIOStreams() @@ -453,7 +452,7 @@ func TestAnnotateErrors(t *testing.T) { func TestAnnotateObject(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -485,7 +484,6 @@ func TestAnnotateObject(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() iostreams, _, bufOut, _ := genericclioptions.NewTestIOStreams() @@ -507,7 +505,7 @@ func TestAnnotateObject(t *testing.T) { func TestAnnotateObjectFromFile(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -539,7 +537,6 @@ func TestAnnotateObjectFromFile(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() iostreams, _, bufOut, _ := genericclioptions.NewTestIOStreams() @@ -560,7 +557,7 @@ func TestAnnotateObjectFromFile(t *testing.T) { } func TestAnnotateLocal(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -571,7 +568,6 @@ func TestAnnotateLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() iostreams, _, _, _ := genericclioptions.NewTestIOStreams() @@ -594,7 +590,7 @@ func TestAnnotateLocal(t *testing.T) { func TestAnnotateMultipleObjects(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -627,7 +623,6 @@ func TestAnnotateMultipleObjects(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() iostreams, _, _, _ := genericclioptions.NewTestIOStreams() diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index 18819353753..650451120ed 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -230,7 +230,7 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } - o.Namespace, o.EnforceNamespace, err = f.DefaultNamespace() + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/apply_set_last_applied.go b/pkg/kubectl/cmd/apply_set_last_applied.go index 3d92e6b8bce..809515d2442 100644 --- a/pkg/kubectl/cmd/apply_set_last_applied.go +++ b/pkg/kubectl/cmd/apply_set_last_applied.go @@ -118,7 +118,7 @@ func (o *SetLastAppliedOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) o.shortOutput = o.output == "name" var err error - o.namespace, o.enforceNamespace, err = f.DefaultNamespace() + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/apply_test.go b/pkg/kubectl/cmd/apply_test.go index abc4df764c0..81f7464219d 100644 --- a/pkg/kubectl/cmd/apply_test.go +++ b/pkg/kubectl/cmd/apply_test.go @@ -273,7 +273,7 @@ func TestRunApplyPrintsValidObjectList(t *testing.T) { cmBytes := readConfigMapList(t, filenameCM) pathCM := "/namespaces/test/configmaps" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -292,7 +292,6 @@ func TestRunApplyPrintsValidObjectList(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -393,7 +392,7 @@ func TestRunApplyViewLastApplied(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -419,7 +418,6 @@ func TestRunApplyViewLastApplied(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() cmdutil.BehaviorOnFatal(func(str string, code int) { @@ -453,7 +451,7 @@ func TestApplyObjectWithoutAnnotation(t *testing.T) { nameRC, rcBytes := readReplicationController(t, filenameRC) pathRC := "/namespaces/test/replicationcontrollers/" + nameRC - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -472,7 +470,6 @@ func TestApplyObjectWithoutAnnotation(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() @@ -499,7 +496,7 @@ func TestApplyObject(t *testing.T) { for _, fn := range testingOpenAPISchemaFns { t.Run("test apply when a local object is specified", func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -520,7 +517,6 @@ func TestApplyObject(t *testing.T) { }), } tf.OpenAPISchemaFunc = fn - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() @@ -564,7 +560,7 @@ func TestApplyObjectOutput(t *testing.T) { for _, fn := range testingOpenAPISchemaFns { t.Run("test apply returns correct output", func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -585,7 +581,6 @@ func TestApplyObjectOutput(t *testing.T) { }), } tf.OpenAPISchemaFunc = fn - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() @@ -617,7 +612,7 @@ func TestApplyRetry(t *testing.T) { firstPatch := true retry := false getCount := 0 - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -647,7 +642,6 @@ func TestApplyRetry(t *testing.T) { }), } tf.OpenAPISchemaFunc = fn - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() @@ -677,7 +671,7 @@ func TestApplyNonExistObject(t *testing.T) { pathRC := "/namespaces/test/replicationcontrollers" pathNameRC := pathRC + "/" + nameRC - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -697,7 +691,6 @@ func TestApplyNonExistObject(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -723,7 +716,7 @@ func TestApplyEmptyPatch(t *testing.T) { var body []byte - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -750,7 +743,6 @@ func TestApplyEmptyPatch(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() // 1. 
apply non exist object @@ -797,7 +789,7 @@ func testApplyMultipleObjects(t *testing.T, asList bool) { for _, fn := range testingOpenAPISchemaFns { t.Run("test apply on multiple objects", func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -825,7 +817,6 @@ func testApplyMultipleObjects(t *testing.T, asList bool) { }), } tf.OpenAPISchemaFunc = fn - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() @@ -884,7 +875,7 @@ func TestApplyNULLPreservation(t *testing.T) { for _, fn := range testingOpenAPISchemaFns { t.Run("test apply preserves NULL fields", func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -926,7 +917,6 @@ func TestApplyNULLPreservation(t *testing.T) { }), } tf.OpenAPISchemaFunc = fn - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() @@ -960,7 +950,7 @@ func TestUnstructuredApply(t *testing.T) { for _, fn := range testingOpenAPISchemaFns { t.Run("test apply works correctly with unstructured objects", func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -993,7 +983,6 @@ func TestUnstructuredApply(t *testing.T) { }), } tf.OpenAPISchemaFunc = fn - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() @@ -1029,7 +1018,7 @@ func TestUnstructuredIdempotentApply(t *testing.T) { for _, fn := range testingOpenAPISchemaFns { t.Run("test repeated apply operations on an unstructured object", func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -1059,7 +1048,6 @@ func TestUnstructuredIdempotentApply(t *testing.T) { }), } tf.OpenAPISchemaFunc = fn - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() @@ -1131,7 +1119,7 @@ func TestRunApplySetLastApplied(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -1161,7 +1149,6 @@ func TestRunApplySetLastApplied(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() cmdutil.BehaviorOnFatal(func(str string, code int) { @@ -1227,7 +1214,7 @@ func TestForceApply(t *testing.T) { deleted := false isScaledDownToZero := false counts := map[string]int{} - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.ClientConfigVal = defaultClientConfig() @@ -1312,7 +1299,6 @@ func TestForceApply(t *testing.T) { tf.OpenAPISchemaFunc = fn tf.Client = tf.UnstructuredClient tf.ClientConfigVal = &restclient.Config{} - tf.Namespace = "test" ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdApply("kubectl", tf, ioStreams) diff --git a/pkg/kubectl/cmd/apply_view_last_applied.go b/pkg/kubectl/cmd/apply_view_last_applied.go index ce5c7722e19..5fa5cd12c2a 100644 --- a/pkg/kubectl/cmd/apply_view_last_applied.go +++ b/pkg/kubectl/cmd/apply_view_last_applied.go @@ -91,7 +91,7 @@ func NewCmdApplyViewLastApplied(f cmdutil.Factory, ioStreams genericclioptions.I } func (o *ViewLastAppliedOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args []string) error { - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/attach.go b/pkg/kubectl/cmd/attach.go index 2195eee1ced..88ff4f7d415 100644 --- a/pkg/kubectl/cmd/attach.go +++ b/pkg/kubectl/cmd/attach.go @@ -135,7 +135,7 @@ func (p *AttachOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, argsIn [ return cmdutil.UsageErrorf(cmd, "expected POD, TYPE/NAME, or TYPE NAME, (at most 2 arguments) saw %d: %v", len(argsIn), argsIn) } - namespace, _, err := f.DefaultNamespace() + namespace, _, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/attach_test.go b/pkg/kubectl/cmd/attach_test.go index 55e0aad17ed..007df0dccdf 100644 --- a/pkg/kubectl/cmd/attach_test.go +++ b/pkg/kubectl/cmd/attach_test.go @@ -140,7 +140,7 @@ func TestPodAndContainerAttach(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -156,7 +156,6 @@ func TestPodAndContainerAttach(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() cmd := &cobra.Command{} @@ -224,7 +223,7 @@ func TestAttach(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -247,7 +246,6 @@ func TestAttach(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs, GroupVersion: &schema.GroupVersion{Version: test.version}}} remoteAttach := &fakeRemoteAttach{} if test.remoteAttachErr { @@ -312,7 +310,7 @@ func TestAttachWarnings(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -335,7 +333,6 @@ func TestAttachWarnings(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{APIPath: "/api", ContentConfig: restclient.ContentConfig{NegotiatedSerializer: legacyscheme.Codecs, GroupVersion: &schema.GroupVersion{Version: test.version}}} streams, _, _, bufErr := genericclioptions.NewTestIOStreams() ex := &fakeRemoteAttach{} diff --git a/pkg/kubectl/cmd/auth/cani.go b/pkg/kubectl/cmd/auth/cani.go index 3d771af8703..f7e5dac5df5 100644 --- a/pkg/kubectl/cmd/auth/cani.go +++ b/pkg/kubectl/cmd/auth/cani.go @@ -146,7 +146,7 @@ func (o *CanIOptions) Complete(f cmdutil.Factory, args []string) error { o.Namespace = "" if !o.AllNamespaces { - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/auth/cani_test.go b/pkg/kubectl/cmd/auth/cani_test.go index 84dc9826868..8ce36cdd680 100644 --- a/pkg/kubectl/cmd/auth/cani_test.go +++ b/pkg/kubectl/cmd/auth/cani_test.go @@ -121,7 +121,7 @@ func TestRunAccessCheck(t *testing.T) { test.o.Out = ioutil.Discard test.o.ErrOut = ioutil.Discard - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() ns := legacyscheme.Codecs @@ -157,7 +157,6 @@ func TestRunAccessCheck(t *testing.T) { test.serverErr }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}} if err := test.o.Complete(tf, test.args); err != nil { diff --git a/pkg/kubectl/cmd/auth/reconcile.go b/pkg/kubectl/cmd/auth/reconcile.go index 555244ca61f..6237c1ba7dd 100644 --- a/pkg/kubectl/cmd/auth/reconcile.go +++ b/pkg/kubectl/cmd/auth/reconcile.go @@ -97,7 +97,7 @@ func (o *ReconcileOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args return errors.New("no arguments are allowed") } - namespace, enforceNamespace, err := f.DefaultNamespace() + namespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/autoscale.go b/pkg/kubectl/cmd/autoscale.go index 7c1875cf4d2..e2fc589982f 100644 --- a/pkg/kubectl/cmd/autoscale.go +++ b/pkg/kubectl/cmd/autoscale.go @@ -165,7 +165,7 @@ func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args } } - o.namespace, o.enforceNamespace, err = f.DefaultNamespace() + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/clusterinfo_dump.go b/pkg/kubectl/cmd/clusterinfo_dump.go index 0c3af8d4974..9e55ca45d77 100644 --- a/pkg/kubectl/cmd/clusterinfo_dump.go +++ b/pkg/kubectl/cmd/clusterinfo_dump.go @@ -134,7 +134,7 @@ func (o *ClusterInfoDumpOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) if err != 
nil { return err } - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/convert.go b/pkg/kubectl/cmd/convert.go index ec29cdca15a..c22c2990dad 100644 --- a/pkg/kubectl/cmd/convert.go +++ b/pkg/kubectl/cmd/convert.go @@ -139,7 +139,7 @@ func (o *ConvertOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) (err er o.builder.Schema(schema) } - cmdNamespace, _, err := f.DefaultNamespace() + cmdNamespace, _, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/convert_test.go b/pkg/kubectl/cmd/convert_test.go index 72ff0755ee5..8f3e8cf85e4 100644 --- a/pkg/kubectl/cmd/convert_test.go +++ b/pkg/kubectl/cmd/convert_test.go @@ -95,7 +95,7 @@ func TestConvertObject(t *testing.T) { for _, tc := range testcases { for _, field := range tc.fields { t.Run(fmt.Sprintf("%s %s", tc.name, field), func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -104,7 +104,6 @@ func TestConvertObject(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" buf := bytes.NewBuffer([]byte{}) cmd := NewCmdConvert(tf, genericclioptions.IOStreams{Out: buf, ErrOut: buf}) diff --git a/pkg/kubectl/cmd/cp.go b/pkg/kubectl/cmd/cp.go index ff486853baa..b7242f5abd5 100644 --- a/pkg/kubectl/cmd/cp.go +++ b/pkg/kubectl/cmd/cp.go @@ -144,7 +144,7 @@ func extractFileSpec(arg string) (fileSpec, error) { func (o *CopyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/cp_test.go b/pkg/kubectl/cmd/cp_test.go index 925383a7260..656c53ea97a 100644 --- a/pkg/kubectl/cmd/cp_test.go +++ b/pkg/kubectl/cmd/cp_test.go @@ -514,8 +514,7 @@ func TestClean(t *testing.T) { } func TestCopyToPod(t *testing.T) { - tf := cmdtesting.NewTestFactory() - tf.Namespace = "test" + tf := cmdtesting.NewTestFactory().WithNamespace("test") ns := legacyscheme.Codecs codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
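Every hunk above and below applies the same two-part refactor, so a compact sketch may help while reading them: test factories now carry the namespace via the WithNamespace builder instead of the removed tf.Namespace field, and command Complete() methods resolve it through ToRawKubeConfigLoader().Namespace() instead of the removed f.DefaultNamespace(). The snippet below is an illustrative aside only, not part of the patch; the helper names and the package placement are hypothetical, and the import paths are assumed to match this tree.

package cmd

import (
	cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
)

// newNamespacedTestFactory is a hypothetical helper, not part of this series. It shows
// the test-side idiom the patch introduces; callers still defer tf.Cleanup().
func newNamespacedTestFactory() *cmdtesting.TestFactory {
	// was: tf := cmdtesting.NewTestFactory(); tf.Namespace = "test"
	return cmdtesting.NewTestFactory().WithNamespace("test")
}

// resolveNamespace is likewise hypothetical. It shows the command-side idiom that the
// various Complete() methods switch to in place of the removed f.DefaultNamespace().
func resolveNamespace(f cmdutil.Factory) (namespace string, enforceNamespace bool, err error) {
	return f.ToRawKubeConfigLoader().Namespace()
}
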
diff --git a/pkg/kubectl/cmd/create/create.go b/pkg/kubectl/cmd/create/create.go index 1be7fc47562..556e6d15afa 100644 --- a/pkg/kubectl/cmd/create/create.go +++ b/pkg/kubectl/cmd/create/create.go @@ -216,7 +216,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { return err } - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } @@ -386,7 +386,7 @@ func (o *CreateSubcommandOptions) Complete(f cmdutil.Factory, cmd *cobra.Command return printer.PrintObj(obj, out) } - o.Namespace, o.EnforceNamespace, err = f.DefaultNamespace() + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/create/create_clusterrole_test.go b/pkg/kubectl/cmd/create/create_clusterrole_test.go index 4dc876f9a9e..647c0865751 100644 --- a/pkg/kubectl/cmd/create/create_clusterrole_test.go +++ b/pkg/kubectl/cmd/create/create_clusterrole_test.go @@ -36,10 +36,9 @@ import ( func TestCreateClusterRole(t *testing.T) { clusterRoleName := "my-cluster-role" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.Client = &fake.RESTClient{} tf.ClientConfigVal = defaultClientConfig() @@ -178,11 +177,9 @@ func TestCreateClusterRole(t *testing.T) { } func TestClusterRoleValidate(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" - tests := map[string]struct { clusterRoleOptions *CreateClusterRoleOptions expectErr bool diff --git a/pkg/kubectl/cmd/create/create_clusterrolebinding_test.go b/pkg/kubectl/cmd/create/create_clusterrolebinding_test.go index 206d6da73b2..69f1559aac9 100644 --- a/pkg/kubectl/cmd/create/create_clusterrolebinding_test.go +++ b/pkg/kubectl/cmd/create/create_clusterrolebinding_test.go @@ -68,7 +68,7 @@ func TestCreateClusterRoleBinding(t *testing.T) { }, } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() ns := legacyscheme.Codecs @@ -77,7 +77,6 @@ func TestCreateClusterRoleBinding(t *testing.T) { encoder := ns.EncoderForVersion(info.Serializer, groupVersion) decoder := ns.DecoderToVersion(info.Serializer, groupVersion) - tf.Namespace = "test" tf.Client = &ClusterRoleBindingRESTClient{ RESTClient: &fake.RESTClient{ NegotiatedSerializer: ns, diff --git a/pkg/kubectl/cmd/create/create_configmap_test.go b/pkg/kubectl/cmd/create/create_configmap_test.go index 65aac513988..8ee9501bcbf 100644 --- a/pkg/kubectl/cmd/create/create_configmap_test.go +++ b/pkg/kubectl/cmd/create/create_configmap_test.go @@ -36,7 +36,7 @@ import ( func TestCreateConfigMap(t *testing.T) { configMap := &v1.ConfigMap{} configMap.Name = "my-configmap" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -55,7 +55,6 @@ func TestCreateConfigMap(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateConfigMap(tf, ioStreams) cmd.Flags().Set("output", "name") diff --git a/pkg/kubectl/cmd/create/create_deployment_test.go b/pkg/kubectl/cmd/create/create_deployment_test.go index af1e48f081e..bd42fcfedd0 100644 --- a/pkg/kubectl/cmd/create/create_deployment_test.go +++ b/pkg/kubectl/cmd/create/create_deployment_test.go @@ -89,7 +89,7 @@ func Test_generatorFromName(t *testing.T) { func TestCreateDeployment(t *testing.T) { depName := "jonny-dep" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() ns := legacyscheme.Codecs @@ -104,7 +104,6 @@ func TestCreateDeployment(t *testing.T) { }), } tf.ClientConfigVal = &restclient.Config{} - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateDeployment(tf, ioStreams) @@ -120,7 +119,7 @@ func TestCreateDeployment(t *testing.T) { func TestCreateDeploymentNoImage(t *testing.T) { depName := "jonny-dep" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() ns := legacyscheme.Codecs @@ -135,7 +134,6 @@ func TestCreateDeploymentNoImage(t *testing.T) { }), } tf.ClientConfigVal = &restclient.Config{} - tf.Namespace = "test" ioStreams := genericclioptions.NewTestIOStreamsDiscard() cmd := NewCmdCreateDeployment(tf, ioStreams) diff --git a/pkg/kubectl/cmd/create/create_job.go b/pkg/kubectl/cmd/create/create_job.go index 9980fa2866d..d4f024e470b 100644 --- a/pkg/kubectl/cmd/create/create_job.go +++ b/pkg/kubectl/cmd/create/create_job.go @@ -100,7 +100,7 @@ func (o *CreateJobOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args } o.Name = args[0] - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/create/create_pdb_test.go b/pkg/kubectl/cmd/create/create_pdb_test.go index 06a7fd80c80..01d6ee99e8d 100644 --- a/pkg/kubectl/cmd/create/create_pdb_test.go +++ b/pkg/kubectl/cmd/create/create_pdb_test.go @@ -32,7 +32,7 @@ import ( func TestCreatePdb(t *testing.T) { pdbName := "my-pdb" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() ns := legacyscheme.Codecs @@ -48,7 +48,6 @@ func TestCreatePdb(t *testing.T) { }), } tf.ClientConfigVal = &restclient.Config{} - tf.Namespace = "test" outputFormat := "name" diff --git a/pkg/kubectl/cmd/create/create_quota_test.go b/pkg/kubectl/cmd/create/create_quota_test.go index c9bf1333eb5..c780beafefd 100644 --- a/pkg/kubectl/cmd/create/create_quota_test.go +++ b/pkg/kubectl/cmd/create/create_quota_test.go @@ -51,11 +51,9 @@ func TestCreateQuota(t *testing.T) { } for name, test := range tests { t.Run(name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" - ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateQuota(tf, ioStreams) cmd.Flags().Parse(test.flags) diff --git a/pkg/kubectl/cmd/create/create_role.go b/pkg/kubectl/cmd/create/create_role.go index 53243761dc0..d01014b45f1 100644 --- a/pkg/kubectl/cmd/create/create_role.go +++ b/pkg/kubectl/cmd/create/create_role.go @@ -226,7 +226,7 @@ func (o *CreateRoleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args return 
printer.PrintObj(obj, o.Out) } - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/create/create_role_test.go b/pkg/kubectl/cmd/create/create_role_test.go index c2284c3ec8a..4551bea8541 100644 --- a/pkg/kubectl/cmd/create/create_role_test.go +++ b/pkg/kubectl/cmd/create/create_role_test.go @@ -34,10 +34,9 @@ import ( func TestCreateRole(t *testing.T) { roleName := "my-role" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.Client = &fake.RESTClient{} tf.ClientConfigVal = defaultClientConfig() @@ -152,11 +151,9 @@ func TestCreateRole(t *testing.T) { } func TestValidate(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" - tests := map[string]struct { roleOptions *CreateRoleOptions expectErr bool @@ -357,10 +354,9 @@ func TestValidate(t *testing.T) { func TestComplete(t *testing.T) { roleName := "my-role" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.Client = &fake.RESTClient{} tf.ClientConfigVal = defaultClientConfig() diff --git a/pkg/kubectl/cmd/create/create_rolebinding_test.go b/pkg/kubectl/cmd/create/create_rolebinding_test.go index f57b2351204..f9cb82518f7 100644 --- a/pkg/kubectl/cmd/create/create_rolebinding_test.go +++ b/pkg/kubectl/cmd/create/create_rolebinding_test.go @@ -70,7 +70,7 @@ func TestCreateRoleBinding(t *testing.T) { }, } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() ns := legacyscheme.Codecs @@ -79,7 +79,6 @@ func TestCreateRoleBinding(t *testing.T) { encoder := ns.EncoderForVersion(info.Serializer, groupVersion) decoder := ns.DecoderToVersion(info.Serializer, groupVersion) - tf.Namespace = "test" tf.Client = &RoleBindingRESTClient{ RESTClient: &fake.RESTClient{ NegotiatedSerializer: ns, diff --git a/pkg/kubectl/cmd/create/create_secret_test.go b/pkg/kubectl/cmd/create/create_secret_test.go index 21b41a8520c..6bdf4475966 100644 --- a/pkg/kubectl/cmd/create/create_secret_test.go +++ b/pkg/kubectl/cmd/create/create_secret_test.go @@ -37,7 +37,7 @@ func TestCreateSecretGeneric(t *testing.T) { }, } secretObject.Name = "my-secret" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -56,7 +56,6 @@ func TestCreateSecretGeneric(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateSecretGeneric(tf, ioStreams) cmd.Flags().Set("output", "name") @@ -72,7 +71,7 @@ func TestCreateSecretGeneric(t *testing.T) { func TestCreateSecretDockerRegistry(t *testing.T) { secretObject := &v1.Secret{} secretObject.Name = "my-secret" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
ns := legacyscheme.Codecs @@ -89,7 +88,6 @@ func TestCreateSecretDockerRegistry(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateSecretDockerRegistry(tf, ioStreams) cmd.Flags().Set("docker-username", "test-user") diff --git a/pkg/kubectl/cmd/create/create_service_test.go b/pkg/kubectl/cmd/create/create_service_test.go index 9fd018f1852..eb622922214 100644 --- a/pkg/kubectl/cmd/create/create_service_test.go +++ b/pkg/kubectl/cmd/create/create_service_test.go @@ -32,7 +32,7 @@ import ( func TestCreateService(t *testing.T) { service := &v1.Service{} service.Name = "my-service" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -51,7 +51,6 @@ func TestCreateService(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateServiceClusterIP(tf, ioStreams) cmd.Flags().Set("output", "name") @@ -66,7 +65,7 @@ func TestCreateService(t *testing.T) { func TestCreateServiceNodePort(t *testing.T) { service := &v1.Service{} service.Name = "my-node-port-service" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -85,7 +84,6 @@ func TestCreateServiceNodePort(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateServiceNodePort(tf, ioStreams) cmd.Flags().Set("output", "name") @@ -100,7 +98,7 @@ func TestCreateServiceNodePort(t *testing.T) { func TestCreateServiceExternalName(t *testing.T) { service := &v1.Service{} service.Name = "my-external-name-service" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -119,7 +117,6 @@ func TestCreateServiceExternalName(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateServiceExternalName(tf, ioStreams) cmd.Flags().Set("output", "name") diff --git a/pkg/kubectl/cmd/create/create_serviceaccount_test.go b/pkg/kubectl/cmd/create/create_serviceaccount_test.go index 2f62c05fd3c..06f9e35642a 100644 --- a/pkg/kubectl/cmd/create/create_serviceaccount_test.go +++ b/pkg/kubectl/cmd/create/create_serviceaccount_test.go @@ -32,7 +32,7 @@ import ( func TestCreateServiceAccount(t *testing.T) { serviceAccountObject := &v1.ServiceAccount{} serviceAccountObject.Name = "my-service-account" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -51,7 +51,6 @@ func TestCreateServiceAccount(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreateServiceAccount(tf, ioStreams) cmd.Flags().Set("output", "name") diff --git a/pkg/kubectl/cmd/create/create_test.go b/pkg/kubectl/cmd/create/create_test.go index 607a3bfbd3f..dba513d8b93 100644 --- a/pkg/kubectl/cmd/create/create_test.go +++ b/pkg/kubectl/cmd/create/create_test.go @@ -51,7 +51,7 @@ func TestCreateObject(t *testing.T) { _, _, rc := testData() rc.Items[0].Name = "redis-master-controller" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -69,7 +69,6 @@ func TestCreateObject(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreate(tf, ioStreams) @@ -87,7 +86,7 @@ func TestCreateMultipleObject(t *testing.T) { initTestErrorHandler(t) _, svc, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -107,7 +106,6 @@ func TestCreateMultipleObject(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreate(tf, ioStreams) @@ -127,7 +125,7 @@ func TestCreateDirectory(t *testing.T) { _, _, rc := testData() rc.Items[0].Name = "name" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -145,7 +143,6 @@ func TestCreateDirectory(t *testing.T) { } }), } - tf.Namespace = "test" ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdCreate(tf, ioStreams) diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index e98f0451a98..6446e2a354f 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -146,7 +146,7 @@ func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra } func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Command) error { - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/delete_test.go b/pkg/kubectl/cmd/delete_test.go index 51fb4a887c7..71e4405f3a2 100644 --- a/pkg/kubectl/cmd/delete_test.go +++ b/pkg/kubectl/cmd/delete_test.go @@ -52,7 +52,7 @@ func TestDeleteObjectByTuple(t *testing.T) { initTestErrorHandler(t) _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -77,7 +77,6 @@ func TestDeleteObjectByTuple(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -118,7 +117,7 @@ func TestOrphanDependentsInDeleteObject(t *testing.T) { initTestErrorHandler(t) _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -137,7 +136,6 @@ func TestOrphanDependentsInDeleteObject(t *testing.T) { } }), } - tf.Namespace = "test" // DeleteOptions.PropagationPolicy should be Foreground, when cascade is true (default). foregroundPolicy := metav1.DeletePropagationForeground @@ -170,7 +168,7 @@ func TestDeleteNamedObject(t *testing.T) { initTestErrorHandler(t) _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -195,7 +193,6 @@ func TestDeleteNamedObject(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -223,7 +220,7 @@ func TestDeleteObject(t *testing.T) { initTestErrorHandler(t) _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -240,7 +237,6 @@ func TestDeleteObject(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -260,7 +256,7 @@ func TestDeleteObjectGraceZero(t *testing.T) { pods, _, _ := testData() count := 0 - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -288,7 +284,6 @@ func TestDeleteObjectGraceZero(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, errBuf := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -307,7 +302,7 @@ func TestDeleteObjectGraceZero(t *testing.T) { func TestDeleteObjectNotFound(t *testing.T) { initTestErrorHandler(t) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -322,7 +317,6 @@ func TestDeleteObjectNotFound(t *testing.T) { } }), } - tf.Namespace = "test" options := &DeleteOptions{ FilenameOptions: resource.FilenameOptions{ @@ -345,7 +339,7 @@ func TestDeleteObjectNotFound(t *testing.T) { func TestDeleteObjectIgnoreNotFound(t *testing.T) { initTestErrorHandler(t) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -360,7 +354,6 @@ func TestDeleteObjectIgnoreNotFound(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -382,7 +375,7 @@ func TestDeleteAllNotFound(t *testing.T) { svc.Items = append(svc.Items, api.Service{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}) notFoundError := &errors.NewNotFound(api.Resource("services"), "foo").ErrStatus - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -403,7 +396,6 @@ func TestDeleteAllNotFound(t *testing.T) { } }), } - tf.Namespace = "test" // Make sure we can explicitly choose to fail on NotFound errors, even with --all options := &DeleteOptions{ @@ -429,7 +421,7 @@ func TestDeleteAllIgnoreNotFound(t *testing.T) { initTestErrorHandler(t) _, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -454,7 +446,6 @@ func TestDeleteAllIgnoreNotFound(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -472,7 +463,7 @@ func TestDeleteMultipleObject(t *testing.T) { initTestErrorHandler(t) _, svc, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -491,7 +482,6 @@ func TestDeleteMultipleObject(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -510,7 +500,7 @@ func TestDeleteMultipleObjectContinueOnMissing(t *testing.T) { initTestErrorHandler(t) _, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -529,7 +519,6 @@ func TestDeleteMultipleObjectContinueOnMissing(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() options := &DeleteOptions{ @@ -558,7 +547,7 @@ func TestDeleteMultipleObjectContinueOnMissing(t *testing.T) { func TestDeleteMultipleResourcesWithTheSameName(t *testing.T) { initTestErrorHandler(t) _, svc, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -582,7 +571,6 @@ func TestDeleteMultipleResourcesWithTheSameName(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -599,7 +587,7 @@ func TestDeleteDirectory(t *testing.T) { initTestErrorHandler(t) _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -616,7 +604,6 @@ func TestDeleteDirectory(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -634,7 +621,7 @@ func TestDeleteMultipleSelector(t *testing.T) { initTestErrorHandler(t) pods, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -663,7 +650,6 @@ func TestDeleteMultipleSelector(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdDelete(tf, streams) @@ -703,10 +689,9 @@ func TestResourceErrors(t *testing.T) { for k, testCase := range testCases { t.Run(k, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() diff --git a/pkg/kubectl/cmd/describe.go b/pkg/kubectl/cmd/describe.go index 848f82100f8..3a0feafdd8f 100644 --- a/pkg/kubectl/cmd/describe.go +++ b/pkg/kubectl/cmd/describe.go @@ -122,7 +122,7 @@ func NewCmdDescribe(parent string, f cmdutil.Factory, streams genericclioptions. func (o *DescribeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.Namespace, o.EnforceNamespace, err = f.DefaultNamespace() + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/describe_test.go b/pkg/kubectl/cmd/describe_test.go index ecc0fc8c4ed..daac115ed6f 100644 --- a/pkg/kubectl/cmd/describe_test.go +++ b/pkg/kubectl/cmd/describe_test.go @@ -41,7 +41,7 @@ func TestDescribeUnknownSchemaObject(t *testing.T) { }() cmdutil.DescriberFn = d.describerFor - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("non-default") defer tf.Cleanup() _, _, codec := cmdtesting.NewExternalScheme() @@ -52,7 +52,6 @@ func TestDescribeUnknownSchemaObject(t *testing.T) { streams, _, buf, _ := genericclioptions.NewTestIOStreams() - tf.Namespace = "non-default" cmd := NewCmdDescribe("kubectl", tf, streams) cmd.Run(cmd, []string{"type", "foo"}) @@ -82,7 +81,7 @@ func TestDescribeUnknownNamespacedSchemaObject(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, cmdtesting.NewInternalNamespacedType("", "", "foo", "non-default"))}, } - tf.Namespace = "non-default" + tf.WithNamespace("non-default") streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -107,7 +106,7 @@ func TestDescribeObject(t *testing.T) { cmdutil.DescriberFn = d.describerFor _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -123,7 +122,6 @@ func TestDescribeObject(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -149,7 +147,7 @@ func TestDescribeListObjects(t *testing.T) { cmdutil.DescriberFn = d.describerFor pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -160,7 +158,6 @@ func TestDescribeListObjects(t *testing.T) { streams, _, buf, _ := genericclioptions.NewTestIOStreams() - tf.Namespace = "test" cmd := NewCmdDescribe("kubectl", tf, streams) cmd.Run(cmd, []string{"pods"}) if buf.String() != fmt.Sprintf("%s\n\n%s", d.Output, d.Output) { @@ -177,7 +174,7 @@ func TestDescribeObjectShowEvents(t *testing.T) { cmdutil.DescriberFn = d.describerFor pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -186,7 +183,6 @@ func TestDescribeObjectShowEvents(t *testing.T) { Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, } - tf.Namespace = "test" cmd := NewCmdDescribe("kubectl", tf, genericclioptions.NewTestIOStreamsDiscard()) cmd.Flags().Set("show-events", "true") cmd.Run(cmd, []string{"pods"}) @@ -204,7 +200,7 @@ func TestDescribeObjectSkipEvents(t *testing.T) { cmdutil.DescriberFn = d.describerFor pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -213,7 +209,6 @@ func TestDescribeObjectSkipEvents(t *testing.T) { Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, } - tf.Namespace = "test" cmd := NewCmdDescribe("kubectl", tf, genericclioptions.NewTestIOStreamsDiscard()) cmd.Flags().Set("show-events", "false") cmd.Run(cmd, []string{"pods"}) diff --git a/pkg/kubectl/cmd/diff.go b/pkg/kubectl/cmd/diff.go index d513c7ab762..d4cda1261b4 100644 --- a/pkg/kubectl/cmd/diff.go +++ b/pkg/kubectl/cmd/diff.go @@ -407,7 +407,7 @@ func NewDownloader(f cmdutil.Factory) (*Downloader, error) { if err != nil { return nil, err } - d.ns, _, _ = f.DefaultNamespace() + d.ns, _, _ = f.ToRawKubeConfigLoader().Namespace() return &d, nil } @@ -451,7 +451,7 @@ func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, t printer := Printer{} - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 8a6383f9a2e..8c0baf5b5e9 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -263,7 +263,7 @@ func (o *DrainOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st o.nodeInfos = []*resource.Info{} - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/edit_test.go b/pkg/kubectl/cmd/edit_test.go index fe5fd28ae44..6820d64e7ad 100644 --- a/pkg/kubectl/cmd/edit_test.go +++ b/pkg/kubectl/cmd/edit_test.go @@ -224,12 +224,8 @@ func TestEdit(t *testing.T) { Client: fake.CreateHTTPClient(reqResp), }, nil } - - if len(testcase.Namespace) > 0 { - tf.Namespace = testcase.Namespace - } + tf.WithNamespace(testcase.Namespace) tf.ClientConfigVal = defaultClientConfig() - tf.CommandVal = "edit test cmd invocation" ioStreams, _, buf, errBuf := genericclioptions.NewTestIOStreams() var cmd *cobra.Command diff --git a/pkg/kubectl/cmd/exec.go b/pkg/kubectl/cmd/exec.go index 1866da37b42..5290b0fd22c 100644 --- a/pkg/kubectl/cmd/exec.go +++ b/pkg/kubectl/cmd/exec.go @@ -166,7 +166,7 @@ func (p *ExecOptions) Complete(f cmdutil.Factory, 
cmd *cobra.Command, argsIn []s } } - namespace, _, err := f.DefaultNamespace() + namespace, _, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/exec_test.go b/pkg/kubectl/cmd/exec_test.go index 90c0e4be2ee..187c6ff8f8d 100644 --- a/pkg/kubectl/cmd/exec_test.go +++ b/pkg/kubectl/cmd/exec_test.go @@ -132,7 +132,7 @@ func TestPodAndContainer(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() ns := legacyscheme.Codecs @@ -141,7 +141,6 @@ func TestPodAndContainer(t *testing.T) { NegotiatedSerializer: ns, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() cmd := &cobra.Command{} @@ -193,7 +192,7 @@ func TestExec(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -212,7 +211,6 @@ func TestExec(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ex := &fakeRemoteExecutor{} if test.execErr { diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index 944af7f770e..f0e309eb74f 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -201,7 +201,7 @@ func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) e return err } - o.Namespace, o.EnforceNamespace, err = f.DefaultNamespace() + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/expose_test.go b/pkg/kubectl/cmd/expose_test.go index 548a44ac5cb..7c4fa01e93c 100644 --- a/pkg/kubectl/cmd/expose_test.go +++ b/pkg/kubectl/cmd/expose_test.go @@ -467,7 +467,7 @@ func TestRunExposeService(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace(test.ns) defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -486,7 +486,6 @@ func TestRunExposeService(t *testing.T) { } }), } - tf.Namespace = test.ns ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdExposeService(tf, ioStreams) diff --git a/pkg/kubectl/cmd/get/get.go b/pkg/kubectl/cmd/get/get.go index ad6554c0ef8..ceb2e2de901 100644 --- a/pkg/kubectl/cmd/get/get.go +++ b/pkg/kubectl/cmd/get/get.go @@ -189,7 +189,7 @@ func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri } var err error - o.Namespace, o.ExplicitNamespace, err = f.DefaultNamespace() + o.Namespace, o.ExplicitNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/get/get_test.go b/pkg/kubectl/cmd/get/get_test.go index bdea31c0671..a6ab8990423 100644 --- a/pkg/kubectl/cmd/get/get_test.go +++ b/pkg/kubectl/cmd/get/get_test.go @@ -172,7 +172,7 @@ func testComponentStatusData() *api.ComponentStatusList { // Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. func TestGetUnknownSchemaObject(t *testing.T) { t.Skip("This test is completely broken. 
The first thing it does is add the object to the scheme!") - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() _, _, codec := cmdtesting.NewExternalScheme() tf.OpenAPISchemaFunc = openapitesting.CreateOpenAPISchemaFunc(openapiSchemaPath) @@ -190,7 +190,6 @@ func TestGetUnknownSchemaObject(t *testing.T) { Body: objBody(codec, obj), }, } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -225,7 +224,7 @@ func TestGetUnknownSchemaObject(t *testing.T) { // Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. func TestGetSchemaObject(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Version: "v1"}) t.Logf("%v", string(runtime.EncodeOrDie(codec, &api.ReplicationController{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}))) @@ -234,7 +233,6 @@ func TestGetSchemaObject(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.ReplicationController{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})}, } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -249,7 +247,7 @@ func TestGetSchemaObject(t *testing.T) { func TestGetObjectsWithOpenAPIOutputFormatPresent(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -260,7 +258,6 @@ func TestGetObjectsWithOpenAPIOutputFormatPresent(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -306,7 +303,7 @@ func testOpenAPISchemaData() (openapi.Resources, error) { func TestGetObjects(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -314,7 +311,6 @@ func TestGetObjects(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -332,7 +328,7 @@ foo 0/0 0 func TestGetObjectsShowKind(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -340,7 +336,6 @@ func TestGetObjectsShowKind(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -359,7 +354,7 @@ pod/foo 0/0 0 func TestGetObjectsShowLabels(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -367,7 +362,6 @@ func TestGetObjectsShowLabels(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -398,7 +392,7 @@ func TestGetObjectIgnoreNotFound(t *testing.T) { }, } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -416,7 +410,6 @@ func TestGetObjectIgnoreNotFound(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -451,7 +444,7 @@ func TestGetSortedObjects(t *testing.T) { }, } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -459,7 +452,6 @@ func TestGetSortedObjects(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: "v1"}}} streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -483,7 +475,7 @@ c 0/0 0 func TestGetObjectsIdentifiedByFile(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -491,7 +483,6 @@ func TestGetObjectsIdentifiedByFile(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])}, } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -510,7 +501,7 @@ foo 0/0 0 func TestGetListObjects(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -518,7 +509,6 @@ func TestGetListObjects(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -537,7 +527,7 @@ bar 0/0 0 func TestGetListComponentStatus(t *testing.T) { statuses := testComponentStatusData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -545,7 +535,6 @@ func TestGetListComponentStatus(t *testing.T) { NegotiatedSerializer: unstructuredSerializer, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, statuses)}, } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -578,7 +567,7 @@ func TestGetMixedGenericObjects(t *testing.T) { Code: 0, } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -594,7 +583,6 @@ func TestGetMixedGenericObjects(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -628,7 +616,7 @@ func TestGetMixedGenericObjects(t *testing.T) { func TestGetMultipleTypeObjects(t *testing.T) { pods, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -646,7 +634,6 @@ func TestGetMultipleTypeObjects(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -667,7 +654,7 @@ service/baz ClusterIP func TestGetMultipleTypeObjectsAsList(t *testing.T) { pods, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -685,7 +672,6 @@ func TestGetMultipleTypeObjectsAsList(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -767,7 +753,7 @@ func TestGetMultipleTypeObjectsAsList(t *testing.T) { func TestGetMultipleTypeObjectsWithLabelSelector(t *testing.T) { pods, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -788,7 +774,6 @@ func TestGetMultipleTypeObjectsWithLabelSelector(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -811,7 +796,7 @@ service/baz ClusterIP func TestGetMultipleTypeObjectsWithFieldSelector(t *testing.T) { pods, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -832,7 +817,6 @@ func TestGetMultipleTypeObjectsWithFieldSelector(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -860,7 +844,7 @@ func TestGetMultipleTypeObjectsWithDirectReference(t *testing.T) { }, } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -878,7 +862,6 @@ func TestGetMultipleTypeObjectsWithDirectReference(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -969,7 +952,7 @@ func watchTestData() ([]api.Pod, []watch.Event) { func TestWatchLabelSelector(t *testing.T) { pods, events := watchTestData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -997,7 +980,6 @@ func TestWatchLabelSelector(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -1021,7 +1003,7 @@ foo 0/0 0 func TestWatchFieldSelector(t *testing.T) { pods, events := watchTestData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -1049,7 +1031,6 @@ func TestWatchFieldSelector(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -1073,7 +1054,7 @@ foo 0/0 0 func TestWatchResource(t *testing.T) { pods, events := watchTestData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -1095,7 +1076,6 @@ func TestWatchResource(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -1117,7 +1097,7 @@ foo 0/0 0 func TestWatchResourceIdentifiedByFile(t *testing.T) { pods, events := watchTestData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -1139,7 +1119,6 @@ func TestWatchResourceIdentifiedByFile(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -1162,7 +1141,7 @@ foo 0/0 0 func TestWatchOnlyResource(t *testing.T) { pods, events := watchTestData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -1184,7 +1163,6 @@ func TestWatchOnlyResource(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) @@ -1205,7 +1183,7 @@ foo 0/0 0 func TestWatchOnlyList(t *testing.T) { pods, events := watchTestData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -1230,7 +1208,6 @@ func TestWatchOnlyList(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdGet("kubectl", tf, streams) diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index ce22d4af0bf..f89024770d6 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -188,7 +188,7 @@ func (o *LabelOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st return fmt.Errorf("--list and --output may not be specified together") } - o.namespace, o.enforceNamespace, err = f.DefaultNamespace() + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/label_test.go b/pkg/kubectl/cmd/label_test.go index 4bb226fa81d..f0264645e8d 100644 --- a/pkg/kubectl/cmd/label_test.go +++ b/pkg/kubectl/cmd/label_test.go @@ -322,10 +322,9 @@ func TestLabelErrors(t *testing.T) { for k, testCase := range testCases { t.Run(k, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, _, _ := genericclioptions.NewTestIOStreams() @@ -357,7 +356,7 @@ func TestLabelErrors(t *testing.T) { func TestLabelForResourceFromFile(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -388,7 +387,6 @@ func TestLabelForResourceFromFile(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -411,7 +409,7 @@ func TestLabelForResourceFromFile(t *testing.T) { } func TestLabelLocal(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.UnstructuredClient = &fake.RESTClient{ @@ -421,7 +419,6 @@ func TestLabelLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -446,7 +443,7 @@ func TestLabelLocal(t *testing.T) { func TestLabelMultipleObjects(t *testing.T) { pods, _, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -479,7 +476,6 @@ func TestLabelMultipleObjects(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ioStreams, _, buf, _ := genericclioptions.NewTestIOStreams() diff --git a/pkg/kubectl/cmd/logs.go b/pkg/kubectl/cmd/logs.go index 71eb390c6a9..9b5e55cf5ab 100644 --- a/pkg/kubectl/cmd/logs.go +++ b/pkg/kubectl/cmd/logs.go @@ -156,7 +156,7 @@ func (o *LogsOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []str return cmdutil.UsageErrorf(cmd, "%s", logsUsageStr) } var err error - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/logs_test.go b/pkg/kubectl/cmd/logs_test.go index e95e67a01fc..7b82432532e 100644 --- a/pkg/kubectl/cmd/logs_test.go +++ b/pkg/kubectl/cmd/logs_test.go @@ -58,7 +58,7 @@ func TestLog(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { logContent := "test log content" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -80,7 +80,6 @@ func TestLog(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() oldLogFn := polymorphichelpers.LogsForObjectFn defer func() { @@ -123,6 +122,7 @@ func testPod() *api.Pod { func TestValidateLogFlags(t *testing.T) { f := cmdtesting.NewTestFactory() defer f.Cleanup() + f.WithNamespace("") tests := []struct { name string diff --git a/pkg/kubectl/cmd/patch.go b/pkg/kubectl/cmd/patch.go index f773632e18c..14fa2c6a510 100644 --- a/pkg/kubectl/cmd/patch.go +++ b/pkg/kubectl/cmd/patch.go @@ -152,7 +152,7 @@ func (o *PatchOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st return o.PrintFlags.ToPrinter() } - o.namespace, o.enforceNamespace, err = f.DefaultNamespace() + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/patch_test.go b/pkg/kubectl/cmd/patch_test.go index 1311f188439..9a6ee38cae2 100644 --- a/pkg/kubectl/cmd/patch_test.go +++ b/pkg/kubectl/cmd/patch_test.go @@ -31,7 +31,7 @@ import ( func TestPatchObject(t *testing.T) { _, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -55,7 +55,6 @@ func TestPatchObject(t *testing.T) { } }), } - tf.Namespace = "test" stream, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdPatch(tf, stream) @@ -73,7 +72,7 @@ func TestPatchObject(t *testing.T) { func TestPatchObjectFromFile(t *testing.T) { _, svc, _ := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -90,7 +89,6 @@ func TestPatchObjectFromFile(t *testing.T) { } }), } - tf.Namespace = "test" stream, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdPatch(tf, stream) @@ -111,7 +109,7 @@ func TestPatchNoop(t *testing.T) { getObject := &svc.Items[0] patchObject := &svc.Items[0] - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -130,7 +128,6 @@ func TestPatchNoop(t *testing.T) { } }), } - tf.Namespace = "test" // Patched { @@ -159,7 +156,7 @@ func TestPatchObjectFromFileOutput(t *testing.T) { } svcCopy.Labels["post-patch"] = "post-patch-value" - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -178,7 +175,6 @@ func TestPatchObjectFromFileOutput(t *testing.T) { } }), } - tf.Namespace = "test" stream, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdPatch(tf, stream) diff --git a/pkg/kubectl/cmd/plugin.go b/pkg/kubectl/cmd/plugin.go index 35c4662a59b..feb35253146 100644 --- a/pkg/kubectl/cmd/plugin.go +++ b/pkg/kubectl/cmd/plugin.go @@ -158,7 +158,7 @@ type factoryAttrsPluginEnvProvider struct { } func (p *factoryAttrsPluginEnvProvider) Env() (plugins.EnvList, error) { - cmdNamespace, _, err := p.factory.DefaultNamespace() + cmdNamespace, _, err := p.factory.ToRawKubeConfigLoader().Namespace() if err != nil { return plugins.EnvList{}, err } diff --git a/pkg/kubectl/cmd/portforward.go b/pkg/kubectl/cmd/portforward.go index b6f73348e26..3baa3bee0b1 100644 --- a/pkg/kubectl/cmd/portforward.go +++ b/pkg/kubectl/cmd/portforward.go @@ -174,7 +174,7 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg return cmdutil.UsageErrorf(cmd, "TYPE/NAME and list of ports are required for port-forward") } - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/portforward_test.go b/pkg/kubectl/cmd/portforward_test.go index 87642e1c853..c2ddde7fa98 100644 --- a/pkg/kubectl/cmd/portforward_test.go +++ b/pkg/kubectl/cmd/portforward_test.go @@ -73,7 +73,7 @@ func testPortForward(t *testing.T, flags map[string]string, args []string) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { var err error - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -94,7 +94,6 @@ func testPortForward(t *testing.T, flags map[string]string, args []string) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() ff := &fakePortForwarder{} if test.pfErr { diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index 3f210c6a57a..7c94ddf4973 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -179,7 +179,7 @@ func (o *ReplaceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] o.Builder = f.NewBuilder o.BuilderArgs = args - o.Namespace, o.EnforceNamespace, err = f.DefaultNamespace() + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/replace_test.go b/pkg/kubectl/cmd/replace_test.go index e40bde5148c..13e69d907c6 100644 --- a/pkg/kubectl/cmd/replace_test.go +++ b/pkg/kubectl/cmd/replace_test.go @@ -32,7 +32,7 @@ import ( func TestReplaceObject(t *testing.T) { _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -62,7 +62,6 @@ func TestReplaceObject(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdReplace(tf, streams) @@ -89,7 +88,7 @@ func TestReplaceObject(t *testing.T) { func TestReplaceMultipleObject(t *testing.T) { _, svc, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -133,7 +132,6 @@ func TestReplaceMultipleObject(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdReplace(tf, streams) @@ -160,7 +158,7 @@ func TestReplaceMultipleObject(t *testing.T) { func TestReplaceDirectory(t *testing.T) { _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -191,7 +189,6 @@ func TestReplaceDirectory(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdReplace(tf, streams) @@ -218,7 +215,7 @@ func TestReplaceDirectory(t *testing.T) { func TestForceReplaceObjectNotFound(t *testing.T) { _, _, rc := testData() - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -238,7 +235,6 @@ func TestForceReplaceObjectNotFound(t *testing.T) { } }), } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdReplace(tf, streams) diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index de3e289f4a4..c304fdcaa1f 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -209,7 +209,7 @@ func (o *RollingUpdateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, a } var err error - o.Namespace, o.EnforceNamespace, err = f.DefaultNamespace() + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/rollout/rollout_history.go b/pkg/kubectl/cmd/rollout/rollout_history.go index 5ace22c993f..b438bc30d1d 100644 --- a/pkg/kubectl/cmd/rollout/rollout_history.go +++ b/pkg/kubectl/cmd/rollout/rollout_history.go @@ -74,7 +74,7 @@ func RunHistory(f cmdutil.Factory, cmd *cobra.Command, out io.Writer, args []str return fmt.Errorf("revision must be a positive integer: %v", revision) } - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index 51d8ea3faac..7ab8a851934 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -104,7 +104,7 @@ func (o *PauseConfig) CompletePause(f cmdutil.Factory, cmd *cobra.Command, args o.Pauser = polymorphichelpers.ObjectPauserFn - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/rollout/rollout_pause_test.go b/pkg/kubectl/cmd/rollout/rollout_pause_test.go index a40e0327305..965a52163be 100644 --- 
a/pkg/kubectl/cmd/rollout/rollout_pause_test.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause_test.go @@ -40,7 +40,7 @@ var rolloutPauseGroupVersionDecoder = schema.GroupVersion{Group: "extensions", V func TestRolloutPause(t *testing.T) { deploymentName := "deployment/nginx-deployment" ns := legacyscheme.Codecs - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON) encoder := ns.EncoderForVersion(info.Serializer, rolloutPauseGroupVersionEncoder) @@ -62,7 +62,6 @@ func TestRolloutPause(t *testing.T) { }, } - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdRolloutPause(tf, streams) diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index 8ca44d8a762..aca0534f8eb 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -102,7 +102,7 @@ func (o *ResumeConfig) CompleteResume(f cmdutil.Factory, cmd *cobra.Command, arg o.Resumer = polymorphichelpers.ObjectResumerFn - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/rollout/rollout_status.go b/pkg/kubectl/cmd/rollout/rollout_status.go index 3182c4a8f37..e705bd8d163 100644 --- a/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/pkg/kubectl/cmd/rollout/rollout_status.go @@ -104,7 +104,7 @@ func (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error o.Builder = f.NewBuilder() var err error - o.Namespace, o.EnforceNamespace, err = f.DefaultNamespace() + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/rollout/rollout_undo.go b/pkg/kubectl/cmd/rollout/rollout_undo.go index 46a66a0dd7b..f2840eebeb9 100644 --- a/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -109,7 +109,7 @@ func (o *UndoOptions) CompleteUndo(f cmdutil.Factory, cmd *cobra.Command, out io o.Out = out o.DryRun = cmdutil.GetDryRunFlag(cmd) - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index a239b34e5a7..7157b72c8bd 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -265,7 +265,7 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e return cmdutil.UsageErrorf(cmd, "--port must be set when exposing a service") } - namespace, _, err := f.DefaultNamespace() + namespace, _, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/run_test.go b/pkg/kubectl/cmd/run_test.go index b4a6f6850e6..3ffd79f1c5e 100644 --- a/pkg/kubectl/cmd/run_test.go +++ b/pkg/kubectl/cmd/run_test.go @@ -170,7 +170,7 @@ func TestRunArgsFollowDashRules(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -190,7 +190,6 @@ func TestRunArgsFollowDashRules(t *testing.T) { }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{} cmd := NewCmdRun(tf, genericclioptions.NewTestIOStreamsDiscard()) @@ -505,7 +504,7 @@ func TestRunValidations(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() _, _, codec := cmdtesting.NewExternalScheme() @@ -513,7 +512,6 @@ func TestRunValidations(t *testing.T) { NegotiatedSerializer: scheme.Codecs, Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, cmdtesting.NewInternalType("", "", ""))}, } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, _, bufErr := genericclioptions.NewTestIOStreams() diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index d873a1ae2a7..1f8c4f2577c 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -156,7 +156,7 @@ func (o *ScaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st } o.PrintObj = printer.PrintObj - o.namespace, o.enforceNamespace, err = f.DefaultNamespace() + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index d2ec28e1a7f..10800c5b716 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -233,7 +233,7 @@ func (o *EnvOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri if err != nil { return err } - o.namespace, o.enforceNamespace, err = f.DefaultNamespace() + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/set/set_env_test.go b/pkg/kubectl/cmd/set/set_env_test.go index d5d73a5163a..f3d7c3338c0 100644 --- a/pkg/kubectl/cmd/set/set_env_test.go +++ b/pkg/kubectl/cmd/set/set_env_test.go @@ -45,7 +45,7 @@ import ( ) func TestSetEnvLocal(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -56,7 +56,6 @@ func TestSetEnvLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} outputFormat := "name" @@ -83,7 +82,7 @@ func TestSetEnvLocal(t *testing.T) { } func TestSetMultiResourcesEnvLocal(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -94,7 +93,6 @@ func TestSetMultiResourcesEnvLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} outputFormat := "name" @@ -447,16 +445,15 @@ func TestSetEnvRemote(t *testing.T) { t.Run(input.name, func(t *testing.T) { groupVersion := schema.GroupVersion{Group: input.apiGroup, Version: input.apiVersion} testapi.Default = testapi.Groups[input.testAPIGroup] - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} defer tf.Cleanup() - tf.Namespace = "test" tf.Client = &fake.RESTClient{ GroupVersion: 
groupVersion, NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - resourcePath := testapi.Default.ResourcePath(input.args[0]+"s", tf.Namespace, input.args[1]) + resourcePath := testapi.Default.ResourcePath(input.args[0]+"s", "test", input.args[1]) switch p, m := req.URL.Path, req.Method; { case p == resourcePath && m == http.MethodGet: return &http.Response{StatusCode: http.StatusOK, Header: defaultHeader(), Body: objBody(input.object)}, nil @@ -585,10 +582,9 @@ func TestSetEnvFromResource(t *testing.T) { }, } t.Run(input.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} tf.Client = &fake.RESTClient{ GroupVersion: schema.GroupVersion{Group: "", Version: "v1"}, diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index b1de0b7b71a..7efa5f2d1fd 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -150,7 +150,7 @@ func (o *SetImageOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ o.PrintObj = printer.PrintObj - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/set/set_image_test.go b/pkg/kubectl/cmd/set/set_image_test.go index fb383be767d..bf731f066b2 100644 --- a/pkg/kubectl/cmd/set/set_image_test.go +++ b/pkg/kubectl/cmd/set/set_image_test.go @@ -46,7 +46,7 @@ import ( ) func TestImageLocal(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -57,7 +57,6 @@ func TestImageLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} outputFormat := "name" @@ -159,7 +158,7 @@ func TestSetImageValidation(t *testing.T) { } func TestSetMultiResourcesImageLocal(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -170,7 +169,6 @@ func TestSetMultiResourcesImageLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} outputFormat := "name" @@ -532,15 +530,14 @@ func TestSetImageRemote(t *testing.T) { t.Run(input.name, func(t *testing.T) { groupVersion := schema.GroupVersion{Group: input.apiGroup, Version: input.apiVersion} testapi.Default = testapi.Groups[input.testAPIGroup] - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.Client = &fake.RESTClient{ GroupVersion: groupVersion, NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - resourcePath := testapi.Default.ResourcePath(input.args[0]+"s", tf.Namespace, input.args[1]) + resourcePath := testapi.Default.ResourcePath(input.args[0]+"s", "test", input.args[1]) switch p, m := req.URL.Path, 
req.Method; { case p == resourcePath && m == http.MethodGet: return &http.Response{StatusCode: http.StatusOK, Header: defaultHeader(), Body: objBody(input.object)}, nil diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 055068dd203..e773bb8137e 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -160,7 +160,7 @@ func (o *SetResourcesOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, ar } o.PrintObj = printer.PrintObj - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/set/set_resources_test.go b/pkg/kubectl/cmd/set/set_resources_test.go index fe630782649..9d286278f01 100644 --- a/pkg/kubectl/cmd/set/set_resources_test.go +++ b/pkg/kubectl/cmd/set/set_resources_test.go @@ -45,7 +45,7 @@ import ( ) func TestResourcesLocal(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -56,7 +56,6 @@ func TestResourcesLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} outputFormat := "name" @@ -94,7 +93,7 @@ func TestResourcesLocal(t *testing.T) { } func TestSetMultiResourcesLimitsLocal(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -105,7 +104,6 @@ func TestSetMultiResourcesLimitsLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} outputFormat := "name" @@ -455,15 +453,14 @@ func TestSetResourcesRemote(t *testing.T) { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { groupVersion := schema.GroupVersion{Group: input.apiGroup, Version: input.apiVersion} testapi.Default = testapi.Groups[input.testAPIGroup] - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.Client = &fake.RESTClient{ GroupVersion: groupVersion, NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - resourcePath := testapi.Default.ResourcePath(input.args[0]+"s", tf.Namespace, input.args[1]) + resourcePath := testapi.Default.ResourcePath(input.args[0]+"s", "test", input.args[1]) switch p, m := req.URL.Path, req.Method; { case p == resourcePath && m == http.MethodGet: return &http.Response{StatusCode: http.StatusOK, Header: defaultHeader(), Body: objBody(input.object)}, nil diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index 54cfbacc1f7..3f29dd07c05 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -132,7 +132,7 @@ func (o *SetSelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg o.dryrun = cmdutil.GetDryRunFlag(cmd) o.output = cmdutil.GetFlagString(cmd, "output") - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/set/set_selector_test.go 
b/pkg/kubectl/cmd/set/set_selector_test.go index 9c014f0c32d..351e6d98d35 100644 --- a/pkg/kubectl/cmd/set/set_selector_test.go +++ b/pkg/kubectl/cmd/set/set_selector_test.go @@ -317,7 +317,7 @@ func TestGetResourcesAndSelector(t *testing.T) { } func TestSelectorTest(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -328,7 +328,6 @@ func TestSelectorTest(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} streams, _, buf, _ := genericclioptions.NewTestIOStreams() diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index ab4e843a619..6faf727801a 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -141,7 +141,7 @@ func (o *SetServiceAccountOptions) Complete(f cmdutil.Factory, cmd *cobra.Comman } o.PrintObj = printer.PrintObj - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/set/set_serviceaccount_test.go b/pkg/kubectl/cmd/set/set_serviceaccount_test.go index f2e06bd7bb6..7239bf1c587 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount_test.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount_test.go @@ -68,7 +68,7 @@ func TestSetServiceAccountLocal(t *testing.T) { for i, input := range inputs { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -81,7 +81,6 @@ func TestSetServiceAccountLocal(t *testing.T) { outputFormat := "yaml" - tf.Namespace = "test" streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdServiceAccount(tf, streams) cmd.Flags().Set("output", outputFormat) @@ -105,7 +104,7 @@ func TestSetServiceAccountLocal(t *testing.T) { func TestSetServiceAccountMultiLocal(t *testing.T) { testapi.Default = testapi.Groups[""] - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -116,7 +115,6 @@ func TestSetServiceAccountMultiLocal(t *testing.T) { return nil, nil }), } - tf.Namespace = "test" tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} outputFormat := "name" @@ -326,15 +324,14 @@ func TestSetServiceAccountRemote(t *testing.T) { t.Run(input.apiPrefix, func(t *testing.T) { groupVersion := schema.GroupVersion{Group: input.apiGroup, Version: input.apiVersion} testapi.Default = testapi.Groups[input.testAPIGroup] - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" tf.Client = &fake.RESTClient{ GroupVersion: groupVersion, NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - resourcePath := testapi.Default.ResourcePath(input.args[0]+"s", tf.Namespace, input.args[1]) + resourcePath := testapi.Default.ResourcePath(input.args[0]+"s", "test", input.args[1]) switch p, m := req.URL.Path, req.Method; { case p == resourcePath && m == http.MethodGet: return &http.Response{StatusCode: 
http.StatusOK, Header: defaultHeader(), Body: objBody(input.object)}, nil @@ -387,7 +384,7 @@ func TestServiceAccountValidation(t *testing.T) { } for _, input := range inputs { t.Run(input.name, func(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() tf.Client = &fake.RESTClient{ @@ -400,7 +397,6 @@ func TestServiceAccountValidation(t *testing.T) { outputFormat := "" - tf.Namespace = "test" streams := genericclioptions.NewTestIOStreamsDiscard() cmd := NewCmdServiceAccount(tf, streams) diff --git a/pkg/kubectl/cmd/set/set_subject.go b/pkg/kubectl/cmd/set/set_subject.go index c745abfc3a4..4b395b31d3c 100644 --- a/pkg/kubectl/cmd/set/set_subject.go +++ b/pkg/kubectl/cmd/set/set_subject.go @@ -131,7 +131,7 @@ func (o *SubjectOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [] o.PrintObj = printer.PrintObj var enforceNamespace bool - o.namespace, enforceNamespace, err = f.DefaultNamespace() + o.namespace, enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/set/set_subject_test.go b/pkg/kubectl/cmd/set/set_subject_test.go index 05c82082c83..33fd75cd739 100644 --- a/pkg/kubectl/cmd/set/set_subject_test.go +++ b/pkg/kubectl/cmd/set/set_subject_test.go @@ -28,11 +28,9 @@ import ( ) func TestValidate(t *testing.T) { - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() - tf.Namespace = "test" - tests := map[string]struct { options *SubjectOptions expectErr bool diff --git a/pkg/kubectl/cmd/taint.go b/pkg/kubectl/cmd/taint.go index e4c68dd4340..45b6ab0e7bf 100644 --- a/pkg/kubectl/cmd/taint.go +++ b/pkg/kubectl/cmd/taint.go @@ -124,7 +124,7 @@ func NewCmdTaint(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. // Complete adapts from the command line args and factory to the data required. 
func (o *TaintOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) (err error) { - namespace, _, err := f.DefaultNamespace() + namespace, _, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/testing/BUILD b/pkg/kubectl/cmd/testing/BUILD index 8c615c2cf7f..e6b59fa7f0e 100644 --- a/pkg/kubectl/cmd/testing/BUILD +++ b/pkg/kubectl/cmd/testing/BUILD @@ -18,7 +18,6 @@ go_library( "//pkg/kubectl/genericclioptions:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/validation:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/kubectl/cmd/testing/fake.go b/pkg/kubectl/cmd/testing/fake.go index 839ee29dbea..12d876626d7 100644 --- a/pkg/kubectl/cmd/testing/fake.go +++ b/pkg/kubectl/cmd/testing/fake.go @@ -25,8 +25,6 @@ import ( "path/filepath" "time" - "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/meta/testrestmapper" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -234,12 +232,12 @@ func (d *fakeCachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList type TestFactory struct { cmdutil.Factory + kubeConfigFlags *genericclioptions.TestConfigFlags + Client kubectl.RESTClient ScaleGetter scaleclient.ScalesGetter UnstructuredClient kubectl.RESTClient - Namespace string ClientConfigVal *restclient.Config - CommandVal string FakeDynamicClient *fakedynamic.FakeDynamicClient tempConfigFile *os.File @@ -276,6 +274,7 @@ func NewTestFactory() *TestFactory { return &TestFactory{ Factory: cmdutil.NewFactory(configFlags), + kubeConfigFlags: configFlags, FakeDynamicClient: fakedynamic.NewSimpleDynamicClient(legacyscheme.Scheme), tempConfigFile: tmpFile, @@ -283,6 +282,11 @@ func NewTestFactory() *TestFactory { } } +func (f *TestFactory) WithNamespace(ns string) *TestFactory { + f.kubeConfigFlags.WithNamespace(ns) + return f +} + func (f *TestFactory) Cleanup() { if f.tempConfigFile == nil { return @@ -295,10 +299,6 @@ func (f *TestFactory) ToRESTConfig() (*restclient.Config, error) { return f.ClientConfigVal, nil } -func (f *TestFactory) BareClientConfig() (*restclient.Config, error) { - return f.ClientConfigVal, nil -} - func (f *TestFactory) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { return f.Client, nil } @@ -314,10 +314,6 @@ func (f *TestFactory) Validator(validate bool) (validation.Schema, error) { return validation.NullSchema{}, nil } -func (f *TestFactory) DefaultNamespace() (string, bool, error) { - return f.Namespace, false, nil -} - func (f *TestFactory) OpenAPISchema() (openapi.Resources, error) { if f.OpenAPISchemaFunc != nil { return f.OpenAPISchemaFunc() @@ -325,10 +321,6 @@ func (f *TestFactory) OpenAPISchema() (openapi.Resources, error) { return openapitesting.EmptyResources{}, nil } -func (f *TestFactory) Command(*cobra.Command, bool) string { - return f.CommandVal -} - func (f *TestFactory) NewBuilder() *resource.Builder { mapper, err := f.ToRESTMapper() @@ -425,10 +417,6 @@ func (f *TestFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface, err return cachedClient, nil } -func (f *TestFactory) ClientSetForVersion(requiredVersion *schema.GroupVersion) (internalclientset.Interface, error) { - return f.ClientSet() -} - func 
testRESTMapper() meta.RESTMapper { groupResources := testDynamicResources() mapper := restmapper.NewDiscoveryRESTMapper(groupResources) diff --git a/pkg/kubectl/cmd/top_node_test.go b/pkg/kubectl/cmd/top_node_test.go index 6857355fb1a..ab387fbfc83 100644 --- a/pkg/kubectl/cmd/top_node_test.go +++ b/pkg/kubectl/cmd/top_node_test.go @@ -50,7 +50,7 @@ func TestTopNodeAllMetrics(t *testing.T) { expectedMetricsPath := fmt.Sprintf("%s/%s/nodes", baseMetricsAddress, metricsApiVersion) expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -78,7 +78,6 @@ func TestTopNodeAllMetrics(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -103,7 +102,7 @@ func TestTopNodeAllMetricsCustomDefaults(t *testing.T) { expectedMetricsPath := fmt.Sprintf("%s/%s/nodes", customBaseMetricsAddress, metricsApiVersion) expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -131,7 +130,6 @@ func TestTopNodeAllMetricsCustomDefaults(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -167,7 +165,7 @@ func TestTopNodeWithNameMetrics(t *testing.T) { expectedPath := fmt.Sprintf("%s/%s/nodes/%s", baseMetricsAddress, metricsApiVersion, expectedMetrics.Name) expectedNodePath := fmt.Sprintf("/%s/%s/nodes/%s", apiPrefix, apiVersion, expectedMetrics.Name) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -195,7 +193,6 @@ func TestTopNodeWithNameMetrics(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -234,7 +231,7 @@ func TestTopNodeWithLabelSelectorMetrics(t *testing.T) { expectedQuery := fmt.Sprintf("labelSelector=%s", url.QueryEscape(label)) expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -262,7 +259,6 @@ func TestTopNodeWithLabelSelectorMetrics(t *testing.T) { } }), } - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -289,7 +285,7 @@ func TestTopNodeAllMetricsFromMetricsServer(t *testing.T) { expectedMetrics, nodes := testNodeV1beta1MetricsData() expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
@@ -315,7 +311,6 @@ func TestTopNodeAllMetricsFromMetricsServer(t *testing.T) { fakemetricsClientset.AddReactor("list", "nodes", func(action core.Action) (handled bool, ret runtime.Object, err error) { return true, expectedMetrics, nil }) - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -357,7 +352,7 @@ func TestTopNodeWithNameMetricsFromMetricsServer(t *testing.T) { } expectedNodePath := fmt.Sprintf("/%s/%s/nodes/%s", apiPrefix, apiVersion, expectedMetrics.Name) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -383,7 +378,6 @@ func TestTopNodeWithNameMetricsFromMetricsServer(t *testing.T) { fakemetricsClientset.AddReactor("get", "nodes", func(action core.Action) (handled bool, ret runtime.Object, err error) { return true, &expectedMetrics, nil }) - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -435,7 +429,7 @@ func TestTopNodeWithLabelSelectorMetricsFromMetricsServer(t *testing.T) { label := "key=value" expectedNodePath := fmt.Sprintf("/%s/%s/nodes", apiPrefix, apiVersion) - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace("test") defer tf.Cleanup() codec := legacyscheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) @@ -462,7 +456,6 @@ func TestTopNodeWithLabelSelectorMetricsFromMetricsServer(t *testing.T) { fakemetricsClientset.AddReactor("list", "nodes", func(action core.Action) (handled bool, ret runtime.Object, err error) { return true, expectedMetrics, nil }) - tf.Namespace = "test" tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() diff --git a/pkg/kubectl/cmd/top_pod.go b/pkg/kubectl/cmd/top_pod.go index e90ac689882..b63dde44ebf 100644 --- a/pkg/kubectl/cmd/top_pod.go +++ b/pkg/kubectl/cmd/top_pod.go @@ -121,7 +121,7 @@ func (o *TopPodOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) } - o.Namespace, _, err = f.DefaultNamespace() + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/top_pod_test.go b/pkg/kubectl/cmd/top_pod_test.go index 494e08efa6c..1cba8d692eb 100644 --- a/pkg/kubectl/cmd/top_pod_test.go +++ b/pkg/kubectl/cmd/top_pod_test.go @@ -164,7 +164,7 @@ func TestTopPod(t *testing.T) { } } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace(testNS) defer tf.Cleanup() ns := legacyscheme.Codecs @@ -190,7 +190,6 @@ func TestTopPod(t *testing.T) { } }), } - tf.Namespace = testNS tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -309,7 +308,7 @@ func TestTopPodWithMetricsServer(t *testing.T) { }) } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace(testNS) defer tf.Cleanup() ns := legacyscheme.Codecs @@ -329,7 +328,6 @@ func TestTopPodWithMetricsServer(t *testing.T) { } }), } - tf.Namespace = testNS tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() @@ -508,7 +506,7 @@ func TestTopPodCustomDefaults(t *testing.T) { } } - tf := cmdtesting.NewTestFactory() + tf := cmdtesting.NewTestFactory().WithNamespace(testNS) defer tf.Cleanup() ns := legacyscheme.Codecs @@ 
-534,7 +532,6 @@ func TestTopPodCustomDefaults(t *testing.T) { } }), } - tf.Namespace = testNS tf.ClientConfigVal = defaultClientConfig() streams, _, buf, _ := genericclioptions.NewTestIOStreams() diff --git a/pkg/kubectl/cmd/util/editor/editoptions.go b/pkg/kubectl/cmd/util/editor/editoptions.go index abf1e7ef2a3..49417e54ad2 100644 --- a/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/pkg/kubectl/cmd/util/editor/editoptions.go @@ -129,7 +129,7 @@ func (o *EditOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Comm return fmt.Errorf("the edit mode doesn't support output the patch") } - cmdNamespace, enforceNamespace, err := f.DefaultNamespace() + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() if err != nil { return err } diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 24544f67213..8dc6df3fa56 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -62,11 +62,6 @@ type Factory interface { // and which implements the common patterns for CLI interactions with generic resources. NewBuilder() *resource.Builder - // Returns the default namespace to use in cases where no - // other namespace is specified and whether the namespace was - // overridden. - DefaultNamespace() (string, bool, error) - // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index 8b806097680..e01e649f4f8 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -118,10 +118,6 @@ func (f *factoryImpl) RESTClient() (*restclient.RESTClient, error) { return restclient.RESTClientFor(clientConfig) } -func (f *factoryImpl) DefaultNamespace() (string, bool, error) { - return f.clientGetter.ToRawKubeConfigLoader().Namespace() -} - func (f *factoryImpl) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { cfg, err := f.clientGetter.ToRESTConfig() if err != nil { diff --git a/pkg/kubectl/genericclioptions/BUILD b/pkg/kubectl/genericclioptions/BUILD index 88c5662b716..0a04b54eeac 100644 --- a/pkg/kubectl/genericclioptions/BUILD +++ b/pkg/kubectl/genericclioptions/BUILD @@ -30,6 +30,7 @@ go_library( "//vendor/k8s.io/client-go/rest:go_default_library", "//vendor/k8s.io/client-go/restmapper:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", + "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//vendor/k8s.io/client-go/util/homedir:go_default_library", ], ) diff --git a/pkg/kubectl/genericclioptions/config_flags_fake.go b/pkg/kubectl/genericclioptions/config_flags_fake.go index d019b8c93a2..64e9a688330 100644 --- a/pkg/kubectl/genericclioptions/config_flags_fake.go +++ b/pkg/kubectl/genericclioptions/config_flags_fake.go @@ -24,6 +24,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) type TestConfigFlags struct { @@ -74,6 +75,36 @@ func (f *TestConfigFlags) WithDiscoveryClient(c discovery.CachedDiscoveryInterfa return f } +func (f *TestConfigFlags) WithNamespace(ns string) *TestConfigFlags { + if f.clientConfig == nil { + panic("attempt to obtain a test RawKubeConfigLoader with no 
clientConfig specified") + } + f.clientConfig = &namespacedClientConfig{ + delegate: f.clientConfig, + namespace: ns, + } + return f +} + func NewTestConfigFlags() *TestConfigFlags { return &TestConfigFlags{} } + +type namespacedClientConfig struct { + delegate clientcmd.ClientConfig + namespace string +} + +func (c *namespacedClientConfig) Namespace() (string, bool, error) { + return c.namespace, false, nil +} + +func (c *namespacedClientConfig) RawConfig() (clientcmdapi.Config, error) { + return c.delegate.RawConfig() +} +func (c *namespacedClientConfig) ClientConfig() (*rest.Config, error) { + return c.delegate.ClientConfig() +} +func (c *namespacedClientConfig) ConfigAccess() clientcmd.ConfigAccess { + return c.delegate.ConfigAccess() +} From a82d11bbbab22a1fb6e89bf29f7b703db8a4b811 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Mon, 28 May 2018 15:46:18 +0200 Subject: [PATCH 235/307] Increase the timeout when waiting for the job to be gone --- test/e2e/kubectl/kubectl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 254391188d5..3132d2c11a9 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -1545,7 +1545,7 @@ metadata: Expect(runOutput).To(ContainSubstring("abcd1234")) Expect(runOutput).To(ContainSubstring("stdin closed")) - err := framework.WaitForJobGone(c, ns, jobName, 10*time.Second) + err := framework.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout) Expect(err).NotTo(HaveOccurred()) By("verifying the job " + jobName + " was deleted") From 8ff0fff06511cadb3850ab8b96098e946926999f Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Mon, 28 May 2018 16:24:19 +0200 Subject: [PATCH 236/307] Allow AWS EBS volumes to be attached as ReadOnly. --- pkg/cloudprovider/providers/aws/aws.go | 10 ++-------- pkg/volume/aws_ebs/attacher.go | 4 ++-- pkg/volume/aws_ebs/attacher_test.go | 17 +++++------------ .../persistentvolume/label/admission_test.go | 2 +- 4 files changed, 10 insertions(+), 23 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index 6ea14ef58ef..c2dc6add563 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -429,7 +429,7 @@ type Volumes interface { // Attach the disk to the node with the specified NodeName // nodeName can be empty to mean "the instance on which we are running" // Returns the device (e.g. 
/dev/xvdf) where we attached the volume - AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) + AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) // Detach the disk from the node with the specified NodeName // nodeName can be empty to mean "the instance on which we are running" // Returns the device where the volume was attached @@ -1956,7 +1956,7 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error { } // AttachDisk implements Volumes.AttachDisk -func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) { +func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) { disk, err := newAWSDisk(c, diskName) if err != nil { return "", err @@ -1967,12 +1967,6 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, return "", fmt.Errorf("error finding instance %s: %q", nodeName, err) } - if readOnly { - // TODO: We could enforce this when we mount the volume (?) - // TODO: We could also snapshot the volume and attach copies of it - return "", errors.New("AWS volumes cannot be mounted read-only") - } - // mountDevice will hold the device where we should try to attach the disk var mountDevice mountDevice // alreadyAttached is true if we have already called AttachVolume on this disk diff --git a/pkg/volume/aws_ebs/attacher.go b/pkg/volume/aws_ebs/attacher.go index 059431e1661..ac716ed1b0d 100644 --- a/pkg/volume/aws_ebs/attacher.go +++ b/pkg/volume/aws_ebs/attacher.go @@ -59,7 +59,7 @@ func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath str } func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { - volumeSource, readOnly, err := getVolumeSource(spec) + volumeSource, _, err := getVolumeSource(spec) if err != nil { return "", err } @@ -68,7 +68,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName // awsCloud.AttachDisk checks if disk is already attached to node and // succeeds in that case, so no need to do that separately. 
- devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly) + devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName) if err != nil { glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err) return "", err diff --git a/pkg/volume/aws_ebs/attacher_test.go b/pkg/volume/aws_ebs/attacher_test.go index 1076d06910e..36ed854d1a4 100644 --- a/pkg/volume/aws_ebs/attacher_test.go +++ b/pkg/volume/aws_ebs/attacher_test.go @@ -76,15 +76,14 @@ type testcase struct { func TestAttachDetach(t *testing.T) { diskName := aws.KubernetesVolumeID("disk") nodeName := types.NodeName("instance") - readOnly := false - spec := createVolSpec(diskName, readOnly) + spec := createVolSpec(diskName, false) attachError := errors.New("Fake attach error") detachError := errors.New("Fake detach error") tests := []testcase{ // Successful Attach call { name: "Attach_Positive", - attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil}, + attach: attachCall{diskName, nodeName, "/dev/sda", nil}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) return attacher.Attach(spec, nodeName) @@ -95,7 +94,7 @@ func TestAttachDetach(t *testing.T) { // Attach call fails { name: "Attach_Negative", - attach: attachCall{diskName, nodeName, readOnly, "", attachError}, + attach: attachCall{diskName, nodeName, "", attachError}, test: func(testcase *testcase) (string, error) { attacher := newAttacher(testcase) return attacher.Attach(spec, nodeName) @@ -195,7 +194,6 @@ func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec { type attachCall struct { diskName aws.KubernetesVolumeID nodeName types.NodeName - readOnly bool retDeviceName string ret error } @@ -214,7 +212,7 @@ type diskIsAttachedCall struct { ret error } -func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) { +func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) { expected := &testcase.attach if expected.diskName == "" && expected.nodeName == "" { @@ -234,12 +232,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t return "", errors.New("Unexpected AttachDisk call: wrong nodeName") } - if expected.readOnly != readOnly { - testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly) - return "", errors.New("Unexpected AttachDisk call: wrong readOnly") - } - - glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret) + glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret) return expected.retDeviceName, expected.ret } diff --git a/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go b/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go index c8cbc7ff4f4..f04939152e2 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go +++ b/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go @@ -36,7 +36,7 @@ type mockVolumes struct { var _ aws.Volumes = &mockVolumes{} -func (v *mockVolumes) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) { +func (v *mockVolumes) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) { return "", fmt.Errorf("not implemented") } From 
a3e841871ce35c4bd99e529a2d12dc84a2339588 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Mon, 28 May 2018 17:50:36 +0200 Subject: [PATCH 237/307] Add daemonset when to getReplicasFromRuntimeObject when cleaning objects in e2e --- test/e2e/framework/util.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 5dc45815b6f..ad85097d340 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -3030,6 +3030,8 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) { return *typed.Spec.Replicas, nil } return 0, nil + case *extensions.DaemonSet: + return 0, nil case *batch.Job: // TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods // that actually finish we need a better way to do this. From a2a3a98e1db80702e9adec77598db31265d240db Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Mon, 28 May 2018 17:14:53 +0200 Subject: [PATCH 238/307] DaemonSet internals are still in extensions --- test/e2e/scheduling/BUILD | 2 +- test/e2e/scheduling/nvidia-gpus.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 33a4eda78ed..1e1cda3a2c4 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -20,8 +20,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/api/v1/pod:go_default_library", - "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", + "//pkg/apis/extensions:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/quota/evaluator/core:go_default_library", diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 5e08e339c5f..b0b4c62e7c9 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/kubernetes/pkg/apis/apps" + extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" @@ -137,10 +137,10 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") framework.Logf("Successfully created daemonset to install Nvidia drivers.") - pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, apps.Kind("DaemonSet")) + pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet")) framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset") - devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", apps.Kind("DaemonSet")) + devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet")) if err == nil { framework.Logf("Adding deviceplugin addon pod.") pods.Items = append(pods.Items, devicepluginPods.Items...) 
From 6f1b178ed762202a83200c9523f095794b1c5bca Mon Sep 17 00:00:00 2001 From: Nail Islamov Date: Sun, 27 May 2018 23:40:52 +1000 Subject: [PATCH 239/307] Declare wait flag in way consistent with other deletion flags --- pkg/kubectl/cmd/delete.go | 14 ++++++-------- pkg/kubectl/cmd/delete_flags.go | 13 +++++++++++-- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 6446e2a354f..10c93998f05 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -139,8 +139,6 @@ func NewCmdDelete(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra deleteFlags.AddFlags(cmd) - cmd.Flags().Bool("wait", true, `If true, wait for resources to be gone before returning. This waits for finalizers.`) - cmdutil.AddIncludeUninitializedFlag(cmd) return cmd } @@ -165,14 +163,9 @@ func (o *DeleteOptions) Complete(f cmdutil.Factory, args []string, cmd *cobra.Co } if o.GracePeriod == 0 && !o.ForceDeletion { // To preserve backwards compatibility, but prevent accidental data loss, we convert --grace-period=0 - // into --grace-period=1 and wait until the object is successfully deleted. Users may provide --force - // to bypass this wait. - o.WaitForDeletion = true + // into --grace-period=1. Users may provide --force to bypass this conversion. o.GracePeriod = 1 } - if b, err := cmd.Flags().GetBool("wait"); err == nil { - o.WaitForDeletion = b - } includeUninitialized := cmdutil.ShouldIncludeUninitialized(cmd, false) r := f.NewBuilder(). @@ -218,6 +211,11 @@ func (o *DeleteOptions) Validate(cmd *cobra.Command) error { return fmt.Errorf("cannot set --all and --field-selector at the same time") } + if o.GracePeriod == 0 && !o.ForceDeletion && !o.WaitForDeletion { + // With the explicit --wait flag we need extra validation for backward compatibility + return fmt.Errorf("--grace-period=0 must have either --force specified, or --wait to be set to true") + } + switch { case o.GracePeriod == 0 && o.ForceDeletion: fmt.Fprintf(o.ErrOut, "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n") diff --git a/pkg/kubectl/cmd/delete_flags.go b/pkg/kubectl/cmd/delete_flags.go index fc0580c688d..c98d1fce0f5 100644 --- a/pkg/kubectl/cmd/delete_flags.go +++ b/pkg/kubectl/cmd/delete_flags.go @@ -39,6 +39,7 @@ type DeleteFlags struct { IgnoreNotFound *bool Now *bool Timeout *time.Duration + Wait *bool Output *string } @@ -85,6 +86,9 @@ func (f *DeleteFlags) ToOptions(dynamicClient dynamic.Interface, streams generic if f.Timeout != nil { options.Timeout = *f.Timeout } + if f.Wait != nil { + options.WaitForDeletion = *f.Wait + } return options } @@ -118,11 +122,12 @@ func (f *DeleteFlags) AddFlags(cmd *cobra.Command) { if f.IgnoreNotFound != nil { cmd.Flags().BoolVar(f.IgnoreNotFound, "ignore-not-found", *f.IgnoreNotFound, "Treat \"resource not found\" as a successful delete. Defaults to \"true\" when --all is specified.") } - + if f.Wait != nil { + cmd.Flags().BoolVar(f.Wait, "wait", *f.Wait, "If true, wait for resources to be gone before returning. This waits for finalizers.") + } if f.Output != nil { cmd.Flags().StringVarP(f.Output, "output", "o", *f.Output, "Output mode. 
Use \"-o name\" for shorter output (resource/name).") } - } // NewDeleteCommandFlags provides default flags and values for use with the "delete" command @@ -139,6 +144,7 @@ func NewDeleteCommandFlags(usage string) *DeleteFlags { labelSelector := "" fieldSelector := "" timeout := time.Duration(0) + wait := true filenames := []string{} recursive := false @@ -156,6 +162,7 @@ func NewDeleteCommandFlags(usage string) *DeleteFlags { IgnoreNotFound: &ignoreNotFound, Now: &now, Timeout: &timeout, + Wait: &wait, Output: &output, } } @@ -167,6 +174,7 @@ func NewDeleteFlags(usage string) *DeleteFlags { force := false timeout := time.Duration(0) + wait := false filenames := []string{} recursive := false @@ -180,5 +188,6 @@ func NewDeleteFlags(usage string) *DeleteFlags { // add non-defaults Force: &force, Timeout: &timeout, + Wait: &wait, } } From f6d4244c30060bcf6ebe4ad80ecec47e836aca30 Mon Sep 17 00:00:00 2001 From: "Dr. Stefan Schimanski" Date: Tue, 29 May 2018 09:16:25 +0200 Subject: [PATCH 240/307] client-go: document README exception in .github/PULL_REQUEST_TEMPLATE.md --- .../src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md b/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md index e559c074bb5..6aed9889cf1 100644 --- a/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md +++ b/staging/src/k8s.io/client-go/.github/PULL_REQUEST_TEMPLATE.md @@ -1,2 +1,3 @@ -Sorry, we do not accept changes directly against this repository. Please see -CONTRIBUTING.md for information on where and how to contribute instead. +Sorry, we do not accept changes directly against this repository, unless the +change is to the `README.md` itself. Please see +`CONTRIBUTING.md` for information on where and how to contribute instead. 
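[Editor's note, not part of the patch series: the --wait change in PATCH 239/307 follows kubectl's DeleteFlags convention: every option is a pointer field, and a flag is registered only when the pointer is non-nil, so individual commands opt in or out of --wait and choose their own default. A rough illustration of that convention using only the Go standard library (kubectl itself uses spf13/cobra and pflag); the type and function names below are made up for the sketch.]

package main

import (
	"flag"
	"fmt"
)

type deleteFlags struct {
	Wait *bool // nil means: this command does not expose --wait at all
}

func (f *deleteFlags) addFlags(fs *flag.FlagSet) {
	if f.Wait != nil {
		fs.BoolVar(f.Wait, "wait", *f.Wait, "If true, wait for resources to be gone before returning.")
	}
}

func main() {
	wait := true // the "delete" command defaults --wait to true
	f := &deleteFlags{Wait: &wait}

	fs := flag.NewFlagSet("delete", flag.ExitOnError)
	f.addFlags(fs)
	fs.Parse([]string{"--wait=false"})

	fmt.Println("wait for deletion:", *f.Wait) // false
}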
From 1b1c2d1264516c51222d9d9e762885322da29d77 Mon Sep 17 00:00:00 2001 From: Di Xu Date: Tue, 29 May 2018 18:07:41 +0800 Subject: [PATCH 241/307] include rollout object name in cli message --- pkg/kubectl/rollout_status.go | 10 +++++----- pkg/kubectl/rollout_status_test.go | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/kubectl/rollout_status.go b/pkg/kubectl/rollout_status.go index b65b91fd4eb..714d364fd83 100644 --- a/pkg/kubectl/rollout_status.go +++ b/pkg/kubectl/rollout_status.go @@ -83,13 +83,13 @@ func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64) return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", name) } if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas { - return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), false, nil + return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d out of %d new replicas have been updated...\n", name, deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), false, nil } if deployment.Status.Replicas > deployment.Status.UpdatedReplicas { - return fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...\n", deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil + return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d old replicas are pending termination...\n", name, deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil } if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas { - return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...\n", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false, nil + return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d of %d updated replicas are available...\n", name, deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false, nil } return fmt.Sprintf("deployment %q successfully rolled out\n", name), true, nil } @@ -109,10 +109,10 @@ func (s *DaemonSetStatusViewer) Status(namespace, name string, revision int64) ( } if daemon.Generation <= daemon.Status.ObservedGeneration { if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled { - return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new pods have been updated...\n", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil + return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...\n", name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil } if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled { - return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated pods are available...\n", daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled), false, nil + return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...\n", name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled), false, nil } return fmt.Sprintf("daemon set %q successfully rolled out\n", name), true, nil } diff --git a/pkg/kubectl/rollout_status_test.go b/pkg/kubectl/rollout_status_test.go index 729709a7af3..ae346f116bc 100644 --- a/pkg/kubectl/rollout_status_test.go +++ 
b/pkg/kubectl/rollout_status_test.go @@ -45,7 +45,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { UnavailableReplicas: 0, }, - msg: "Waiting for rollout to finish: 0 out of 1 new replicas have been updated...\n", + msg: "Waiting for deployment \"foo\" rollout to finish: 0 out of 1 new replicas have been updated...\n", done: false, }, { @@ -59,7 +59,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { UnavailableReplicas: 0, }, - msg: "Waiting for rollout to finish: 1 old replicas are pending termination...\n", + msg: "Waiting for deployment \"foo\" rollout to finish: 1 old replicas are pending termination...\n", done: false, }, { @@ -73,7 +73,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { UnavailableReplicas: 1, }, - msg: "Waiting for rollout to finish: 1 of 2 updated replicas are available...\n", + msg: "Waiting for deployment \"foo\" rollout to finish: 1 of 2 updated replicas are available...\n", done: false, }, { @@ -155,7 +155,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) { NumberAvailable: 0, }, - msg: "Waiting for rollout to finish: 0 out of 1 new pods have been updated...\n", + msg: "Waiting for daemon set \"foo\" rollout to finish: 0 out of 1 new pods have been updated...\n", done: false, }, { @@ -167,7 +167,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) { NumberAvailable: 1, }, - msg: "Waiting for rollout to finish: 1 of 2 updated pods are available...\n", + msg: "Waiting for daemon set \"foo\" rollout to finish: 1 of 2 updated pods are available...\n", done: false, }, { From 2bf66c377d60276e017e78ab9dbc0490f26baa17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20Mudrini=C4=87?= Date: Tue, 17 Apr 2018 22:22:37 +0200 Subject: [PATCH 242/307] apiextensions-apiserver: add establishing controller to avoid race between established and CRs actually served --- cmd/kube-apiserver/app/apiextensions.go | 2 + cmd/kube-apiserver/app/server.go | 2 +- .../src/k8s.io/apiextensions-apiserver/BUILD | 1 + .../pkg/apiserver/BUILD | 1 + .../pkg/apiserver/apiserver.go | 10 +- .../pkg/apiserver/customresource_handler.go | 33 +++- .../pkg/controller/establish/BUILD | 34 +++++ .../establish/establishing_controller.go | 142 ++++++++++++++++++ .../pkg/controller/status/BUILD | 1 + .../controller/status/naming_controller.go | 18 ++- .../status/naming_controller_test.go | 63 ++++---- 11 files changed, 270 insertions(+), 37 deletions(-) create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD create mode 100644 staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go diff --git a/cmd/kube-apiserver/app/apiextensions.go b/cmd/kube-apiserver/app/apiextensions.go index 375372a162e..b94b5f311b0 100644 --- a/cmd/kube-apiserver/app/apiextensions.go +++ b/cmd/kube-apiserver/app/apiextensions.go @@ -35,6 +35,7 @@ func createAPIExtensionsConfig( externalInformers kubeexternalinformers.SharedInformerFactory, pluginInitializers []admission.PluginInitializer, commandOptions *options.ServerRunOptions, + masterCount int, ) (*apiextensionsapiserver.Config, error) { // make a shallow copy to let us twiddle a few things // most of the config actually remains the same. 
We only need to mess with a couple items related to the particulars of the apiextensions @@ -69,6 +70,7 @@ func createAPIExtensionsConfig( }, ExtraConfig: apiextensionsapiserver.ExtraConfig{ CRDRESTOptionsGetter: apiextensionscmd.NewCRDRESTOptionsGetter(etcdOptions), + MasterCount: masterCount, }, } diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 3fb5ed71bb1..b936c9ece2a 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -165,7 +165,7 @@ func CreateServerChain(completedOptions completedServerRunOptions, stopCh <-chan } // If additional API servers are added, they should be gated. - apiExtensionsConfig, err := createAPIExtensionsConfig(*kubeAPIServerConfig.GenericConfig, versionedInformers, pluginInitializer, completedOptions.ServerRunOptions) + apiExtensionsConfig, err := createAPIExtensionsConfig(*kubeAPIServerConfig.GenericConfig, versionedInformers, pluginInitializer, completedOptions.ServerRunOptions, completedOptions.MasterCount) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/apiextensions-apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/BUILD index 8092fcb8216..31d0de0ae36 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/BUILD @@ -44,6 +44,7 @@ filegroup( "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server:all-srcs", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status:all-srcs", "//staging/src/k8s.io/apiextensions-apiserver/pkg/features:all-srcs", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index 4d5e2a73ec0..f9c4a706d91 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -32,6 +32,7 @@ go_library( "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/controller/establish:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/controller/finalizer:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/controller/status:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/features:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go index f2c73601135..f1fc89ba96c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go @@ -37,11 +37,11 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset" internalinformers 
"k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" + "k8s.io/apiextensions-apiserver/pkg/controller/establish" "k8s.io/apiextensions-apiserver/pkg/controller/finalizer" "k8s.io/apiextensions-apiserver/pkg/controller/status" "k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition" - // make sure the generated client works _ "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" _ "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" _ "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" @@ -74,6 +74,10 @@ func init() { type ExtraConfig struct { CRDRESTOptionsGetter genericregistry.RESTOptionsGetter + + // MasterCount is used to detect whether cluster is HA, and if it is + // the CRD Establishing will be hold by 5 seconds. + MasterCount int } type Config struct { @@ -162,6 +166,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) discovery: map[string]*discovery.APIGroupHandler{}, delegate: delegateHandler, } + establishingController := establish.NewEstablishingController(s.Informers.Apiextensions().InternalVersion().CustomResourceDefinitions(), crdClient.Apiextensions()) crdHandler := NewCustomResourceDefinitionHandler( versionDiscoveryHandler, groupDiscoveryHandler, @@ -169,6 +174,8 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) delegateHandler, c.ExtraConfig.CRDRESTOptionsGetter, c.GenericConfig.AdmissionControl, + establishingController, + c.ExtraConfig.MasterCount, ) s.GenericAPIServer.Handler.NonGoRestfulMux.Handle("/apis", crdHandler) s.GenericAPIServer.Handler.NonGoRestfulMux.HandlePrefix("/apis/", crdHandler) @@ -188,6 +195,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) s.GenericAPIServer.AddPostStartHook("start-apiextensions-controllers", func(context genericapiserver.PostStartHookContext) error { go crdController.Run(context.StopCh) go namingController.Run(context.StopCh) + go establishingController.Run(context.StopCh) go finalizingController.Run(5, context.StopCh) return nil }) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index 231bffdbf6d..e93fad14336 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -62,6 +62,7 @@ import ( apiservervalidation "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" informers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion" listers "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion" + "k8s.io/apiextensions-apiserver/pkg/controller/establish" "k8s.io/apiextensions-apiserver/pkg/controller/finalizer" apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features" "k8s.io/apiextensions-apiserver/pkg/registry/customresource" @@ -86,6 +87,12 @@ type crdHandler struct { delegate http.Handler restOptionsGetter generic.RESTOptionsGetter admission admission.Interface + + establishingController *establish.EstablishingController + + // MasterCount is used to implement sleep to improve + // CRD establishing process for HA clusters. 
+ masterCount int } // crdInfo stores enough information to serve the storage for the custom resource @@ -120,7 +127,9 @@ func NewCustomResourceDefinitionHandler( crdInformer informers.CustomResourceDefinitionInformer, delegate http.Handler, restOptionsGetter generic.RESTOptionsGetter, - admission admission.Interface) *crdHandler { + admission admission.Interface, + establishingController *establish.EstablishingController, + masterCount int) *crdHandler { ret := &crdHandler{ versionDiscoveryHandler: versionDiscoveryHandler, groupDiscoveryHandler: groupDiscoveryHandler, @@ -129,6 +138,8 @@ func NewCustomResourceDefinitionHandler( delegate: delegate, restOptionsGetter: restOptionsGetter, admission: admission, + establishingController: establishingController, + masterCount: masterCount, } crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: ret.updateCustomResourceDefinition, @@ -181,7 +192,12 @@ func (r *crdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { r.delegate.ServeHTTP(w, req) return } - if !apiextensions.IsCRDConditionTrue(crd, apiextensions.Established) { + // There is a small chance that a CRD is being served because NamesAccepted condition is true, + // but it becomes "unserved" because another names update leads to a conflict + // and EstablishingController wasn't fast enough to put the CRD into the Established condition. + // We accept this as the problem is small and self-healing. + if !apiextensions.IsCRDConditionTrue(crd, apiextensions.NamesAccepted) && + !apiextensions.IsCRDConditionTrue(crd, apiextensions.Established) { r.delegate.ServeHTTP(w, req) return } @@ -299,6 +315,19 @@ func (r *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) r.customStorageLock.Lock() defer r.customStorageLock.Unlock() + // Add CRD to the establishing controller queue. + // For HA clusters, we want to prevent race conditions when changing status to Established, + // so we want to be sure that CRD is Installing at least for 5 seconds before Establishing it. + // TODO: find a real HA safe checkpointing mechanism instead of an arbitrary wait. 
+ if !apiextensions.IsCRDConditionTrue(newCRD, apiextensions.Established) && + apiextensions.IsCRDConditionTrue(newCRD, apiextensions.NamesAccepted) { + if r.masterCount > 1 { + r.establishingController.QueueCRD(newCRD.Name, 5*time.Second) + } else { + r.establishingController.QueueCRD(newCRD.Name, 0) + } + } + storageMap := r.customStorage.Load().(crdStorageMap) oldInfo, found := storageMap[newCRD.UID] if !found { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD new file mode 100644 index 00000000000..fa3e5dfa1bb --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["establishing_controller.go"], + importpath = "k8s.io/apiextensions-apiserver/pkg/controller/establish", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library", + "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", + "//vendor/k8s.io/client-go/util/workqueue:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go new file mode 100644 index 00000000000..5c2ebbcaad8 --- /dev/null +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go @@ -0,0 +1,142 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package establish + +import ( + "fmt" + "time" + + "github.com/golang/glog" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + client "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion" + informers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion" + listers "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion" +) + +// EstablishingController controls how and when CRD is established. +type EstablishingController struct { + crdClient client.CustomResourceDefinitionsGetter + crdLister listers.CustomResourceDefinitionLister + crdSynced cache.InformerSynced + + // To allow injection for testing. + syncFn func(key string) error + + queue workqueue.RateLimitingInterface +} + +// NewEstablishingController creates new EstablishingController. +func NewEstablishingController(crdInformer informers.CustomResourceDefinitionInformer, + crdClient client.CustomResourceDefinitionsGetter) *EstablishingController { + ec := &EstablishingController{ + crdClient: crdClient, + crdLister: crdInformer.Lister(), + crdSynced: crdInformer.Informer().HasSynced, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crdEstablishing"), + } + + ec.syncFn = ec.sync + + return ec +} + +// QueueCRD adds CRD into the establishing queue. +func (ec *EstablishingController) QueueCRD(key string, timeout time.Duration) { + ec.queue.AddAfter(key, timeout) +} + +// Run starts the EstablishingController. +func (ec *EstablishingController) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer ec.queue.ShutDown() + + glog.Infof("Starting EstablishingController") + defer glog.Infof("Shutting down EstablishingController") + + if !cache.WaitForCacheSync(stopCh, ec.crdSynced) { + return + } + + // only start one worker thread since its a slow moving API + go wait.Until(ec.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (ec *EstablishingController) runWorker() { + for ec.processNextWorkItem() { + } +} + +// processNextWorkItem deals with one key off the queue. +// It returns false when it's time to quit. +func (ec *EstablishingController) processNextWorkItem() bool { + key, quit := ec.queue.Get() + if quit { + return false + } + defer ec.queue.Done(key) + + err := ec.syncFn(key.(string)) + if err == nil { + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", key, err)) + ec.queue.AddRateLimited(key) + + return true +} + +// sync is used to turn CRDs into the Established state. +func (ec *EstablishingController) sync(key string) error { + cachedCRD, err := ec.crdLister.Get(key) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + if !apiextensions.IsCRDConditionTrue(cachedCRD, apiextensions.NamesAccepted) || + apiextensions.IsCRDConditionTrue(cachedCRD, apiextensions.Established) { + return nil + } + + crd := cachedCRD.DeepCopy() + establishedCondition := apiextensions.CustomResourceDefinitionCondition{ + Type: apiextensions.Established, + Status: apiextensions.ConditionTrue, + Reason: "InitialNamesAccepted", + Message: "the initial names have been accepted", + } + apiextensions.SetCRDCondition(crd, establishedCondition) + + // Update server with new CRD condition. 
+ _, err = ec.crdClient.CustomResourceDefinitions().UpdateStatus(crd) + if err != nil { + return err + } + + return nil +} diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD index c408cf4874e..0489e9fccd7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD @@ -28,6 +28,7 @@ go_library( "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go index 16016e493a5..f00def4b124 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go @@ -24,6 +24,7 @@ import ( "github.com/golang/glog" + "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -191,7 +192,10 @@ func (c *NamingConditionController) calculateNamesAndConditions(in *apiextension namesAcceptedCondition.Message = "no conflicts found" } - // set EstablishedCondition to true if all names are accepted. Never set it back to false. + // set EstablishedCondition initially to false, then set it to true in establishing controller. + // The Establishing Controller will see the NamesAccepted condition when it arrives through the shared informer. + // At that time the API endpoint handler will serve the endpoint, avoiding a race + // which we had if we set Established to true here. establishedCondition := apiextensions.CustomResourceDefinitionCondition{ Type: apiextensions.Established, Status: apiextensions.ConditionFalse, @@ -204,8 +208,8 @@ func (c *NamingConditionController) calculateNamesAndConditions(in *apiextension if establishedCondition.Status != apiextensions.ConditionTrue && namesAcceptedCondition.Status == apiextensions.ConditionTrue { establishedCondition = apiextensions.CustomResourceDefinitionCondition{ Type: apiextensions.Established, - Status: apiextensions.ConditionTrue, - Reason: "InitialNamesAccepted", + Status: apiextensions.ConditionFalse, + Reason: "Installing", Message: "the initial names have been accepted", } } @@ -238,12 +242,16 @@ func (c *NamingConditionController) sync(key string) error { return err } + // Skip checking names if Spec and Status names are same. 
+ if equality.Semantic.DeepEqual(inCustomResourceDefinition.Spec.Names, inCustomResourceDefinition.Status.AcceptedNames) { + return nil + } + acceptedNames, namingCondition, establishedCondition := c.calculateNamesAndConditions(inCustomResourceDefinition) // nothing to do if accepted names and NamesAccepted condition didn't change if reflect.DeepEqual(inCustomResourceDefinition.Status.AcceptedNames, acceptedNames) && - apiextensions.IsCRDConditionEquivalent(&namingCondition, apiextensions.FindCRDCondition(inCustomResourceDefinition, apiextensions.NamesAccepted)) && - apiextensions.IsCRDConditionEquivalent(&establishedCondition, apiextensions.FindCRDCondition(inCustomResourceDefinition, apiextensions.Established)) { + apiextensions.IsCRDConditionEquivalent(&namingCondition, apiextensions.FindCRDCondition(inCustomResourceDefinition, apiextensions.NamesAccepted)) { return nil } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller_test.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller_test.go index 615c5dd85ff..717e5288484 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller_test.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller_test.go @@ -95,19 +95,17 @@ var acceptedCondition = apiextensions.CustomResourceDefinitionCondition{ Message: "no conflicts found", } -func nameConflictCondition(reason, message string) apiextensions.CustomResourceDefinitionCondition { - return apiextensions.CustomResourceDefinitionCondition{ - Type: apiextensions.NamesAccepted, - Status: apiextensions.ConditionFalse, - Reason: reason, - Message: message, - } +var notAcceptedCondition = apiextensions.CustomResourceDefinitionCondition{ + Type: apiextensions.NamesAccepted, + Status: apiextensions.ConditionFalse, + Reason: "NotAccepted", + Message: "not all names are accepted", } -var establishedCondition = apiextensions.CustomResourceDefinitionCondition{ +var installingCondition = apiextensions.CustomResourceDefinitionCondition{ Type: apiextensions.Established, - Status: apiextensions.ConditionTrue, - Reason: "InitialNamesAccepted", + Status: apiextensions.ConditionFalse, + Reason: "Installing", Message: "the initial names have been accepted", } @@ -118,6 +116,15 @@ var notEstablishedCondition = apiextensions.CustomResourceDefinitionCondition{ Message: "not all names are accepted", } +func nameConflictCondition(reason, message string) apiextensions.CustomResourceDefinitionCondition { + return apiextensions.CustomResourceDefinitionCondition{ + Type: apiextensions.NamesAccepted, + Status: apiextensions.ConditionFalse, + Reason: reason, + Message: message, + } +} + func TestSync(t *testing.T) { tests := []struct { name string @@ -136,7 +143,7 @@ func TestSync(t *testing.T) { Plural: "alfa", }, expectedNameConflictCondition: acceptedCondition, - expectedEstablishedCondition: establishedCondition, + expectedEstablishedCondition: installingCondition, }, { name: "different groups", @@ -146,7 +153,7 @@ func TestSync(t *testing.T) { }, expectedNames: names("alfa", "delta-singular", "echo-kind", "foxtrot-listkind", "golf-shortname-1", "hotel-shortname-2"), expectedNameConflictCondition: acceptedCondition, - expectedEstablishedCondition: establishedCondition, + expectedEstablishedCondition: installingCondition, }, { name: "conflict plural to singular", @@ -206,7 +213,7 @@ func TestSync(t *testing.T) { }, expectedNames: names("alfa", "delta-singular", "echo-kind", 
"foxtrot-listkind", "golf-shortname-1", "hotel-shortname-2"), expectedNameConflictCondition: acceptedCondition, - expectedEstablishedCondition: establishedCondition, + expectedEstablishedCondition: installingCondition, }, { name: "merge on conflicts", @@ -248,7 +255,7 @@ func TestSync(t *testing.T) { }, expectedNames: names("alfa", "delta-singular", "echo-kind", "foxtrot-listkind", "golf-shortname-1", "hotel-shortname-2"), expectedNameConflictCondition: acceptedCondition, - expectedEstablishedCondition: establishedCondition, + expectedEstablishedCondition: installingCondition, }, { name: "no conflicts on self, remove shortname", @@ -264,44 +271,44 @@ func TestSync(t *testing.T) { }, expectedNames: names("alfa", "delta-singular", "echo-kind", "foxtrot-listkind", "golf-shortname-1"), expectedNameConflictCondition: acceptedCondition, - expectedEstablishedCondition: establishedCondition, + expectedEstablishedCondition: installingCondition, }, { - name: "established before with true condition", - in: newCRD("alfa.bravo.com").Condition(establishedCondition).NewOrDie(), + name: "installing before with true condition", + in: newCRD("alfa.bravo.com").Condition(acceptedCondition).NewOrDie(), existing: []*apiextensions.CustomResourceDefinition{}, expectedNames: apiextensions.CustomResourceDefinitionNames{ Plural: "alfa", }, expectedNameConflictCondition: acceptedCondition, - expectedEstablishedCondition: establishedCondition, + expectedEstablishedCondition: installingCondition, }, { - name: "not established before with false condition", - in: newCRD("alfa.bravo.com").Condition(notEstablishedCondition).NewOrDie(), + name: "not installing before with false condition", + in: newCRD("alfa.bravo.com").Condition(notAcceptedCondition).NewOrDie(), existing: []*apiextensions.CustomResourceDefinition{}, expectedNames: apiextensions.CustomResourceDefinitionNames{ Plural: "alfa", }, expectedNameConflictCondition: acceptedCondition, - expectedEstablishedCondition: establishedCondition, + expectedEstablishedCondition: installingCondition, }, { - name: "conflicting, established before with true condition", + name: "conflicting, installing before with true condition", in: newCRD("alfa.bravo.com").SpecNames("alfa", "delta-singular", "echo-kind", "foxtrot-listkind", "golf-shortname-1", "hotel-shortname-2"). - Condition(establishedCondition). + Condition(acceptedCondition). NewOrDie(), existing: []*apiextensions.CustomResourceDefinition{ newCRD("india.bravo.com").StatusNames("india", "alfa", "", "").NewOrDie(), }, expectedNames: names("", "delta-singular", "echo-kind", "foxtrot-listkind", "golf-shortname-1", "hotel-shortname-2"), expectedNameConflictCondition: nameConflictCondition("PluralConflict", `"alfa" is already in use`), - expectedEstablishedCondition: establishedCondition, + expectedEstablishedCondition: notEstablishedCondition, }, { - name: "conflicting, not established before with false condition", + name: "conflicting, not installing before with false condition", in: newCRD("alfa.bravo.com").SpecNames("alfa", "delta-singular", "echo-kind", "foxtrot-listkind", "golf-shortname-1", "hotel-shortname-2"). - Condition(notEstablishedCondition). + Condition(notAcceptedCondition). 
NewOrDie(), existing: []*apiextensions.CustomResourceDefinition{ newCRD("india.bravo.com").StatusNames("india", "alfa", "", "").NewOrDie(), @@ -322,7 +329,7 @@ func TestSync(t *testing.T) { crdLister: listers.NewCustomResourceDefinitionLister(crdIndexer), crdMutationCache: cache.NewIntegerResourceVersionMutationCache(crdIndexer, crdIndexer, 60*time.Second, false), } - actualNames, actualNameConflictCondition, actualEstablishedCondition := c.calculateNamesAndConditions(tc.in) + actualNames, actualNameConflictCondition, establishedCondition := c.calculateNamesAndConditions(tc.in) if e, a := tc.expectedNames, actualNames; !reflect.DeepEqual(e, a) { t.Errorf("%v expected %v, got %#v", tc.name, e, a) @@ -330,7 +337,7 @@ func TestSync(t *testing.T) { if e, a := tc.expectedNameConflictCondition, actualNameConflictCondition; !apiextensions.IsCRDConditionEquivalent(&e, &a) { t.Errorf("%v expected %v, got %v", tc.name, e, a) } - if e, a := tc.expectedEstablishedCondition, actualEstablishedCondition; !apiextensions.IsCRDConditionEquivalent(&e, &a) { + if e, a := tc.expectedEstablishedCondition, establishedCondition; !apiextensions.IsCRDConditionEquivalent(&e, &a) { t.Errorf("%v expected %v, got %v", tc.name, e, a) } } From b3ce7a9935e5c71225493cbad744bd9a3b2f4875 Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 29 May 2018 08:28:41 -0400 Subject: [PATCH 243/307] services must listen on port 443 --- .../status/available_controller.go | 16 +++++++++++++ .../status/available_controller_test.go | 24 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index a9bbe08a1fd..367b60dbeb8 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -176,6 +176,22 @@ func (c *AvailableConditionController) sync(key string) error { } if service.Spec.Type == v1.ServiceTypeClusterIP { + // if we have a cluster IP service, it must be listening on 443 and we can check that + foundPort := false + for _, port := range service.Spec.Ports { + if port.Port == 443 { + foundPort = true + } + } + if !foundPort { + availableCondition.Status = apiregistration.ConditionFalse + availableCondition.Reason = "ServicePortError" + availableCondition.Message = fmt.Sprintf("service/%s in %q is not listening on port 443", apiService.Spec.Service.Name, apiService.Spec.Service.Namespace) + apiregistration.SetAPIServiceCondition(apiService, availableCondition) + _, err := c.apiServiceClient.APIServices().UpdateStatus(apiService) + return err + } + endpoints, err := c.endpointsLister.Endpoints(apiService.Spec.Service.Namespace).Get(apiService.Spec.Service.Name) if apierrors.IsNotFound(err) { availableCondition.Status = apiregistration.ConditionFalse diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go index d0dbff24776..d82a0a0bd39 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller_test.go @@ -55,6 +55,9 @@ func newService(namespace, name string) *v1.Service { ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Spec: v1.ServiceSpec{ Type: 
v1.ServiceTypeClusterIP, + Ports: []v1.ServicePort{ + {Port: 443}, + }, }, } } @@ -110,6 +113,27 @@ func TestSync(t *testing.T) { Message: `service/bar in "foo" is not present`, }, }, + { + name: "service on bad port", + apiServiceName: "remote.group", + apiServices: []*apiregistration.APIService{newRemoteAPIService("remote.group")}, + services: []*v1.Service{{ + ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}, + Spec: v1.ServiceSpec{ + Type: v1.ServiceTypeClusterIP, + Ports: []v1.ServicePort{ + {Port: 6443}, + }, + }, + }}, + endpoints: []*v1.Endpoints{newEndpointsWithAddress("foo", "bar")}, + expectedAvailability: apiregistration.APIServiceCondition{ + Type: apiregistration.Available, + Status: apiregistration.ConditionFalse, + Reason: "ServicePortError", + Message: `service/bar in "foo" is not listening on port 443`, + }, + }, { name: "no endpoints", apiServiceName: "remote.group", From cb09607536b178cddc831c6513bdba27b41c61f0 Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 29 May 2018 08:34:47 -0400 Subject: [PATCH 244/307] fix the delete result being used --- pkg/kubectl/cmd/delete.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index 10c93998f05..cbe8abd0a28 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -273,7 +273,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { effectiveTimeout = 168 * time.Hour } waitOptions := kubectlwait.WaitOptions{ - ResourceFinder: genericclioptions.ResourceFinderForResult(o.Result), + ResourceFinder: genericclioptions.ResourceFinderForResult(r), DynamicClient: o.DynamicClient, Timeout: effectiveTimeout, From 11f65b2a30be2bfc014c563dc7a8e98ec3ccc1ff Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Fri, 25 May 2018 15:49:44 +0200 Subject: [PATCH 245/307] client-go: start fresh with owner file --- staging/src/k8s.io/client-go/OWNERS | 40 ++++------------------------- 1 file changed, 5 insertions(+), 35 deletions(-) diff --git a/staging/src/k8s.io/client-go/OWNERS b/staging/src/k8s.io/client-go/OWNERS index 13af74c628a..b3991682993 100644 --- a/staging/src/k8s.io/client-go/OWNERS +++ b/staging/src/k8s.io/client-go/OWNERS @@ -1,45 +1,15 @@ approvers: - caesarxuchao - deads2k -- krousey - lavalamp +- liggitt - smarterclayton - sttts -- liggitt reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- yujuhong -- derekwaynecarr - caesarxuchao -- vishh -- mikedanese +- deads2k +- lavalamp - liggitt -- nikhiljindal -- gmarek -- erictune -- davidopp -- pmorie -- sttts -- dchen1107 -- saad-ali -- zmerlynn -- luxas -- janetkuo -- justinsb -- roberthbailey -- ncdc -- tallclair -- yifan-gu -- eparis -- mwielgus -- timothysc -- feiskyer -- jlowdermilk - soltysh -- piosz -- jsafrane -- awly +- sttts +- yliaog From 9c5bdd4b5c67024412a84e5fb09033cd38d21e9d Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 29 May 2018 10:46:54 -0400 Subject: [PATCH 246/307] add resource builder flags --- pkg/kubectl/cmd/wait/wait.go | 9 +- pkg/kubectl/cmd/wait/wait_test.go | 4 +- .../genericclioptions/builder_flags.go | 98 ++++++++++++++++--- .../genericclioptions/builder_flags_fake.go | 2 +- 4 files changed, 92 insertions(+), 21 deletions(-) diff --git a/pkg/kubectl/cmd/wait/wait.go b/pkg/kubectl/cmd/wait/wait.go index 9ccf3cb735e..37b5a4b66d1 100644 --- a/pkg/kubectl/cmd/wait/wait.go +++ b/pkg/kubectl/cmd/wait/wait.go @@ -53,9 +53,12 @@ type WaitFlags struct { // NewWaitFlags returns a default WaitFlags func NewWaitFlags(restClientGetter genericclioptions.RESTClientGetter, streams genericclioptions.IOStreams) *WaitFlags { return &WaitFlags{ - RESTClientGetter: restClientGetter, - PrintFlags: genericclioptions.NewPrintFlags("condition met"), - ResourceBuilderFlags: genericclioptions.NewResourceBuilderFlags(), + RESTClientGetter: restClientGetter, + PrintFlags: genericclioptions.NewPrintFlags("condition met"), + ResourceBuilderFlags: genericclioptions.NewResourceBuilderFlags(). + WithLabelSelector(""). + WithAllNamespaces(false). 
+ WithLatest(), Timeout: 30 * time.Second, diff --git a/pkg/kubectl/cmd/wait/wait_test.go b/pkg/kubectl/cmd/wait/wait_test.go index 77d98e8d459..27446e840f6 100644 --- a/pkg/kubectl/cmd/wait/wait_test.go +++ b/pkg/kubectl/cmd/wait/wait_test.go @@ -219,7 +219,7 @@ func TestWaitForDeletion(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeClient := test.fakeClient() o := &WaitOptions{ - ResourceFinder: genericclioptions.NewSimpleResourceFinder(test.info), + ResourceFinder: genericclioptions.NewSimpleFakeResourceFinder(test.info), DynamicClient: fakeClient, Timeout: test.timeout, @@ -451,7 +451,7 @@ func TestWaitForCondition(t *testing.T) { t.Run(test.name, func(t *testing.T) { fakeClient := test.fakeClient() o := &WaitOptions{ - ResourceFinder: genericclioptions.NewSimpleResourceFinder(test.info), + ResourceFinder: genericclioptions.NewSimpleFakeResourceFinder(test.info), DynamicClient: fakeClient, Timeout: test.timeout, diff --git a/pkg/kubectl/genericclioptions/builder_flags.go b/pkg/kubectl/genericclioptions/builder_flags.go index 4648751c315..ad0ec7a6376 100644 --- a/pkg/kubectl/genericclioptions/builder_flags.go +++ b/pkg/kubectl/genericclioptions/builder_flags.go @@ -18,6 +18,7 @@ package genericclioptions import ( "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" ) @@ -29,8 +30,11 @@ type ResourceBuilderFlags struct { LabelSelector *string FieldSelector *string AllNamespaces *bool + All *bool + Local *bool - All bool + Scheme *runtime.Scheme + Latest bool } // NewResourceBuilderFlags returns a default ResourceBuilderFlags @@ -43,17 +47,54 @@ func NewResourceBuilderFlags() *ResourceBuilderFlags { Filenames: &filenames, Recursive: boolPtr(true), }, - - LabelSelector: strPtr(""), - AllNamespaces: boolPtr(false), } } +func (o *ResourceBuilderFlags) WithFile(recurse bool, files ...string) *ResourceBuilderFlags { + o.FileNameFlags = &FileNameFlags{ + Usage: "identifying the resource.", + Filenames: &files, + Recursive: boolPtr(recurse), + } + + return o +} + +func (o *ResourceBuilderFlags) WithLabelSelector(selector string) *ResourceBuilderFlags { + o.LabelSelector = &selector + return o +} + func (o *ResourceBuilderFlags) WithFieldSelector(selector string) *ResourceBuilderFlags { o.FieldSelector = &selector return o } +func (o *ResourceBuilderFlags) WithAllNamespaces(defaultVal bool) *ResourceBuilderFlags { + o.AllNamespaces = &defaultVal + return o +} + +func (o *ResourceBuilderFlags) WithAll(defaultVal bool) *ResourceBuilderFlags { + o.All = &defaultVal + return o +} + +func (o *ResourceBuilderFlags) WithLocal(defaultVal bool) *ResourceBuilderFlags { + o.Local = &defaultVal + return o +} + +func (o *ResourceBuilderFlags) WithScheme(scheme *runtime.Scheme) *ResourceBuilderFlags { + o.Scheme = scheme + return o +} + +func (o *ResourceBuilderFlags) WithLatest() *ResourceBuilderFlags { + o.Latest = true + return o +} + // AddFlags registers flags for finding resources func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { o.FileNameFlags.AddFlags(flagset) @@ -67,6 +108,12 @@ func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { if o.AllNamespaces != nil { flagset.BoolVar(o.AllNamespaces, "all-namespaces", *o.AllNamespaces, "If present, list the requested object(s) across all namespaces. 
Namespace in current context is ignored even if specified with --namespace.") } + if o.All != nil { + flagset.BoolVar(o.All, "all", *o.All, "Select all resources in the namespace of the specified resource types") + } + if o.Local != nil { + flagset.BoolVar(o.Local, "local", *o.Local, "If true, annotation will NOT contact api-server but run locally.") + } } // ToBuilder gives you back a resource finder to visit resources that are located @@ -74,24 +121,45 @@ func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, reso namespace, enforceNamespace, namespaceErr := restClientGetter.ToRawKubeConfigLoader().Namespace() builder := resource.NewBuilder(restClientGetter). - Unstructured(). - NamespaceParam(namespace).DefaultNamespace(). - ResourceTypeOrNameArgs(o.All, resources...) + NamespaceParam(namespace).DefaultNamespace() + + if o.Scheme != nil { + builder.WithScheme(o.Scheme, o.Scheme.PrioritizedVersionsAllGroups()...) + } else { + builder.Unstructured() + } + if o.FileNameFlags != nil { opts := o.FileNameFlags.ToOptions() - builder = builder.FilenameParam(enforceNamespace, &opts) + builder.FilenameParam(enforceNamespace, &opts) } - if o.LabelSelector != nil { - builder = builder.LabelSelectorParam(*o.LabelSelector) - } - if o.FieldSelector != nil { - builder = builder.FieldSelectorParam(*o.FieldSelector) + + if o.Local == nil || !*o.Local { + // resource type/name tuples only work non-local + if o.All != nil { + builder.ResourceTypeOrNameArgs(*o.All, resources...) + } else { + builder.ResourceTypeOrNameArgs(false, resources...) + } + // label selectors only work non-local (for now) + if o.LabelSelector != nil { + builder.LabelSelectorParam(*o.LabelSelector) + } + // field selectors only work non-local (forever) + if o.FieldSelector != nil { + builder.FieldSelectorParam(*o.FieldSelector) + } + // latest only works non-local (forever) + if o.Latest { + builder.Latest() + } + } else { + builder.Local() } return &ResourceFindBuilderWrapper{ builder: builder. - Latest(). - Flatten(). + Flatten(). 
// I think we're going to recommend this everywhere AddError(namespaceErr), } } diff --git a/pkg/kubectl/genericclioptions/builder_flags_fake.go b/pkg/kubectl/genericclioptions/builder_flags_fake.go index 15137c9e797..de968d8e5d7 100644 --- a/pkg/kubectl/genericclioptions/builder_flags_fake.go +++ b/pkg/kubectl/genericclioptions/builder_flags_fake.go @@ -21,7 +21,7 @@ import ( ) // NewSimpleResourceFinder builds a super simple ResourceFinder that just iterates over the objects you provided -func NewSimpleResourceFinder(infos ...*resource.Info) ResourceFinder { +func NewSimpleFakeResourceFinder(infos ...*resource.Info) ResourceFinder { return &fakeResourceFinder{ Infos: infos, } From b48f23b786f026edb407c27866c667df2809fe4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 29 May 2018 17:51:39 +0300 Subject: [PATCH 247/307] kubeadm: Move .NodeName and .CRISocket to a common sub-struct --- cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go | 18 +++- cmd/kubeadm/app/apis/kubeadm/types.go | 51 +++++---- .../app/apis/kubeadm/v1alpha1/conversion.go | 63 +++++++++++ .../app/apis/kubeadm/v1alpha2/defaults.go | 16 +-- .../app/apis/kubeadm/v1alpha2/types.go | 45 +++++--- .../app/apis/kubeadm/validation/validation.go | 20 +++- cmd/kubeadm/app/cmd/config.go | 2 +- cmd/kubeadm/app/cmd/init.go | 18 ++-- cmd/kubeadm/app/cmd/join.go | 14 ++- cmd/kubeadm/app/cmd/phases/kubeconfig.go | 2 +- cmd/kubeadm/app/cmd/phases/kubelet.go | 5 +- cmd/kubeadm/app/cmd/phases/markmaster.go | 4 +- cmd/kubeadm/app/constants/constants.go | 7 +- .../app/phases/certs/pkiutil/pki_helpers.go | 4 +- .../app/phases/kubeconfig/kubeconfig.go | 2 +- cmd/kubeadm/app/phases/kubelet/config.go | 17 +-- cmd/kubeadm/app/phases/kubelet/dynamic.go | 71 ++++-------- cmd/kubeadm/app/phases/kubelet/flags.go | 23 ++-- .../app/phases/markmaster/markmaster.go | 101 +++--------------- .../app/phases/selfhosting/selfhosting.go | 2 +- cmd/kubeadm/app/phases/upgrade/staticpods.go | 6 +- cmd/kubeadm/app/preflight/checks.go | 2 +- cmd/kubeadm/app/util/apiclient/idempotency.go | 52 +++++++++ cmd/kubeadm/app/util/config/masterconfig.go | 8 +- cmd/kubeadm/app/util/config/nodeconfig.go | 2 +- cmd/kubeadm/test/util.go | 7 +- 26 files changed, 309 insertions(+), 253 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go index 2ede4cb22ba..b640d6e533e 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/fuzzer.go @@ -21,6 +21,7 @@ import ( fuzz "github.com/google/gofuzz" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -43,7 +44,6 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { obj.APIServerCertSANs = []string{"foo"} obj.Token = "foo" - obj.CRISocket = "foo" obj.TokenTTL = &metav1.Duration{Duration: 1 * time.Hour} obj.TokenUsages = []string{"foo"} obj.TokenGroups = []string{"foo"} @@ -59,6 +59,9 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { MountPath: "foo", Writable: false, }} + // Note: We don't set values here for obj.Etcd.External, as these are mutually exlusive. 
+ // And to make sure the fuzzer doesn't set a random value for obj.Etcd.External, we let + // kubeadmapi.Etcd implement fuzz.Interface (we handle that ourselves) obj.Etcd.Local = &kubeadm.LocalEtcd{ Image: "foo", DataDir: "foo", @@ -66,9 +69,11 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { PeerCertSANs: []string{"foo"}, ExtraArgs: map[string]string{"foo": "foo"}, } - // Note: We don't set values here for obj.Etcd.External, as these are mutually exlusive. - // And to make sure the fuzzer doesn't set a random value for obj.Etcd.External, we let - // kubeadmapi.Etcd implement fuzz.Interface (we handle that ourselves) + obj.NodeRegistration = kubeadm.NodeRegistrationOptions{ + CRISocket: "foo", + Name: "foo", + Taints: []v1.Taint{}, + } obj.KubeletConfiguration = kubeadm.KubeletConfiguration{ BaseConfig: &kubeletconfigv1beta1.KubeletConfiguration{ StaticPodPath: "foo", @@ -139,8 +144,11 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} { obj.DiscoveryTimeout = &metav1.Duration{Duration: 1} obj.TLSBootstrapToken = "foo" obj.Token = "foo" - obj.CRISocket = "foo" obj.ClusterName = "foo" + obj.NodeRegistration = kubeadm.NodeRegistrationOptions{ + CRISocket: "foo", + Name: "foo", + } }, } } diff --git a/cmd/kubeadm/app/apis/kubeadm/types.go b/cmd/kubeadm/app/apis/kubeadm/types.go index ea062c62184..f83d63e9bd6 100644 --- a/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/types.go @@ -44,13 +44,9 @@ type MasterConfiguration struct { Networking Networking // KubernetesVersion is the target version of the control plane. KubernetesVersion string - // NodeName is the name of the node that will host the k8s control plane. - // Defaults to the hostname if not provided. - NodeName string - // NoTaintMaster will, if set, suppress the tainting of the - // master node allowing workloads to be run on it (e.g. in - // single node configurations). - NoTaintMaster bool + + // NodeRegistration holds fields that relate to registering the new master node to the cluster + NodeRegistration NodeRegistrationOptions // Token is used for establishing bidirectional trust between nodes and masters. // Used for joining nodes in the cluster. @@ -62,9 +58,6 @@ type MasterConfiguration struct { // Extra groups that this token will authenticate as when used for authentication TokenGroups []string - // CRISocket is used to retrieve container runtime info. - CRISocket string - // APIServerExtraArgs is a set of extra flags to pass to the API Server or override // default ones in form of =. // TODO: This is temporary and ideally we would like to switch all components to @@ -138,6 +131,28 @@ type API struct { BindPort int32 } +// NodeRegistrationOptions holds fields that relate to registering a new master or node to the cluster, either via "kubeadm init" or "kubeadm join" +type NodeRegistrationOptions struct { + + // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm joiń` operation. + // This field is also used in the CommonName field of the kubelet's client certificate to the API server. + // Defaults to the hostname of the node if not provided. + Name string + + // CRISocket is used to retrieve container runtime info. This information will be annotated to the Node API object, for later re-use + CRISocket string + + // Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. 
nil, in the `kubeadm init` process + // it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your master node, set this field to an + // empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. + Taints []v1.Taint + + // ExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + // kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + // Flags have higher higher priority when parsing. These values are local and specific to the node kubeadm is executing on. + ExtraArgs map[string]string +} + // TokenDiscovery contains elements needed for token discovery. type TokenDiscovery struct { // ID is the first part of a bootstrap token. Considered public information. @@ -223,6 +238,9 @@ type ExternalEtcd struct { type NodeConfiguration struct { metav1.TypeMeta + // NodeRegistration holds fields that relate to registering the new master node to the cluster + NodeRegistration NodeRegistrationOptions + // CACertPath is the path to the SSL certificate authority used to // secure comunications between node and master. // Defaults to "/etc/kubernetes/pki/ca.crt". @@ -239,16 +257,11 @@ type NodeConfiguration struct { DiscoveryTokenAPIServers []string // DiscoveryTimeout modifies the discovery timeout DiscoveryTimeout *metav1.Duration - // NodeName is the name of the node to join the cluster. Defaults - // to the name of the host. - NodeName string // TLSBootstrapToken is a token used for TLS bootstrapping. // Defaults to Token. TLSBootstrapToken string // Token is used for both discovery and TLS bootstrapping. Token string - // CRISocket is used to retrieve container runtime info. - CRISocket string // The cluster name ClusterName string @@ -332,13 +345,13 @@ type CommonConfiguration interface { // GetCRISocket will return the CRISocket that is defined for the MasterConfiguration. // This is used internally to deduplicate the kubeadm preflight checks. func (cfg *MasterConfiguration) GetCRISocket() string { - return cfg.CRISocket + return cfg.NodeRegistration.CRISocket } // GetNodeName will return the NodeName that is defined for the MasterConfiguration. // This is used internally to deduplicate the kubeadm preflight checks. func (cfg *MasterConfiguration) GetNodeName() string { - return cfg.NodeName + return cfg.NodeRegistration.Name } // GetKubernetesVersion will return the KubernetesVersion that is defined for the MasterConfiguration. @@ -350,13 +363,13 @@ func (cfg *MasterConfiguration) GetKubernetesVersion() string { // GetCRISocket will return the CRISocket that is defined for the NodeConfiguration. // This is used internally to deduplicate the kubeadm preflight checks. func (cfg *NodeConfiguration) GetCRISocket() string { - return cfg.CRISocket + return cfg.NodeRegistration.CRISocket } // GetNodeName will return the NodeName that is defined for the NodeConfiguration. // This is used internally to deduplicate the kubeadm preflight checks. 
func (cfg *NodeConfiguration) GetNodeName() string { - return cfg.NodeName + return cfg.NodeRegistration.Name } // GetKubernetesVersion will return an empty string since KubernetesVersion is not a diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go index d5492c3333a..54a598af1c9 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/conversion.go @@ -20,15 +20,20 @@ import ( "reflect" "strings" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) func addConversionFuncs(scheme *runtime.Scheme) error { // Add non-generated conversion functions err := scheme.AddConversionFuncs( Convert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration, + Convert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration, + Convert_v1alpha1_NodeConfiguration_To_kubeadm_NodeConfiguration, + Convert_kubeadm_NodeConfiguration_To_v1alpha1_NodeConfiguration, Convert_v1alpha1_Etcd_To_kubeadm_Etcd, Convert_kubeadm_Etcd_To_v1alpha1_Etcd, ) @@ -39,6 +44,8 @@ func addConversionFuncs(scheme *runtime.Scheme) error { return nil } +// Upgrades below + func Convert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in *MasterConfiguration, out *kubeadm.MasterConfiguration, s conversion.Scope) error { if err := autoConvert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in, out, s); err != nil { return err @@ -46,12 +53,26 @@ func Convert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in *Mas UpgradeCloudProvider(in, out) UpgradeAuthorizationModes(in, out) + UpgradeNodeRegistrationOptionsForMaster(in, out) // We don't support migrating information from the .PrivilegedPods field which was removed in v1alpha2 // We don't support migrating information from the .ImagePullPolicy field which was removed in v1alpha2 return nil } +func Convert_v1alpha1_NodeConfiguration_To_kubeadm_NodeConfiguration(in *NodeConfiguration, out *kubeadm.NodeConfiguration, s conversion.Scope) error { + if err := autoConvert_v1alpha1_NodeConfiguration_To_kubeadm_NodeConfiguration(in, out, s); err != nil { + return err + } + + // .NodeName has moved to .NodeRegistration.Name + out.NodeRegistration.Name = in.NodeName + // .CRISocket has moved to .NodeRegistration.CRISocket + out.NodeRegistration.CRISocket = in.CRISocket + + return nil +} + func Convert_v1alpha1_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error { if err := autoConvert_v1alpha1_Etcd_To_kubeadm_Etcd(in, out, s); err != nil { return err @@ -123,3 +144,45 @@ func UpgradeAuthorizationModes(in *MasterConfiguration, out *kubeadm.MasterConfi out.APIServerExtraArgs["authorization-mode"] = strings.Join(in.AuthorizationModes, ",") } } + +func UpgradeNodeRegistrationOptionsForMaster(in *MasterConfiguration, out *kubeadm.MasterConfiguration) { + // .NodeName has moved to .NodeRegistration.Name + out.NodeRegistration.Name = in.NodeName + + // .CRISocket has moved to .NodeRegistration.CRISocket + out.NodeRegistration.CRISocket = in.CRISocket + + // Transfer the information from .NoTaintMaster to the new layout + if in.NoTaintMaster { + out.NodeRegistration.Taints = []v1.Taint{} + } else { + out.NodeRegistration.Taints = []v1.Taint{constants.MasterTaint} + } +} + +// Downgrades below + +func Convert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in 
*kubeadm.MasterConfiguration, out *MasterConfiguration, s conversion.Scope) error { + if err := autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in, out, s); err != nil { + return err + } + + // Converting from newer API version to an older API version isn't supported. This is here only for the roundtrip tests meanwhile. + out.NodeName = in.NodeRegistration.Name + out.CRISocket = in.NodeRegistration.CRISocket + out.NoTaintMaster = in.NodeRegistration.Taints != nil && len(in.NodeRegistration.Taints) == 0 + + return nil +} + +func Convert_kubeadm_NodeConfiguration_To_v1alpha1_NodeConfiguration(in *kubeadm.NodeConfiguration, out *NodeConfiguration, s conversion.Scope) error { + if err := autoConvert_kubeadm_NodeConfiguration_To_v1alpha1_NodeConfiguration(in, out, s); err != nil { + return err + } + + // Converting from newer API version to an older API version isn't supported. This is here only for the roundtrip tests meanwhile. + out.NodeName = in.NodeRegistration.Name + out.CRISocket = in.NodeRegistration.CRISocket + + return nil +} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go index f9c4000198f..d9467e27f87 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go @@ -103,10 +103,6 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { } } - if obj.CRISocket == "" { - obj.CRISocket = DefaultCRISocket - } - if len(obj.TokenUsages) == 0 { obj.TokenUsages = constants.DefaultTokenUsages } @@ -123,6 +119,7 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) { obj.ClusterName = DefaultClusterName } + SetDefaults_NodeRegistrationOptions(&obj.NodeRegistration) SetDefaults_KubeletConfiguration(obj) SetDefaults_Etcd(obj) SetDefaults_ProxyConfiguration(obj) @@ -168,9 +165,6 @@ func SetDefaults_NodeConfiguration(obj *NodeConfiguration) { if len(obj.DiscoveryToken) == 0 && len(obj.DiscoveryFile) == 0 { obj.DiscoveryToken = obj.Token } - if obj.CRISocket == "" { - obj.CRISocket = DefaultCRISocket - } // Make sure file URLs become paths if len(obj.DiscoveryFile) != 0 { u, err := url.Parse(obj.DiscoveryFile) @@ -186,6 +180,8 @@ func SetDefaults_NodeConfiguration(obj *NodeConfiguration) { if obj.ClusterName == "" { obj.ClusterName = DefaultClusterName } + + SetDefaults_NodeRegistrationOptions(&obj.NodeRegistration) } // SetDefaults_KubeletConfiguration assigns default values to kubelet @@ -237,6 +233,12 @@ func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) { } } +func SetDefaults_NodeRegistrationOptions(obj *NodeRegistrationOptions) { + if obj.CRISocket == "" { + obj.CRISocket = DefaultCRISocket + } +} + // SetDefaults_AuditPolicyConfiguration sets default values for the AuditPolicyConfiguration func SetDefaults_AuditPolicyConfiguration(obj *MasterConfiguration) { if obj.AuditPolicyConfiguration.LogDir == "" { diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go index dd48f2b9277..f1ec6f51079 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go @@ -40,15 +40,12 @@ type MasterConfiguration struct { KubeletConfiguration KubeletConfiguration `json:"kubeletConfiguration"` // Networking holds configuration for the networking topology of the cluster. 
Networking Networking `json:"networking"` + + // NodeRegistration holds fields that relate to registering the new master node to the cluster + NodeRegistration NodeRegistrationOptions `json:"nodeRegistration"` + // KubernetesVersion is the target version of the control plane. KubernetesVersion string `json:"kubernetesVersion"` - // NodeName is the name of the node that will host the k8s control plane. - // Defaults to the hostname if not provided. - NodeName string `json:"nodeName"` - // NoTaintMaster will, if set, suppress the tainting of the - // master node allowing workloads to be run on it (e.g. in - // single node configurations). - NoTaintMaster bool `json:"noTaintMaster,omitempty"` // Token is used for establishing bidirectional trust between nodes and masters. // Used for joining nodes in the cluster. @@ -60,9 +57,6 @@ type MasterConfiguration struct { // Extra groups that this token will authenticate as when used for authentication TokenGroups []string `json:"tokenGroups,omitempty"` - // CRISocket is used to retrieve container runtime info. - CRISocket string `json:"criSocket,omitempty"` - // APIServerExtraArgs is a set of extra flags to pass to the API Server or override // default ones in form of =. // TODO: This is temporary and ideally we would like to switch all components to @@ -129,6 +123,28 @@ type API struct { BindPort int32 `json:"bindPort"` } +// NodeRegistrationOptions holds fields that relate to registering a new master or node to the cluster, either via "kubeadm init" or "kubeadm join" +type NodeRegistrationOptions struct { + + // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation. + // This field is also used in the CommonName field of the kubelet's client certificate to the API server. + // Defaults to the hostname of the node if not provided. + Name string `json:"name"` + + // CRISocket is used to retrieve container runtime info. This information will be annotated to the Node API object, for later re-use + CRISocket string `json:"criSocket"` + + // Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process + // it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your master node, set this field to an + // empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration. + Taints []v1.Taint `json:"taints,omitempty"` + + // ExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file + // kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap + // Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. + ExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"` +} + // TokenDiscovery contains elements needed for token discovery. type TokenDiscovery struct { // ID is the first part of a bootstrap token. Considered public information.
@@ -206,6 +222,9 @@ type ExternalEtcd struct { type NodeConfiguration struct { metav1.TypeMeta `json:",inline"` + // NodeRegistration holds fields that relate to registering the new node to the cluster + NodeRegistration NodeRegistrationOptions `json:"nodeRegistration"` + // CACertPath is the path to the SSL certificate authority used to // secure communications between node and master. // Defaults to "/etc/kubernetes/pki/ca.crt". @@ -222,16 +241,12 @@ type NodeConfiguration struct { DiscoveryTokenAPIServers []string `json:"discoveryTokenAPIServers,omitempty"` // DiscoveryTimeout modifies the discovery timeout DiscoveryTimeout *metav1.Duration `json:"discoveryTimeout,omitempty"` - // NodeName is the name of the node to join the cluster. Defaults - // to the name of the host. - NodeName string `json:"nodeName"` // TLSBootstrapToken is a token used for TLS bootstrapping. // Defaults to Token. TLSBootstrapToken string `json:"tlsBootstrapToken"` // Token is used for both discovery and TLS bootstrapping. Token string `json:"token"` - // CRISocket is used to retrieve container runtime info. - CRISocket string `json:"criSocket,omitempty"` + // ClusterName is the name for the cluster in kubeconfig. ClusterName string `json:"clusterName,omitempty"` diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index 116bfce6c8d..f51f539ac8f 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -54,7 +54,7 @@ func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList allErrs = append(allErrs, ValidateNetworking(&c.Networking, field.NewPath("networking"))...) allErrs = append(allErrs, ValidateCertSANs(c.APIServerCertSANs, field.NewPath("apiServerCertSANs"))...) allErrs = append(allErrs, ValidateAbsolutePath(c.CertificatesDir, field.NewPath("certificatesDir"))...) - allErrs = append(allErrs, ValidateNodeName(c.NodeName, field.NewPath("nodeName"))...) + allErrs = append(allErrs, ValidateNodeRegistrationOptions(&c.NodeRegistration, field.NewPath("nodeRegistration"))...) allErrs = append(allErrs, ValidateToken(c.Token, field.NewPath("token"))...) allErrs = append(allErrs, ValidateTokenUsages(c.TokenUsages, field.NewPath("tokenUsages"))...) allErrs = append(allErrs, ValidateTokenGroups(c.TokenUsages, c.TokenGroups, field.NewPath("tokenGroups"))...) @@ -62,9 +62,7 @@ func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList allErrs = append(allErrs, ValidateAPIEndpoint(&c.API, field.NewPath("api"))...) allErrs = append(allErrs, ValidateProxy(c.KubeProxy.Config, field.NewPath("kubeProxy").Child("config"))...) allErrs = append(allErrs, ValidateEtcd(&c.Etcd, field.NewPath("etcd"))...) - if features.Enabled(c.FeatureGates, features.DynamicKubeletConfig) { - allErrs = append(allErrs, ValidateKubeletConfiguration(&c.KubeletConfiguration, field.NewPath("kubeletConfiguration"))...) - } + allErrs = append(allErrs, ValidateKubeletConfiguration(&c.KubeletConfiguration, field.NewPath("kubeletConfiguration"))...) return allErrs } @@ -86,6 +84,7 @@ func ValidateProxy(c *kubeproxyconfigv1alpha1.KubeProxyConfiguration, fldPath *f func ValidateNodeConfiguration(c *kubeadm.NodeConfiguration) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateDiscovery(c)...) + allErrs = append(allErrs, ValidateNodeRegistrationOptions(&c.NodeRegistration, field.NewPath("nodeRegistration"))...)
if !filepath.IsAbs(c.CACertPath) || !strings.HasSuffix(c.CACertPath, ".crt") { allErrs = append(allErrs, field.Invalid(field.NewPath("caCertPath"), c.CACertPath, "the ca certificate path must be an absolute path")) @@ -93,6 +92,15 @@ func ValidateNodeConfiguration(c *kubeadm.NodeConfiguration) field.ErrorList { return allErrs } +// ValidateNodeRegistrationOptions validates the NodeRegistrationOptions object +func ValidateNodeRegistrationOptions(nro *kubeadm.NodeRegistrationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNodeName(nro.Name, fldPath.Child("name"))...) + allErrs = append(allErrs, ValidateAbsolutePath(nro.CRISocket, fldPath.Child("criSocket"))...) + // TODO: Maybe validate .Taints as well in the future using something like validateNodeTaints() in pkg/apis/core/validation + return allErrs +} + // ValidateDiscovery validates discovery related configuration and collects all encountered errors func ValidateDiscovery(c *kubeadm.NodeConfiguration) field.ErrorList { allErrs := field.ErrorList{} @@ -422,6 +430,10 @@ func ValidateIgnorePreflightErrors(ignorePreflightErrors []string, skipPreflight func ValidateKubeletConfiguration(c *kubeadm.KubeletConfiguration, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} + if c.BaseConfig == nil { + return allErrs + } + scheme, _, err := kubeletscheme.NewSchemeAndCodecs() if err != nil { allErrs = append(allErrs, field.Invalid(fldPath, "kubeletConfiguration", err.Error())) diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index 62f89e92788..8b45f8407fe 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -401,5 +401,5 @@ func AddImagesCommonConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.M // AddImagesPullFlags adds flags related to the `kubeadm config images pull` command func AddImagesPullFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.MasterConfiguration) { - flagSet.StringVar(&cfg.CRISocket, "cri-socket-path", cfg.CRISocket, "Path to the CRI socket.") + flagSet.StringVar(&cfg.NodeRegistration.CRISocket, "cri-socket-path", cfg.NodeRegistration.CRISocket, "Path to the CRI socket.") } diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 50d3beaa62a..68df174b7b9 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -189,7 +189,7 @@ func AddInitConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.MasterCon `Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.`, ) flagSet.StringVar( - &cfg.NodeName, "node-name", cfg.NodeName, + &cfg.NodeRegistration.Name, "node-name", cfg.NodeRegistration.Name, `Specify the node name.`, ) flagSet.StringVar( @@ -201,7 +201,7 @@ func AddInitConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.MasterCon "The duration before the bootstrap token is automatically deleted. If set to '0', the token will never expire.", ) flagSet.StringVar( - &cfg.CRISocket, "cri-socket", cfg.CRISocket, + &cfg.NodeRegistration.CRISocket, "cri-socket", cfg.NodeRegistration.CRISocket, `Specify the CRI socket to connect to.`, ) flagSet.StringVar(featureGatesString, "feature-gates", *featureGatesString, "A set of key=value pairs that describe feature gates for various features. "+ @@ -276,8 +276,10 @@ type Init struct { // Run executes master node provisioning, including certificates, needed static pod manifests, etc. 
func (i *Init) Run(out io.Writer) error { - // Write env file with flags for the kubelet to use - if err := kubeletphase.WriteKubeletDynamicEnvFile(i.cfg); err != nil { + // Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the master, + // as we handle that ourselves in the markmaster phase + // TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase? + if err := kubeletphase.WriteKubeletDynamicEnvFile(&i.cfg.NodeRegistration, false); err != nil { return err } @@ -362,7 +364,7 @@ func (i *Init) Run(out io.Writer) error { } // Write the kubelet configuration to disk. - if err := kubeletphase.WriteConfigToDisk(i.cfg.KubeletConfiguration.BaseConfig, kubeletVersion); err != nil { + if err := kubeletphase.WriteConfigToDisk(i.cfg.KubeletConfiguration.BaseConfig); err != nil { return fmt.Errorf("error writing kubelet configuration to disk: %v", err) } @@ -411,7 +413,7 @@ func (i *Init) Run(out io.Writer) error { // PHASE 4: Mark the master with the right label/taint glog.V(1).Infof("[init] marking the master with right label") - if err := markmasterphase.MarkMaster(client, i.cfg.NodeName, !i.cfg.NoTaintMaster); err != nil { + if err := markmasterphase.MarkMaster(client, i.cfg.NodeRegistration.Name, i.cfg.NodeRegistration.Taints); err != nil { return fmt.Errorf("error marking master: %v", err) } @@ -419,7 +421,7 @@ func (i *Init) Run(out io.Writer) error { // This feature is disabled by default, as it is alpha still if features.Enabled(i.cfg.FeatureGates, features.DynamicKubeletConfig) { // Enable dynamic kubelet configuration for the node. - if err := kubeletphase.EnableDynamicConfigForNode(client, i.cfg.NodeName, kubeletVersion); err != nil { + if err := kubeletphase.EnableDynamicConfigForNode(client, i.cfg.NodeRegistration.Name, kubeletVersion); err != nil { return fmt.Errorf("error enabling dynamic kubelet configuration: %v", err) } } @@ -507,7 +509,7 @@ func (i *Init) Run(out io.Writer) error { func createClient(cfg *kubeadmapi.MasterConfiguration, dryRun bool) (clientset.Interface, error) { if dryRun { // If we're dry-running; we should create a faked client that answers some GETs in order to be able to do the full init flow and just logs the rest of requests - dryRunGetter := apiclient.NewInitDryRunGetter(cfg.NodeName, cfg.Networking.ServiceSubnet) + dryRunGetter := apiclient.NewInitDryRunGetter(cfg.NodeRegistration.Name, cfg.Networking.ServiceSubnet) return apiclient.NewDryRunClient(dryRunGetter, os.Stdout), nil } diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index 77fcb6697ca..0a3239fe4e0 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -155,7 +155,7 @@ func AddJoinConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.NodeConfi &cfg.DiscoveryToken, "discovery-token", "", "A token used to validate cluster information fetched from the master.") flagSet.StringVar( - &cfg.NodeName, "node-name", "", + &cfg.NodeRegistration.Name, "node-name", cfg.NodeRegistration.Name, "Specify the node name.") flagSet.StringVar( &cfg.TLSBootstrapToken, "tls-bootstrap-token", "", @@ -174,7 +174,7 @@ func AddJoinConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1alpha2.NodeConfi "A set of key=value pairs that describe feature gates for various features. 
"+ "Options are:\n"+strings.Join(features.KnownFeatures(&features.InitFeatureGates), "\n")) flagSet.StringVar( - &cfg.CRISocket, "cri-socket", cfg.CRISocket, + &cfg.NodeRegistration.CRISocket, "cri-socket", cfg.NodeRegistration.CRISocket, `Specify the CRI socket to connect to.`, ) } @@ -204,7 +204,7 @@ type Join struct { // NewJoin instantiates Join struct with given arguments func NewJoin(cfgPath string, args []string, defaultcfg *kubeadmapiv1alpha2.NodeConfiguration, ignorePreflightErrors sets.String) (*Join, error) { - if defaultcfg.NodeName == "" { + if defaultcfg.NodeRegistration.Name == "" { glog.V(1).Infoln("[join] found NodeName empty") glog.V(1).Infoln("[join] considered OS hostname as NodeName") } @@ -231,6 +231,12 @@ func NewJoin(cfgPath string, args []string, defaultcfg *kubeadmapiv1alpha2.NodeC // Run executes worker node provisioning and tries to join an existing cluster. func (j *Join) Run(out io.Writer) error { + + // Write env file with flags for the kubelet to use. Also register taints + if err := kubeletphase.WriteKubeletDynamicEnvFile(&j.cfg.NodeRegistration, true); err != nil { + return err + } + glog.V(1).Infoln("[join] retrieving KubeConfig objects") cfg, err := discovery.For(j.cfg) if err != nil { @@ -273,7 +279,7 @@ func (j *Join) Run(out io.Writer) error { return err } - if err := kubeletphase.EnableDynamicConfigForNode(client, j.cfg.NodeName, kubeletVersion); err != nil { + if err := kubeletphase.EnableDynamicConfigForNode(client, j.cfg.NodeRegistration.Name, kubeletVersion); err != nil { return fmt.Errorf("error consuming base kubelet configuration: %v", err) } } diff --git a/cmd/kubeadm/app/cmd/phases/kubeconfig.go b/cmd/kubeadm/app/cmd/phases/kubeconfig.go index 3f3e185299d..7a0dcb0ce5b 100644 --- a/cmd/kubeadm/app/cmd/phases/kubeconfig.go +++ b/cmd/kubeadm/app/cmd/phases/kubeconfig.go @@ -184,7 +184,7 @@ func getKubeConfigSubCommands(out io.Writer, outDir, defaultKubernetesVersion st cmd.Flags().Int32Var(&cfg.API.BindPort, "apiserver-bind-port", cfg.API.BindPort, "The port the API server is accessible on") cmd.Flags().StringVar(&outDir, "kubeconfig-dir", outDir, "The path where to save the kubeconfig file") if properties.use == "all" || properties.use == "kubelet" { - cmd.Flags().StringVar(&cfg.NodeName, "node-name", cfg.NodeName, `The node name that should be used for the kubelet client certificate`) + cmd.Flags().StringVar(&cfg.NodeRegistration.Name, "node-name", cfg.NodeRegistration.Name, `The node name that should be used for the kubelet client certificate`) } if properties.use == "user" { cmd.Flags().StringVar(&token, "token", token, "The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates") diff --git a/cmd/kubeadm/app/cmd/phases/kubelet.go b/cmd/kubeadm/app/cmd/phases/kubelet.go index c07257d9b7f..bd979d60492 100644 --- a/cmd/kubeadm/app/cmd/phases/kubelet.go +++ b/cmd/kubeadm/app/cmd/phases/kubelet.go @@ -134,9 +134,6 @@ func NewCmdKubeletWriteConfigToDisk(kubeConfigFile *string) *cobra.Command { kubeadmutil.CheckErr(fmt.Errorf("The --kubelet-version argument is required")) } - kubeletVersion, err := version.ParseSemantic(kubeletVersionStr) - kubeadmutil.CheckErr(err) - client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile) kubeadmutil.CheckErr(err) @@ -144,7 +141,7 @@ func NewCmdKubeletWriteConfigToDisk(kubeConfigFile *string) *cobra.Command { internalcfg, err := configutil.FetchConfigFromFileOrCluster(client, os.Stdout, "kubelet", cfgPath) kubeadmutil.CheckErr(err) - err = 
kubeletphase.WriteConfigToDisk(internalcfg.KubeletConfiguration.BaseConfig, kubeletVersion) + err = kubeletphase.WriteConfigToDisk(internalcfg.KubeletConfiguration.BaseConfig) kubeadmutil.CheckErr(err) }, } diff --git a/cmd/kubeadm/app/cmd/phases/markmaster.go b/cmd/kubeadm/app/cmd/phases/markmaster.go index 5a2f11ddbb5..13ec0a314ce 100644 --- a/cmd/kubeadm/app/cmd/phases/markmaster.go +++ b/cmd/kubeadm/app/cmd/phases/markmaster.go @@ -75,14 +75,14 @@ func NewCmdMarkMaster() *cobra.Command { client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile) kubeadmutil.CheckErr(err) - err = markmasterphase.MarkMaster(client, internalcfg.NodeName, !internalcfg.NoTaintMaster) + err = markmasterphase.MarkMaster(client, internalcfg.NodeRegistration.Name, internalcfg.NodeRegistration.Taints) kubeadmutil.CheckErr(err) }, } cmd.Flags().StringVar(&kubeConfigFile, "kubeconfig", "/etc/kubernetes/admin.conf", "The KubeConfig file to use when talking to the cluster") cmd.Flags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file. WARNING: Usage of a configuration file is experimental") - cmd.Flags().StringVar(&cfg.NodeName, "node-name", cfg.NodeName, `The node name to which label and taints should apply`) + cmd.Flags().StringVar(&cfg.NodeRegistration.Name, "node-name", cfg.NodeRegistration.Name, `The node name to which label and taints should apply`) return cmd } diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index afd5ce5ba5c..3a6f4233f4f 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -165,8 +165,8 @@ const ( APICallRetryInterval = 500 * time.Millisecond // DiscoveryRetryInterval specifies how long kubeadm should wait before retrying to connect to the master when doing discovery DiscoveryRetryInterval = 5 * time.Second - // MarkMasterTimeout specifies how long kubeadm should wait for applying the label and taint on the master before timing out - MarkMasterTimeout = 2 * time.Minute + // PatchNodeTimeout specifies how long kubeadm should wait for applying the label and taint on the master before timing out + PatchNodeTimeout = 2 * time.Minute // UpdateNodeTimeout specifies how long kubeadm should wait for updating node with the initial remote configuration of kubelet before timing out UpdateNodeTimeout = 2 * time.Minute @@ -295,9 +295,6 @@ var ( // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports MinimumKubeletVersion = version.MustParseSemantic("v1.10.0") - // MinimumKubeletConfigVersion specifies the minimum version of Kubernetes where kubeadm supports specifying --config to the kubelet - MinimumKubeletConfigVersion = version.MustParseSemantic("v1.11.0-alpha.1") - // SupportedEtcdVersion lists officially supported etcd versions with corresponding kubernetes releases SupportedEtcdVersion = map[uint8]string{ 10: "3.1.12", diff --git a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go index 39ca139cba7..337656afd73 100644 --- a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go +++ b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go @@ -275,7 +275,7 @@ func GetAPIServerAltNames(cfg *kubeadmapi.MasterConfiguration) (*certutil.AltNam // create AltNames with defaults DNSNames/IPs altNames := &certutil.AltNames{ DNSNames: []string{ - cfg.NodeName, + cfg.NodeRegistration.Name, "kubernetes", "kubernetes.default", "kubernetes.default.svc", @@ -336,7 +336,7 @@ func GetEtcdPeerAltNames(cfg 
*kubeadmapi.MasterConfiguration) (*certutil.AltName // create AltNames with defaults DNSNames/IPs altNames := &certutil.AltNames{ - DNSNames: []string{cfg.NodeName}, + DNSNames: []string{cfg.NodeRegistration.Name}, IPs: []net.IP{advertiseAddress}, } diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go index 02194885a97..6a21c8f772d 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go @@ -160,7 +160,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.MasterConfiguration) (map[string]*kubeCo kubeadmconstants.KubeletKubeConfigFileName: { CACert: caCert, APIServer: masterEndpoint, - ClientName: fmt.Sprintf("system:node:%s", cfg.NodeName), + ClientName: fmt.Sprintf("system:node:%s", cfg.NodeRegistration.Name), ClientCertAuth: &clientCertAuth{ CAKey: caKey, Organizations: []string{kubeadmconstants.NodesGroup}, diff --git a/cmd/kubeadm/app/phases/kubelet/config.go b/cmd/kubeadm/app/phases/kubelet/config.go index 5503a16a7fc..e515d649860 100644 --- a/cmd/kubeadm/app/phases/kubelet/config.go +++ b/cmd/kubeadm/app/phases/kubelet/config.go @@ -37,12 +37,7 @@ import ( // WriteConfigToDisk writes the kubelet config object down to a file // Used at "kubeadm init" and "kubeadm upgrade" time -func WriteConfigToDisk(kubeletConfig *kubeletconfigv1beta1.KubeletConfiguration, kubeletVersion *version.Version) error { - - // If the kubelet version is v1.10.x, exit - if kubeletVersion.LessThan(kubeadmconstants.MinimumKubeletConfigVersion) { - return nil - } +func WriteConfigToDisk(kubeletConfig *kubeletconfigv1beta1.KubeletConfiguration) error { kubeletBytes, err := getConfigBytes(kubeletConfig) if err != nil { @@ -60,11 +55,6 @@ func CreateConfigMap(cfg *kubeadmapi.MasterConfiguration, client clientset.Inter return err } - // If Kubernetes version is v1.10.x, exit - if k8sVersion.LessThan(kubeadmconstants.MinimumKubeletConfigVersion) { - return nil - } - configMapName := configMapName(k8sVersion) fmt.Printf("[kubelet] Creating a ConfigMap %q in namespace %s with the configuration for the kubelets in the cluster\n", configMapName, metav1.NamespaceSystem) @@ -132,11 +122,6 @@ func createConfigMapRBACRules(client clientset.Interface, k8sVersion *version.Ve // Used at "kubeadm join" time func DownloadConfig(kubeletKubeConfig string, kubeletVersion *version.Version) error { - // If the kubelet version is v1.10.x, exit - if kubeletVersion.LessThan(kubeadmconstants.MinimumKubeletConfigVersion) { - return nil - } - // Download the ConfigMap from the cluster based on what version the kubelet is configMapName := configMapName(kubeletVersion) diff --git a/cmd/kubeadm/app/phases/kubelet/dynamic.go b/cmd/kubeadm/app/phases/kubelet/dynamic.go index b65d7460370..2f49209182a 100644 --- a/cmd/kubeadm/app/phases/kubelet/dynamic.go +++ b/cmd/kubeadm/app/phases/kubelet/dynamic.go @@ -17,19 +17,17 @@ limitations under the License. 
package kubelet import ( - "encoding/json" "fmt" "os" "path/filepath" "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" "k8s.io/kubernetes/pkg/util/version" ) @@ -39,64 +37,33 @@ import ( // This func is ONLY run if the user enables the `DynamicKubeletConfig` feature gate, which is by default off func EnableDynamicConfigForNode(client clientset.Interface, nodeName string, kubeletVersion *version.Version) error { - // If the kubelet version is v1.10.x, exit - if kubeletVersion.LessThan(kubeadmconstants.MinimumKubeletConfigVersion) { - return nil - } - configMapName := configMapName(kubeletVersion) fmt.Printf("[kubelet] Enabling Dynamic Kubelet Config for Node %q; config sourced from ConfigMap %q in namespace %s\n", nodeName, configMapName, metav1.NamespaceSystem) fmt.Println("[kubelet] WARNING: The Dynamic Kubelet Config feature is alpha and off by default. It hasn't been well-tested yet at this stage, use with caution.") + kubeletConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("couldn't get the kubelet configuration ConfigMap: %v", err) + } + // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned. - return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.UpdateNodeTimeout, func() (bool, error) { - node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - if err != nil { - return false, nil - } - - oldData, err := json.Marshal(node) - if err != nil { - return false, err - } - - kubeletCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(configMapName, metav1.GetOptions{}) - if err != nil { - return false, nil - } - - node.Spec.ConfigSource = &v1.NodeConfigSource{ - ConfigMap: &v1.ConfigMapNodeConfigSource{ - Name: configMapName, - Namespace: metav1.NamespaceSystem, - UID: kubeletCfg.UID, - KubeletConfigKey: kubeadmconstants.KubeletBaseConfigurationConfigMapKey, - }, - } - - newData, err := json.Marshal(node) - if err != nil { - return false, err - } - - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) - if err != nil { - return false, err - } - - if _, err := client.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes); err != nil { - if apierrs.IsConflict(err) { - fmt.Println("Temporarily unable to update node metadata due to conflict (will retry)") - return false, nil - } - return false, err - } - - return true, nil + return apiclient.PatchNode(client, nodeName, func(n *v1.Node) { + patchNodeForDynamicConfig(n, configMapName, kubeletConfigMap.UID) }) } +func patchNodeForDynamicConfig(n *v1.Node, configMapName string, configMapUID types.UID) { + n.Spec.ConfigSource = &v1.NodeConfigSource{ + ConfigMap: &v1.ConfigMapNodeConfigSource{ + Name: configMapName, + Namespace: metav1.NamespaceSystem, + UID: configMapUID, + KubeletConfigKey: kubeadmconstants.KubeletBaseConfigurationConfigMapKey, + }, + } +} + // GetLocalNodeTLSBootstrappedClient waits for the kubelet to perform the TLS bootstrap // and then creates a client from config file 
/etc/kubernetes/kubelet.conf func GetLocalNodeTLSBootstrappedClient() (clientset.Interface, error) { diff --git a/cmd/kubeadm/app/phases/kubelet/flags.go b/cmd/kubeadm/app/phases/kubelet/flags.go index 167966793aa..926c4b5022e 100644 --- a/cmd/kubeadm/app/phases/kubelet/flags.go +++ b/cmd/kubeadm/app/phases/kubelet/flags.go @@ -31,10 +31,9 @@ import ( // WriteKubeletDynamicEnvFile writes a environment file with dynamic flags to the kubelet. // Used at "kubeadm init" and "kubeadm join" time. -func WriteKubeletDynamicEnvFile(cfg *kubeadmapi.MasterConfiguration) error { +func WriteKubeletDynamicEnvFile(nodeRegOpts *kubeadmapi.NodeRegistrationOptions, registerTaintsUsingFlags bool) error { - // TODO: Pass through extra arguments from the config file here in the future - argList := kubeadmutil.BuildArgumentListFromMap(buildKubeletArgMap(cfg), map[string]string{}) + argList := kubeadmutil.BuildArgumentListFromMap(buildKubeletArgMap(nodeRegOpts, registerTaintsUsingFlags), nodeRegOpts.ExtraArgs) envFileContent := fmt.Sprintf("%s=%s\n", constants.KubeletEnvFileVariableName, strings.Join(argList, " ")) return writeKubeletFlagBytesToDisk([]byte(envFileContent)) @@ -42,20 +41,28 @@ func WriteKubeletDynamicEnvFile(cfg *kubeadmapi.MasterConfiguration) error { // buildKubeletArgMap takes a MasterConfiguration object and builds based on that a string-string map with flags // that should be given to the local kubelet daemon. -func buildKubeletArgMap(cfg *kubeadmapi.MasterConfiguration) map[string]string { +func buildKubeletArgMap(nodeRegOpts *kubeadmapi.NodeRegistrationOptions, registerTaintsUsingFlags bool) map[string]string { kubeletFlags := map[string]string{} - if cfg.CRISocket == kubeadmapiv1alpha2.DefaultCRISocket { + if nodeRegOpts.CRISocket == kubeadmapiv1alpha2.DefaultCRISocket { // These flags should only be set when running docker kubeletFlags["network-plugin"] = "cni" kubeletFlags["cni-conf-dir"] = "/etc/cni/net.d" kubeletFlags["cni-bin-dir"] = "/opt/cni/bin" } else { kubeletFlags["container-runtime"] = "remote" - kubeletFlags["container-runtime-endpoint"] = cfg.CRISocket + kubeletFlags["container-runtime-endpoint"] = nodeRegOpts.CRISocket } - // TODO: Add support for registering custom Taints and Labels - // TODO: Add support for overriding flags with ExtraArgs + + if registerTaintsUsingFlags && nodeRegOpts.Taints != nil && len(nodeRegOpts.Taints) > 0 { + taintStrs := []string{} + for _, taint := range nodeRegOpts.Taints { + taintStrs = append(taintStrs, taint.ToString()) + } + + kubeletFlags["register-with-taints"] = strings.Join(taintStrs, ",") + } + // TODO: Pass through --hostname-override if a custom name is used? // TODO: Check if `systemd-resolved` is running, and set `--resolv-conf` based on that // TODO: Conditionally set `--cgroup-driver` to either `systemd` or `cgroupfs` diff --git a/cmd/kubeadm/app/phases/markmaster/markmaster.go b/cmd/kubeadm/app/phases/markmaster/markmaster.go index 9f8f52237d9..39ee9174da5 100644 --- a/cmd/kubeadm/app/phases/markmaster/markmaster.go +++ b/cmd/kubeadm/app/phases/markmaster/markmaster.go @@ -17,107 +17,30 @@ limitations under the License. 
package markmaster import ( - "encoding/json" - "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" + "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" ) // MarkMaster taints the master and sets the master label -func MarkMaster(client clientset.Interface, masterName string, taint bool) error { +func MarkMaster(client clientset.Interface, masterName string, taints []v1.Taint) error { - if taint { - glog.Infof("[markmaster] will mark node %s as master by adding a label and a taint\n", masterName) - } else { - glog.Infof("[markmaster] will mark node %s as master by adding a label\n", masterName) + glog.Infof("[markmaster] Marking the node %s as master by adding the label \"%s=''\"\n", masterName, constants.LabelNodeRoleMaster) + + if taints != nil && len(taints) > 0 { + glog.Infof("[markmaster] Marking the node %s as master by adding the taints %v\n", masterName, taints) } - // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned. - return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.MarkMasterTimeout, func() (bool, error) { - // First get the node object - n, err := client.CoreV1().Nodes().Get(masterName, metav1.GetOptions{}) - if err != nil { - return false, nil - } - - // The node may appear to have no labels at first, - // so we wait for it to get hostname label. - if _, found := n.ObjectMeta.Labels[kubeletapis.LabelHostname]; !found { - return false, nil - } - - oldData, err := json.Marshal(n) - if err != nil { - return false, err - } - - // The master node should be tainted and labelled accordingly - markMasterNode(n, taint) - - newData, err := json.Marshal(n) - if err != nil { - return false, err - } - - patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) - if err != nil { - return false, err - } - - if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil { - if apierrs.IsConflict(err) { - fmt.Println("[markmaster] Temporarily unable to update master node metadata due to conflict (will retry)") - return false, nil - } - return false, err - } - - if taint { - fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "") - } else { - fmt.Printf("[markmaster] Master %s labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "") - } - - return true, nil + return apiclient.PatchNode(client, masterName, func(n *v1.Node) { + markMasterNode(n, taints) }) } -func markMasterNode(n *v1.Node, taint bool) { - n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = "" - if taint { - addTaintIfNotExists(n, kubeadmconstants.MasterTaint) - } else { - delTaintIfExists(n, kubeadmconstants.MasterTaint) - } -} - -func addTaintIfNotExists(n *v1.Node, t v1.Taint) { - for _, taint := range n.Spec.Taints { - if taint == t { - return - } - } - - n.Spec.Taints = append(n.Spec.Taints, t) -} - -func delTaintIfExists(n *v1.Node, t v1.Taint) { - var taints []v1.Taint - for _, taint := range n.Spec.Taints { - if taint == t { - continue - } - taints = 
append(taints, t) - } +func markMasterNode(n *v1.Node, taints []v1.Taint) { + n.ObjectMeta.Labels[constants.LabelNodeRoleMaster] = "" + // TODO: Append taints, don't override? n.Spec.Taints = taints } diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go index 3d20c959dd8..5bd90886e4e 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go @@ -118,7 +118,7 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea // Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to // remove the Static Pod (or the mirror Pod respectively). This implicitly also tests that the API server endpoint is healthy, // because this blocks until the API server returns a 404 Not Found when getting the Static Pod - staticPodName := fmt.Sprintf("%s-%s", componentName, cfg.NodeName) + staticPodName := fmt.Sprintf("%s-%s", componentName, cfg.NodeRegistration.Name) if err := waiter.WaitForPodToDisappear(staticPodName); err != nil { return err } diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index a041c9669cf..77056dcadda 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -206,7 +206,7 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP // notice the removal of the Static Pod, leading to a false positive below where we check that the API endpoint is healthy // If we don't do this, there is a case where we remove the Static Pod manifest, kubelet is slow to react, kubeadm checks the // API endpoint below of the OLD Static Pod component and proceeds quickly enough, which might lead to unexpected results. 
- if err := waiter.WaitForStaticPodHashChange(cfg.NodeName, component, beforePodHash); err != nil { + if err := waiter.WaitForStaticPodHashChange(cfg.NodeRegistration.Name, component, beforePodHash); err != nil { return rollbackOldManifests(recoverManifests, err, pathMgr, recoverEtcd) } @@ -266,7 +266,7 @@ func performEtcdStaticPodUpgrade(waiter apiclient.Waiter, pathMgr StaticPodPathM return false, nil } - beforeEtcdPodHash, err := waiter.WaitForStaticPodSingleHash(cfg.NodeName, constants.Etcd) + beforeEtcdPodHash, err := waiter.WaitForStaticPodSingleHash(cfg.NodeRegistration.Name, constants.Etcd) if err != nil { return true, fmt.Errorf("failed to get etcd pod's hash: %v", err) } @@ -376,7 +376,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager var isTLSUpgrade bool var isExternalEtcd bool - beforePodHashMap, err := waiter.WaitForStaticPodControlPlaneHashes(cfg.NodeName) + beforePodHashMap, err := waiter.WaitForStaticPodControlPlaneHashes(cfg.NodeRegistration.Name) if err != nil { return err } diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 922ca00825d..6f0cdf4c876 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -906,7 +906,7 @@ func RunInitMasterChecks(execer utilsexec.Interface, cfg *kubeadmapi.MasterConfi checks = addCommonChecks(execer, cfg, checks) // Check ipvs required kernel module once we use ipvs kube-proxy mode - if cfg.KubeProxy.Config.Mode == ipvsutil.IPVSProxyMode { + if cfg.KubeProxy.Config != nil && cfg.KubeProxy.Config.Mode == ipvsutil.IPVSProxyMode { checks = append(checks, ipvsutil.RequiredIPVSKernelModulesAvailableCheck{Executor: execer}, ) diff --git a/cmd/kubeadm/app/util/apiclient/idempotency.go b/cmd/kubeadm/app/util/apiclient/idempotency.go index ffe42df077a..bc36f65ebdd 100644 --- a/cmd/kubeadm/app/util/apiclient/idempotency.go +++ b/cmd/kubeadm/app/util/apiclient/idempotency.go @@ -17,6 +17,7 @@ limitations under the License. package apiclient import ( + "encoding/json" "fmt" apps "k8s.io/api/apps/v1" @@ -24,7 +25,12 @@ import ( rbac "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) // TODO: We should invent a dynamic mechanism for this using the dynamic client instead of hard-coding these functions per-type @@ -186,3 +192,49 @@ func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterRoleBin } return nil } + +// PatchNode tries to patch a node using the following client, executing patchFn for the actual mutating logic +func PatchNode(client clientset.Interface, nodeName string, patchFn func(*v1.Node)) error { + // Loop on every false return. Return with an error if raised. Exit successfully if true is returned. + return wait.Poll(constants.APICallRetryInterval, constants.PatchNodeTimeout, func() (bool, error) { + // First get the node object + n, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return false, nil + } + + // The node may appear to have no labels at first, + // so we wait for it to get hostname label. 
+ if _, found := n.ObjectMeta.Labels[kubeletapis.LabelHostname]; !found { + return false, nil + } + + oldData, err := json.Marshal(n) + if err != nil { + return false, err + } + + // Execute the mutating function + patchFn(n) + + newData, err := json.Marshal(n) + if err != nil { + return false, err + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return false, err + } + + if _, err := client.CoreV1().Nodes().Patch(n.Name, types.StrategicMergePatchType, patchBytes); err != nil { + if apierrors.IsConflict(err) { + fmt.Println("[patchnode] Temporarily unable to update node metadata due to conflict (will retry)") + return false, nil + } + return false, err + } + + return true, nil + }) +} diff --git a/cmd/kubeadm/app/util/config/masterconfig.go b/cmd/kubeadm/app/util/config/masterconfig.go index ec2f84dec46..a78149e211c 100644 --- a/cmd/kubeadm/app/util/config/masterconfig.go +++ b/cmd/kubeadm/app/util/config/masterconfig.go @@ -23,6 +23,7 @@ import ( "github.com/golang/glog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" netutil "k8s.io/apimachinery/pkg/util/net" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -72,7 +73,12 @@ func SetInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error { } } - cfg.NodeName = node.GetHostname(cfg.NodeName) + cfg.NodeRegistration.Name = node.GetHostname(cfg.NodeRegistration.Name) + + // Only if the slice is nil, we should append the master taint. This allows the user to specify an empty slice for no default master taint + if cfg.NodeRegistration.Taints == nil { + cfg.NodeRegistration.Taints = []v1.Taint{kubeadmconstants.MasterTaint} + } return nil } diff --git a/cmd/kubeadm/app/util/config/nodeconfig.go b/cmd/kubeadm/app/util/config/nodeconfig.go index b8ffc64d932..77bcf8b32f6 100644 --- a/cmd/kubeadm/app/util/config/nodeconfig.go +++ b/cmd/kubeadm/app/util/config/nodeconfig.go @@ -33,7 +33,7 @@ import ( // SetJoinDynamicDefaults checks and sets configuration values for the NodeConfiguration object func SetJoinDynamicDefaults(cfg *kubeadmapi.NodeConfiguration) error { - cfg.NodeName = node.GetHostname(cfg.NodeName) + cfg.NodeRegistration.Name = node.GetHostname(cfg.NodeRegistration.Name) return nil } diff --git a/cmd/kubeadm/test/util.go b/cmd/kubeadm/test/util.go index fdfdae5592b..cb7ae225a68 100644 --- a/cmd/kubeadm/test/util.go +++ b/cmd/kubeadm/test/util.go @@ -57,9 +57,10 @@ func SetupMasterConfigurationFile(t *testing.T, tmpdir string, cfg *kubeadmapi.M kind: MasterConfiguration certificatesDir: {{.CertificatesDir}} api: - advertiseAddress: {{.API.AdvertiseAddress}} - bindPort: {{.API.BindPort}} - nodeName: {{.NodeName}} + advertiseAddress: {{.API.AdvertiseAddress}} + bindPort: {{.API.BindPort}} + nodeRegistration: + name: {{.NodeRegistration.Name}} `))) f, err := os.Create(cfgPath) From fd47f8b20c9ff9acb167915959ac03c0d7232d6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 29 May 2018 17:52:10 +0300 Subject: [PATCH 248/307] Update unit tests to use the new NodeRegistration object --- .../kubeadm/validation/validation_test.go | 28 +++++++-------- cmd/kubeadm/app/cmd/phases/certs_test.go | 6 ++-- cmd/kubeadm/app/cmd/phases/kubeconfig_test.go | 6 ++-- cmd/kubeadm/app/cmd/upgrade/common_test.go | 8 +++-- cmd/kubeadm/app/phases/certs/certs_test.go | 36 +++++++++---------- .../phases/certs/pkiutil/pki_helpers_test.go | 8 ++--- .../app/phases/controlplane/manifests_test.go | 4 +-- .../app/phases/kubeconfig/kubeconfig_test.go | 
32 ++++++++--------- cmd/kubeadm/app/phases/kubelet/config_test.go | 2 +- .../app/phases/kubelet/dynamic_test.go | 4 ++- .../app/phases/markmaster/markmaster_test.go | 35 +++++++++--------- .../app/phases/upgrade/postupgrade_test.go | 6 ++-- .../app/phases/upgrade/staticpods_test.go | 4 ++- .../app/util/config/nodeconfig_test.go | 22 ++++-------- 14 files changed, 100 insertions(+), 101 deletions(-) diff --git a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go index b0dd24a683b..6876bd702de 100644 --- a/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go +++ b/cmd/kubeadm/app/apis/kubeadm/validation/validation_test.go @@ -119,7 +119,7 @@ func TestValidateNodeName(t *testing.T) { actual := ValidateNodeName(rt.s, rt.f) if (len(actual) == 0) != rt.expected { t.Errorf( - "failed ValidateNodeName:\n\texpected: %t\n\t actual: %t", + "failed ValidateNodeRegistration: kubeadm.NodeRegistrationOptions{Name:\n\texpected: %t\n\t actual: %t", rt.expected, (len(actual) == 0), ) @@ -407,8 +407,8 @@ func TestValidateMasterConfiguration(t *testing.T) { ServiceSubnet: "10.96.0.1/12", DNSDomain: "cluster.local", }, - CertificatesDir: "/some/cert/dir", - NodeName: nodename, + CertificatesDir: "/some/cert/dir", + NodeRegistration: kubeadm.NodeRegistrationOptions{Name: nodename, CRISocket: "/some/path"}, }, false}, {"invalid missing token with IPv6 service subnet", &kubeadm.MasterConfiguration{ @@ -420,8 +420,8 @@ func TestValidateMasterConfiguration(t *testing.T) { ServiceSubnet: "2001:db8::1/98", DNSDomain: "cluster.local", }, - CertificatesDir: "/some/cert/dir", - NodeName: nodename, + CertificatesDir: "/some/cert/dir", + NodeRegistration: kubeadm.NodeRegistrationOptions{Name: nodename, CRISocket: "/some/path"}, }, false}, {"invalid missing node name", &kubeadm.MasterConfiguration{ @@ -447,9 +447,9 @@ func TestValidateMasterConfiguration(t *testing.T) { DNSDomain: "cluster.local", PodSubnet: "10.0.1.15", }, - CertificatesDir: "/some/other/cert/dir", - Token: "abcdef.0123456789abcdef", - NodeName: nodename, + CertificatesDir: "/some/other/cert/dir", + Token: "abcdef.0123456789abcdef", + NodeRegistration: kubeadm.NodeRegistrationOptions{Name: nodename, CRISocket: "/some/path"}, }, false}, {"valid master configuration with IPv4 service subnet", &kubeadm.MasterConfiguration{ @@ -493,9 +493,9 @@ func TestValidateMasterConfiguration(t *testing.T) { DNSDomain: "cluster.local", PodSubnet: "10.0.1.15/16", }, - CertificatesDir: "/some/other/cert/dir", - Token: "abcdef.0123456789abcdef", - NodeName: nodename, + CertificatesDir: "/some/other/cert/dir", + Token: "abcdef.0123456789abcdef", + NodeRegistration: kubeadm.NodeRegistrationOptions{Name: nodename, CRISocket: "/some/path"}, }, true}, {"valid master configuration using IPv6 service subnet", &kubeadm.MasterConfiguration{ @@ -538,9 +538,9 @@ func TestValidateMasterConfiguration(t *testing.T) { ServiceSubnet: "2001:db8::1/98", DNSDomain: "cluster.local", }, - CertificatesDir: "/some/other/cert/dir", - Token: "abcdef.0123456789abcdef", - NodeName: nodename, + CertificatesDir: "/some/other/cert/dir", + Token: "abcdef.0123456789abcdef", + NodeRegistration: kubeadm.NodeRegistrationOptions{Name: nodename, CRISocket: "/some/path"}, }, true}, } for _, rt := range tests { diff --git a/cmd/kubeadm/app/cmd/phases/certs_test.go b/cmd/kubeadm/app/cmd/phases/certs_test.go index 0b35c435650..32b90027479 100644 --- a/cmd/kubeadm/app/cmd/phases/certs_test.go +++ b/cmd/kubeadm/app/cmd/phases/certs_test.go @@ 
-254,9 +254,9 @@ func TestSubCmdCertsCreateFilesWithConfigFile(t *testing.T) { certdir := tmpdir cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234}, - CertificatesDir: certdir, - NodeName: "valid-node-name", + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234}, + CertificatesDir: certdir, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"}, } configPath := testutil.SetupMasterConfigurationFile(t, tmpdir, cfg) diff --git a/cmd/kubeadm/app/cmd/phases/kubeconfig_test.go b/cmd/kubeadm/app/cmd/phases/kubeconfig_test.go index 309a5922a4a..f74538a6aab 100644 --- a/cmd/kubeadm/app/cmd/phases/kubeconfig_test.go +++ b/cmd/kubeadm/app/cmd/phases/kubeconfig_test.go @@ -277,9 +277,9 @@ func TestKubeConfigSubCommandsThatCreateFilesWithConfigFile(t *testing.T) { // Adds a master configuration file cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234}, - CertificatesDir: pkidir, - NodeName: "valid-node-name", + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234}, + CertificatesDir: pkidir, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"}, } cfgPath := testutil.SetupMasterConfigurationFile(t, tmpdir, cfg) diff --git a/cmd/kubeadm/app/cmd/upgrade/common_test.go b/cmd/kubeadm/app/cmd/upgrade/common_test.go index ac170b04bb6..8745378b60f 100644 --- a/cmd/kubeadm/app/cmd/upgrade/common_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/common_test.go @@ -65,7 +65,9 @@ func TestPrintConfiguration(t *testing.T) { dnsDomain: "" podSubnet: "" serviceSubnet: "" - nodeName: "" + nodeRegistration: + criSocket: "" + name: "" token: "" unifiedControlPlaneImage: "" `), @@ -108,7 +110,9 @@ func TestPrintConfiguration(t *testing.T) { dnsDomain: "" podSubnet: "" serviceSubnet: 10.96.0.1/12 - nodeName: "" + nodeRegistration: + criSocket: "" + name: "" token: "" unifiedControlPlaneImage: "" `), diff --git a/cmd/kubeadm/app/phases/certs/certs_test.go b/cmd/kubeadm/app/phases/certs/certs_test.go index c6fbdcca840..2e46abf1680 100644 --- a/cmd/kubeadm/app/phases/certs/certs_test.go +++ b/cmd/kubeadm/app/phases/certs/certs_test.go @@ -273,9 +273,9 @@ func TestNewAPIServerCertAndKey(t *testing.T) { advertiseAddresses := []string{"1.2.3.4", "1:2:3::4"} for _, addr := range advertiseAddresses { cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: addr}, - Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: hostname, + API: kubeadmapi.API{AdvertiseAddress: addr}, + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: hostname}, } caCert, caKey, err := NewCACertAndKey() if err != nil { @@ -357,8 +357,8 @@ func TestNewEtcdPeerCertAndKey(t *testing.T) { advertiseAddresses := []string{"1.2.3.4", "1:2:3::4"} for _, addr := range advertiseAddresses { cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: addr}, - NodeName: hostname, + API: kubeadmapi.API{AdvertiseAddress: addr}, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: hostname}, Etcd: kubeadmapi.Etcd{ Local: &kubeadmapi.LocalEtcd{ PeerCertSANs: []string{ @@ -481,10 +481,10 @@ func TestUsingExternalCA(t *testing.T) { defer os.RemoveAll(dir) cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, - Networking: kubeadmapi.Networking{ServiceSubnet: 
"10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: "valid-hostname", - CertificatesDir: dir, + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"}, + CertificatesDir: dir, } for _, f := range test.setupFuncs { @@ -564,10 +564,10 @@ func TestValidateMethods(t *testing.T) { test.loc.pkiDir = dir cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, - Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: "valid-hostname", - CertificatesDir: dir, + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"}, + CertificatesDir: dir, } fmt.Println("Testing", test.name) @@ -696,11 +696,11 @@ func TestCreateCertificateFilesMethods(t *testing.T) { defer os.RemoveAll(tmpdir) cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, - Etcd: kubeadmapi.Etcd{Local: &kubeadmapi.LocalEtcd{}}, - Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: "valid-hostname", - CertificatesDir: tmpdir, + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, + Etcd: kubeadmapi.Etcd{Local: &kubeadmapi.LocalEtcd{}}, + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"}, + CertificatesDir: tmpdir, } if test.externalEtcd { diff --git a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go index 9ff34b0b20a..5bd203c1d50 100644 --- a/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go +++ b/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go @@ -448,7 +448,7 @@ func TestGetAPIServerAltNames(t *testing.T) { cfg: &kubeadmapi.MasterConfiguration{ API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io:6443"}, Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: "valid-hostname", + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"}, APIServerCertSANs: []string{"10.1.245.94", "10.1.245.95", "1.2.3.L", "invalid,commas,in,DNS"}, }, expectedDNSNames: []string{"valid-hostname", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local", "api.k8s.io"}, @@ -459,7 +459,7 @@ func TestGetAPIServerAltNames(t *testing.T) { cfg: &kubeadmapi.MasterConfiguration{ API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "4.5.6.7:6443"}, Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: "valid-hostname", + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"}, APIServerCertSANs: []string{"10.1.245.94", "10.1.245.95", "1.2.3.L", "invalid,commas,in,DNS"}, }, expectedDNSNames: []string{"valid-hostname", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local"}, @@ -561,8 +561,8 @@ func TestGetEtcdPeerAltNames(t *testing.T) { proxyIP := "10.10.10.100" advertiseIP := "1.2.3.4" cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: advertiseIP}, - 
NodeName: hostname, + API: kubeadmapi.API{AdvertiseAddress: advertiseIP}, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: hostname}, Etcd: kubeadmapi.Etcd{ Local: &kubeadmapi.LocalEtcd{ PeerCertSANs: []string{ diff --git a/cmd/kubeadm/app/phases/controlplane/manifests_test.go b/cmd/kubeadm/app/phases/controlplane/manifests_test.go index 6da80ae160d..58b81636874 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests_test.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests_test.go @@ -838,7 +838,7 @@ func TestGetControllerManagerCommandExternalCA(t *testing.T) { KubernetesVersion: "v1.7.0", API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: "valid-hostname", + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"}, }, caKeyPresent: false, expectedArgFunc: func(tmpdir string) []string { @@ -862,7 +862,7 @@ func TestGetControllerManagerCommandExternalCA(t *testing.T) { KubernetesVersion: "v1.7.0", API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: "valid-hostname", + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-hostname"}, }, caKeyPresent: true, expectedArgFunc: func(tmpdir string) []string { diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go index 3b47f4e627f..ced29c30e3f 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go @@ -67,29 +67,29 @@ func TestGetKubeConfigSpecs(t *testing.T) { // Creates Master Configurations pointing to the pkidir folder cfgs := []*kubeadmapi.MasterConfiguration{ { - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234}, - CertificatesDir: pkidir, - NodeName: "valid-node-name", + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234}, + CertificatesDir: pkidir, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"}, }, { - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io", BindPort: 1234}, - CertificatesDir: pkidir, - NodeName: "valid-node-name", + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io", BindPort: 1234}, + CertificatesDir: pkidir, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"}, }, { - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io:4321", BindPort: 1234}, - CertificatesDir: pkidir, - NodeName: "valid-node-name", + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io:4321", BindPort: 1234}, + CertificatesDir: pkidir, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"}, }, { - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io", BindPort: 1234}, - CertificatesDir: pkidir, - NodeName: "valid-node-name", + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io", BindPort: 1234}, + CertificatesDir: pkidir, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"}, }, { - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io:4321", BindPort: 1234}, - CertificatesDir: pkidir, - NodeName: "valid-node-name", + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", ControlPlaneEndpoint: "api.k8s.io:4321", BindPort: 
1234}, + CertificatesDir: pkidir, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"}, }, } @@ -106,7 +106,7 @@ func TestGetKubeConfigSpecs(t *testing.T) { }, { kubeConfigFile: kubeadmconstants.KubeletKubeConfigFileName, - clientName: fmt.Sprintf("system:node:%s", cfg.NodeName), + clientName: fmt.Sprintf("system:node:%s", cfg.NodeRegistration.Name), organizations: []string{kubeadmconstants.NodesGroup}, }, { diff --git a/cmd/kubeadm/app/phases/kubelet/config_test.go b/cmd/kubeadm/app/phases/kubelet/config_test.go index 0e6d5b68c39..dc21da453d4 100644 --- a/cmd/kubeadm/app/phases/kubelet/config_test.go +++ b/cmd/kubeadm/app/phases/kubelet/config_test.go @@ -33,7 +33,7 @@ func TestCreateConfigMap(t *testing.T) { nodeName := "fake-node" client := fake.NewSimpleClientset() cfg := &kubeadmapi.MasterConfiguration{ - NodeName: nodeName, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodeName}, KubernetesVersion: "v1.11.0", KubeletConfiguration: kubeadmapi.KubeletConfiguration{ BaseConfig: &kubeletconfigv1beta1.KubeletConfiguration{}, diff --git a/cmd/kubeadm/app/phases/kubelet/dynamic_test.go b/cmd/kubeadm/app/phases/kubelet/dynamic_test.go index fe0ba35f41c..150eeda405b 100644 --- a/cmd/kubeadm/app/phases/kubelet/dynamic_test.go +++ b/cmd/kubeadm/app/phases/kubelet/dynamic_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/version" ) @@ -33,7 +34,8 @@ func TestEnableDynamicConfigForNode(t *testing.T) { client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { return true, &v1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: nodeName, + Name: nodeName, + Labels: map[string]string{kubeletapis.LabelHostname: nodeName}, }, Spec: v1.NodeSpec{ ConfigSource: &v1.NodeConfigSource{ diff --git a/cmd/kubeadm/app/phases/markmaster/markmaster_test.go b/cmd/kubeadm/app/phases/markmaster/markmaster_test.go index d7d5a934501..48b421cdbf7 100644 --- a/cmd/kubeadm/app/phases/markmaster/markmaster_test.go +++ b/cmd/kubeadm/app/phases/markmaster/markmaster_test.go @@ -40,52 +40,52 @@ func TestMarkMaster(t *testing.T) { // will need to change if strategicpatch's behavior changes in the // future. 
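
All of the test-fixture hunks above make the same mechanical substitution: the flat NodeName field on MasterConfiguration (and, in the testdata, CRISocket) moves into a nested NodeRegistrationOptions struct. A minimal sketch of the new shape, using only field names that appear in these hunks; the concrete values and the helper name newTestConfig are placeholders:

package example

import (
	"fmt"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)

// newTestConfig mirrors the fixtures above; it is a sketch, not the real test helper.
func newTestConfig() (*kubeadmapi.MasterConfiguration, string) {
	cfg := &kubeadmapi.MasterConfiguration{
		API:              kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
		CertificatesDir:  "/etc/kubernetes/pki",
		NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name", CRISocket: "/var/run/dockershim.sock"},
	}
	// Callers that previously read cfg.NodeName now reach through the nested struct,
	// for example when deriving the kubelet client certificate name.
	return cfg, fmt.Sprintf("system:node:%s", cfg.NodeRegistration.Name)
}
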
tests := []struct { - name string - existingLabel string - existingTaint *v1.Taint - wantTaint bool - expectedPatch string + name string + existingLabel string + existingTaints []v1.Taint + newTaints []v1.Taint + expectedPatch string }{ { "master label and taint missing", "", nil, - true, + []v1.Taint{kubeadmconstants.MasterTaint}, "{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}},\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}", }, { "master label and taint missing but taint not wanted", "", nil, - false, + nil, "{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}", }, { "master label missing", "", - &kubeadmconstants.MasterTaint, - true, + []v1.Taint{kubeadmconstants.MasterTaint}, + []v1.Taint{kubeadmconstants.MasterTaint}, "{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}", }, { "master taint missing", kubeadmconstants.LabelNodeRoleMaster, nil, - true, + []v1.Taint{kubeadmconstants.MasterTaint}, "{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}", }, { "nothing missing", kubeadmconstants.LabelNodeRoleMaster, - &kubeadmconstants.MasterTaint, - true, + []v1.Taint{kubeadmconstants.MasterTaint}, + []v1.Taint{kubeadmconstants.MasterTaint}, "{}", }, { "nothing missing but taint unwanted", kubeadmconstants.LabelNodeRoleMaster, - &kubeadmconstants.MasterTaint, - false, + []v1.Taint{kubeadmconstants.MasterTaint}, + nil, "{\"spec\":{\"taints\":null}}", }, } @@ -105,8 +105,8 @@ func TestMarkMaster(t *testing.T) { masterNode.ObjectMeta.Labels[tc.existingLabel] = "" } - if tc.existingTaint != nil { - masterNode.Spec.Taints = append(masterNode.Spec.Taints, *tc.existingTaint) + if tc.existingTaints != nil { + masterNode.Spec.Taints = tc.existingTaints } jsonNode, err := json.Marshal(masterNode) @@ -144,8 +144,7 @@ func TestMarkMaster(t *testing.T) { t.Fatalf("MarkMaster(%s): unexpected error building clientset: %v", tc.name, err) } - err = MarkMaster(cs, hostname, tc.wantTaint) - if err != nil { + if err := MarkMaster(cs, hostname, tc.newTaints); err != nil { t.Errorf("MarkMaster(%s) returned unexpected error: %v", tc.name, err) } diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade_test.go b/cmd/kubeadm/app/phases/upgrade/postupgrade_test.go index 652dee2e40b..c25387e8eca 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade_test.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade_test.go @@ -131,9 +131,9 @@ func TestRollbackFiles(t *testing.T) { func TestShouldBackupAPIServerCertAndKey(t *testing.T) { cfg := &kubeadmapi.MasterConfiguration{ - API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, - Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, - NodeName: "test-node", + API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4"}, + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "test-node"}, } for desc, test := range map[string]struct { diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 44a299b5a3a..1ba9042b77b 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -66,7 +66,9 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 -nodeName: thegopher +nodeRegistration: + name: foo + criSocket: "" schedulerExtraArgs: null token: 
ce3aa5.5ec8455bb76b379f tokenTTL: 24h diff --git a/cmd/kubeadm/app/util/config/nodeconfig_test.go b/cmd/kubeadm/app/util/config/nodeconfig_test.go index dd26b79dd32..dcf679815a0 100644 --- a/cmd/kubeadm/app/util/config/nodeconfig_test.go +++ b/cmd/kubeadm/app/util/config/nodeconfig_test.go @@ -24,19 +24,17 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" ) const ( - node_v1alpha1YAML = "testdata/conversion/node/v1alpha1.yaml" - node_v1alpha2YAML = "testdata/conversion/node/v1alpha2.yaml" - node_internalYAML = "testdata/conversion/node/internal.yaml" - node_incompleteYAML = "testdata/defaulting/node/incomplete.yaml" - node_defaultedv1alpha1YAML = "testdata/defaulting/node/defaulted_v1alpha1.yaml" - node_defaultedv1alpha2YAML = "testdata/defaulting/node/defaulted_v1alpha2.yaml" - node_invalidYAML = "testdata/validation/invalid_nodecfg.yaml" + node_v1alpha1YAML = "testdata/conversion/node/v1alpha1.yaml" + node_v1alpha2YAML = "testdata/conversion/node/v1alpha2.yaml" + node_internalYAML = "testdata/conversion/node/internal.yaml" + node_incompleteYAML = "testdata/defaulting/node/incomplete.yaml" + node_defaultedYAML = "testdata/defaulting/node/defaulted.yaml" + node_invalidYAML = "testdata/validation/invalid_nodecfg.yaml" ) func TestNodeConfigFileAndDefaultsToInternalConfig(t *testing.T) { @@ -67,16 +65,10 @@ func TestNodeConfigFileAndDefaultsToInternalConfig(t *testing.T) { }, // These tests are reading one file that has only a subset of the fields populated, loading it using NodeConfigFileAndDefaultsToInternalConfig, // and then marshals the internal object to the expected groupVersion - { // v1alpha1 -> default -> validate -> internal -> v1alpha1 - name: "incompleteYAMLToDefaulted", - in: node_incompleteYAML, - out: node_defaultedv1alpha1YAML, - groupVersion: v1alpha1.SchemeGroupVersion, - }, { // v1alpha1 -> default -> validate -> internal -> v1alpha2 name: "incompleteYAMLToDefaulted", in: node_incompleteYAML, - out: node_defaultedv1alpha2YAML, + out: node_defaultedYAML, groupVersion: v1alpha2.SchemeGroupVersion, }, { // v1alpha1 (faulty) -> validation should fail From 8bcbc1e9bdf67f253d749db95251f1c9234ed913 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 29 May 2018 17:52:22 +0300 Subject: [PATCH 249/307] autogenerated --- cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD | 1 + .../v1alpha1/zz_generated.conversion.go | 32 +++--------- .../v1alpha2/zz_generated.conversion.go | 50 +++++++++++++++---- .../kubeadm/v1alpha2/zz_generated.deepcopy.go | 33 ++++++++++++ .../kubeadm/v1alpha2/zz_generated.defaults.go | 2 + .../app/apis/kubeadm/zz_generated.deepcopy.go | 33 ++++++++++++ cmd/kubeadm/app/phases/kubelet/BUILD | 3 +- cmd/kubeadm/app/phases/markmaster/BUILD | 7 +-- cmd/kubeadm/app/util/apiclient/BUILD | 3 ++ cmd/kubeadm/app/util/config/BUILD | 1 + .../testdata/conversion/master/internal.yaml | 10 ++-- .../testdata/conversion/master/v1alpha2.yaml | 8 ++- .../testdata/conversion/node/internal.yaml | 7 ++- .../testdata/conversion/node/v1alpha2.yaml | 5 +- .../testdata/defaulting/master/defaulted.yaml | 8 ++- ...defaulted_v1alpha2.yaml => defaulted.yaml} | 5 +- .../defaulting/node/defaulted_v1alpha1.yaml | 14 ------ 17 files changed, 152 insertions(+), 70 deletions(-) rename 
cmd/kubeadm/app/util/config/testdata/defaulting/node/{defaulted_v1alpha2.yaml => defaulted.yaml} (83%) delete mode 100644 cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted_v1alpha1.yaml diff --git a/cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD b/cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD index cea19499314..f78847d71b9 100644 --- a/cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/fuzzer/BUILD @@ -16,6 +16,7 @@ go_library( "//pkg/proxy/apis/kubeproxyconfig/v1alpha1:go_default_library", "//pkg/util/pointer:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", ], diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go index e602d3aaafb..6a979a82eae 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/zz_generated.conversion.go @@ -217,15 +217,15 @@ func autoConvert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in } out.KubernetesVersion = in.KubernetesVersion // WARNING: in.CloudProvider requires manual conversion: does not exist in peer-type - out.NodeName = in.NodeName + // WARNING: in.NodeName requires manual conversion: does not exist in peer-type // WARNING: in.AuthorizationModes requires manual conversion: does not exist in peer-type - out.NoTaintMaster = in.NoTaintMaster + // WARNING: in.NoTaintMaster requires manual conversion: does not exist in peer-type // WARNING: in.PrivilegedPods requires manual conversion: does not exist in peer-type out.Token = in.Token out.TokenTTL = (*meta_v1.Duration)(unsafe.Pointer(in.TokenTTL)) out.TokenUsages = *(*[]string)(unsafe.Pointer(&in.TokenUsages)) out.TokenGroups = *(*[]string)(unsafe.Pointer(&in.TokenGroups)) - out.CRISocket = in.CRISocket + // WARNING: in.CRISocket requires manual conversion: does not exist in peer-type out.APIServerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.APIServerExtraArgs)) out.ControllerManagerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ControllerManagerExtraArgs)) out.SchedulerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.SchedulerExtraArgs)) @@ -262,13 +262,11 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in return err } out.KubernetesVersion = in.KubernetesVersion - out.NodeName = in.NodeName - out.NoTaintMaster = in.NoTaintMaster + // WARNING: in.NodeRegistration requires manual conversion: does not exist in peer-type out.Token = in.Token out.TokenTTL = (*meta_v1.Duration)(unsafe.Pointer(in.TokenTTL)) out.TokenUsages = *(*[]string)(unsafe.Pointer(&in.TokenUsages)) out.TokenGroups = *(*[]string)(unsafe.Pointer(&in.TokenGroups)) - out.CRISocket = in.CRISocket out.APIServerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.APIServerExtraArgs)) out.ControllerManagerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ControllerManagerExtraArgs)) out.SchedulerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.SchedulerExtraArgs)) @@ -288,11 +286,6 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in return nil } -// Convert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration is an autogenerated conversion function. 
-func Convert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in *kubeadm.MasterConfiguration, out *MasterConfiguration, s conversion.Scope) error { - return autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in, out, s) -} - func autoConvert_v1alpha1_Networking_To_kubeadm_Networking(in *Networking, out *kubeadm.Networking, s conversion.Scope) error { out.ServiceSubnet = in.ServiceSubnet out.PodSubnet = in.PodSubnet @@ -323,10 +316,10 @@ func autoConvert_v1alpha1_NodeConfiguration_To_kubeadm_NodeConfiguration(in *Nod out.DiscoveryToken = in.DiscoveryToken out.DiscoveryTokenAPIServers = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenAPIServers)) out.DiscoveryTimeout = (*meta_v1.Duration)(unsafe.Pointer(in.DiscoveryTimeout)) - out.NodeName = in.NodeName + // WARNING: in.NodeName requires manual conversion: does not exist in peer-type out.TLSBootstrapToken = in.TLSBootstrapToken out.Token = in.Token - out.CRISocket = in.CRISocket + // WARNING: in.CRISocket requires manual conversion: does not exist in peer-type out.ClusterName = in.ClusterName out.DiscoveryTokenCACertHashes = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenCACertHashes)) out.DiscoveryTokenUnsafeSkipCAVerification = in.DiscoveryTokenUnsafeSkipCAVerification @@ -334,21 +327,15 @@ func autoConvert_v1alpha1_NodeConfiguration_To_kubeadm_NodeConfiguration(in *Nod return nil } -// Convert_v1alpha1_NodeConfiguration_To_kubeadm_NodeConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_NodeConfiguration_To_kubeadm_NodeConfiguration(in *NodeConfiguration, out *kubeadm.NodeConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_NodeConfiguration_To_kubeadm_NodeConfiguration(in, out, s) -} - func autoConvert_kubeadm_NodeConfiguration_To_v1alpha1_NodeConfiguration(in *kubeadm.NodeConfiguration, out *NodeConfiguration, s conversion.Scope) error { + // WARNING: in.NodeRegistration requires manual conversion: does not exist in peer-type out.CACertPath = in.CACertPath out.DiscoveryFile = in.DiscoveryFile out.DiscoveryToken = in.DiscoveryToken out.DiscoveryTokenAPIServers = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenAPIServers)) out.DiscoveryTimeout = (*meta_v1.Duration)(unsafe.Pointer(in.DiscoveryTimeout)) - out.NodeName = in.NodeName out.TLSBootstrapToken = in.TLSBootstrapToken out.Token = in.Token - out.CRISocket = in.CRISocket out.ClusterName = in.ClusterName out.DiscoveryTokenCACertHashes = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenCACertHashes)) out.DiscoveryTokenUnsafeSkipCAVerification = in.DiscoveryTokenUnsafeSkipCAVerification @@ -356,11 +343,6 @@ func autoConvert_kubeadm_NodeConfiguration_To_v1alpha1_NodeConfiguration(in *kub return nil } -// Convert_kubeadm_NodeConfiguration_To_v1alpha1_NodeConfiguration is an autogenerated conversion function. 
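
With NodeName, CRISocket and NoTaintMaster gone from the internal MasterConfiguration, conversion-gen can no longer emit complete public wrappers for v1alpha1, which is why the Convert_* functions here are deleted and the surviving autoConvert_* bodies only carry "requires manual conversion" warnings. A hypothetical hand-written wrapper for the v1alpha1-to-internal direction could look like the sketch below; it illustrates the idea and is not the conversion code kubeadm actually ships:

package v1alpha1

import (
	v1 "k8s.io/api/core/v1"
	conversion "k8s.io/apimachinery/pkg/conversion"

	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)

// Hypothetical manual conversion: the autogenerated half still copies the
// fields with identical peers, and the wrapper fills in the rest by hand.
func Convert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in *MasterConfiguration, out *kubeadm.MasterConfiguration, s conversion.Scope) error {
	if err := autoConvert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in, out, s); err != nil {
		return err
	}
	out.NodeRegistration.Name = in.NodeName
	out.NodeRegistration.CRISocket = in.CRISocket
	if !in.NoTaintMaster {
		out.NodeRegistration.Taints = []v1.Taint{{Key: "node-role.kubernetes.io/master", Effect: v1.TaintEffectNoSchedule}}
	}
	return nil
}
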
-func Convert_kubeadm_NodeConfiguration_To_v1alpha1_NodeConfiguration(in *kubeadm.NodeConfiguration, out *NodeConfiguration, s conversion.Scope) error { - return autoConvert_kubeadm_NodeConfiguration_To_v1alpha1_NodeConfiguration(in, out, s) -} - func autoConvert_v1alpha1_TokenDiscovery_To_kubeadm_TokenDiscovery(in *TokenDiscovery, out *kubeadm.TokenDiscovery, s conversion.Scope) error { out.ID = in.ID out.Secret = in.Secret diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go index dd661e08773..888517af55d 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go @@ -62,6 +62,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_kubeadm_Networking_To_v1alpha2_Networking, Convert_v1alpha2_NodeConfiguration_To_kubeadm_NodeConfiguration, Convert_kubeadm_NodeConfiguration_To_v1alpha2_NodeConfiguration, + Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions, + Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions, Convert_v1alpha2_TokenDiscovery_To_kubeadm_TokenDiscovery, Convert_kubeadm_TokenDiscovery_To_v1alpha2_TokenDiscovery, ) @@ -275,14 +277,14 @@ func autoConvert_v1alpha2_MasterConfiguration_To_kubeadm_MasterConfiguration(in if err := Convert_v1alpha2_Networking_To_kubeadm_Networking(&in.Networking, &out.Networking, s); err != nil { return err } + if err := Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { + return err + } out.KubernetesVersion = in.KubernetesVersion - out.NodeName = in.NodeName - out.NoTaintMaster = in.NoTaintMaster out.Token = in.Token out.TokenTTL = (*meta_v1.Duration)(unsafe.Pointer(in.TokenTTL)) out.TokenUsages = *(*[]string)(unsafe.Pointer(&in.TokenUsages)) out.TokenGroups = *(*[]string)(unsafe.Pointer(&in.TokenGroups)) - out.CRISocket = in.CRISocket out.APIServerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.APIServerExtraArgs)) out.ControllerManagerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ControllerManagerExtraArgs)) out.SchedulerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.SchedulerExtraArgs)) @@ -323,13 +325,13 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha2_MasterConfiguration(in return err } out.KubernetesVersion = in.KubernetesVersion - out.NodeName = in.NodeName - out.NoTaintMaster = in.NoTaintMaster + if err := Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { + return err + } out.Token = in.Token out.TokenTTL = (*meta_v1.Duration)(unsafe.Pointer(in.TokenTTL)) out.TokenUsages = *(*[]string)(unsafe.Pointer(&in.TokenUsages)) out.TokenGroups = *(*[]string)(unsafe.Pointer(&in.TokenGroups)) - out.CRISocket = in.CRISocket out.APIServerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.APIServerExtraArgs)) out.ControllerManagerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ControllerManagerExtraArgs)) out.SchedulerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.SchedulerExtraArgs)) @@ -379,15 +381,16 @@ func Convert_kubeadm_Networking_To_v1alpha2_Networking(in *kubeadm.Networking, o } func autoConvert_v1alpha2_NodeConfiguration_To_kubeadm_NodeConfiguration(in *NodeConfiguration, out *kubeadm.NodeConfiguration, s conversion.Scope) error { + if err := 
Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { + return err + } out.CACertPath = in.CACertPath out.DiscoveryFile = in.DiscoveryFile out.DiscoveryToken = in.DiscoveryToken out.DiscoveryTokenAPIServers = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenAPIServers)) out.DiscoveryTimeout = (*meta_v1.Duration)(unsafe.Pointer(in.DiscoveryTimeout)) - out.NodeName = in.NodeName out.TLSBootstrapToken = in.TLSBootstrapToken out.Token = in.Token - out.CRISocket = in.CRISocket out.ClusterName = in.ClusterName out.DiscoveryTokenCACertHashes = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenCACertHashes)) out.DiscoveryTokenUnsafeSkipCAVerification = in.DiscoveryTokenUnsafeSkipCAVerification @@ -401,15 +404,16 @@ func Convert_v1alpha2_NodeConfiguration_To_kubeadm_NodeConfiguration(in *NodeCon } func autoConvert_kubeadm_NodeConfiguration_To_v1alpha2_NodeConfiguration(in *kubeadm.NodeConfiguration, out *NodeConfiguration, s conversion.Scope) error { + if err := Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { + return err + } out.CACertPath = in.CACertPath out.DiscoveryFile = in.DiscoveryFile out.DiscoveryToken = in.DiscoveryToken out.DiscoveryTokenAPIServers = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenAPIServers)) out.DiscoveryTimeout = (*meta_v1.Duration)(unsafe.Pointer(in.DiscoveryTimeout)) - out.NodeName = in.NodeName out.TLSBootstrapToken = in.TLSBootstrapToken out.Token = in.Token - out.CRISocket = in.CRISocket out.ClusterName = in.ClusterName out.DiscoveryTokenCACertHashes = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenCACertHashes)) out.DiscoveryTokenUnsafeSkipCAVerification = in.DiscoveryTokenUnsafeSkipCAVerification @@ -422,6 +426,32 @@ func Convert_kubeadm_NodeConfiguration_To_v1alpha2_NodeConfiguration(in *kubeadm return autoConvert_kubeadm_NodeConfiguration_To_v1alpha2_NodeConfiguration(in, out, s) } +func autoConvert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error { + out.Name = in.Name + out.CRISocket = in.CRISocket + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) + return nil +} + +// Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions is an autogenerated conversion function. +func Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error { + return autoConvert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in, out, s) +} + +func autoConvert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error { + out.Name = in.Name + out.CRISocket = in.CRISocket + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) + return nil +} + +// Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions is an autogenerated conversion function. 
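
The v1alpha2 API keeps field parity with the internal type, so conversion-gen can still register complete wrappers here, including the new NodeRegistrationOptions pair. A sketch of exercising them through the kubeadm scheme; it assumes the scheme package exposes a Scheme wired up via RegisterConversions as listed above, and the helper name toInternal is invented:

package example

import (
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2"
)

// toInternal converts an external v1alpha2 object into the internal type.
// Scheme.Convert dispatches to the registered wrapper for MasterConfiguration,
// which in turn converts the nested NodeRegistrationOptions.
func toInternal(ext *v1alpha2.MasterConfiguration) (*kubeadmapi.MasterConfiguration, error) {
	internal := &kubeadmapi.MasterConfiguration{}
	if err := kubeadmscheme.Scheme.Convert(ext, internal, nil); err != nil {
		return nil, err
	}
	return internal, nil
}
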
+func Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error { + return autoConvert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(in, out, s) +} + func autoConvert_v1alpha2_TokenDiscovery_To_kubeadm_TokenDiscovery(in *TokenDiscovery, out *kubeadm.TokenDiscovery, s conversion.Scope) error { out.ID = in.ID out.Secret = in.Secret diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go index af11b89b973..9d5fac577c0 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha2 import ( + core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" v1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" @@ -231,6 +232,7 @@ func (in *MasterConfiguration) DeepCopyInto(out *MasterConfiguration) { in.Etcd.DeepCopyInto(&out.Etcd) in.KubeletConfiguration.DeepCopyInto(&out.KubeletConfiguration) out.Networking = in.Networking + in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) if in.TokenTTL != nil { in, out := &in.TokenTTL, &out.TokenTTL if *in == nil { @@ -340,6 +342,7 @@ func (in *Networking) DeepCopy() *Networking { func (in *NodeConfiguration) DeepCopyInto(out *NodeConfiguration) { *out = *in out.TypeMeta = in.TypeMeta + in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) if in.DiscoveryTokenAPIServers != nil { in, out := &in.DiscoveryTokenAPIServers, &out.DiscoveryTokenAPIServers *out = make([]string, len(*in)) @@ -387,6 +390,36 @@ func (in *NodeConfiguration) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeRegistrationOptions) DeepCopyInto(out *NodeRegistrationOptions) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]core_v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRegistrationOptions. +func (in *NodeRegistrationOptions) DeepCopy() *NodeRegistrationOptions { + if in == nil { + return nil + } + out := new(NodeRegistrationOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TokenDiscovery) DeepCopyInto(out *TokenDiscovery) { *out = *in diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.defaults.go index 7218ab95912..62ab2c8e971 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.defaults.go @@ -43,8 +43,10 @@ func SetObjectDefaults_MasterConfiguration(in *MasterConfiguration) { if in.KubeletConfiguration.BaseConfig != nil { v1beta1.SetDefaults_KubeletConfiguration(in.KubeletConfiguration.BaseConfig) } + SetDefaults_NodeRegistrationOptions(&in.NodeRegistration) } func SetObjectDefaults_NodeConfiguration(in *NodeConfiguration) { SetDefaults_NodeConfiguration(in) + SetDefaults_NodeRegistrationOptions(&in.NodeRegistration) } diff --git a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go index ce680355710..c7923fb1480 100644 --- a/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go +++ b/cmd/kubeadm/app/apis/kubeadm/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package kubeadm import ( + core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" v1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1" @@ -231,6 +232,7 @@ func (in *MasterConfiguration) DeepCopyInto(out *MasterConfiguration) { in.Etcd.DeepCopyInto(&out.Etcd) in.KubeletConfiguration.DeepCopyInto(&out.KubeletConfiguration) out.Networking = in.Networking + in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) if in.TokenTTL != nil { in, out := &in.TokenTTL, &out.TokenTTL if *in == nil { @@ -340,6 +342,7 @@ func (in *Networking) DeepCopy() *Networking { func (in *NodeConfiguration) DeepCopyInto(out *NodeConfiguration) { *out = *in out.TypeMeta = in.TypeMeta + in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) if in.DiscoveryTokenAPIServers != nil { in, out := &in.DiscoveryTokenAPIServers, &out.DiscoveryTokenAPIServers *out = make([]string, len(*in)) @@ -387,6 +390,36 @@ func (in *NodeConfiguration) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeRegistrationOptions) DeepCopyInto(out *NodeRegistrationOptions) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]core_v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtraArgs != nil { + in, out := &in.ExtraArgs, &out.ExtraArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRegistrationOptions. +func (in *NodeRegistrationOptions) DeepCopy() *NodeRegistrationOptions { + if in == nil { + return nil + } + out := new(NodeRegistrationOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
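
Because NodeRegistrationOptions carries a Taints slice and an ExtraArgs map, the generated deepcopy above has to allocate fresh backing storage rather than copy the struct shallowly. A small illustration of what that buys, relying only on the generated methods shown here; the helper name and the values are invented:

package example

import (
	corev1 "k8s.io/api/core/v1"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)

// deepCopyIsolation returns true: DeepCopy gives the copy its own Taints
// slice, so mutating the copy never leaks back into the original.
func deepCopyIsolation() bool {
	orig := &kubeadmapi.NodeRegistrationOptions{
		Name:   "node-1",
		Taints: []corev1.Taint{{Key: "node-role.kubernetes.io/master", Effect: corev1.TaintEffectNoSchedule}},
	}
	cp := orig.DeepCopy()
	cp.Taints[0].Effect = corev1.TaintEffectNoExecute
	return orig.Taints[0].Effect == corev1.TaintEffectNoSchedule
}
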
func (in *TokenDiscovery) DeepCopyInto(out *TokenDiscovery) { *out = *in diff --git a/cmd/kubeadm/app/phases/kubelet/BUILD b/cmd/kubeadm/app/phases/kubelet/BUILD index 0dc4429f902..fecacf2a1f9 100644 --- a/cmd/kubeadm/app/phases/kubelet/BUILD +++ b/cmd/kubeadm/app/phases/kubelet/BUILD @@ -22,10 +22,8 @@ go_library( "//pkg/util/version:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], @@ -40,6 +38,7 @@ go_test( embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis/kubeletconfig/v1beta1:go_default_library", "//pkg/util/version:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/cmd/kubeadm/app/phases/markmaster/BUILD b/cmd/kubeadm/app/phases/markmaster/BUILD index efb2903f412..8d2dd295588 100644 --- a/cmd/kubeadm/app/phases/markmaster/BUILD +++ b/cmd/kubeadm/app/phases/markmaster/BUILD @@ -27,14 +27,9 @@ go_library( importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/markmaster", deps = [ "//cmd/kubeadm/app/constants:go_default_library", - "//pkg/kubelet/apis:go_default_library", + "//cmd/kubeadm/app/util/apiclient:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) diff --git a/cmd/kubeadm/app/util/apiclient/BUILD b/cmd/kubeadm/app/util/apiclient/BUILD index 5704d1228a1..d702b7328c9 100644 --- a/cmd/kubeadm/app/util/apiclient/BUILD +++ b/cmd/kubeadm/app/util/apiclient/BUILD @@ -19,6 +19,7 @@ go_library( deps = [ "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", + "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/registry/core/service/ipallocator:go_default_library", "//vendor/k8s.io/api/apps/v1:go_default_library", @@ -28,8 +29,10 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/dynamic:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", diff --git a/cmd/kubeadm/app/util/config/BUILD b/cmd/kubeadm/app/util/config/BUILD index 02eacd1243f..2bb14a8ded6 100644 --- 
a/cmd/kubeadm/app/util/config/BUILD +++ b/cmd/kubeadm/app/util/config/BUILD @@ -26,6 +26,7 @@ go_library( "//pkg/util/node:go_default_library", "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml index e6bcefe3baa..5267a7bce5c 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml @@ -11,7 +11,6 @@ AuditPolicyConfiguration: LogMaxAge: 2 Path: "" CIImageRepository: "" -CRISocket: /var/run/dockershim.sock CertificatesDir: /etc/kubernetes/pki ClusterName: kubernetes ControllerManagerExtraArgs: null @@ -140,8 +139,13 @@ Networking: DNSDomain: cluster.local PodSubnet: "" ServiceSubnet: 10.96.0.0/12 -NoTaintMaster: false -NodeName: master-1 +NodeRegistration: + CRISocket: /var/run/dockershim.sock + ExtraArgs: null + Name: master-1 + Taints: + - effect: NoSchedule + key: node-role.kubernetes.io/master SchedulerExtraArgs: null SchedulerExtraVolumes: null Token: s73ybu.6tw6wnqgp5z0wb77 diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml index d5f30a60566..b6594689dc8 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml @@ -11,7 +11,6 @@ auditPolicy: path: "" certificatesDir: /etc/kubernetes/pki clusterName: kubernetes -criSocket: /var/run/dockershim.sock etcd: local: dataDir: /var/lib/etcd @@ -132,7 +131,12 @@ networking: dnsDomain: cluster.local podSubnet: "" serviceSubnet: 10.96.0.0/12 -nodeName: master-1 +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: master-1 + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/master token: s73ybu.6tw6wnqgp5z0wb77 tokenGroups: - system:bootstrappers:kubeadm:default-node-token diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/node/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/node/internal.yaml index dce87719dd4..517a1def2ca 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/node/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/node/internal.yaml @@ -1,5 +1,4 @@ CACertPath: /etc/kubernetes/pki/ca.crt -CRISocket: /var/run/dockershim.sock ClusterName: kubernetes DiscoveryFile: "" DiscoveryTimeout: 5m0s @@ -9,6 +8,10 @@ DiscoveryTokenAPIServers: DiscoveryTokenCACertHashes: null DiscoveryTokenUnsafeSkipCAVerification: true FeatureGates: null -NodeName: master-1 +NodeRegistration: + CRISocket: /var/run/dockershim.sock + ExtraArgs: null + Name: master-1 + Taints: null TLSBootstrapToken: abcdef.0123456789abcdef Token: abcdef.0123456789abcdef diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha2.yaml index 8cf55e5da27..40754a5888b 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha2.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha2.yaml @@ -1,7 +1,6 @@ apiVersion: kubeadm.k8s.io/v1alpha2 caCertPath: 
/etc/kubernetes/pki/ca.crt clusterName: kubernetes -criSocket: /var/run/dockershim.sock discoveryFile: "" discoveryTimeout: 5m0s discoveryToken: abcdef.0123456789abcdef @@ -9,6 +8,8 @@ discoveryTokenAPIServers: - kube-apiserver:6443 discoveryTokenUnsafeSkipCAVerification: true kind: NodeConfiguration -nodeName: master-1 +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: master-1 tlsBootstrapToken: abcdef.0123456789abcdef token: abcdef.0123456789abcdef diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml index 6cea70575d7..42647648181 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml @@ -9,7 +9,6 @@ auditPolicy: path: "" certificatesDir: /var/lib/kubernetes/pki clusterName: kubernetes -criSocket: /var/run/criruntime.sock etcd: local: dataDir: /var/lib/etcd @@ -127,7 +126,12 @@ networking: dnsDomain: cluster.global podSubnet: "" serviceSubnet: 10.196.0.0/12 -nodeName: master-1 +nodeRegistration: + criSocket: /var/run/criruntime.sock + name: master-1 + taints: + - effect: NoSchedule + key: node-role.kubernetes.io/master token: s73ybu.6tw6wnqgp5z0wb77 tokenGroups: - system:bootstrappers:kubeadm:default-node-token diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted_v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted.yaml similarity index 83% rename from cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted_v1alpha2.yaml rename to cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted.yaml index 4be5386539d..85c6b9b481c 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted_v1alpha2.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted.yaml @@ -1,7 +1,6 @@ apiVersion: kubeadm.k8s.io/v1alpha2 caCertPath: /etc/kubernetes/pki/ca.crt clusterName: kubernetes -criSocket: /var/run/dockershim.sock discoveryFile: "" discoveryTimeout: 5m0s discoveryToken: abcdef.0123456789abcdef @@ -9,6 +8,8 @@ discoveryTokenAPIServers: - kube-apiserver:6443 discoveryTokenUnsafeSkipCAVerification: true kind: NodeConfiguration -nodeName: thegopher +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: thegopher tlsBootstrapToken: abcdef.0123456789abcdef token: abcdef.0123456789abcdef diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted_v1alpha1.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted_v1alpha1.yaml deleted file mode 100644 index 5ffc2205fdf..00000000000 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/node/defaulted_v1alpha1.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: kubeadm.k8s.io/v1alpha1 -caCertPath: /etc/kubernetes/pki/ca.crt -clusterName: kubernetes -criSocket: /var/run/dockershim.sock -discoveryFile: "" -discoveryTimeout: 5m0s -discoveryToken: abcdef.0123456789abcdef -discoveryTokenAPIServers: -- kube-apiserver:6443 -discoveryTokenUnsafeSkipCAVerification: true -kind: NodeConfiguration -nodeName: thegopher -tlsBootstrapToken: abcdef.0123456789abcdef -token: abcdef.0123456789abcdef From 401bab36427759c77a3d00f8a5d9f7e2f903915a Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Tue, 1 May 2018 02:12:29 -0400 Subject: [PATCH 250/307] Auto-generated files --- .../pluginregistration/v1alpha1/api.pb.go | 1027 +++++++++++++++++ .../example_plugin_apis/v1beta1/api.pb.go | 632 ++++++++++ 
.../example_plugin_apis/v1beta2/api.pb.go | 633 ++++++++++ 3 files changed, 2292 insertions(+) create mode 100644 pkg/kubelet/apis/pluginregistration/v1alpha1/api.pb.go create mode 100644 pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go create mode 100644 pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go diff --git a/pkg/kubelet/apis/pluginregistration/v1alpha1/api.pb.go b/pkg/kubelet/apis/pluginregistration/v1alpha1/api.pb.go new file mode 100644 index 00000000000..96e1d571dba --- /dev/null +++ b/pkg/kubelet/apis/pluginregistration/v1alpha1/api.pb.go @@ -0,0 +1,1027 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: api.proto +// DO NOT EDIT! + +/* + Package pluginregistration is a generated protocol buffer package. + + It is generated from these files: + api.proto + + It has these top-level messages: + PluginInfo + RegistrationStatus + RegistrationStatusResponse + InfoRequest +*/ +package pluginregistration + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginInfo is the message sent from a plugin to the Kubelet pluginwatcher for plugin registration +type PluginInfo struct { + // Type of the Plugin. CSIPlugin or DevicePlugin + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Plugin name that uniquely identifies the plugin for the given plugin type. + // For DevicePlugin, this is the resource name that the plugin manages and + // should follow the extended resource name convention. + // For CSI, this is the CSI driver registrar name. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Optional endpoint location. If found set by Kubelet component, + // Kubelet component will use this endpoint for specific requests. + // This allows the plugin to register using one endpoint and possibly use + // a different socket for control operations. CSI uses this model to delegate + // its registration external from the plugin. + Endpoint string `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // Plugin service API versions the plugin supports. + // For DevicePlugin, this maps to the deviceplugin API versions the + // plugin supports at the given socket. 
+ // The Kubelet component communicating with the plugin should be able + // to choose any preferred version from this list, or returns an error + // if none of the listed versions is supported. + SupportedVersions []string `protobuf:"bytes,4,rep,name=supported_versions,json=supportedVersions" json:"supported_versions,omitempty"` +} + +func (m *PluginInfo) Reset() { *m = PluginInfo{} } +func (*PluginInfo) ProtoMessage() {} +func (*PluginInfo) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } + +func (m *PluginInfo) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PluginInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginInfo) GetEndpoint() string { + if m != nil { + return m.Endpoint + } + return "" +} + +func (m *PluginInfo) GetSupportedVersions() []string { + if m != nil { + return m.SupportedVersions + } + return nil +} + +// RegistrationStatus is the message sent from Kubelet pluginwatcher to the plugin for notification on registration status +type RegistrationStatus struct { + // True if plugin gets registered successfully at Kubelet + PluginRegistered bool `protobuf:"varint,1,opt,name=plugin_registered,json=pluginRegistered,proto3" json:"plugin_registered,omitempty"` + // Error message in case plugin fails to register, empty string otherwise + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *RegistrationStatus) Reset() { *m = RegistrationStatus{} } +func (*RegistrationStatus) ProtoMessage() {} +func (*RegistrationStatus) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } + +func (m *RegistrationStatus) GetPluginRegistered() bool { + if m != nil { + return m.PluginRegistered + } + return false +} + +func (m *RegistrationStatus) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// RegistrationStatusResponse is sent by plugin to kubelet in response to RegistrationStatus RPC +type RegistrationStatusResponse struct { +} + +func (m *RegistrationStatusResponse) Reset() { *m = RegistrationStatusResponse{} } +func (*RegistrationStatusResponse) ProtoMessage() {} +func (*RegistrationStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{2} } + +// InfoRequest is the empty request message from Kubelet +type InfoRequest struct { +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{3} } + +func init() { + proto.RegisterType((*PluginInfo)(nil), "pluginregistration.PluginInfo") + proto.RegisterType((*RegistrationStatus)(nil), "pluginregistration.RegistrationStatus") + proto.RegisterType((*RegistrationStatusResponse)(nil), "pluginregistration.RegistrationStatusResponse") + proto.RegisterType((*InfoRequest)(nil), "pluginregistration.InfoRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
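
The comments in the generated API above describe the registration handshake: the kubelet's pluginwatcher calls GetInfo on the plugin's registration socket, then reports the outcome through NotifyRegistrationStatus. A hypothetical plugin-side implementation of that service is sketched below; the resource name, socket path and version list are made-up examples, and only the message and method names come from this patch:

package example

import (
	"log"

	"golang.org/x/net/context"

	pluginregistration "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1"
)

// examplePlugin answers the kubelet's registration probes.
type examplePlugin struct{}

func (e *examplePlugin) GetInfo(ctx context.Context, req *pluginregistration.InfoRequest) (*pluginregistration.PluginInfo, error) {
	return &pluginregistration.PluginInfo{
		Type:              "DevicePlugin",
		Name:              "vendor.example.com/gpu",                   // hypothetical extended resource name
		Endpoint:          "/var/lib/kubelet/plugins/vendor-gpu.sock", // hypothetical control socket
		SupportedVersions: []string{"v1beta1", "v1beta2"},
	}, nil
}

func (e *examplePlugin) NotifyRegistrationStatus(ctx context.Context, status *pluginregistration.RegistrationStatus) (*pluginregistration.RegistrationStatusResponse, error) {
	if !status.PluginRegistered {
		log.Printf("kubelet rejected registration: %s", status.Error)
	}
	return &pluginregistration.RegistrationStatusResponse{}, nil
}

Wiring it up would be one call against the generated helper that follows, e.g. pluginregistration.RegisterRegistrationServer(grpcServer, &examplePlugin{}) on a gRPC server listening on the watched socket.
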
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Registration service + +type RegistrationClient interface { + GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) + NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) +} + +type registrationClient struct { + cc *grpc.ClientConn +} + +func NewRegistrationClient(cc *grpc.ClientConn) RegistrationClient { + return ®istrationClient{cc} +} + +func (c *registrationClient) GetInfo(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*PluginInfo, error) { + out := new(PluginInfo) + err := grpc.Invoke(ctx, "/pluginregistration.Registration/GetInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *registrationClient) NotifyRegistrationStatus(ctx context.Context, in *RegistrationStatus, opts ...grpc.CallOption) (*RegistrationStatusResponse, error) { + out := new(RegistrationStatusResponse) + err := grpc.Invoke(ctx, "/pluginregistration.Registration/NotifyRegistrationStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Registration service + +type RegistrationServer interface { + GetInfo(context.Context, *InfoRequest) (*PluginInfo, error) + NotifyRegistrationStatus(context.Context, *RegistrationStatus) (*RegistrationStatusResponse, error) +} + +func RegisterRegistrationServer(s *grpc.Server, srv RegistrationServer) { + s.RegisterService(&_Registration_serviceDesc, srv) +} + +func _Registration_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistrationServer).GetInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginregistration.Registration/GetInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistrationServer).GetInfo(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Registration_NotifyRegistrationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegistrationStatus) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistrationServer).NotifyRegistrationStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginregistration.Registration/NotifyRegistrationStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistrationServer).NotifyRegistrationStatus(ctx, req.(*RegistrationStatus)) + } + return interceptor(ctx, in, info, handler) +} + +var _Registration_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pluginregistration.Registration", + HandlerType: (*RegistrationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetInfo", + Handler: _Registration_GetInfo_Handler, + }, + { + MethodName: "NotifyRegistrationStatus", + Handler: _Registration_NotifyRegistrationStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +func (m *PluginInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *PluginInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Endpoint) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Endpoint))) + i += copy(dAtA[i:], m.Endpoint) + } + if len(m.SupportedVersions) > 0 { + for _, s := range m.SupportedVersions { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *RegistrationStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegistrationStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PluginRegistered { + dAtA[i] = 0x8 + i++ + if m.PluginRegistered { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Error) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func (m *RegistrationStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RegistrationStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeFixed64Api(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Api(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginInfo) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Endpoint) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.SupportedVersions) > 0 { + for _, s := range m.SupportedVersions { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *RegistrationStatus) Size() (n int) { + var l int + _ = l + if m.PluginRegistered { + n += 2 + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RegistrationStatusResponse) Size() (n int) { + var l int + _ = l + return n +} + +func 
(m *InfoRequest) Size() (n int) { + var l int + _ = l + return n +} + +func sovApi(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PluginInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PluginInfo{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + `SupportedVersions:` + fmt.Sprintf("%v", this.SupportedVersions) + `,`, + `}`, + }, "") + return s +} +func (this *RegistrationStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegistrationStatus{`, + `PluginRegistered:` + fmt.Sprintf("%v", this.PluginRegistered) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *RegistrationStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RegistrationStatusResponse{`, + `}`, + }, "") + return s +} +func (this *InfoRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InfoRequest{`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PluginInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupportedVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SupportedVersions = append(m.SupportedVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegistrationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegistrationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegistrationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PluginRegistered", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PluginRegistered = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RegistrationStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RegistrationStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RegistrationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return 
iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } + +var fileDescriptorApi = []byte{ + // 337 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x41, 0x4b, 0x33, 0x31, + 0x14, 0xdc, 0x7c, 0xed, 0xa7, 0xed, 0x53, 0xc1, 0x06, 0x0f, 0xcb, 0x52, 0x62, 0xd9, 0x83, 0x14, + 0xa4, 0x5b, 0xd0, 0x7f, 0xe0, 0x45, 0x04, 0x11, 0x89, 0xa0, 0xc7, 0xb2, 0xb5, 0xaf, 0x6b, 0xc0, + 0x26, 0x31, 0xc9, 0x0a, 0x3d, 0xe9, 0x4f, 0xf0, 0x67, 0xf5, 0x28, 0x9e, 0x3c, 0xda, 0xf5, 0x8f, + 0x48, 0xb3, 0x65, 0x2d, 0xb4, 0x07, 0x6f, 0x6f, 0xe6, 0x4d, 0x1e, 0x33, 0x43, 0xa0, 0x99, 0x6a, + 0x91, 0x68, 0xa3, 0x9c, 0xa2, 0x54, 0x3f, 0xe6, 0x99, 0x90, 0x06, 0x33, 0x61, 0x9d, 0x49, 0x9d, + 0x50, 0x32, 0xea, 0x65, 0xc2, 0x3d, 0xe4, 0xc3, 0xe4, 0x5e, 0x4d, 0xfa, 0x99, 0xca, 0x54, 0xdf, + 0x4b, 0x87, 0xf9, 0xd8, 0x23, 0x0f, 0xfc, 0x54, 0x9e, 0x88, 0x5f, 0x00, 0xae, 0xfd, 0x91, 0x0b, + 0x39, 0x56, 0x94, 0x42, 0xdd, 0x4d, 0x35, 0x86, 0xa4, 0x43, 0xba, 0x4d, 0xee, 0xe7, 0x05, 0x27, + 0xd3, 0x09, 0x86, 0xff, 0x4a, 0x6e, 0x31, 0xd3, 0x08, 0x1a, 0x28, 0x47, 0x5a, 0x09, 0xe9, 0xc2, + 0x9a, 0xe7, 0x2b, 0x4c, 0x7b, 0x40, 0x6d, 0xae, 0xb5, 0x32, 0x0e, 0x47, 0x83, 0x67, 0x34, 0x56, + 0x28, 0x69, 0xc3, 0x7a, 0xa7, 0xd6, 0x6d, 0xf2, 0x56, 0xb5, 0xb9, 0x5d, 0x2e, 0xe2, 0x3b, 0xa0, + 0x7c, 0xc5, 0xff, 0x8d, 0x4b, 0x5d, 0x6e, 0xe9, 0x31, 0xb4, 0xca, 0x6c, 0x83, 0x32, 0x1c, 0x1a, + 0x1c, 0x79, 0x57, 0x0d, 0xbe, 0x5f, 0x2e, 0x78, 0xc5, 0xd3, 0x03, 0xf8, 0x8f, 0xc6, 0x28, 0xb3, + 0xb4, 0x58, 0x82, 0xb8, 0x0d, 0xd1, 0xfa, 0x61, 0x8e, 0x56, 0x2b, 0x69, 0x31, 0xde, 0x83, 0x9d, + 0x45, 0x62, 0x8e, 0x4f, 0x39, 0x5a, 0x77, 0xf2, 0x41, 0x60, 0x77, 0x55, 0x4d, 0x2f, 0x61, 0xfb, + 0x1c, 0x9d, 0x2f, 0xe5, 0x30, 0x59, 0xaf, 0x39, 0x59, 0x79, 0x1c, 0xb1, 0x4d, 0x82, 0xdf, 0x56, + 0xe3, 0x80, 0x3a, 0x08, 0xaf, 0x94, 0x13, 0xe3, 0xe9, 0x86, 0xa8, 0x47, 0x9b, 0x5e, 0xaf, 0xeb, + 0xa2, 0xe4, 0x6f, 0xba, 0x2a, 0x61, 0x70, 0xd6, 0x9e, 0xcd, 0x19, 0xf9, 0x9c, 0xb3, 0xe0, 0xb5, + 0x60, 0x64, 0x56, 0x30, 0xf2, 0x5e, 0x30, 0xf2, 0x55, 0x30, 0xf2, 0xf6, 0xcd, 0x82, 0xe1, 0x96, + 0xff, 0x00, 0xa7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe4, 0xc0, 0xe3, 0x42, 0x50, 0x02, 0x00, + 0x00, +} diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go new file mode 100644 index 00000000000..671e3df493b --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.pb.go @@ -0,0 +1,632 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: api.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + api.proto + + It has these top-level messages: + ExampleRequest + ExampleResponse +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ExampleRequest struct { + Request string `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + V1Beta1Field string `protobuf:"bytes,2,opt,name=v1beta1_field,json=v1beta1Field,proto3" json:"v1beta1_field,omitempty"` +} + +func (m *ExampleRequest) Reset() { *m = ExampleRequest{} } +func (*ExampleRequest) ProtoMessage() {} +func (*ExampleRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } + +func (m *ExampleRequest) GetRequest() string { + if m != nil { + return m.Request + } + return "" +} + +func (m *ExampleRequest) GetV1Beta1Field() string { + if m != nil { + return m.V1Beta1Field + } + return "" +} + +type ExampleResponse struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *ExampleResponse) Reset() { *m = ExampleResponse{} } +func (*ExampleResponse) ProtoMessage() {} +func (*ExampleResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } + +func (m *ExampleResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterType((*ExampleRequest)(nil), "v1beta1.ExampleRequest") + proto.RegisterType((*ExampleResponse)(nil), "v1beta1.ExampleResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Example service + +type ExampleClient interface { + GetExampleInfo(ctx context.Context, in *ExampleRequest, opts ...grpc.CallOption) (*ExampleResponse, error) +} + +type exampleClient struct { + cc *grpc.ClientConn +} + +func NewExampleClient(cc *grpc.ClientConn) ExampleClient { + return &exampleClient{cc} +} + +func (c *exampleClient) GetExampleInfo(ctx context.Context, in *ExampleRequest, opts ...grpc.CallOption) (*ExampleResponse, error) { + out := new(ExampleResponse) + err := grpc.Invoke(ctx, "/v1beta1.Example/GetExampleInfo", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Example service + +type ExampleServer interface { + GetExampleInfo(context.Context, *ExampleRequest) (*ExampleResponse, error) +} + +func RegisterExampleServer(s *grpc.Server, srv ExampleServer) { + s.RegisterService(&_Example_serviceDesc, srv) +} + +func _Example_GetExampleInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExampleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExampleServer).GetExampleInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta1.Example/GetExampleInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExampleServer).GetExampleInfo(ctx, req.(*ExampleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Example_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v1beta1.Example", + HandlerType: (*ExampleServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetExampleInfo", + Handler: _Example_GetExampleInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +func (m *ExampleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExampleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Request) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Request))) + i += copy(dAtA[i:], m.Request) + } + if len(m.V1Beta1Field) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.V1Beta1Field))) + i += copy(dAtA[i:], m.V1Beta1Field) + } + return i, nil +} + +func (m *ExampleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExampleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Error) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func encodeFixed64Api(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Api(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ExampleRequest) Size() (n int) { + var l int + _ = l + l = len(m.Request) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.V1Beta1Field) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ExampleResponse) Size() (n int) { + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func sovApi(x uint64) (n int) { + for { + n++ + x 
>>= 7 + if x == 0 { + break + } + } + return n +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ExampleRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExampleRequest{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `V1Beta1Field:` + fmt.Sprintf("%v", this.V1Beta1Field) + `,`, + `}`, + }, "") + return s +} +func (this *ExampleResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExampleResponse{`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExampleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExampleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExampleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field V1Beta1Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.V1Beta1Field = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExampleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: ExampleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExampleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } + +var fileDescriptorApi = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0x94, 0xd2, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, + 0x07, 0xcb, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 
0x98, 0x05, 0xd1, 0xa7, 0xe4, 0xcf, 0xc5, + 0xe7, 0x5a, 0x91, 0x98, 0x5b, 0x90, 0x93, 0x1a, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x24, + 0xc1, 0xc5, 0x5e, 0x04, 0x61, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x42, 0xca, + 0x5c, 0xbc, 0x50, 0x5b, 0xe2, 0xd3, 0x32, 0x53, 0x73, 0x52, 0x24, 0x98, 0xc0, 0xf2, 0x3c, 0x50, + 0x41, 0x37, 0x90, 0x98, 0x92, 0x3a, 0x17, 0x3f, 0xdc, 0xc0, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, + 0x21, 0x11, 0x2e, 0xd6, 0xd4, 0xa2, 0xa2, 0xfc, 0x22, 0xa8, 0x79, 0x10, 0x8e, 0x51, 0x00, 0x17, + 0x3b, 0x54, 0xa1, 0x90, 0x2b, 0x17, 0x9f, 0x7b, 0x6a, 0x09, 0x94, 0xe7, 0x99, 0x97, 0x96, 0x2f, + 0x24, 0xae, 0x07, 0x35, 0x54, 0x0f, 0xd5, 0x75, 0x52, 0x12, 0x98, 0x12, 0x10, 0x5b, 0x94, 0x18, + 0x9c, 0x64, 0x4e, 0x3c, 0x94, 0x63, 0xbc, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, + 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, + 0x12, 0x1b, 0xd8, 0xc3, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0x62, 0xd1, 0x9c, 0x35, + 0x01, 0x00, 0x00, +} diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go new file mode 100644 index 00000000000..0c63b31429f --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.pb.go @@ -0,0 +1,633 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: api.proto +// DO NOT EDIT! + +/* + Package v1beta2 is a generated protocol buffer package. + + It is generated from these files: + api.proto + + It has these top-level messages: + ExampleRequest + ExampleResponse +*/ +package v1beta2 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Renames a field from v1beta1 ExampleRequest. 
+type ExampleRequest struct { + Request string `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + V1Beta2Field string `protobuf:"bytes,2,opt,name=v1beta2_field,json=v1beta2Field,proto3" json:"v1beta2_field,omitempty"` +} + +func (m *ExampleRequest) Reset() { *m = ExampleRequest{} } +func (*ExampleRequest) ProtoMessage() {} +func (*ExampleRequest) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } + +func (m *ExampleRequest) GetRequest() string { + if m != nil { + return m.Request + } + return "" +} + +func (m *ExampleRequest) GetV1Beta2Field() string { + if m != nil { + return m.V1Beta2Field + } + return "" +} + +type ExampleResponse struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *ExampleResponse) Reset() { *m = ExampleResponse{} } +func (*ExampleResponse) ProtoMessage() {} +func (*ExampleResponse) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } + +func (m *ExampleResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterType((*ExampleRequest)(nil), "v1beta2.ExampleRequest") + proto.RegisterType((*ExampleResponse)(nil), "v1beta2.ExampleResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Example service + +type ExampleClient interface { + GetExampleInfo(ctx context.Context, in *ExampleRequest, opts ...grpc.CallOption) (*ExampleResponse, error) +} + +type exampleClient struct { + cc *grpc.ClientConn +} + +func NewExampleClient(cc *grpc.ClientConn) ExampleClient { + return &exampleClient{cc} +} + +func (c *exampleClient) GetExampleInfo(ctx context.Context, in *ExampleRequest, opts ...grpc.CallOption) (*ExampleResponse, error) { + out := new(ExampleResponse) + err := grpc.Invoke(ctx, "/v1beta2.Example/GetExampleInfo", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Example service + +type ExampleServer interface { + GetExampleInfo(context.Context, *ExampleRequest) (*ExampleResponse, error) +} + +func RegisterExampleServer(s *grpc.Server, srv ExampleServer) { + s.RegisterService(&_Example_serviceDesc, srv) +} + +func _Example_GetExampleInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExampleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExampleServer).GetExampleInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1beta2.Example/GetExampleInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExampleServer).GetExampleInfo(ctx, req.(*ExampleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Example_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v1beta2.Example", + HandlerType: (*ExampleServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetExampleInfo", + Handler: _Example_GetExampleInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +func (m *ExampleRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExampleRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Request) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Request))) + i += copy(dAtA[i:], m.Request) + } + if len(m.V1Beta2Field) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.V1Beta2Field))) + i += copy(dAtA[i:], m.V1Beta2Field) + } + return i, nil +} + +func (m *ExampleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExampleResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Error) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + } + return i, nil +} + +func encodeFixed64Api(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Api(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ExampleRequest) Size() (n int) { + var l int + _ = l + l = len(m.Request) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.V1Beta2Field) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ExampleResponse) Size() (n int) { + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func sovApi(x uint64) (n int) { + for { + n++ + x 
>>= 7 + if x == 0 { + break + } + } + return n +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ExampleRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExampleRequest{`, + `Request:` + fmt.Sprintf("%v", this.Request) + `,`, + `V1Beta2Field:` + fmt.Sprintf("%v", this.V1Beta2Field) + `,`, + `}`, + }, "") + return s +} +func (this *ExampleResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExampleResponse{`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExampleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExampleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExampleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field V1Beta2Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.V1Beta2Field = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExampleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: ExampleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExampleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("api.proto", fileDescriptorApi) } + +var fileDescriptorApi = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0x2c, 0xc8, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0x92, 0xd2, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, + 0x07, 0xcb, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 
0x98, 0x05, 0xd1, 0xa7, 0xe4, 0xcf, 0xc5, + 0xe7, 0x5a, 0x91, 0x98, 0x5b, 0x90, 0x93, 0x1a, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x24, + 0xc1, 0xc5, 0x5e, 0x04, 0x61, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x42, 0xca, + 0x5c, 0xbc, 0x50, 0x5b, 0xe2, 0xd3, 0x32, 0x53, 0x73, 0x52, 0x24, 0x98, 0xc0, 0xf2, 0x3c, 0x50, + 0x41, 0x37, 0x90, 0x98, 0x92, 0x3a, 0x17, 0x3f, 0xdc, 0xc0, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, + 0x21, 0x11, 0x2e, 0xd6, 0xd4, 0xa2, 0xa2, 0xfc, 0x22, 0xa8, 0x79, 0x10, 0x8e, 0x51, 0x00, 0x17, + 0x3b, 0x54, 0xa1, 0x90, 0x2b, 0x17, 0x9f, 0x7b, 0x6a, 0x09, 0x94, 0xe7, 0x99, 0x97, 0x96, 0x2f, + 0x24, 0xae, 0x07, 0x35, 0x54, 0x0f, 0xd5, 0x75, 0x52, 0x12, 0x98, 0x12, 0x10, 0x5b, 0x94, 0x18, + 0x9c, 0x64, 0x4e, 0x3c, 0x94, 0x63, 0xbc, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, + 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, + 0x12, 0x1b, 0xd8, 0xc3, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0x79, 0x17, 0x13, 0x35, + 0x01, 0x00, 0x00, +} From 3a2e3bcc70ef9810b871db8bcf0599c4712f6bc2 Mon Sep 17 00:00:00 2001 From: vikaschoudhary16 Date: Tue, 1 May 2018 02:15:06 -0400 Subject: [PATCH 251/307] Add probe based mechanism for kubelet plugin discovery --- hack/.golint_failures | 4 + ...-kubelet-plugin-registration-dockerized.sh | 29 ++ ...e-generated-kubelet-plugin-registration.sh | 27 ++ ...y-generated-kubelet-plugin-registration.sh | 39 +++ pkg/kubelet/BUILD | 1 + pkg/kubelet/apis/BUILD | 1 + .../apis/pluginregistration/v1alpha1/BUILD | 40 +++ .../pluginregistration/v1alpha1/api.proto | 60 ++++ .../pluginregistration/v1alpha1/constants.go | 22 ++ pkg/kubelet/kubelet.go | 12 + pkg/kubelet/util/BUILD | 2 + pkg/kubelet/util/pluginwatcher/BUILD | 58 ++++ pkg/kubelet/util/pluginwatcher/README | 29 ++ .../util/pluginwatcher/example_plugin.go | 150 ++++++++++ .../example_plugin_apis/v1beta1/BUILD | 34 +++ .../example_plugin_apis/v1beta1/api.proto | 28 ++ .../example_plugin_apis/v1beta2/BUILD | 34 +++ .../example_plugin_apis/v1beta2/api.proto | 29 ++ .../util/pluginwatcher/plugin_watcher.go | 260 ++++++++++++++++++ .../util/pluginwatcher/plugin_watcher_test.go | 220 +++++++++++++++ 20 files changed, 1079 insertions(+) create mode 100755 hack/update-generated-kubelet-plugin-registration-dockerized.sh create mode 100755 hack/update-generated-kubelet-plugin-registration.sh create mode 100755 hack/verify-generated-kubelet-plugin-registration.sh create mode 100644 pkg/kubelet/apis/pluginregistration/v1alpha1/BUILD create mode 100644 pkg/kubelet/apis/pluginregistration/v1alpha1/api.proto create mode 100644 pkg/kubelet/apis/pluginregistration/v1alpha1/constants.go create mode 100644 pkg/kubelet/util/pluginwatcher/BUILD create mode 100644 pkg/kubelet/util/pluginwatcher/README create mode 100644 pkg/kubelet/util/pluginwatcher/example_plugin.go create mode 100644 pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/BUILD create mode 100644 pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.proto create mode 100644 pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/BUILD create mode 100644 pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.proto create mode 100644 pkg/kubelet/util/pluginwatcher/plugin_watcher.go create mode 100644 pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go diff --git a/hack/.golint_failures b/hack/.golint_failures index 2b2798e46a7..0853347106b 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -166,6 +166,7 @@ 
pkg/kubelet/apis/deviceplugin/v1alpha pkg/kubelet/apis/deviceplugin/v1beta1 pkg/kubelet/apis/kubeletconfig pkg/kubelet/apis/kubeletconfig/v1beta1 +pkg/kubelet/apis/pluginregistration/v1alpha1 pkg/kubelet/cadvisor pkg/kubelet/cadvisor/testing pkg/kubelet/checkpoint @@ -217,6 +218,9 @@ pkg/kubelet/sysctl pkg/kubelet/types pkg/kubelet/util pkg/kubelet/util/cache +pkg/kubelet/util/pluginwatcher +pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1 +pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2 pkg/kubelet/util/queue pkg/kubelet/util/sliceutils pkg/kubemark diff --git a/hack/update-generated-kubelet-plugin-registration-dockerized.sh b/hack/update-generated-kubelet-plugin-registration-dockerized.sh new file mode 100755 index 00000000000..daf5abbd36a --- /dev/null +++ b/hack/update-generated-kubelet-plugin-registration-dockerized.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../" && pwd -P)" +KUBELET_PLUGIN_REGISTRATION_ROOT="${KUBE_ROOT}/pkg/kubelet/apis/pluginregistration/v1alpha1/" +KUBELET_EXAMPLE_PLUGIN_V1BETA1="${KUBE_ROOT}/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/" +KUBELET_EXAMPLE_PLUGIN_V1BETA2="${KUBE_ROOT}/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/" + +source "${KUBE_ROOT}/hack/lib/protoc.sh" +kube::protoc::generate_proto ${KUBELET_PLUGIN_REGISTRATION_ROOT} +kube::protoc::generate_proto ${KUBELET_EXAMPLE_PLUGIN_V1BETA1} +kube::protoc::generate_proto ${KUBELET_EXAMPLE_PLUGIN_V1BETA2} diff --git a/hack/update-generated-kubelet-plugin-registration.sh b/hack/update-generated-kubelet-plugin-registration.sh new file mode 100755 index 00000000000..308733c0246 --- /dev/null +++ b/hack/update-generated-kubelet-plugin-registration.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. + +# NOTE: All output from this script needs to be copied back to the calling +# source tree. This is managed in kube::build::copy_output in build/common.sh. +# If the output set is changed update that function. 
+ +${KUBE_ROOT}/build/run.sh hack/update-generated-kubelet-plugin-registration-dockerized.sh "$@" diff --git a/hack/verify-generated-kubelet-plugin-registration.sh b/hack/verify-generated-kubelet-plugin-registration.sh new file mode 100755 index 00000000000..3dfffa8dcc0 --- /dev/null +++ b/hack/verify-generated-kubelet-plugin-registration.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +ERROR="Kubelet Plugin Registration api is out of date. Please run hack/update-generated-kubelet-plugin-registration.sh" +KUBELET_PLUGIN_REGISTRATION_ROOT="${KUBE_ROOT}/pkg/kubelet/apis/pluginregistration/v1alpha1/" + +source "${KUBE_ROOT}/hack/lib/protoc.sh" +kube::golang::setup_env + +function cleanup { + rm -rf ${KUBELET_PLUGIN_REGISTRATION_ROOT}/_tmp/ +} + +trap cleanup EXIT + +mkdir -p ${KUBELET_PLUGIN_REGISTRATION_ROOT}/_tmp +cp ${KUBELET_PLUGIN_REGISTRATION_ROOT}/api.pb.go ${KUBELET_PLUGIN_REGISTRATION_ROOT}/_tmp/ + +KUBE_VERBOSE=3 "${KUBE_ROOT}/hack/update-generated-kubelet-plugin-registration.sh" +kube::protoc::diff "${KUBELET_PLUGIN_REGISTRATION_ROOT}/api.pb.go" "${KUBELET_PLUGIN_REGISTRATION_ROOT}/_tmp/api.pb.go" ${ERROR} +echo "Generated Kubelet Plugin Registration api is up to date." 
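As a hedged illustration of how the generation scripts above are meant to be used (the exact invocation is not part of this patch; the working directory is assumed to be the repository root), a typical local workflow would be:

    $ hack/update-generated-kubelet-plugin-registration.sh   # regenerates api.pb.go for the registration API and the example plugin APIs inside the build container
    $ hack/verify-generated-kubelet-plugin-registration.sh   # prints the "out of date" error above if the checked-in api.pb.go no longer matches the .proto sources
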
diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index b4d6d166fa9..710af32b7a3 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -84,6 +84,7 @@ go_library( "//pkg/kubelet/util:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/util/manager:go_default_library", + "//pkg/kubelet/util/pluginwatcher:go_default_library", "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", "//pkg/kubelet/volumemanager:go_default_library", diff --git a/pkg/kubelet/apis/BUILD b/pkg/kubelet/apis/BUILD index 47cb8184ccb..2a22e48121a 100644 --- a/pkg/kubelet/apis/BUILD +++ b/pkg/kubelet/apis/BUILD @@ -41,6 +41,7 @@ filegroup( "//pkg/kubelet/apis/deviceplugin/v1alpha:all-srcs", "//pkg/kubelet/apis/deviceplugin/v1beta1:all-srcs", "//pkg/kubelet/apis/kubeletconfig:all-srcs", + "//pkg/kubelet/apis/pluginregistration/v1alpha1:all-srcs", "//pkg/kubelet/apis/stats/v1alpha1:all-srcs", ], tags = ["automanaged"], diff --git a/pkg/kubelet/apis/pluginregistration/v1alpha1/BUILD b/pkg/kubelet/apis/pluginregistration/v1alpha1/BUILD new file mode 100644 index 00000000000..f51668500bb --- /dev/null +++ b/pkg/kubelet/apis/pluginregistration/v1alpha1/BUILD @@ -0,0 +1,40 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "api.pb.go", + "constants.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1", + deps = [ + "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["api.proto"], + visibility = ["//visibility:public"], +) diff --git a/pkg/kubelet/apis/pluginregistration/v1alpha1/api.proto b/pkg/kubelet/apis/pluginregistration/v1alpha1/api.proto new file mode 100644 index 00000000000..319b3f19fb7 --- /dev/null +++ b/pkg/kubelet/apis/pluginregistration/v1alpha1/api.proto @@ -0,0 +1,60 @@ +// To regenerate api.pb.go run hack/update-generated-kubelet-plugin-registration.sh +syntax = 'proto3'; + +package pluginregistration; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_getters_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_unrecognized_all) = false; + +// PluginInfo is the message sent from a plugin to the Kubelet pluginwatcher for plugin registration +message PluginInfo { + // Type of the Plugin. CSIPlugin or DevicePlugin + string type = 1; + // Plugin name that uniquely identifies the plugin for the given plugin type. + // For DevicePlugin, this is the resource name that the plugin manages and + // should follow the extended resource name convention. + // For CSI, this is the CSI driver registrar name. + string name = 2; + // Optional endpoint location. If found set by Kubelet component, + // Kubelet component will use this endpoint for specific requests. 
+ // This allows the plugin to register using one endpoint and possibly use + // a different socket for control operations. CSI uses this model to delegate + // its registration external from the plugin. + string endpoint = 3; + // Plugin service API versions the plugin supports. + // For DevicePlugin, this maps to the deviceplugin API versions the + // plugin supports at the given socket. + // The Kubelet component communicating with the plugin should be able + // to choose any preferred version from this list, or returns an error + // if none of the listed versions is supported. + repeated string supported_versions = 4; +} + +// RegistrationStatus is the message sent from Kubelet pluginwatcher to the plugin for notification on registration status +message RegistrationStatus { + // True if plugin gets registered successfully at Kubelet + bool plugin_registered = 1; + // Error message in case plugin fails to register, empty string otherwise + string error = 2; +} + +// RegistrationStatusResponse is sent by plugin to kubelet in response to RegistrationStatus RPC +message RegistrationStatusResponse { +} + +// InfoRequest is the empty request message from Kubelet +message InfoRequest { +} + +// Registration is the service advertised by the Plugins. +service Registration { + rpc GetInfo(InfoRequest) returns (PluginInfo) {} + rpc NotifyRegistrationStatus(RegistrationStatus) returns (RegistrationStatusResponse) {} +} diff --git a/pkg/kubelet/apis/pluginregistration/v1alpha1/constants.go b/pkg/kubelet/apis/pluginregistration/v1alpha1/constants.go new file mode 100644 index 00000000000..cfc1b7c6d7c --- /dev/null +++ b/pkg/kubelet/apis/pluginregistration/v1alpha1/constants.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pluginregistration + +const ( + CSIPlugin = "CSIPlugin" + DevicePlugin = "DevicePlugin" +) diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 421c0c98bea..9194c1bbb9c 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -93,6 +93,7 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/util/manager" + "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" "k8s.io/kubernetes/pkg/kubelet/util/queue" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/kubelet/volumemanager" @@ -775,6 +776,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, if err != nil { return nil, err } + klet.pluginWatcher = pluginwatcher.NewWatcher(klet.getPluginsDir()) // If the experimentalMounterPathFlag is set, we do not want to // check node capabilities since the mount path is not the default @@ -1150,6 +1152,11 @@ type Kubelet struct { // This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node. // This can be useful for debugging volume related issues. 
keepTerminatedPodVolumes bool // DEPRECATED + + // pluginwatcher is a utility for Kubelet to register different types of node-level plugins + // such as device plugins or CSI plugins. It discovers plugins by monitoring inotify events under the + // directory returned by kubelet.getPluginsDir() + pluginWatcher pluginwatcher.Watcher } func allGlobalUnicastIPs() ([]net.IP, error) { @@ -1264,6 +1271,11 @@ func (kl *Kubelet) initializeModules() error { } } + // Start the plugin watcher + if err := kl.pluginWatcher.Start(); err != nil { + return fmt.Errorf("failed to start Plugin Watcher. err: %v", err) + } + // Start the image manager. kl.imageManager.Start() diff --git a/pkg/kubelet/util/BUILD b/pkg/kubelet/util/BUILD index df02ebdcd25..ff1755ebd11 100644 --- a/pkg/kubelet/util/BUILD +++ b/pkg/kubelet/util/BUILD @@ -93,9 +93,11 @@ filegroup( "//pkg/kubelet/util/format:all-srcs", "//pkg/kubelet/util/ioutils:all-srcs", "//pkg/kubelet/util/manager:all-srcs", + "//pkg/kubelet/util/pluginwatcher:all-srcs", "//pkg/kubelet/util/queue:all-srcs", "//pkg/kubelet/util/sliceutils:all-srcs", "//pkg/kubelet/util/store:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/pkg/kubelet/util/pluginwatcher/BUILD b/pkg/kubelet/util/pluginwatcher/BUILD new file mode 100644 index 00000000000..b4173ab5e1c --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/BUILD @@ -0,0 +1,58 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "example_plugin.go", + "plugin_watcher.go", + ], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher", + deps = [ + "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library", + "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1:go_default_library", + "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2:go_default_library", + "//pkg/util/filesystem:go_default_library", + "//vendor/github.com/fsnotify/fsnotify:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1:all-srcs", + "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["plugin_watcher_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library", + "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1:go_default_library", + "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + ], +) diff --git a/pkg/kubelet/util/pluginwatcher/README b/pkg/kubelet/util/pluginwatcher/README new file mode 100644 index 00000000000..9654b2cf62a --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/README @@ -0,0 +1,29 @@ +This folder contains a utility, pluginwatcher, for Kubelet to register +different types of 
node-level plugins such as device plugins or CSI plugins.
+It discovers plugins by monitoring inotify events under the directory returned by
+kubelet.getPluginsDir(). Let's refer to this directory as PluginsSockDir.
+For any discovered plugin, pluginwatcher issues a Registration.GetInfo grpc call
+to get the plugin type, name and supported service API versions. For any registered plugin type,
+pluginwatcher calls the registered callback function with the received plugin
+name, supported service API versions, and the full socket path. The Kubelet
+component that receives this callback can acknowledge or reject the plugin
+according to its own logic, and use the socket path to establish its service
+communication with any API version supported by the plugin.
+
+Here are the general rules that Kubelet plugin developers should follow:
+- Run as the 'root' user. Currently, creating a socket under PluginsSockDir, a root-owned directory, requires
+  the plugin process to be running as 'root'.
+- Implement the Registration service specified in
+  pkg/kubelet/apis/pluginregistration/v*/api.proto.
+- The plugin name returned by Registration.GetInfo should be unique
+  for the given plugin type (CSIPlugin or DevicePlugin).
+- The socket path needs to be unique and must not conflict with the path chosen
+  by any other plugin. Currently we only support a flat fs namespace
+  under PluginsSockDir but will soon support a recursive inotify watch for
+  hierarchical socket paths.
+- A plugin should clean up its own socket upon exiting or when a new instance
+  comes up. A plugin should NOT remove any sockets belonging to other plugins.
+- A plugin should make sure it has a service ready for every supported service API
+  version listed in the PluginInfo.
+- For an example plugin implementation, take a look at example_plugin.go
+  included in this directory.
diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin.go b/pkg/kubelet/util/pluginwatcher/example_plugin.go
new file mode 100644
index 00000000000..fbca43acad5
--- /dev/null
+++ b/pkg/kubelet/util/pluginwatcher/example_plugin.go
@@ -0,0 +1,150 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package pluginwatcher + +import ( + "fmt" + "net" + "sync" + "time" + + "github.com/golang/glog" + "golang.org/x/net/context" + "google.golang.org/grpc" + + registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" + v1beta1 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1" + v1beta2 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2" +) + +const ( + PluginName = "example-plugin" + PluginType = "example-plugin-type" +) + +// examplePlugin is a sample plugin to work with plugin watcher +type examplePlugin struct { + grpcServer *grpc.Server + wg sync.WaitGroup + registrationStatus chan registerapi.RegistrationStatus // for testing + endpoint string // for testing +} + +type pluginServiceV1Beta1 struct { + server *examplePlugin +} + +func (s *pluginServiceV1Beta1) GetExampleInfo(ctx context.Context, rqt *v1beta1.ExampleRequest) (*v1beta1.ExampleResponse, error) { + glog.Infof("GetExampleInfo v1beta1field: %s", rqt.V1Beta1Field) + return &v1beta1.ExampleResponse{}, nil +} + +func (s *pluginServiceV1Beta1) RegisterService() { + v1beta1.RegisterExampleServer(s.server.grpcServer, s) +} + +type pluginServiceV1Beta2 struct { + server *examplePlugin +} + +func (s *pluginServiceV1Beta2) GetExampleInfo(ctx context.Context, rqt *v1beta2.ExampleRequest) (*v1beta2.ExampleResponse, error) { + glog.Infof("GetExampleInfo v1beta2_field: %s", rqt.V1Beta2Field) + return &v1beta2.ExampleResponse{}, nil +} + +func (s *pluginServiceV1Beta2) RegisterService() { + v1beta2.RegisterExampleServer(s.server.grpcServer, s) +} + +// NewExamplePlugin returns an initialized examplePlugin instance +func NewExamplePlugin() *examplePlugin { + return &examplePlugin{} +} + +// NewTestExamplePlugin returns an initialized examplePlugin instance for testing +func NewTestExamplePlugin(endpoint string) *examplePlugin { + return &examplePlugin{ + registrationStatus: make(chan registerapi.RegistrationStatus), + endpoint: endpoint, + } +} + +// GetInfo is the RPC invoked by plugin watcher +func (e *examplePlugin) GetInfo(ctx context.Context, req *registerapi.InfoRequest) (*registerapi.PluginInfo, error) { + return ®isterapi.PluginInfo{ + Type: PluginType, + Name: PluginName, + Endpoint: e.endpoint, + SupportedVersions: []string{"v1beta1", "v1beta2"}, + }, nil +} + +func (e *examplePlugin) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) { + if e.registrationStatus != nil { + e.registrationStatus <- *status + } + if !status.PluginRegistered { + glog.Errorf("Registration failed: %s\n", status.Error) + } + return ®isterapi.RegistrationStatusResponse{}, nil +} + +// Serve starts example plugin grpc server +func (e *examplePlugin) Serve(socketPath string) error { + glog.Infof("starting example server at: %s\n", socketPath) + lis, err := net.Listen("unix", socketPath) + if err != nil { + return err + } + glog.Infof("example server started at: %s\n", socketPath) + e.grpcServer = grpc.NewServer() + // Registers kubelet plugin watcher api. + registerapi.RegisterRegistrationServer(e.grpcServer, e) + // Registers services for both v1beta1 and v1beta2 versions. + v1beta1 := &pluginServiceV1Beta1{server: e} + v1beta1.RegisterService() + v1beta2 := &pluginServiceV1Beta2{server: e} + v1beta2.RegisterService() + + // Starts service + e.wg.Add(1) + go func() { + defer e.wg.Done() + // Blocking call to accept incoming connections. 
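+ // Note: by the time this goroutine observes a failure, Serve() has
+ // typically already returned, so the error can only be logged here;
+ // callers are not notified of it.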
+ if err := e.grpcServer.Serve(lis); err != nil { + glog.Errorf("example server stopped serving: %v", err) + } + }() + return nil +} + +func (e *examplePlugin) Stop() error { + glog.Infof("Stopping example server\n") + e.grpcServer.Stop() + c := make(chan struct{}) + go func() { + defer close(c) + e.wg.Wait() + }() + select { + case <-c: + return nil + case <-time.After(time.Second): + glog.Errorf("Timed out on waiting for stop completion") + return fmt.Errorf("Timed out on waiting for stop completion") + } +} diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/BUILD b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/BUILD new file mode 100644 index 00000000000..affbd0aee4a --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["api.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["api.pb.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.proto b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.proto new file mode 100644 index 00000000000..14aa7df2c4d --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1/api.proto @@ -0,0 +1,28 @@ +syntax = 'proto3'; + +package v1beta1; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_getters_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_unrecognized_all) = false; + +message ExampleRequest { + string request = 1; + string v1beta1_field = 2; +} + +message ExampleResponse { + string error = 1; +} + +// Example is a simple example service for general reference on the recommended +// kubelet plugin model and plugin watcher testing. 
+service Example { + rpc GetExampleInfo(ExampleRequest) returns (ExampleResponse) {} +} diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/BUILD b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/BUILD new file mode 100644 index 00000000000..f2b53898d38 --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/BUILD @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +filegroup( + name = "go_default_library_protos", + srcs = ["api.proto"], + visibility = ["//visibility:public"], +) + +go_library( + name = "go_default_library", + srcs = ["api.pb.go"], + importpath = "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/gogo/protobuf/gogoproto:go_default_library", + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/golang.org/x/net/context:go_default_library", + "//vendor/google.golang.org/grpc:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.proto b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.proto new file mode 100644 index 00000000000..e34697f3a66 --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2/api.proto @@ -0,0 +1,29 @@ +syntax = 'proto3'; + +package v1beta2; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_getters_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_unrecognized_all) = false; + +// Renames a field from v1beta1 ExampleRequest. +message ExampleRequest { + string request = 1; + string v1beta2_field = 2; +} + +message ExampleResponse { + string error = 1; +} + +// Example is a simple example service for general reference on the recommended +// kubelet plugin model and plugin watcher testing. +service Example { + rpc GetExampleInfo(ExampleRequest) returns (ExampleResponse) {} +} diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go new file mode 100644 index 00000000000..9a5241cb2e5 --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go @@ -0,0 +1,260 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pluginwatcher + +import ( + "fmt" + "net" + "os" + "path" + "path/filepath" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/golang/glog" + "golang.org/x/net/context" + "google.golang.org/grpc" + registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" + utilfs "k8s.io/kubernetes/pkg/util/filesystem" +) + +// RegisterCallbackFn is the type of the callback function that handlers will provide +type RegisterCallbackFn func(pluginName string, endpoint string, versions []string, socketPath string) (error, chan bool) + +// Watcher is the plugin watcher +type Watcher struct { + path string + handlers map[string]RegisterCallbackFn + stopCh chan interface{} + fs utilfs.Filesystem + watcher *fsnotify.Watcher + wg sync.WaitGroup + mutex sync.Mutex +} + +// NewWatcher provides a new watcher +func NewWatcher(sockDir string) Watcher { + return Watcher{ + path: sockDir, + handlers: make(map[string]RegisterCallbackFn), + fs: &utilfs.DefaultFs{}, + } +} + +// AddHandler registers a callback to be invoked for a particular type of plugin +func (w *Watcher) AddHandler(handlerType string, handlerCbkFn RegisterCallbackFn) { + w.mutex.Lock() + defer w.mutex.Unlock() + w.handlers[handlerType] = handlerCbkFn +} + +// Creates the plugin directory, if it doesn't already exist. +func (w *Watcher) createPluginDir() error { + glog.V(4).Infof("Ensuring Plugin directory at %s ", w.path) + if err := w.fs.MkdirAll(w.path, 0755); err != nil { + return fmt.Errorf("error (re-)creating driver directory: %s", err) + } + return nil +} + +// Walks through the plugin directory to discover any existing plugin sockets. +func (w *Watcher) traversePluginDir() error { + files, err := w.fs.ReadDir(w.path) + if err != nil { + return fmt.Errorf("error reading the plugin directory: %v", err) + } + for _, f := range files { + // Currently only supports flat fs namespace under the plugin directory. + // TODO: adds support for hierarchical fs namespace. + if !f.IsDir() && filepath.Base(f.Name())[0] != '.' { + go func(sockName string) { + w.watcher.Events <- fsnotify.Event{ + Name: sockName, + Op: fsnotify.Op(uint32(1)), + } + }(path.Join(w.path, f.Name())) + } + } + return nil +} + +func (w *Watcher) init() error { + if err := w.createPluginDir(); err != nil { + return err + } + return nil +} + +func (w *Watcher) registerPlugin(socketPath string) error { + //TODO: Implement rate limiting to mitigate any DOS kind of attacks. + glog.V(4).Infof("registerPlugin called for socketPath: %s", socketPath) + client, conn, err := dial(socketPath) + if err != nil { + return fmt.Errorf("dial failed at socket %s, err: %v", socketPath, err) + } + defer conn.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + infoResp, err := client.GetInfo(ctx, ®isterapi.InfoRequest{}) + if err != nil { + return fmt.Errorf("failed to get plugin info using RPC GetInfo at socket %s, err: %v", socketPath, err) + } + if err := w.invokeRegistrationCallbackAtHandler(ctx, client, infoResp, socketPath); err != nil { + return fmt.Errorf("failed to register plugin. 
Callback handler returned err: %v", err) + } + glog.V(4).Infof("Successfully registered plugin for plugin type: %s, name: %s, socket: %s", infoResp.Type, infoResp.Name, socketPath) + return nil +} + +func (w *Watcher) invokeRegistrationCallbackAtHandler(ctx context.Context, client registerapi.RegistrationClient, infoResp *registerapi.PluginInfo, socketPath string) error { + var handlerCbkFn RegisterCallbackFn + var ok bool + handlerCbkFn, ok = w.handlers[infoResp.Type] + if !ok { + if _, err := client.NotifyRegistrationStatus(ctx, ®isterapi.RegistrationStatus{ + PluginRegistered: false, + Error: fmt.Sprintf("No handler found registered for plugin type: %s, socket: %s", infoResp.Type, socketPath), + }); err != nil { + glog.Errorf("Failed to send registration status at socket %s, err: %v", socketPath, err) + } + return fmt.Errorf("no handler found registered for plugin type: %s, socket: %s", infoResp.Type, socketPath) + } + + var versions []string + for _, version := range infoResp.SupportedVersions { + versions = append(versions, version) + } + // calls handler callback to verify registration request + err, chanForAckOfNotification := handlerCbkFn(infoResp.Name, infoResp.Endpoint, versions, socketPath) + if err != nil { + if _, err := client.NotifyRegistrationStatus(ctx, ®isterapi.RegistrationStatus{ + PluginRegistered: false, + Error: fmt.Sprintf("Plugin registration failed with err: %v", err), + }); err != nil { + glog.Errorf("Failed to send registration status at socket %s, err: %v", socketPath, err) + } + chanForAckOfNotification <- false + return fmt.Errorf("plugin registration failed with err: %v", err) + } + + if _, err := client.NotifyRegistrationStatus(ctx, ®isterapi.RegistrationStatus{ + PluginRegistered: true, + }); err != nil { + return fmt.Errorf("failed to send registration status at socket %s, err: %v", socketPath, err) + } + chanForAckOfNotification <- true + return nil +} + +// Start watches for the creation of plugin sockets at the path +func (w *Watcher) Start() error { + glog.V(2).Infof("Plugin Watcher Start at %s", w.path) + w.stopCh = make(chan interface{}) + + // Creating the directory to be watched if it doesn't exist yet, + // and walks through the directory to discover the existing plugins. 
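+ // Note: w.init() only ensures the plugin directory exists; sockets that are
+ // already present are discovered by traversePluginDir() further below, after
+ // the fsnotify watch has been added, so plugins that registered while the
+ // watcher was down are not missed.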
+ if err := w.init(); err != nil { + return err + } + + watcher, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("failed to start plugin watcher, err: %v", err) + } + + if err := watcher.Add(w.path); err != nil { + watcher.Close() + return fmt.Errorf("failed to start plugin watcher, err: %v", err) + } + + w.watcher = watcher + + if err := w.traversePluginDir(); err != nil { + watcher.Close() + return fmt.Errorf("failed to traverse plugin socket path, err: %v", err) + } + + w.wg.Add(1) + go func(watcher *fsnotify.Watcher) { + defer w.wg.Done() + for { + select { + case event := <-watcher.Events: + if event.Op&fsnotify.Create == fsnotify.Create { + go func(eventName string) { + err := w.registerPlugin(eventName) + if err != nil { + glog.Errorf("Plugin %s registration failed with error: %v", eventName, err) + } + }(event.Name) + } + continue + case err := <-watcher.Errors: + //TODO: Handle errors by taking corrective measures + if err != nil { + glog.Errorf("Watcher received error: %v", err) + } + continue + + case <-w.stopCh: + watcher.Close() + break + } + break + } + }(watcher) + return nil +} + +// Stop stops probing the creation of plugin sockets at the path +func (w *Watcher) Stop() error { + close(w.stopCh) + c := make(chan struct{}) + go func() { + defer close(c) + w.wg.Wait() + }() + select { + case <-c: + case <-time.After(10 * time.Second): + return fmt.Errorf("timeout on stopping watcher") + } + return nil +} + +// Cleanup cleans the path by removing sockets +func (w *Watcher) Cleanup() error { + return os.RemoveAll(w.path) +} + +// Dial establishes the gRPC communication with the picked up plugin socket. https://godoc.org/google.golang.org/grpc#Dial +func dial(unixSocketPath string) (registerapi.RegistrationClient, *grpc.ClientConn, error) { + c, err := grpc.Dial(unixSocketPath, grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithTimeout(10*time.Second), + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + }), + ) + + if err != nil { + return nil, nil, fmt.Errorf("failed to dial socket %s, err: %v", unixSocketPath, err) + } + + return registerapi.NewRegistrationClient(c), c, nil +} diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go new file mode 100644 index 00000000000..44bccf9a6f3 --- /dev/null +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go @@ -0,0 +1,220 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pluginwatcher + +import ( + "fmt" + "io/ioutil" + "strconv" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + "golang.org/x/net/context" + + "k8s.io/apimachinery/pkg/util/sets" + registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" + v1beta1 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1" + v1beta2 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2" +) + +func TestExamplePlugin(t *testing.T) { + socketDir, err := ioutil.TempDir("", "plugin_test") + require.NoError(t, err) + socketPath := socketDir + "/plugin.sock" + w := NewWatcher(socketDir) + + testCases := []struct { + description string + expectedEndpoint string + returnErr error + }{ + { + description: "Successfully register plugin through inotify", + expectedEndpoint: "", + returnErr: nil, + }, + { + description: "Successfully register plugin through inotify and got expected optional endpoint", + expectedEndpoint: "dummyEndpoint", + returnErr: nil, + }, + { + description: "Fails registration because endpoint is expected to be non-empty", + expectedEndpoint: "dummyEndpoint", + returnErr: fmt.Errorf("empty endpoint received"), + }, + { + description: "Successfully register plugin through inotify after plugin restarts", + expectedEndpoint: "", + returnErr: nil, + }, + { + description: "Fails registration with conflicting plugin name", + expectedEndpoint: "", + returnErr: fmt.Errorf("conflicting plugin name"), + }, + { + description: "Successfully register plugin during initial traverse after plugin watcher restarts", + expectedEndpoint: "", + returnErr: nil, + }, + { + description: "Fails registration with conflicting plugin name during initial traverse after plugin watcher restarts", + expectedEndpoint: "", + returnErr: fmt.Errorf("conflicting plugin name"), + }, + } + + callbackCount := struct { + mutex sync.Mutex + count int32 + }{} + w.AddHandler(PluginType, func(name string, endpoint string, versions []string, sockPath string) (error, chan bool) { + callbackCount.mutex.Lock() + localCount := callbackCount.count + callbackCount.count = callbackCount.count + 1 + callbackCount.mutex.Unlock() + + require.True(t, localCount <= int32((len(testCases)-1))) + require.Equal(t, PluginName, name, "Plugin name mismatched!!") + retError := testCases[localCount].returnErr + if retError == nil || retError.Error() != "empty endpoint received" { + require.Equal(t, testCases[localCount].expectedEndpoint, endpoint, "Unexpected endpoint") + } else { + require.NotEqual(t, testCases[localCount].expectedEndpoint, endpoint, "Unexpected endpoint") + } + + require.Equal(t, []string{"v1beta1", "v1beta2"}, versions, "Plugin version mismatched!!") + // Verifies the grpcServer is ready to serve services. + _, conn, err := dial(sockPath) + require.Nil(t, err) + defer conn.Close() + + // The plugin handler should be able to use any listed service API version. 
+ v1beta1Client := v1beta1.NewExampleClient(conn)
+ v1beta2Client := v1beta2.NewExampleClient(conn)
+
+ // Tests v1beta1 GetExampleInfo
+ _, err = v1beta1Client.GetExampleInfo(context.Background(), &v1beta1.ExampleRequest{})
+ require.Nil(t, err)
+
+ // Tests v1beta2 GetExampleInfo
+ _, err = v1beta2Client.GetExampleInfo(context.Background(), &v1beta2.ExampleRequest{})
+ require.Nil(t, err)
+ chanForAckOfNotification := make(chan bool)
+
+ go func() {
+ select {
+ case <-chanForAckOfNotification:
+ close(chanForAckOfNotification)
+ case <-time.After(time.Second):
+ t.Fatalf("Timed out while waiting for notification ack")
+ }
+ }()
+ return retError, chanForAckOfNotification
+ })
+ require.NoError(t, w.Start())
+
+ p := NewTestExamplePlugin("")
+ require.NoError(t, p.Serve(socketPath))
+ require.True(t, waitForPluginRegistrationStatus(t, p.registrationStatus))
+
+ require.NoError(t, p.Stop())
+
+ p = NewTestExamplePlugin("dummyEndpoint")
+ require.NoError(t, p.Serve(socketPath))
+ require.True(t, waitForPluginRegistrationStatus(t, p.registrationStatus))
+
+ require.NoError(t, p.Stop())
+
+ p = NewTestExamplePlugin("")
+ require.NoError(t, p.Serve(socketPath))
+ require.False(t, waitForPluginRegistrationStatus(t, p.registrationStatus))
+
+ // Trying to start a plugin service at the same socket path should fail
+ // with "bind: address already in use"
+ require.NotNil(t, p.Serve(socketPath))
+
+ // grpcServer.Stop() will remove the socket, and starting the plugin service
+ // at the same path again should succeed and trigger another callback.
+ require.NoError(t, p.Stop())
+ p = NewTestExamplePlugin("")
+ go func() {
+ require.Nil(t, p.Serve(socketPath))
+ }()
+ require.True(t, waitForPluginRegistrationStatus(t, p.registrationStatus))
+
+ // Starting another plugin with the same name should get a verification error.
+ p2 := NewTestExamplePlugin("")
+ socketPath2 := socketDir + "/plugin2.sock"
+ go func() {
+ require.NoError(t, p2.Serve(socketPath2))
+ }()
+ require.False(t, waitForPluginRegistrationStatus(t, p2.registrationStatus))
+
+ // Restarting the plugin watcher should traverse the socket directory and issue a
+ // callback for every existing socket.
+ require.NoError(t, w.Stop()) + errCh := make(chan error) + go func() { + errCh <- w.Start() + }() + + var wg sync.WaitGroup + wg.Add(2) + var pStatus string + var p2Status string + go func() { + pStatus = strconv.FormatBool(waitForPluginRegistrationStatus(t, p.registrationStatus)) + wg.Done() + }() + go func() { + p2Status = strconv.FormatBool(waitForPluginRegistrationStatus(t, p2.registrationStatus)) + wg.Done() + }() + wg.Wait() + expectedSet := sets.NewString() + expectedSet.Insert("true", "false") + actualSet := sets.NewString() + actualSet.Insert(pStatus, p2Status) + + require.Equal(t, expectedSet, actualSet) + + select { + case err = <-errCh: + require.NoError(t, err) + case <-time.After(time.Second): + t.Fatalf("Timed out while waiting for watcher start") + + } + + require.NoError(t, w.Stop()) + err = w.Cleanup() + require.NoError(t, err) +} + +func waitForPluginRegistrationStatus(t *testing.T, statusCh chan registerapi.RegistrationStatus) bool { + select { + case status := <-statusCh: + return status.PluginRegistered + case <-time.After(10 * time.Second): + t.Fatalf("Timed out while waiting for registration status") + } + return false +} From b2d4426f093f092153030ab520e82a5c5e4ec642 Mon Sep 17 00:00:00 2001 From: Kevin Taylor Date: Fri, 21 Jul 2017 14:42:03 +0000 Subject: [PATCH 252/307] Add dynamic environment variable substitution to subpaths --- pkg/features/kube_features.go | 8 + pkg/kubelet/container/helpers.go | 5 + pkg/kubelet/container/helpers_test.go | 108 +++++++++++++ pkg/kubelet/kubelet_pods.go | 22 ++- pkg/kubelet/kubelet_pods_test.go | 6 +- pkg/kubelet/kubelet_pods_windows_test.go | 2 +- test/e2e/common/expansion.go | 189 +++++++++++++++++++++++ 7 files changed, 328 insertions(+), 12 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index e95a0fcc155..c8d77e5e927 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -286,6 +286,13 @@ const ( // // Extend the default scheduler to be aware of volume topology and handle PV provisioning DynamicProvisioningScheduling utilfeature.Feature = "DynamicProvisioningScheduling" + + // owner: @kevtaylor + // alpha: v1.11 + // + // Allow subpath environment variable substitution + // Only applicable if the VolumeSubpath feature is also enabled + VolumeSubpathEnvExpansion utilfeature.Feature = "VolumeSubpathEnvExpansion" ) func init() { @@ -335,6 +342,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, DynamicProvisioningScheduling: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 180a3e6df2d..fbf108c2a74 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -133,6 +133,11 @@ func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVa return command } +func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (expandedSubpath string) { + mapping := expansion.MappingFuncFor(EnvVarsToMap(envs)) + return expansion.Expand(mount.SubPath, mapping) +} + func ExpandContainerCommandAndArgs(container *v1.Container, envs []EnvVar) (command []string, args 
[]string) { mapping := expansion.MappingFuncFor(EnvVarsToMap(envs)) diff --git a/pkg/kubelet/container/helpers_test.go b/pkg/kubelet/container/helpers_test.go index 14d9d6e6c8c..d6c2792c60b 100644 --- a/pkg/kubelet/container/helpers_test.go +++ b/pkg/kubelet/container/helpers_test.go @@ -138,6 +138,114 @@ func TestExpandCommandAndArgs(t *testing.T) { } } +func TestExpandVolumeMountsWithSubpath(t *testing.T) { + cases := []struct { + name string + container *v1.Container + envs []EnvVar + expectedSubPath string + expectedMountPath string + }{ + { + name: "subpath with no expansion", + container: &v1.Container{ + VolumeMounts: []v1.VolumeMount{{SubPath: "foo"}}, + }, + expectedSubPath: "foo", + expectedMountPath: "", + }, + { + name: "volumes with expanded subpath", + container: &v1.Container{ + VolumeMounts: []v1.VolumeMount{{SubPath: "foo/$(POD_NAME)"}}, + }, + envs: []EnvVar{ + { + Name: "POD_NAME", + Value: "bar", + }, + }, + expectedSubPath: "foo/bar", + expectedMountPath: "", + }, + { + name: "volumes expanded with empty subpath", + container: &v1.Container{ + VolumeMounts: []v1.VolumeMount{{SubPath: ""}}, + }, + envs: []EnvVar{ + { + Name: "POD_NAME", + Value: "bar", + }, + }, + expectedSubPath: "", + expectedMountPath: "", + }, + { + name: "volumes expanded with no envs subpath", + container: &v1.Container{ + VolumeMounts: []v1.VolumeMount{{SubPath: "/foo/$(POD_NAME)"}}, + }, + expectedSubPath: "/foo/$(POD_NAME)", + expectedMountPath: "", + }, + { + name: "volumes expanded with leading environment variable", + container: &v1.Container{ + VolumeMounts: []v1.VolumeMount{{SubPath: "$(POD_NAME)/bar"}}, + }, + envs: []EnvVar{ + { + Name: "POD_NAME", + Value: "foo", + }, + }, + expectedSubPath: "foo/bar", + expectedMountPath: "", + }, + { + name: "volumes with volume and subpath", + container: &v1.Container{ + VolumeMounts: []v1.VolumeMount{{MountPath: "/foo", SubPath: "$(POD_NAME)/bar"}}, + }, + envs: []EnvVar{ + { + Name: "POD_NAME", + Value: "foo", + }, + }, + expectedSubPath: "foo/bar", + expectedMountPath: "/foo", + }, + { + name: "volumes with volume and no subpath", + container: &v1.Container{ + VolumeMounts: []v1.VolumeMount{{MountPath: "/foo"}}, + }, + envs: []EnvVar{ + { + Name: "POD_NAME", + Value: "foo", + }, + }, + expectedSubPath: "", + expectedMountPath: "/foo", + }, + } + + for _, tc := range cases { + actualSubPath := ExpandContainerVolumeMounts(tc.container.VolumeMounts[0], tc.envs) + if e, a := tc.expectedSubPath, actualSubPath; !reflect.DeepEqual(e, a) { + t.Errorf("%v: unexpected subpath; expected %v, got %v", tc.name, e, a) + } + if e, a := tc.expectedMountPath, tc.container.VolumeMounts[0].MountPath; !reflect.DeepEqual(e, a) { + t.Errorf("%v: unexpected mountpath; expected %v, got %v", tc.name, e, a) + } + } + +} + func TestShouldContainerBeRestarted(t *testing.T) { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 46a17a01a58..50c28c0ebec 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -129,7 +129,8 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol } // makeMounts determines the mount points for the given container. 
-func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap, mounter mountutil.Interface) ([]kubecontainer.Mount, func(), error) { +func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap, mounter mountutil.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) { + // Kubernetes only mounts on /etc/hosts if: // - container is not an infrastructure (pause) container // - container is not already mounting on /etc/hosts @@ -166,6 +167,11 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h return nil, cleanupAction, fmt.Errorf("volume subpaths are disabled") } + // Expand subpath variables + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpathEnvExpansion) { + mount.SubPath = kubecontainer.ExpandContainerVolumeMounts(mount, expandEnvs) + } + if filepath.IsAbs(mount.SubPath) { return nil, cleanupAction, fmt.Errorf("error SubPath `%s` must not be an absolute path", mount.SubPath) } @@ -454,18 +460,18 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai opts.Devices = append(opts.Devices, blkVolumes...) } - mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes, kl.mounter) + envs, err := kl.makeEnvironmentVariables(pod, container, podIP) + if err != nil { + return nil, nil, err + } + opts.Envs = append(opts.Envs, envs...) + + mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes, kl.mounter, opts.Envs) if err != nil { return nil, cleanupAction, err } opts.Mounts = append(opts.Mounts, mounts...) - envs, err := kl.makeEnvironmentVariables(pod, container, podIP) - if err != nil { - return nil, cleanupAction, err - } - opts.Envs = append(opts.Envs, envs...) 
- // Disabling adding TerminationMessagePath on Windows as these files would be mounted as docker volume and // Docker for Windows has a bug where only directories can be mounted if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" { diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 9d2c813848b..6a26e20550e 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -273,7 +273,7 @@ func TestMakeMounts(t *testing.T) { return } - mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes, fm) + mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes, fm, nil) // validate only the error if we expect an error if tc.expectErr { @@ -296,7 +296,7 @@ func TestMakeMounts(t *testing.T) { t.Errorf("Failed to enable feature gate for MountPropagation: %v", err) return } - mounts, _, err = makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes, fm) + mounts, _, err = makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes, fm, nil) if !tc.expectErr { expectedPrivateMounts := []kubecontainer.Mount{} for _, mount := range tc.expectedMounts { @@ -357,7 +357,7 @@ func TestDisabledSubpath(t *testing.T) { defer utilfeature.DefaultFeatureGate.Set("VolumeSubpath=true") for name, test := range cases { - _, _, err := makeMounts(&pod, "/pod", &test.container, "fakepodname", "", "", podVolumes, fm) + _, _, err := makeMounts(&pod, "/pod", &test.container, "fakepodname", "", "", podVolumes, fm, nil) if err != nil && !test.expectError { t.Errorf("test %v failed: %v", name, err) } diff --git a/pkg/kubelet/kubelet_pods_windows_test.go b/pkg/kubelet/kubelet_pods_windows_test.go index cc16b358fb0..628c2ecdd7a 100644 --- a/pkg/kubelet/kubelet_pods_windows_test.go +++ b/pkg/kubelet/kubelet_pods_windows_test.go @@ -66,7 +66,7 @@ func TestMakeMountsWindows(t *testing.T) { } fm := &mount.FakeMounter{} - mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes, fm) + mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes, fm, nil) expectedMounts := []kubecontainer.Mount{ { diff --git a/test/e2e/common/expansion.go b/test/e2e/common/expansion.go index eadb324fecf..17123dd8c87 100644 --- a/test/e2e/common/expansion.go +++ b/test/e2e/common/expansion.go @@ -19,8 +19,13 @@ package common import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "time" ) // These tests exercise the Kubernetes expansion syntax $(VAR). @@ -144,4 +149,188 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { "test-value", }) }) + + /* + Testname: var-expansion-subpath + Description: Make sure a container's subpath can be set using an + expansion of environment variables. 
+ */ + It("should allow substituting values in a volume subpath [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion]", func() { + podName := "var-expansion-" + string(uuid.NewUUID()) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Labels: map[string]string{"name": podName}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "dapi-container", + Image: busyboxImage, + Command: []string{"sh", "-c", "test -d /testcontainer/" + podName + ";echo $?"}, + Env: []v1.EnvVar{ + { + Name: "POD_NAME", + Value: podName, + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "workdir1", + MountPath: "/logscontainer", + SubPath: "$(POD_NAME)", + }, + { + Name: "workdir2", + MountPath: "/testcontainer", + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ + { + Name: "workdir1", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{Path: "/tmp"}, + }, + }, + { + Name: "workdir2", + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{Path: "/tmp"}, + }, + }, + }, + }, + } + + f.TestContainerOutput("substitution in volume subpath", pod, 0, []string{ + "0", + }) + }) + + /* + Testname: var-expansion-subpath-with-backticks + Description: Make sure a container's subpath can not be set using an + expansion of environment variables when backticks are supplied. + */ + It("should fail substituting values in a volume subpath with backticks [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() { + + podName := "var-expansion-" + string(uuid.NewUUID()) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Labels: map[string]string{"name": podName}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "dapi-container", + Image: busyboxImage, + Env: []v1.EnvVar{ + { + Name: "POD_NAME", + Value: "..", + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "workdir1", + MountPath: "/logscontainer", + SubPath: "$(POD_NAME)", + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ + { + Name: "workdir1", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + } + + // Pod should fail + testPodFailSubpath(f, pod, "SubPath `..`: must not contain '..'") + }) + + /* + Testname: var-expansion-subpath-with-absolute-path + Description: Make sure a container's subpath can not be set using an + expansion of environment variables when absoluete path is supplied. 
+ */ + It("should fail substituting values in a volume subpath with absolute path [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() { + + podName := "var-expansion-" + string(uuid.NewUUID()) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Labels: map[string]string{"name": podName}, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "dapi-container", + Image: busyboxImage, + Env: []v1.EnvVar{ + { + Name: "POD_NAME", + Value: "/tmp", + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "workdir1", + MountPath: "/logscontainer", + SubPath: "$(POD_NAME)", + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ + { + Name: "workdir1", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + } + + // Pod should fail + testPodFailSubpath(f, pod, "SubPath `/tmp` must not be an absolute path") + }) }) + +func testPodFailSubpath(f *framework.Framework, pod *v1.Pod, errorText string) { + + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + Expect(err).ToNot(HaveOccurred(), "while creating pod") + + defer func() { + framework.DeletePodWithWait(f, f.ClientSet, pod) + }() + + err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, 30*time.Second) + Expect(err).To(HaveOccurred(), "while waiting for pod to be running") + + selector := fields.Set{ + "involvedObject.kind": "Pod", + "involvedObject.name": pod.Name, + "involvedObject.namespace": f.Namespace.Name, + "reason": "Failed", + }.AsSelector().String() + + options := metav1.ListOptions{FieldSelector: selector} + events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options) + Expect(err).NotTo(HaveOccurred(), "while getting pod events") + Expect(len(events.Items)).NotTo(Equal(0), "no events found") + Expect(events.Items[0].Message).To(ContainSubstring(errorText), "subpath error not found") +} From 631124cde4989ec7233d9b885a9d140a0ed0bb89 Mon Sep 17 00:00:00 2001 From: Monis Khan Date: Mon, 7 May 2018 14:14:23 -0400 Subject: [PATCH 253/307] Correctly apply request transforms with flattened resource builder This change moves the NewClientWithOptions call into Builder.getClient. Since getClient is the only way for Builder and its visitors to create a RESTClient, we can reasonably guarantee that the request transforms will be honored. Previously, it was possible for a call to NewFlattenListVisitor to return resource Info objects whose Client field did not honor the request transforms. 
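
To illustrate the effect, here is a minimal sketch of a caller that relies on
this guarantee; builder construction and imports are elided and assumed, the
helper name and the namespace/resource arguments are arbitrary, and the chain
simply mirrors the new flatten test case added below:

    // Any transform registered on the builder is now applied to every request
    // it issues, including the per-item requests made while flattening a list.
    func listPodsWithTransform(b *resource.Builder) ([]*resource.Info, error) {
        return b.NamespaceParam("foo").
            TransformRequests(func(req *rest.Request) {
                // e.g. tag or record every outgoing request here
            }).
            ResourceTypeOrNameArgs(true, "pods").
            Flatten().
            Do().
            Infos()
    }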
Signed-off-by: Monis Khan --- .../genericclioptions/resource/builder.go | 26 ++++---- .../resource/builder_test.go | 60 ++++++++++++++----- .../genericclioptions/resource/interfaces.go | 3 + 3 files changed, 63 insertions(+), 26 deletions(-) diff --git a/pkg/kubectl/genericclioptions/resource/builder.go b/pkg/kubectl/genericclioptions/resource/builder.go index 9f4e02d02b2..09e21827f7e 100644 --- a/pkg/kubectl/genericclioptions/resource/builder.go +++ b/pkg/kubectl/genericclioptions/resource/builder.go @@ -49,9 +49,7 @@ type Builder struct { categoryExpander restmapper.CategoryExpander // mapper is set explicitly by resource builders - mapper *mapper - internal *mapper - unstructured *mapper + mapper *mapper // clientConfigFn is a function to produce a client, *if* you need one clientConfigFn ClientConfigFunc @@ -829,7 +827,6 @@ func (b *Builder) visitBySelector() *Result { result.err = err return result } - client = NewClientWithOptions(client, b.requestTransforms...) selectorNamespace := b.namespace if mapping.Scope.Name() != meta.RESTScopeNameNamespace { selectorNamespace = "" @@ -846,15 +843,25 @@ func (b *Builder) visitBySelector() *Result { } func (b *Builder) getClient(gv schema.GroupVersion) (RESTClient, error) { - if b.fakeClientFn != nil { - return b.fakeClientFn(gv) + var ( + client RESTClient + err error + ) + + switch { + case b.fakeClientFn != nil: + client, err = b.fakeClientFn(gv) + case b.negotiatedSerializer != nil: + client, err = b.clientConfigFn.clientForGroupVersion(gv, b.negotiatedSerializer) + default: + client, err = b.clientConfigFn.unstructuredClientForGroupVersion(gv) } - if b.negotiatedSerializer != nil { - return b.clientConfigFn.clientForGroupVersion(gv, b.negotiatedSerializer) + if err != nil { + return nil, err } - return b.clientConfigFn.unstructuredClientForGroupVersion(gv) + return NewClientWithOptions(client, b.requestTransforms...), nil } func (b *Builder) visitByResource() *Result { @@ -891,7 +898,6 @@ func (b *Builder) visitByResource() *Result { result.err = err return result } - client = NewClientWithOptions(client, b.requestTransforms...) clients[s] = client } diff --git a/pkg/kubectl/genericclioptions/resource/builder_test.go b/pkg/kubectl/genericclioptions/resource/builder_test.go index e6308a56687..ecf12529a19 100644 --- a/pkg/kubectl/genericclioptions/resource/builder_test.go +++ b/pkg/kubectl/genericclioptions/resource/builder_test.go @@ -655,22 +655,50 @@ func TestMultipleResourceByTheSameName(t *testing.T) { } func TestRequestModifier(t *testing.T) { - var got *rest.Request - b := newDefaultBuilderWith(fakeClientWith("test", t, nil)). - NamespaceParam("foo"). - TransformRequests(func(req *rest.Request) { - got = req - }). - ResourceNames("", "services/baz"). - RequireObject(false) - - i, err := b.Do().Infos() - if err != nil { - t.Fatal(err) - } - req := i[0].Client.Get() - if got != req { - t.Fatalf("request was not received by modifier: %#v", req) + for _, tc := range []struct { + name string + f func(t *testing.T, got **rest.Request) *Builder + }{ + { + name: "simple", + f: func(t *testing.T, got **rest.Request) *Builder { + return newDefaultBuilderWith(fakeClientWith(t.Name(), t, nil)). + NamespaceParam("foo"). + TransformRequests(func(req *rest.Request) { + *got = req + }). + ResourceNames("", "services/baz"). 
+ RequireObject(false) + }, + }, + { + name: "flatten", + f: func(t *testing.T, got **rest.Request) *Builder { + pods, _ := testData() + return newDefaultBuilderWith(fakeClientWith(t.Name(), t, map[string]string{ + "/namespaces/foo/pods": runtime.EncodeOrDie(corev1Codec, pods), + })). + NamespaceParam("foo"). + TransformRequests(func(req *rest.Request) { + *got = req + }). + ResourceTypeOrNameArgs(true, "pods"). + Flatten() + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + var got *rest.Request + b := tc.f(t, &got) + i, err := b.Do().Infos() + if err != nil { + t.Fatal(err) + } + req := i[0].Client.Get() + if got != req { + t.Fatalf("request was not received by modifier: %#v", req) + } + }) } } diff --git a/pkg/kubectl/genericclioptions/resource/interfaces.go b/pkg/kubectl/genericclioptions/resource/interfaces.go index 6179481a5d8..508d4d6b5e4 100644 --- a/pkg/kubectl/genericclioptions/resource/interfaces.go +++ b/pkg/kubectl/genericclioptions/resource/interfaces.go @@ -47,6 +47,9 @@ type RequestTransform func(*rest.Request) // NewClientWithOptions wraps the provided RESTClient and invokes each transform on each // newly created request. func NewClientWithOptions(c RESTClient, transforms ...RequestTransform) RESTClient { + if len(transforms) == 0 { + return c + } return &clientOptions{c: c, transforms: transforms} } From 2d97f8ea3a5e64ca1a1575c5900f567e98c0da59 Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Tue, 29 May 2018 09:15:29 -0700 Subject: [PATCH 254/307] node e2e: fix the missing square brackets Also tag the inode eviction tests with [NodeFeature:Eviction]. --- test/e2e_node/eviction_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index dcd29e43982..5131f53e771 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -59,7 +59,7 @@ const ( // InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods. // Node disk pressure is induced by consuming all inodes on the node. -var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", func() { +var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { f := framework.NewDefaultFramework("inode-eviction-test") expectedNodeCondition := v1.NodeDiskPressure pressureTimeout := 15 * time.Minute @@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][N // MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods. // Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved. -var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { +var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { f := framework.NewDefaultFramework("memory-allocatable-eviction-test") expectedNodeCondition := v1.NodeMemoryPressure pressureTimeout := 10 * time.Minute @@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // Disk pressure is induced by running pods which consume disk space. 
-var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { +var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { f := framework.NewDefaultFramework("localstorage-eviction-test") pressureTimeout := 10 * time.Minute expectedNodeCondition := v1.NodeDiskPressure @@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold. // Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run. -var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { +var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { f := framework.NewDefaultFramework("localstorage-eviction-test") pressureTimeout := 10 * time.Minute expectedNodeCondition := v1.NodeDiskPressure @@ -271,7 +271,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se // PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods. // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // the higher priority pod. -var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { +var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test") expectedNodeCondition := v1.NodeMemoryPressure pressureTimeout := 10 * time.Minute @@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [ // PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods. // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // the higher priority pod. 
-var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction", func() { +var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test") expectedNodeCondition := v1.NodeDiskPressure pressureTimeout := 10 * time.Minute From 668e127a1e033c01e91924b70051c3c7a59e8859 Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Tue, 29 May 2018 09:34:40 -0700 Subject: [PATCH 255/307] fix dynamic kubelet config tests --- test/e2e_node/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index 918622ed4a0..a33ae7cc699 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -138,7 +138,7 @@ func isKubeletConfigEnabled(f *framework.Framework) (bool, error) { } v, ok := cfgz.FeatureGates[string(features.DynamicKubeletConfig)] if !ok { - return false, nil + return true, nil } return v, nil } From 207e9d1d90c47b2a40c3cd4d43563060f27f8ff7 Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 29 May 2018 09:57:15 -0400 Subject: [PATCH 256/307] cleanup some dead kubectl code and narrow scope of helpers --- hack/.golint_failures | 2 - pkg/kubectl/cmd/apply.go | 10 - pkg/kubectl/cmd/set/BUILD | 7 +- pkg/kubectl/cmd/{util => set}/env/BUILD | 2 +- pkg/kubectl/cmd/{util => set}/env/doc.go | 4 +- .../cmd/{util => set}/env/env_parse.go | 19 -- .../cmd/{util => set}/env/env_parse_test.go | 27 --- .../cmd/{util => set}/env/env_resolve.go | 0 pkg/kubectl/cmd/set/set_env.go | 2 +- pkg/kubectl/cmd/util/BUILD | 7 +- pkg/kubectl/cmd/util/editor/BUILD | 7 +- .../{util => cmd/util/editor}/crlf/BUILD | 2 +- .../{util => cmd/util/editor}/crlf/crlf.go | 2 +- pkg/kubectl/cmd/util/editor/editoptions.go | 2 +- pkg/kubectl/cmd/util/factory.go | 28 --- pkg/kubectl/cmd/util/factory_client_access.go | 19 -- pkg/kubectl/cmd/util/factory_test.go | 79 ------- pkg/kubectl/cmd/util/generator.go | 1 - pkg/kubectl/cmd/util/helpers.go | 19 -- pkg/kubectl/cmd/util/jsonmerge/BUILD | 32 --- pkg/kubectl/cmd/util/jsonmerge/jsonmerge.go | 193 ------------------ pkg/kubectl/util/BUILD | 1 - 22 files changed, 18 insertions(+), 447 deletions(-) rename pkg/kubectl/cmd/{util => set}/env/BUILD (94%) rename pkg/kubectl/cmd/{util => set}/env/doc.go (86%) rename pkg/kubectl/cmd/{util => set}/env/env_parse.go (91%) rename pkg/kubectl/cmd/{util => set}/env/env_parse_test.go (76%) rename pkg/kubectl/cmd/{util => set}/env/env_resolve.go (100%) rename pkg/kubectl/{util => cmd/util/editor}/crlf/BUILD (85%) rename pkg/kubectl/{util => cmd/util/editor}/crlf/crlf.go (98%) delete mode 100644 pkg/kubectl/cmd/util/factory_test.go delete mode 100644 pkg/kubectl/cmd/util/jsonmerge/BUILD delete mode 100644 pkg/kubectl/cmd/util/jsonmerge/jsonmerge.go diff --git a/hack/.golint_failures b/hack/.golint_failures index 2b2798e46a7..e59d33138e6 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -148,7 +148,6 @@ pkg/kubectl/cmd/templates pkg/kubectl/cmd/testing pkg/kubectl/cmd/util pkg/kubectl/cmd/util/editor -pkg/kubectl/cmd/util/jsonmerge pkg/kubectl/cmd/util/sanity pkg/kubectl/cmd/wait pkg/kubectl/genericclioptions @@ -156,7 +155,6 @@ pkg/kubectl/genericclioptions/printers pkg/kubectl/genericclioptions/resource pkg/kubectl/metricsutil pkg/kubectl/util -pkg/kubectl/util/crlf pkg/kubectl/util/slice pkg/kubelet pkg/kubelet/apis diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index 
650451120ed..354f9a7b675 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -40,7 +40,6 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" - scaleclient "k8s.io/client-go/scale" oapi "k8s.io/kube-openapi/pkg/util/proto" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl" @@ -79,7 +78,6 @@ type ApplyOptions struct { Validator validation.Schema Builder *resource.Builder Mapper meta.RESTMapper - Scaler scaleclient.ScalesGetter DynamicClient dynamic.Interface OpenAPISchema openapi.Resources @@ -220,11 +218,6 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } - o.Scaler, err = cmdutil.ScaleClientFn(f) - if err != nil { - return err - } - o.DynamicClient, err = f.DynamicClient() if err != nil { return err @@ -493,7 +486,6 @@ func (o *ApplyOptions) Run() error { cascade: o.DeleteOptions.Cascade, dryRun: o.DryRun, gracePeriod: o.DeleteOptions.GracePeriod, - scaler: o.Scaler, toPrinter: o.ToPrinter, @@ -583,8 +575,6 @@ type pruner struct { dryRun bool gracePeriod int - scaler scaleclient.ScalesGetter - toPrinter func(string) (printers.ResourcePrinter, error) out io.Writer diff --git a/pkg/kubectl/cmd/set/BUILD b/pkg/kubectl/cmd/set/BUILD index aba25208706..023d91e9910 100644 --- a/pkg/kubectl/cmd/set/BUILD +++ b/pkg/kubectl/cmd/set/BUILD @@ -21,9 +21,9 @@ go_library( deps = [ "//pkg/apis/rbac:go_default_library", "//pkg/kubectl:go_default_library", + "//pkg/kubectl/cmd/set/env:go_default_library", "//pkg/kubectl/cmd/templates:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", - "//pkg/kubectl/cmd/util/env:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", @@ -96,7 +96,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/kubectl/cmd/set/env:all-srcs", + ], tags = ["automanaged"], visibility = [ "//build/visible_to:pkg_kubectl_cmd_set_CONSUMERS", diff --git a/pkg/kubectl/cmd/util/env/BUILD b/pkg/kubectl/cmd/set/env/BUILD similarity index 94% rename from pkg/kubectl/cmd/util/env/BUILD rename to pkg/kubectl/cmd/set/env/BUILD index ecdf11acb37..97fb91a2072 100644 --- a/pkg/kubectl/cmd/util/env/BUILD +++ b/pkg/kubectl/cmd/set/env/BUILD @@ -7,7 +7,7 @@ go_library( "env_parse.go", "env_resolve.go", ], - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/env", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/set/env", visibility = ["//visibility:public"], deps = [ "//pkg/api/v1/resource:go_default_library", diff --git a/pkg/kubectl/cmd/util/env/doc.go b/pkg/kubectl/cmd/set/env/doc.go similarity index 86% rename from pkg/kubectl/cmd/util/env/doc.go rename to pkg/kubectl/cmd/set/env/doc.go index 39adb0adf0c..25e4c04a70f 100644 --- a/pkg/kubectl/cmd/util/env/doc.go +++ b/pkg/kubectl/cmd/set/env/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package env provides functions to incorporate environment variables into kubectl commands. -package env // import "k8s.io/kubernetes/pkg/kubectl/cmd/util/env" +// Package env provides functions to incorporate environment variables into set env. 
+package env diff --git a/pkg/kubectl/cmd/util/env/env_parse.go b/pkg/kubectl/cmd/set/env/env_parse.go similarity index 91% rename from pkg/kubectl/cmd/util/env/env_parse.go rename to pkg/kubectl/cmd/set/env/env_parse.go index 1d1686af647..5e2e529088d 100644 --- a/pkg/kubectl/cmd/util/env/env_parse.go +++ b/pkg/kubectl/cmd/set/env/env_parse.go @@ -20,7 +20,6 @@ import ( "bufio" "fmt" "io" - "os" "regexp" "strings" @@ -28,24 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// Env returns an environment variable if not nil, or a default value. -func Env(key string, defaultValue string) string { - val := os.Getenv(key) - if len(val) == 0 { - return defaultValue - } - return val -} - -// GetEnv returns an environment value if not nil, and an ok boolean. -func GetEnv(key string) (string, bool) { - val := os.Getenv(key) - if len(val) == 0 { - return "", false - } - return val, true -} - var argumentEnvironment = regexp.MustCompile("(?ms)^(.+)\\=(.*)$") var validArgumentEnvironment = regexp.MustCompile("(?ms)^(\\w+)\\=(.*)$") diff --git a/pkg/kubectl/cmd/util/env/env_parse_test.go b/pkg/kubectl/cmd/set/env/env_parse_test.go similarity index 76% rename from pkg/kubectl/cmd/util/env/env_parse_test.go rename to pkg/kubectl/cmd/set/env/env_parse_test.go index 32be9833c76..5cff84a1849 100644 --- a/pkg/kubectl/cmd/util/env/env_parse_test.go +++ b/pkg/kubectl/cmd/set/env/env_parse_test.go @@ -19,36 +19,9 @@ package env import ( "fmt" "io" - "os" "strings" ) -func ExampleEnv_defaultValue() { - fmt.Println(Env("TESTENVVAR", "default")) - // Output: default -} - -func ExampleEnv_variableExists() { - os.Setenv("TESTENVVAR", "test value") - defer os.Unsetenv("TESTENVVAR") - fmt.Println(Env("TESTENVVAR", "default")) - // Output: test value -} - -func ExampleGetEnv_variableExists() { - os.Setenv("THISVAREXISTS", "value") - defer os.Unsetenv("THISVAREXISTS") - fmt.Println(GetEnv("THISVAREXISTS")) - // Output: - // value true -} - -func ExampleGetEnv_variableDoesNotExist() { - fmt.Println(GetEnv("THISVARDOESNOTEXIST")) - // Output: - // false -} - func ExampleIsEnvironmentArgument_true() { test := "returns=true" fmt.Println(IsEnvironmentArgument(test)) diff --git a/pkg/kubectl/cmd/util/env/env_resolve.go b/pkg/kubectl/cmd/set/env/env_resolve.go similarity index 100% rename from pkg/kubectl/cmd/util/env/env_resolve.go rename to pkg/kubectl/cmd/set/env/env_resolve.go diff --git a/pkg/kubectl/cmd/set/set_env.go b/pkg/kubectl/cmd/set/set_env.go index 10800c5b716..f1e6c0cc65d 100644 --- a/pkg/kubectl/cmd/set/set_env.go +++ b/pkg/kubectl/cmd/set/set_env.go @@ -31,9 +31,9 @@ import ( "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/kubernetes" + envutil "k8s.io/kubernetes/pkg/kubectl/cmd/set/env" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - envutil "k8s.io/kubernetes/pkg/kubectl/cmd/util/env" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 026f03db50c..7ed95e5050b 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -58,10 +58,7 @@ go_library( go_test( name = "go_default_test", - srcs = [ - "factory_test.go", - "helpers_test.go", - ], + srcs = ["helpers_test.go"], embed = [":go_default_library"], deps = [ "//pkg/api/testapi:go_default_library", @@ -90,8 +87,6 @@ filegroup( srcs = [ 
":package-srcs", "//pkg/kubectl/cmd/util/editor:all-srcs", - "//pkg/kubectl/cmd/util/env:all-srcs", - "//pkg/kubectl/cmd/util/jsonmerge:all-srcs", "//pkg/kubectl/cmd/util/openapi:all-srcs", "//pkg/kubectl/cmd/util/sanity:all-srcs", ], diff --git a/pkg/kubectl/cmd/util/editor/BUILD b/pkg/kubectl/cmd/util/editor/BUILD index 06cf3472f53..379bdcc5c95 100644 --- a/pkg/kubectl/cmd/util/editor/BUILD +++ b/pkg/kubectl/cmd/util/editor/BUILD @@ -18,11 +18,11 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/kubectl:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", + "//pkg/kubectl/cmd/util/editor/crlf:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", - "//pkg/kubectl/util/crlf:go_default_library", "//pkg/kubectl/util/term:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -57,7 +57,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/kubectl/cmd/util/editor/crlf:all-srcs", + ], tags = ["automanaged"], visibility = [ "//build/visible_to:pkg_kubectl_cmd_util_editor_CONSUMERS", diff --git a/pkg/kubectl/util/crlf/BUILD b/pkg/kubectl/cmd/util/editor/crlf/BUILD similarity index 85% rename from pkg/kubectl/util/crlf/BUILD rename to pkg/kubectl/cmd/util/editor/crlf/BUILD index fad33187461..ebf46c7e204 100644 --- a/pkg/kubectl/util/crlf/BUILD +++ b/pkg/kubectl/cmd/util/editor/crlf/BUILD @@ -8,7 +8,7 @@ load( go_library( name = "go_default_library", srcs = ["crlf.go"], - importpath = "k8s.io/kubernetes/pkg/kubectl/util/crlf", + importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf", ) filegroup( diff --git a/pkg/kubectl/util/crlf/crlf.go b/pkg/kubectl/cmd/util/editor/crlf/crlf.go similarity index 98% rename from pkg/kubectl/util/crlf/crlf.go rename to pkg/kubectl/cmd/util/editor/crlf/crlf.go index c08fe6f3108..524a81f3e73 100644 --- a/pkg/kubectl/util/crlf/crlf.go +++ b/pkg/kubectl/cmd/util/editor/crlf/crlf.go @@ -51,7 +51,7 @@ func (w crlfWriter) Write(b []byte) (n int, err error) { } return written + n, err } - written += 1 + written++ i = next + 1 } } diff --git a/pkg/kubectl/cmd/util/editor/editoptions.go b/pkg/kubectl/cmd/util/editor/editoptions.go index 49417e54ad2..f2d2920b98a 100644 --- a/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/pkg/kubectl/cmd/util/editor/editoptions.go @@ -45,11 +45,11 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" "k8s.io/kubernetes/pkg/kubectl/scheme" - "k8s.io/kubernetes/pkg/kubectl/util/crlf" ) // EditOptions contains all the options for running edit cli command. diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 8dc6df3fa56..3a0fc5d97cf 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ b/pkg/kubectl/cmd/util/factory.go @@ -17,15 +17,10 @@ limitations under the License. 
package util import ( - "fmt" - "strconv" - "strings" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" @@ -73,26 +68,3 @@ type Factory interface { // OpenAPISchema returns the schema openapi schema definition OpenAPISchema() (openapi.Resources, error) } - -func makePortsString(ports []api.ServicePort, useNodePort bool) string { - pieces := make([]string, len(ports)) - for ix := range ports { - var port int32 - if useNodePort { - port = ports[ix].NodePort - } else { - port = ports[ix].Port - } - pieces[ix] = fmt.Sprintf("%s:%d", strings.ToLower(string(ports[ix].Protocol)), port) - } - return strings.Join(pieces, ",") -} - -// Extracts the protocols exposed by a service from the given service spec. -func getServiceProtocols(spec api.ServiceSpec) map[string]string { - result := make(map[string]string) - for _, servicePort := range spec.Ports { - result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) - } - return result -} diff --git a/pkg/kubectl/cmd/util/factory_client_access.go b/pkg/kubectl/cmd/util/factory_client_access.go index e01e649f4f8..43b4a1a9b08 100644 --- a/pkg/kubectl/cmd/util/factory_client_access.go +++ b/pkg/kubectl/cmd/util/factory_client_access.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - scaleclient "k8s.io/client-go/scale" "k8s.io/client-go/tools/clientcmd" "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" @@ -189,24 +188,6 @@ func (f *factoryImpl) OpenAPISchema() (openapi.Resources, error) { return f.openAPIGetter.getter.Get() } -func (f *factoryImpl) ScaleClient() (scaleclient.ScalesGetter, error) { - discoClient, err := f.clientGetter.ToDiscoveryClient() - if err != nil { - return nil, err - } - restClient, err := f.RESTClient() - if err != nil { - return nil, err - } - resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient) - mapper, err := f.clientGetter.ToRESTMapper() - if err != nil { - return nil, err - } - - return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil -} - // this method exists to help us find the points still relying on internal types. func InternalVersionDecoder() runtime.Decoder { return legacyscheme.Codecs.UniversalDecoder() diff --git a/pkg/kubectl/cmd/util/factory_test.go b/pkg/kubectl/cmd/util/factory_test.go deleted file mode 100644 index 06d9ac87062..00000000000 --- a/pkg/kubectl/cmd/util/factory_test.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "testing" - - api "k8s.io/kubernetes/pkg/apis/core" -) - -func TestMakePortsString(t *testing.T) { - tests := []struct { - ports []api.ServicePort - useNodePort bool - expectedOutput string - }{ - {ports: nil, expectedOutput: ""}, - {ports: []api.ServicePort{}, expectedOutput: ""}, - {ports: []api.ServicePort{ - { - Port: 80, - Protocol: "TCP", - }, - }, - expectedOutput: "tcp:80", - }, - {ports: []api.ServicePort{ - { - Port: 80, - Protocol: "TCP", - }, - { - Port: 8080, - Protocol: "UDP", - }, - { - Port: 9000, - Protocol: "TCP", - }, - }, - expectedOutput: "tcp:80,udp:8080,tcp:9000", - }, - {ports: []api.ServicePort{ - { - Port: 80, - NodePort: 9090, - Protocol: "TCP", - }, - { - Port: 8080, - NodePort: 80, - Protocol: "UDP", - }, - }, - useNodePort: true, - expectedOutput: "tcp:9090,udp:80", - }, - } - for _, test := range tests { - output := makePortsString(test.ports, test.useNodePort) - if output != test.expectedOutput { - t.Errorf("expected: %s, saw: %s.", test.expectedOutput, output) - } - } -} diff --git a/pkg/kubectl/cmd/util/generator.go b/pkg/kubectl/cmd/util/generator.go index 551f9e94c10..7d02669bee8 100644 --- a/pkg/kubectl/cmd/util/generator.go +++ b/pkg/kubectl/cmd/util/generator.go @@ -62,7 +62,6 @@ const ( ConfigMapV1GeneratorName = "configmap/v1" ClusterRoleBindingV1GeneratorName = "clusterrolebinding.rbac.authorization.k8s.io/v1alpha1" RoleBindingV1GeneratorName = "rolebinding.rbac.authorization.k8s.io/v1alpha1" - ClusterV1Beta1GeneratorName = "cluster/v1beta1" PodDisruptionBudgetV1GeneratorName = "poddisruptionbudget/v1beta1" PodDisruptionBudgetV2GeneratorName = "poddisruptionbudget/v1beta1/v2" PriorityClassV1Alpha1GeneratorName = "priorityclass/v1alpha1" diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index d9b16d19152..aa2224652b8 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -300,16 +300,6 @@ func IsFilenameSliceEmpty(filenames []string) bool { return len(filenames) == 0 } -// Whether this cmd need watching objects. 
-func isWatch(cmd *cobra.Command) bool { - if w, err := cmd.Flags().GetBool("watch"); err == nil && w { - return true - } - - wo, err := cmd.Flags().GetBool("watch-only") - return err == nil && wo -} - func GetFlagString(cmd *cobra.Command, flag string) string { s, err := cmd.Flags().GetString(flag) if err != nil { @@ -336,15 +326,6 @@ func GetFlagStringArray(cmd *cobra.Command, flag string) []string { return s } -// GetWideFlag is used to determine if "-o wide" is used -func GetWideFlag(cmd *cobra.Command) bool { - f := cmd.Flags().Lookup("output") - if f != nil && f.Value != nil && f.Value.String() == "wide" { - return true - } - return false -} - func GetFlagBool(cmd *cobra.Command, flag string) bool { b, err := cmd.Flags().GetBool(flag) if err != nil { diff --git a/pkg/kubectl/cmd/util/jsonmerge/BUILD b/pkg/kubectl/cmd/util/jsonmerge/BUILD deleted file mode 100644 index 08551294bbd..00000000000 --- a/pkg/kubectl/cmd/util/jsonmerge/BUILD +++ /dev/null @@ -1,32 +0,0 @@ -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["jsonmerge.go"], - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/jsonmerge", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = [ - "//build/visible_to:pkg_kubectl_cmd_util_jsonmerge_CONSUMERS", - ], -) diff --git a/pkg/kubectl/cmd/util/jsonmerge/jsonmerge.go b/pkg/kubectl/cmd/util/jsonmerge/jsonmerge.go deleted file mode 100644 index beebc7f052a..00000000000 --- a/pkg/kubectl/cmd/util/jsonmerge/jsonmerge.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonmerge - -import ( - "encoding/json" - "fmt" - - "github.com/evanphx/json-patch" - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/util/mergepatch" - "k8s.io/apimachinery/pkg/util/yaml" -) - -// Delta represents a change between two JSON documents. -type Delta struct { - original []byte - edit []byte - - preconditions []PreconditionFunc -} - -// PreconditionFunc is a test to verify that an incompatible change -// has occurred before an Apply can be successful. -type PreconditionFunc func(interface{}) (hold bool, message string) - -// AddPreconditions adds precondition checks to a change which must -// be satisfied before an Apply is considered successful. If a -// precondition returns false, the Apply is failed with -// ErrPreconditionFailed. -func (d *Delta) AddPreconditions(fns ...PreconditionFunc) { - d.preconditions = append(d.preconditions, fns...) 
-} - -// RequireKeyUnchanged creates a precondition function that fails -// if the provided key is present in the diff (indicating its value -// has changed). -func RequireKeyUnchanged(key string) PreconditionFunc { - return func(diff interface{}) (bool, string) { - m, ok := diff.(map[string]interface{}) - if !ok { - return true, "" - } - // the presence of key in a diff means that its value has been changed, therefore - // we should fail the precondition. - _, ok = m[key] - if ok { - return false, key + " should not be changed\n" - } else { - return true, "" - } - } -} - -// RequireMetadataKeyUnchanged creates a precondition function that fails -// if the metadata.key is present in the diff (indicating its value -// has changed). -func RequireMetadataKeyUnchanged(key string) PreconditionFunc { - return func(diff interface{}) (bool, string) { - m, ok := diff.(map[string]interface{}) - if !ok { - return true, "" - } - m1, ok := m["metadata"] - if !ok { - return true, "" - } - m2, ok := m1.(map[string]interface{}) - if !ok { - return true, "" - } - _, ok = m2[key] - if ok { - return false, "metadata." + key + " should not be changed\n" - } else { - return true, "" - } - } -} - -// TestPreconditions test if preconditions hold given the edit -func TestPreconditionsHold(edit []byte, preconditions []PreconditionFunc) (bool, string) { - diff := make(map[string]interface{}) - if err := json.Unmarshal(edit, &diff); err != nil { - return false, err.Error() - } - for _, fn := range preconditions { - if hold, msg := fn(diff); !hold { - return false, msg - } - } - return true, "" -} - -// NewDelta accepts two JSON or YAML documents and calculates the difference -// between them. It returns a Delta object which can be used to resolve -// conflicts against a third version with a common parent, or an error -// if either document is in error. -func NewDelta(from, to []byte) (*Delta, error) { - d := &Delta{} - before, err := yaml.ToJSON(from) - if err != nil { - return nil, err - } - after, err := yaml.ToJSON(to) - if err != nil { - return nil, err - } - diff, err := jsonpatch.CreateMergePatch(before, after) - if err != nil { - return nil, err - } - glog.V(6).Infof("Patch created from:\n%s\n%s\n%s", string(before), string(after), string(diff)) - d.original = before - d.edit = diff - return d, nil -} - -// Apply attempts to apply the changes described by Delta onto latest, -// returning an error if the changes cannot be applied cleanly. -// IsConflicting will be true if the changes overlap, otherwise a -// generic error will be returned. 
-func (d *Delta) Apply(latest []byte) ([]byte, error) { - base, err := yaml.ToJSON(latest) - if err != nil { - return nil, err - } - changes, err := jsonpatch.CreateMergePatch(d.original, base) - if err != nil { - return nil, err - } - diff1 := make(map[string]interface{}) - if err := json.Unmarshal(d.edit, &diff1); err != nil { - return nil, err - } - diff2 := make(map[string]interface{}) - if err := json.Unmarshal(changes, &diff2); err != nil { - return nil, err - } - for _, fn := range d.preconditions { - hold1, _ := fn(diff1) - hold2, _ := fn(diff2) - if !hold1 || !hold2 { - return nil, ErrPreconditionFailed - } - } - - glog.V(6).Infof("Testing for conflict between:\n%s\n%s", string(d.edit), string(changes)) - hasConflicts, err := mergepatch.HasConflicts(diff1, diff2) - if err != nil { - return nil, err - } - if hasConflicts { - return nil, ErrConflict - } - - return jsonpatch.MergePatch(base, d.edit) -} - -// IsConflicting returns true if the provided error indicates a -// conflict exists between the original changes and the applied -// changes. -func IsConflicting(err error) bool { - return err == ErrConflict -} - -// IsPreconditionFailed returns true if the provided error indicates -// a Delta precondition did not succeed. -func IsPreconditionFailed(err error) bool { - return err == ErrPreconditionFailed -} - -var ErrPreconditionFailed = fmt.Errorf("a precondition failed") -var ErrConflict = fmt.Errorf("changes are in conflict") - -func (d *Delta) Edit() []byte { - return d.edit -} diff --git a/pkg/kubectl/util/BUILD b/pkg/kubectl/util/BUILD index ad3c1b73130..73dfd3a1932 100644 --- a/pkg/kubectl/util/BUILD +++ b/pkg/kubectl/util/BUILD @@ -98,7 +98,6 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/kubectl/util/crlf:all-srcs", "//pkg/kubectl/util/hash:all-srcs", "//pkg/kubectl/util/i18n:all-srcs", "//pkg/kubectl/util/logs:all-srcs", From 4b836d77d5f046c6b9e124975b79af6badfa51f4 Mon Sep 17 00:00:00 2001 From: David Eads Date: Tue, 29 May 2018 11:18:50 -0400 Subject: [PATCH 257/307] update set selector to use resource builder flags --- pkg/kubectl/cmd/set/BUILD | 1 + pkg/kubectl/cmd/set/set_selector.go | 84 +++++-------------- pkg/kubectl/cmd/set/set_selector_test.go | 46 +++++----- .../genericclioptions/builder_flags.go | 43 ++++++++-- 4 files changed, 82 insertions(+), 92 deletions(-) diff --git a/pkg/kubectl/cmd/set/BUILD b/pkg/kubectl/cmd/set/BUILD index aba25208706..51dd84ba7a9 100644 --- a/pkg/kubectl/cmd/set/BUILD +++ b/pkg/kubectl/cmd/set/BUILD @@ -68,6 +68,7 @@ go_test( "//pkg/kubectl/cmd/testing:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/genericclioptions:go_default_library", + "//pkg/kubectl/genericclioptions/printers:go_default_library", "//pkg/kubectl/genericclioptions/resource:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index 3f29dd07c05..70199c37679 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -23,7 +23,6 @@ import ( "github.com/spf13/cobra" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -40,26 +39,23 @@ import ( // SelectorOptions is the start of the data required to perform the operation. 
As new fields are added, add them here instead of // referencing the cmd.Flags() type SetSelectorOptions struct { - fileOptions resource.FilenameOptions - - PrintFlags *genericclioptions.PrintFlags - RecordFlags *genericclioptions.RecordFlags - - local bool - dryrun bool - all bool - output string + // Bound + ResourceBuilderFlags *genericclioptions.ResourceBuilderFlags + PrintFlags *genericclioptions.PrintFlags + RecordFlags *genericclioptions.RecordFlags + dryrun bool + // set by args resources []string selector *metav1.LabelSelector - ClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) - - PrintObj printers.ResourcePrinterFunc - Recorder genericclioptions.Recorder - - builder *resource.Builder + // computed + WriteToServer bool + PrintObj printers.ResourcePrinterFunc + Recorder genericclioptions.Recorder + ResourceFinder genericclioptions.ResourceFinder + // set at initialization genericclioptions.IOStreams } @@ -79,6 +75,12 @@ var ( func NewSelectorOptions(streams genericclioptions.IOStreams) *SetSelectorOptions { return &SetSelectorOptions{ + ResourceBuilderFlags: genericclioptions.NewResourceBuilderFlags(). + WithScheme(scheme.Scheme). + WithAll(false). + WithLocal(false). + WithUninitialized(false). + WithLatest(), PrintFlags: genericclioptions.NewPrintFlags("selector updated").WithTypeSetter(scheme.Scheme), RecordFlags: genericclioptions.NewRecordFlags(), @@ -105,16 +107,12 @@ func NewCmdSelector(f cmdutil.Factory, streams genericclioptions.IOStreams) *cob }, } + o.ResourceBuilderFlags.AddFlags(cmd.Flags()) o.PrintFlags.AddFlags(cmd) o.RecordFlags.AddFlags(cmd) - cmd.Flags().BoolVar(&o.all, "all", o.all, "Select all resources, including uninitialized ones, in the namespace of the specified resource types") - cmd.Flags().BoolVar(&o.local, "local", o.local, "If true, set selector will NOT contact api-server but run locally.") cmd.Flags().String("resource-version", "", "If non-empty, the selectors update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.") - usage := "the resource to update the selectors" - cmdutil.AddFilenameOptionFlags(cmd, &o.fileOptions, usage) cmdutil.AddDryRunFlag(cmd) - cmdutil.AddIncludeUninitializedFlag(cmd) return cmd } @@ -130,40 +128,14 @@ func (o *SetSelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg } o.dryrun = cmdutil.GetDryRunFlag(cmd) - o.output = cmdutil.GetFlagString(cmd, "output") - - cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() - if err != nil { - return err - } o.resources, o.selector, err = getResourcesAndSelector(args) if err != nil { return err } - includeUninitialized := cmdutil.ShouldIncludeUninitialized(cmd, false) - o.builder = f.NewBuilder(). - WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). - LocalParam(o.local). - ContinueOnError(). - NamespaceParam(cmdNamespace).DefaultNamespace(). - FilenameParam(enforceNamespace, &o.fileOptions). - IncludeUninitialized(includeUninitialized). - Flatten() - - if !o.local { - o.builder. - ResourceTypeOrNameArgs(o.all, o.resources...). - Latest() - } else { - // if a --local flag was provided, and a resource was specified in the form - // <resource>/<name>, fail immediately as --local cannot query the api server - // for the specified resource.
- if len(o.resources) > 0 { - return resource.LocalResourceError - } - } + o.ResourceFinder = o.ResourceBuilderFlags.ToBuilder(f, o.resources) + o.WriteToServer = !(*o.ResourceBuilderFlags.Local || o.dryrun) if o.dryrun { o.PrintFlags.Complete("%s (dry run)") @@ -174,17 +146,11 @@ func (o *SetSelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg } o.PrintObj = printer.PrintObj - o.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) { - return f.ClientForMapping(mapping) - } return err } // Validate basic inputs func (o *SetSelectorOptions) Validate() error { - if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.fileOptions.Filenames) { - return fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>") - } if o.selector == nil { return fmt.Errorf("one selector is required") } @@ -193,11 +159,7 @@ func (o *SetSelectorOptions) Validate() error { // RunSelector executes the command. func (o *SetSelectorOptions) RunSelector() error { - r := o.builder.Do() - err := r.Err() - if err != nil { - return err - } + r := o.ResourceFinder.Do() return r.Visit(func(info *resource.Info, err error) error { patch := &Patch{Info: info} @@ -218,7 +180,7 @@ func (o *SetSelectorOptions) RunSelector() error { if patch.Err != nil { return patch.Err } - if o.local || o.dryrun { + if !o.WriteToServer { return o.PrintObj(info.Object, o.Out) } diff --git a/pkg/kubectl/cmd/set/set_selector_test.go b/pkg/kubectl/cmd/set/set_selector_test.go index 351e6d98d35..e1924f32c56 100644 --- a/pkg/kubectl/cmd/set/set_selector_test.go +++ b/pkg/kubectl/cmd/set/set_selector_test.go @@ -17,7 +17,6 @@ limitations under the License. package set import ( - "net/http" "reflect" "strings" "testing" @@ -28,13 +27,9 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/rest/fake" - cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" "k8s.io/kubernetes/pkg/kubectl/genericclioptions" - "k8s.io/kubernetes/pkg/kubectl/scheme" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/printers" + "k8s.io/kubernetes/pkg/kubectl/genericclioptions/resource" ) func TestUpdateSelectorForObjectTypes(t *testing.T) { @@ -317,27 +312,30 @@ func TestGetResourcesAndSelector(t *testing.T) { } func TestSelectorTest(t *testing.T) { - tf := cmdtesting.NewTestFactory().WithNamespace("test") - defer tf.Cleanup() - - tf.Client = &fake.RESTClient{ - GroupVersion: schema.GroupVersion{Version: ""}, - NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}, - Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - t.Fatalf("unexpected request: %s %#v\n%#v", req.Method, req.URL, req) - return nil, nil - }), + info := &resource.Info{ + Object: &v1.Service{ + TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Service"}, + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "cassandra"}, + }, } - tf.ClientConfigVal = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: ""}}} - streams, _, buf, _ := genericclioptions.NewTestIOStreams() - cmd := NewCmdSelector(tf, streams) - cmd.Flags().Set("output", "name") - cmd.Flags().Set("local", "true") - cmd.Flags().Set("filename", "../../../../test/e2e/testing-manifests/statefulset/cassandra/service.yaml") + labelToSet, err :=
metav1.ParseToLabelSelector("environment=qa") + if err != nil { + t.Fatal(err) + } - cmd.Run(cmd, []string{"environment=qa"}) + iostreams, _, buf, _ := genericclioptions.NewTestIOStreams() + o := &SetSelectorOptions{ + selector: labelToSet, + ResourceFinder: genericclioptions.NewSimpleFakeResourceFinder(info), + Recorder: genericclioptions.NoopRecorder{}, + PrintObj: (&printers.NamePrinter{}).PrintObj, + IOStreams: iostreams, + } + if err := o.RunSelector(); err != nil { + t.Fatal(err) + } if !strings.Contains(buf.String(), "service/cassandra") { t.Errorf("did not set selector: %s", buf.String()) } diff --git a/pkg/kubectl/genericclioptions/builder_flags.go b/pkg/kubectl/genericclioptions/builder_flags.go index ad0ec7a6376..43ca43b3b6e 100644 --- a/pkg/kubectl/genericclioptions/builder_flags.go +++ b/pkg/kubectl/genericclioptions/builder_flags.go @@ -27,14 +27,16 @@ import ( type ResourceBuilderFlags struct { FileNameFlags *FileNameFlags - LabelSelector *string - FieldSelector *string - AllNamespaces *bool - All *bool - Local *bool + LabelSelector *string + FieldSelector *string + AllNamespaces *bool + All *bool + Local *bool + IncludeUninitialized *bool - Scheme *runtime.Scheme - Latest bool + Scheme *runtime.Scheme + Latest bool + StopOnFirstError bool } // NewResourceBuilderFlags returns a default ResourceBuilderFlags @@ -85,6 +87,12 @@ func (o *ResourceBuilderFlags) WithLocal(defaultVal bool) *ResourceBuilderFlags return o } +// WithUninitialized is using an alpha feature and may be dropped +func (o *ResourceBuilderFlags) WithUninitialized(defaultVal bool) *ResourceBuilderFlags { + o.IncludeUninitialized = &defaultVal + return o +} + func (o *ResourceBuilderFlags) WithScheme(scheme *runtime.Scheme) *ResourceBuilderFlags { o.Scheme = scheme return o @@ -95,6 +103,11 @@ func (o *ResourceBuilderFlags) WithLatest() *ResourceBuilderFlags { return o } +func (o *ResourceBuilderFlags) StopOnError() *ResourceBuilderFlags { + o.StopOnFirstError = true + return o +} + // AddFlags registers flags for finding resources func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { o.FileNameFlags.AddFlags(flagset) @@ -114,6 +127,9 @@ func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { if o.Local != nil { flagset.BoolVar(o.Local, "local", *o.Local, "If true, annotation will NOT contact api-server but run locally.") } + if o.IncludeUninitialized != nil { + flagset.BoolVar(o.IncludeUninitialized, "include-uninitialized", *o.IncludeUninitialized, `If true, the kubectl command applies to uninitialized objects. If explicitly set to false, this flag overrides other flags that make the kubectl commands apply to uninitialized objects, e.g., "--all". 
Objects with empty metadata.initializers are regarded as initialized.`) + } } // ToBuilder gives you back a resource finder to visit resources that are located @@ -153,8 +169,21 @@ func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, reso if o.Latest { builder.Latest() } + } else { builder.Local() + + if len(resources) > 0 { + builder.AddError(resource.LocalResourceError) + } + } + + if o.IncludeUninitialized != nil { + builder.IncludeUninitialized(*o.IncludeUninitialized) + } + + if !o.StopOnFirstError { + builder.ContinueOnError() } return &ResourceFindBuilderWrapper{ From a48008f5ad7906c3260c1562c3b01f13d1ebd69e Mon Sep 17 00:00:00 2001 From: Yu-Ju Hong Date: Tue, 29 May 2018 12:56:37 -0700 Subject: [PATCH 258/307] e2e node: mark pod cgroup test as [NodeConformance] --- test/e2e_node/pods_container_manager_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index 1768e7d9fa7..128860b17c0 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -152,7 +152,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { f := framework.NewDefaultFramework("kubelet-cgroup-manager") Describe("QOS containers", func() { Context("On enabling QOS cgroup hierarchy", func() { - It("Top level QoS containers should have been created", func() { + It("Top level QoS containers should have been created [NodeConformance]", func() { if !framework.TestContext.KubeletConfig.CgroupsPerQOS { return } From 20cd94de176842cbe97970d4db22bbdec103176e Mon Sep 17 00:00:00 2001 From: Matt Rogers Date: Tue, 29 May 2018 12:50:14 -0400 Subject: [PATCH 259/307] Add dry-run to auth reconcile Signed-off-by: Matt Rogers --- pkg/kubectl/cmd/auth/reconcile.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pkg/kubectl/cmd/auth/reconcile.go b/pkg/kubectl/cmd/auth/reconcile.go index 555244ca61f..b6f1f6ac3eb 100644 --- a/pkg/kubectl/cmd/auth/reconcile.go +++ b/pkg/kubectl/cmd/auth/reconcile.go @@ -39,6 +39,7 @@ import ( type ReconcileOptions struct { PrintFlags *genericclioptions.PrintFlags FilenameOptions *resource.FilenameOptions + DryRun bool Visitor resource.Visitor RBACClient rbacv1client.RbacV1Interface @@ -87,6 +88,7 @@ func NewCmdReconcile(f cmdutil.Factory, streams genericclioptions.IOStreams) *co o.PrintFlags.AddFlags(cmd) cmdutil.AddFilenameOptionFlags(cmd, o.FilenameOptions, "identifying the resource to reconcile.") + cmd.Flags().BoolVar(&o.DryRun, "dry-run", o.DryRun, "If true, display results but do not submit changes") cmd.MarkFlagRequired("filename") return cmd @@ -128,6 +130,9 @@ func (o *ReconcileOptions) Complete(cmd *cobra.Command, f cmdutil.Factory, args return err } + if o.DryRun { + o.PrintFlags.Complete("%s (dry run)") + } printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -168,7 +173,7 @@ func (o *ReconcileOptions) RunReconcile() error { switch t := info.Object.(type) { case *rbacv1.Role: reconcileOptions := reconciliation.ReconcileRoleOptions{ - Confirm: true, + Confirm: !o.DryRun, RemoveExtraPermissions: false, Role: reconciliation.RoleRuleOwner{Role: t}, Client: reconciliation.RoleModifier{ @@ -184,7 +189,7 @@ func (o *ReconcileOptions) RunReconcile() error { case *rbacv1.ClusterRole: reconcileOptions := reconciliation.ReconcileRoleOptions{ - Confirm: true, + Confirm: !o.DryRun, RemoveExtraPermissions: false, Role: 
reconciliation.ClusterRoleRuleOwner{ClusterRole: t}, Client: reconciliation.ClusterRoleModifier{ @@ -199,7 +204,7 @@ func (o *ReconcileOptions) RunReconcile() error { case *rbacv1.RoleBinding: reconcileOptions := reconciliation.ReconcileRoleBindingOptions{ - Confirm: true, + Confirm: !o.DryRun, RemoveExtraSubjects: false, RoleBinding: reconciliation.RoleBindingAdapter{RoleBinding: t}, Client: reconciliation.RoleBindingClientAdapter{ @@ -215,7 +220,7 @@ func (o *ReconcileOptions) RunReconcile() error { case *rbacv1.ClusterRoleBinding: reconcileOptions := reconciliation.ReconcileRoleBindingOptions{ - Confirm: true, + Confirm: !o.DryRun, RemoveExtraSubjects: false, RoleBinding: reconciliation.ClusterRoleBindingAdapter{ClusterRoleBinding: t}, Client: reconciliation.ClusterRoleBindingClientAdapter{ From aeb6cacf01409361091750e6910500afc340544e Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Wed, 16 May 2018 01:14:06 -0700 Subject: [PATCH 260/307] Remove direct and indirect streaming runtime interface. --- pkg/kubelet/container/helpers.go | 19 --- pkg/kubelet/container/runtime.go | 16 +- pkg/kubelet/container/testing/fake_runtime.go | 73 +-------- pkg/kubelet/kubelet.go | 4 + pkg/kubelet/kubelet_pods.go | 155 ++++++------------ pkg/kubelet/kubelet_pods_test.go | 101 +++--------- .../kuberuntime/kuberuntime_manager.go | 2 +- 7 files changed, 79 insertions(+), 291 deletions(-) diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 180a3e6df2d..abe80c545e5 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -17,11 +17,9 @@ limitations under the License. package container import ( - "bytes" "fmt" "hash/fnv" "strings" - "time" "github.com/golang/glog" @@ -32,7 +30,6 @@ import ( "k8s.io/client-go/tools/record" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/util/format" - "k8s.io/kubernetes/pkg/kubelet/util/ioutils" hashutil "k8s.io/kubernetes/pkg/util/hash" "k8s.io/kubernetes/third_party/forked/golang/expansion" ) @@ -253,22 +250,6 @@ func FormatPod(pod *Pod) string { return fmt.Sprintf("%s_%s(%s)", pod.Name, pod.Namespace, pod.ID) } -type containerCommandRunnerWrapper struct { - DirectStreamingRuntime -} - -var _ ContainerCommandRunner = &containerCommandRunnerWrapper{} - -func (r *containerCommandRunnerWrapper) RunInContainer(id ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { - var buffer bytes.Buffer - output := ioutils.WriteCloserWrapper(&buffer) - err := r.ExecInContainer(id, cmd, nil, output, output, false, nil, timeout) - // Even if err is non-nil, there still may be output (e.g. the exec wrote to stdout or stderr but - // the command returned a nonzero exit code). Therefore, always return the output along with the - // error. - return buffer.Bytes(), err -} - // GetContainerSpec gets the container spec by containerName. func GetContainerSpec(pod *v1.Pod, containerName string) *v1.Container { for i, c := range pod.Spec.Containers { diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 29852d435ed..70b72024c9c 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -124,22 +124,10 @@ type Runtime interface { UpdatePodCIDR(podCIDR string) error } -// DirectStreamingRuntime is the interface implemented by runtimes for which the streaming calls -// (exec/attach/port-forward) should be served directly by the Kubelet. 
-type DirectStreamingRuntime interface { - // Runs the command in the container of the specified pod. Attaches - // the processes stdin, stdout, and stderr. Optionally uses a tty. - ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error - // Forward the specified port from the specified pod to the stream. - PortForward(pod *Pod, port int32, stream io.ReadWriteCloser) error - // ContainerAttach encapsulates the attaching to containers for testability - ContainerAttacher -} - -// IndirectStreamingRuntime is the interface implemented by runtimes that handle the serving of the +// StreamingRuntime is the interface implemented by runtimes that handle the serving of the // streaming calls (exec/attach/port-forward) themselves. In this case, Kubelet should redirect to // the runtime server. -type IndirectStreamingRuntime interface { +type StreamingRuntime interface { GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) diff --git a/pkg/kubelet/container/testing/fake_runtime.go b/pkg/kubelet/container/testing/fake_runtime.go index 3019d30094e..707ee1ac456 100644 --- a/pkg/kubelet/container/testing/fake_runtime.go +++ b/pkg/kubelet/container/testing/fake_runtime.go @@ -26,7 +26,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/util/flowcontrol" . "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/volume" @@ -59,34 +58,13 @@ type FakeRuntime struct { StatusErr error } -type FakeDirectStreamingRuntime struct { - *FakeRuntime - - // Arguments to streaming method calls. - Args struct { - // Attach / Exec args - ContainerID ContainerID - Cmd []string - Stdin io.Reader - Stdout io.WriteCloser - Stderr io.WriteCloser - TTY bool - // Port-forward args - Pod *Pod - Port int32 - Stream io.ReadWriteCloser - } -} - -var _ DirectStreamingRuntime = &FakeDirectStreamingRuntime{} - const FakeHost = "localhost:12345" -type FakeIndirectStreamingRuntime struct { +type FakeStreamingRuntime struct { *FakeRuntime } -var _ IndirectStreamingRuntime = &FakeIndirectStreamingRuntime{} +var _ StreamingRuntime = &FakeStreamingRuntime{} // FakeRuntime should implement Runtime. 
var _ Runtime = &FakeRuntime{} @@ -311,35 +289,6 @@ func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*PodS return &status, f.Err } -func (f *FakeDirectStreamingRuntime) ExecInContainer(containerID ContainerID, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - f.Lock() - defer f.Unlock() - - f.CalledFunctions = append(f.CalledFunctions, "ExecInContainer") - f.Args.ContainerID = containerID - f.Args.Cmd = cmd - f.Args.Stdin = stdin - f.Args.Stdout = stdout - f.Args.Stderr = stderr - f.Args.TTY = tty - - return f.Err -} - -func (f *FakeDirectStreamingRuntime) AttachContainer(containerID ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { - f.Lock() - defer f.Unlock() - - f.CalledFunctions = append(f.CalledFunctions, "AttachContainer") - f.Args.ContainerID = containerID - f.Args.Stdin = stdin - f.Args.Stdout = stdout - f.Args.Stderr = stderr - f.Args.TTY = tty - - return f.Err -} - func (f *FakeRuntime) GetContainerLogs(pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) { f.Lock() defer f.Unlock() @@ -394,18 +343,6 @@ func (f *FakeRuntime) RemoveImage(image ImageSpec) error { return f.Err } -func (f *FakeDirectStreamingRuntime) PortForward(pod *Pod, port int32, stream io.ReadWriteCloser) error { - f.Lock() - defer f.Unlock() - - f.CalledFunctions = append(f.CalledFunctions, "PortForward") - f.Args.Pod = pod - f.Args.Port = port - f.Args.Stream = stream - - return f.Err -} - func (f *FakeRuntime) GetNetNS(containerID ContainerID) (string, error) { f.Lock() defer f.Unlock() @@ -455,7 +392,7 @@ func (f *FakeRuntime) ImageStats() (*ImageStats, error) { return nil, f.Err } -func (f *FakeIndirectStreamingRuntime) GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { +func (f *FakeStreamingRuntime) GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { f.Lock() defer f.Unlock() @@ -463,7 +400,7 @@ func (f *FakeIndirectStreamingRuntime) GetExec(id ContainerID, cmd []string, std return &url.URL{Host: FakeHost}, f.Err } -func (f *FakeIndirectStreamingRuntime) GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { +func (f *FakeStreamingRuntime) GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { f.Lock() defer f.Unlock() @@ -471,7 +408,7 @@ func (f *FakeIndirectStreamingRuntime) GetAttach(id ContainerID, stdin, stdout, return &url.URL{Host: FakeHost}, f.Err } -func (f *FakeIndirectStreamingRuntime) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) { +func (f *FakeStreamingRuntime) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) { f.Lock() defer f.Unlock() diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 421c0c98bea..7b31fdeffbb 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -670,6 +670,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, return nil, err } klet.containerRuntime = runtime + klet.streamingRuntime = runtime klet.runner = runtime if cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint) { @@ -1002,6 +1003,9 @@ type Kubelet struct { // Container runtime. containerRuntime kubecontainer.Runtime + // Streaming runtime handles container streaming. 
+ streamingRuntime kubecontainer.StreamingRuntime + // Container runtime service (needed by container runtime Start()). // TODO(CD): try to make this available without holding a reference in this // struct. For example, by adding a getter to generic runtime. diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 46a17a01a58..99694d0b5fc 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -1592,139 +1592,78 @@ func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containe // ExecInContainer executes a command in a container, connecting the supplied // stdin/stdout/stderr to the command's IO streams. func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime) - if !ok { - return fmt.Errorf("streaming methods not supported by runtime") - } - - container, err := kl.findContainer(podFullName, podUID, containerName) - if err != nil { - return err - } - if container == nil { - return fmt.Errorf("container not found (%q)", containerName) - } - return streamingRuntime.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty, resize, timeout) + // TODO(random-liu): Remove this. + return fmt.Errorf("unimplemented") } // AttachContainer uses the container runtime to attach the given streams to // the given container. func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { - streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime) - if !ok { - return fmt.Errorf("streaming methods not supported by runtime") - } - - container, err := kl.findContainer(podFullName, podUID, containerName) - if err != nil { - return err - } - if container == nil { - return fmt.Errorf("container not found (%q)", containerName) - } - return streamingRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty, resize) + // TODO(random-liu): Remove this. + return fmt.Errorf("unimplemented") } // PortForward connects to the pod's port and copies data between the port // and the stream. func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error { - streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime) - if !ok { - return fmt.Errorf("streaming methods not supported by runtime") - } - - pods, err := kl.containerRuntime.GetPods(false) - if err != nil { - return err - } - // Resolve and type convert back again. - // We need the static pod UID but the kubecontainer API works with types.UID. - podUID = types.UID(kl.podManager.TranslatePodUID(podUID)) - pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) - if pod.IsEmpty() { - return fmt.Errorf("pod not found (%q)", podFullName) - } - return streamingRuntime.PortForward(&pod, port, stream) + // TODO(random-liu): Remove this. + return fmt.Errorf("unimplemented") } // GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it. 
func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { - switch streamingRuntime := kl.containerRuntime.(type) { - case kubecontainer.DirectStreamingRuntime: - // Kubelet will serve the exec directly. - return nil, nil - case kubecontainer.IndirectStreamingRuntime: - container, err := kl.findContainer(podFullName, podUID, containerName) - if err != nil { - return nil, err - } - if container == nil { - return nil, fmt.Errorf("container not found (%q)", containerName) - } - return streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY) - default: - return nil, fmt.Errorf("container runtime does not support exec") + container, err := kl.findContainer(podFullName, podUID, containerName) + if err != nil { + return nil, err } + if container == nil { + return nil, fmt.Errorf("container not found (%q)", containerName) + } + return kl.streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY) } // GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it. func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) { - switch streamingRuntime := kl.containerRuntime.(type) { - case kubecontainer.DirectStreamingRuntime: - // Kubelet will serve the attach directly. - return nil, nil - case kubecontainer.IndirectStreamingRuntime: - container, err := kl.findContainer(podFullName, podUID, containerName) - if err != nil { - return nil, err - } - if container == nil { - return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName) - } - - // The TTY setting for attach must match the TTY setting in the initial container configuration, - // since whether the process is running in a TTY cannot be changed after it has started. We - // need the api.Pod to get the TTY status. - pod, found := kl.GetPodByFullName(podFullName) - if !found || (string(podUID) != "" && pod.UID != podUID) { - return nil, fmt.Errorf("pod %s not found", podFullName) - } - containerSpec := kubecontainer.GetContainerSpec(pod, containerName) - if containerSpec == nil { - return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName) - } - tty := containerSpec.TTY - - return streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty) - default: - return nil, fmt.Errorf("container runtime does not support attach") + container, err := kl.findContainer(podFullName, podUID, containerName) + if err != nil { + return nil, err } + if container == nil { + return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName) + } + + // The TTY setting for attach must match the TTY setting in the initial container configuration, + // since whether the process is running in a TTY cannot be changed after it has started. We + // need the api.Pod to get the TTY status. 
+ pod, found := kl.GetPodByFullName(podFullName) + if !found || (string(podUID) != "" && pod.UID != podUID) { + return nil, fmt.Errorf("pod %s not found", podFullName) + } + containerSpec := kubecontainer.GetContainerSpec(pod, containerName) + if containerSpec == nil { + return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName) + } + tty := containerSpec.TTY + + return kl.streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty) } // GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it. func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) { - switch streamingRuntime := kl.containerRuntime.(type) { - case kubecontainer.DirectStreamingRuntime: - // Kubelet will serve the attach directly. - return nil, nil - case kubecontainer.IndirectStreamingRuntime: - pods, err := kl.containerRuntime.GetPods(false) - if err != nil { - return nil, err - } - // Resolve and type convert back again. - // We need the static pod UID but the kubecontainer API works with types.UID. - podUID = types.UID(kl.podManager.TranslatePodUID(podUID)) - podFullName := kubecontainer.BuildPodFullName(podName, podNamespace) - pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) - if pod.IsEmpty() { - return nil, fmt.Errorf("pod not found (%q)", podFullName) - } - - return streamingRuntime.GetPortForward(podName, podNamespace, podUID, portForwardOpts.Ports) - default: - return nil, fmt.Errorf("container runtime does not support port-forward") + pods, err := kl.containerRuntime.GetPods(false) + if err != nil { + return nil, err } + // Resolve and type convert back again. + // We need the static pod UID but the kubecontainer API works with types.UID. + podUID = types.UID(kl.podManager.TranslatePodUID(podUID)) + podFullName := kubecontainer.BuildPodFullName(podName, podNamespace) + pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID) + if pod.IsEmpty() { + return nil, fmt.Errorf("pod not found (%q)", podFullName) + } + + return kl.streamingRuntime.GetPortForward(podName, podNamespace, podUID, portForwardOpts.Ports) } // cleanupOrphanedPodCgroups removes cgroups that should no longer exist. 
diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 9d2c813848b..146ef3056ef 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -2149,53 +2149,21 @@ func TestExec(t *testing.T) { }}, } - { // No streaming case - description := "no streaming - " + tc.description - redirect, err := kubelet.GetExec(tc.podFullName, podUID, tc.container, command, remotecommand.Options{}) - assert.Error(t, err, description) - assert.Nil(t, redirect, description) + description := "streaming - " + tc.description + fakeRuntime := &containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime} + kubelet.containerRuntime = fakeRuntime + kubelet.streamingRuntime = fakeRuntime - err = kubelet.ExecInContainer(tc.podFullName, podUID, tc.container, command, stdin, stdout, stderr, tty, nil, 0) + redirect, err := kubelet.GetExec(tc.podFullName, podUID, tc.container, command, remotecommand.Options{}) + if tc.expectError { assert.Error(t, err, description) - } - { // Direct streaming case - description := "direct streaming - " + tc.description - fakeRuntime := &containertest.FakeDirectStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime} - kubelet.containerRuntime = fakeRuntime - - redirect, err := kubelet.GetExec(tc.podFullName, podUID, tc.container, command, remotecommand.Options{}) + } else { assert.NoError(t, err, description) - assert.Nil(t, redirect, description) - - err = kubelet.ExecInContainer(tc.podFullName, podUID, tc.container, command, stdin, stdout, stderr, tty, nil, 0) - if tc.expectError { - assert.Error(t, err, description) - } else { - assert.NoError(t, err, description) - assert.Equal(t, fakeRuntime.Args.ContainerID.ID, containerID, description+": ID") - assert.Equal(t, fakeRuntime.Args.Cmd, command, description+": Command") - assert.Equal(t, fakeRuntime.Args.Stdin, stdin, description+": Stdin") - assert.Equal(t, fakeRuntime.Args.Stdout, stdout, description+": Stdout") - assert.Equal(t, fakeRuntime.Args.Stderr, stderr, description+": Stderr") - assert.Equal(t, fakeRuntime.Args.TTY, tty, description+": TTY") - } + assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect") } - { // Indirect streaming case - description := "indirect streaming - " + tc.description - fakeRuntime := &containertest.FakeIndirectStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime} - kubelet.containerRuntime = fakeRuntime - redirect, err := kubelet.GetExec(tc.podFullName, podUID, tc.container, command, remotecommand.Options{}) - if tc.expectError { - assert.Error(t, err, description) - } else { - assert.NoError(t, err, description) - assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect") - } - - err = kubelet.ExecInContainer(tc.podFullName, podUID, tc.container, command, stdin, stdout, stderr, tty, nil, 0) - assert.Error(t, err, description) - } + err = kubelet.ExecInContainer(tc.podFullName, podUID, tc.container, command, stdin, stdout, stderr, tty, nil, 0) + assert.Error(t, err, description) } } @@ -2241,50 +2209,21 @@ func TestPortForward(t *testing.T) { } podFullName := kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, tc.podName, podNamespace)) - { // No streaming case - description := "no streaming - " + tc.description - redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{}) - assert.Error(t, err, description) - assert.Nil(t, redirect, description) + description := "streaming - " + tc.description + fakeRuntime := 
&containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime} + kubelet.containerRuntime = fakeRuntime + kubelet.streamingRuntime = fakeRuntime - err = kubelet.PortForward(podFullName, podUID, port, stream) + redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{}) + if tc.expectError { assert.Error(t, err, description) - } - { // Direct streaming case - description := "direct streaming - " + tc.description - fakeRuntime := &containertest.FakeDirectStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime} - kubelet.containerRuntime = fakeRuntime - - redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{}) + } else { assert.NoError(t, err, description) - assert.Nil(t, redirect, description) - - err = kubelet.PortForward(podFullName, podUID, port, stream) - if tc.expectError { - assert.Error(t, err, description) - } else { - assert.NoError(t, err, description) - require.Equal(t, fakeRuntime.Args.Pod.ID, podUID, description+": Pod UID") - require.Equal(t, fakeRuntime.Args.Port, port, description+": Port") - require.Equal(t, fakeRuntime.Args.Stream, stream, description+": stream") - } + assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect") } - { // Indirect streaming case - description := "indirect streaming - " + tc.description - fakeRuntime := &containertest.FakeIndirectStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime} - kubelet.containerRuntime = fakeRuntime - redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{}) - if tc.expectError { - assert.Error(t, err, description) - } else { - assert.NoError(t, err, description) - assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect") - } - - err = kubelet.PortForward(podFullName, podUID, port, stream) - assert.Error(t, err, description) - } + err = kubelet.PortForward(podFullName, podUID, port, stream) + assert.Error(t, err, description) } } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index c34136b569a..df207fb4352 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -120,7 +120,7 @@ type kubeGenericRuntimeManager struct { type KubeGenericRuntime interface { kubecontainer.Runtime - kubecontainer.IndirectStreamingRuntime + kubecontainer.StreamingRuntime kubecontainer.ContainerCommandRunner } From 82f9d9365e9e96dfa36b76a4323962ea5661818d Mon Sep 17 00:00:00 2001 From: Cheng Xing Date: Tue, 29 May 2018 14:31:46 -0700 Subject: [PATCH 261/307] Modified regional PD test to fetch template name from GCE --- test/e2e/framework/google_compute.go | 48 ++++++++++++++++++++++++++++ test/e2e/storage/regional_pd.go | 6 ++-- 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/test/e2e/framework/google_compute.go b/test/e2e/framework/google_compute.go index cf937b72f5b..3c1ed0d5cdb 100644 --- a/test/e2e/framework/google_compute.go +++ b/test/e2e/framework/google_compute.go @@ -145,6 +145,28 @@ func CreateManagedInstanceGroup(size int64, zone, template string) error { return nil } +func GetManagedInstanceGroupTemplateName(zone string) (string, error) { + // TODO(verult): make this hit the compute API directly instead of + // shelling out to gcloud. Use InstanceGroupManager to get Instance Template name. 
+ + stdout, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed", + "list", + fmt.Sprintf("--filter=name:%s", TestContext.CloudConfig.NodeInstanceGroup), + fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID), + fmt.Sprintf("--zones=%s", zone), + ) + + if err != nil { + return "", fmt.Errorf("gcloud compute instance-groups managed list call failed with err: %v", err) + } + + templateName, err := parseInstanceTemplateName(stdout) + if err != nil { + return "", fmt.Errorf("error parsing gcloud output: %v", err) + } + return templateName, nil +} + func DeleteManagedInstanceGroup(zone string) error { // TODO(verult): make this hit the compute API directly instead of // shelling out to gcloud. @@ -158,3 +180,29 @@ func DeleteManagedInstanceGroup(zone string) error { } return nil } + +func parseInstanceTemplateName(gcloudOutput string) (string, error) { + const templateNameField = "INSTANCE_TEMPLATE" + + lines := strings.Split(gcloudOutput, "\n") + if len(lines) <= 1 { // Empty output or only contains column names + return "", fmt.Errorf("the list is empty") + } + + // Otherwise, there should be exactly 1 entry, i.e. 2 lines + fieldNames := strings.Fields(lines[0]) + instanceTemplateColumn := 0 + for instanceTemplateColumn < len(fieldNames) && + fieldNames[instanceTemplateColumn] != templateNameField { + instanceTemplateColumn++ + } + + if instanceTemplateColumn == len(fieldNames) { + return "", fmt.Errorf("the list does not contain instance template information") + } + + fields := strings.Fields(lines[1]) + instanceTemplateName := fields[instanceTemplateColumn] + + return instanceTemplateName, nil +} diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index c478a386a67..a4ce5ec5821 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -205,6 +205,9 @@ func testZonalFailover(c clientset.Interface, ns string) { instanceGroup, err := cloud.GetInstanceGroup(instanceGroupName, podZone) Expect(err).NotTo(HaveOccurred(), "Error getting instance group %s in zone %s", instanceGroupName, podZone) + templateName, err := framework.GetManagedInstanceGroupTemplateName(podZone) + Expect(err).NotTo(HaveOccurred(), + "Error getting instance group template in zone %s", podZone) err = framework.DeleteManagedInstanceGroup(podZone) Expect(err).NotTo(HaveOccurred(), "Error deleting instance group in zone %s", podZone) @@ -212,9 +215,6 @@ func testZonalFailover(c clientset.Interface, ns string) { defer func() { framework.Logf("recreating instance group %s", instanceGroup.Name) - // HACK improve this when Managed Instance Groups are available through the cloud provider API - templateName := strings.Replace(instanceGroupName, "group", "template", 1 /* n */) - framework.ExpectNoError(framework.CreateManagedInstanceGroup(instanceGroup.Size, podZone, templateName), "Error recreating instance group %s in zone %s", instanceGroup.Name, podZone) framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, framework.RestartNodeReadyAgainTimeout), From aeccffc3396b9207d0ebfbc99ad33fe3d1f095a5 Mon Sep 17 00:00:00 2001 From: ravisantoshgudimetla Date: Sat, 26 May 2018 19:41:18 -0400 Subject: [PATCH 262/307] Phase out rescheduler in favor of priority and preemption --- build/lib/release.sh | 1 - .../fluentd-es-configmap.yaml | 15 -- .../fluentd-gcp-configmap-old.yaml | 14 -- .../fluentd-gcp/fluentd-gcp-configmap.yaml | 14 -- cluster/gce/config-default.sh | 3 - cluster/gce/config-test.sh | 3 - cluster/gce/gci/configure-helper.sh | 11 -- 
cluster/gce/manifests/kube-proxy.manifest | 3 +- cluster/gce/manifests/rescheduler.manifest | 36 ----- cluster/gce/util.sh | 1 - cluster/log-dump/log-dump.sh | 2 +- pkg/kubelet/types/pod_update.go | 2 +- .../equivalence_cache_predicates.go | 4 + test/e2e/scheduling/rescheduler.go | 133 ------------------ 14 files changed, 7 insertions(+), 235 deletions(-) delete mode 100644 cluster/gce/manifests/rescheduler.manifest delete mode 100644 test/e2e/scheduling/rescheduler.go diff --git a/build/lib/release.sh b/build/lib/release.sh index aaec30f52e0..46e143e31d2 100644 --- a/build/lib/release.sh +++ b/build/lib/release.sh @@ -387,7 +387,6 @@ function kube::release::package_kube_manifests_tarball() { cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}" cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}" cp "${src_dir}/glbc.manifest" "${dst_dir}" - cp "${src_dir}/rescheduler.manifest" "${dst_dir}/" cp "${src_dir}/e2e-image-puller.manifest" "${dst_dir}/" cp "${src_dir}/etcd-empty-dir-cleanup.yaml" "${dst_dir}/" cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh" diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml index 5b861084da0..7dd846248d8 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-configmap.yaml @@ -273,21 +273,6 @@ data: tag kube-scheduler - # Example: - # I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler - - @id rescheduler.log - @type tail - format multiline - multiline_flush_interval 5s - format_firstline /^\w\d{4}/ - format1 /^(?\w)(?

[Regenerated API reference HTML follows here in the original patch series; its markup did not survive extraction, so only the recoverable content is kept, condensed, below.]

The first fragment (its file header is not preserved in this dump) updates two field descriptions of the referenced-ConfigMap node config source:

  uid              false  string  UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.
  resourceVersion  false  string  ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.

The same fragment adds the new definition and table row below:

  v1.ServiceAccountTokenProjection
  ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).

  Name               Required  Schema           Description
  audience           false     string           Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.
  expirationSeconds  false     integer (int64)  ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.
  path               true      string           Path is the path relative to the mount point of the file to project the token into.

  New row under v1.VolumeProjection (after the v1.ConfigMapProjection entry):
  serviceAccountToken  false  v1.ServiceAccountTokenProjection  information about the serviceAccountToken data to project
The same v1.ServiceAccountTokenProjection definition and serviceAccountToken row are then added, verbatim, to each of the other regenerated reference pages:

  docs/api-reference/apps/v1beta1/definitions.html
  docs/api-reference/apps/v1beta2/definitions.html
  docs/api-reference/batch/v1/definitions.html
  docs/api-reference/batch/v1beta1/definitions.html
  docs/api-reference/batch/v2alpha1/definitions.html
  docs/api-reference/extensions/v1beta1/definitions.html
  docs/api-reference/settings.k8s.io/v1alpha1/definitions.html
  docs/api-reference/v1/definitions.html

The batch/v1beta1 and batch/v2alpha1 pages additionally relocate the existing v1.PodDNSConfigOption definition (name: "Required.", optional, string; value: optional, string) so that it now precedes v1.SecretProjection.
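For orientation only (this is not part of the patch): a minimal Go sketch of how a pod author would consume the new projection once these k8s.io/api/core/v1 types are available. The volume name, audience string, and path are illustrative assumptions; the 3600-second expiration mirrors the documented one-hour default.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Request a token for a hypothetical audience; the kubelet rotates it
	// before expiry and projects it at <volume mount>/token in the container.
	expirationSeconds := int64(3600) // documented default: 1 hour; minimum 10 minutes

	tokenVolume := corev1.Volume{
		Name: "sa-token", // illustrative volume name
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{
						ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
							Audience:          "https://audience.example.com", // hypothetical audience
							ExpirationSeconds: &expirationSeconds,
							Path:              "token",
						},
					},
				},
			},
		},
	}

	fmt.Printf("%+v\n", tokenVolume)
}

The generated defaulters below wire SetDefaults_ServiceAccountTokenProjection into every pod-template defaulting path, which is presumably where the documented one-hour default is applied when ExpirationSeconds is left nil.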

+ + diff --git a/pkg/apis/apps/v1/zz_generated.defaults.go b/pkg/apis/apps/v1/zz_generated.defaults.go index 257bac9e8e6..8a4a0a5d724 100644 --- a/pkg/apis/apps/v1/zz_generated.defaults.go +++ b/pkg/apis/apps/v1/zz_generated.defaults.go @@ -86,6 +86,9 @@ func SetObjectDefaults_DaemonSet(in *v1.DaemonSet) { } } } + if b.ServiceAccountToken != nil { + core_v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -230,6 +233,9 @@ func SetObjectDefaults_Deployment(in *v1.Deployment) { } } } + if b.ServiceAccountToken != nil { + core_v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -374,6 +380,9 @@ func SetObjectDefaults_ReplicaSet(in *v1.ReplicaSet) { } } } + if b.ServiceAccountToken != nil { + core_v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -518,6 +527,9 @@ func SetObjectDefaults_StatefulSet(in *v1.StatefulSet) { } } } + if b.ServiceAccountToken != nil { + core_v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff --git a/pkg/apis/apps/v1beta1/zz_generated.defaults.go b/pkg/apis/apps/v1beta1/zz_generated.defaults.go index 17f2fda2ea9..656f61edc99 100644 --- a/pkg/apis/apps/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/apps/v1beta1/zz_generated.defaults.go @@ -82,6 +82,9 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -226,6 +229,9 @@ func SetObjectDefaults_StatefulSet(in *v1beta1.StatefulSet) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff --git a/pkg/apis/apps/v1beta2/zz_generated.defaults.go b/pkg/apis/apps/v1beta2/zz_generated.defaults.go index 8415f3a614f..713bcaa7819 100644 --- a/pkg/apis/apps/v1beta2/zz_generated.defaults.go +++ b/pkg/apis/apps/v1beta2/zz_generated.defaults.go @@ -86,6 +86,9 @@ func SetObjectDefaults_DaemonSet(in *v1beta2.DaemonSet) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -230,6 +233,9 @@ func SetObjectDefaults_Deployment(in *v1beta2.Deployment) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -374,6 +380,9 @@ func SetObjectDefaults_ReplicaSet(in *v1beta2.ReplicaSet) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -518,6 +527,9 @@ func SetObjectDefaults_StatefulSet(in *v1beta2.StatefulSet) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff --git a/pkg/apis/batch/v1/zz_generated.defaults.go b/pkg/apis/batch/v1/zz_generated.defaults.go index 985b58211f0..859d17b768c 100644 --- a/pkg/apis/batch/v1/zz_generated.defaults.go +++ b/pkg/apis/batch/v1/zz_generated.defaults.go @@ -80,6 +80,9 @@ func SetObjectDefaults_Job(in *v1.Job) { } } } + if b.ServiceAccountToken != nil { + core_v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff 
--git a/pkg/apis/batch/v1beta1/zz_generated.defaults.go b/pkg/apis/batch/v1beta1/zz_generated.defaults.go index c18e8ec10a9..d90031c32aa 100644 --- a/pkg/apis/batch/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/batch/v1beta1/zz_generated.defaults.go @@ -81,6 +81,9 @@ func SetObjectDefaults_CronJob(in *v1beta1.CronJob) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -224,6 +227,9 @@ func SetObjectDefaults_JobTemplate(in *v1beta1.JobTemplate) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff --git a/pkg/apis/batch/v2alpha1/zz_generated.defaults.go b/pkg/apis/batch/v2alpha1/zz_generated.defaults.go index ea224b1024d..367c56b15ed 100644 --- a/pkg/apis/batch/v2alpha1/zz_generated.defaults.go +++ b/pkg/apis/batch/v2alpha1/zz_generated.defaults.go @@ -81,6 +81,9 @@ func SetObjectDefaults_CronJob(in *v2alpha1.CronJob) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -224,6 +227,9 @@ func SetObjectDefaults_JobTemplate(in *v2alpha1.JobTemplate) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index 3106ad1770f..80bf9bc921f 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -374,6 +374,8 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_core_ServiceAccount_To_v1_ServiceAccount, Convert_v1_ServiceAccountList_To_core_ServiceAccountList, Convert_core_ServiceAccountList_To_v1_ServiceAccountList, + Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection, + Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection, Convert_v1_ServiceList_To_core_ServiceList, Convert_core_ServiceList_To_v1_ServiceList, Convert_v1_ServicePort_To_core_ServicePort, @@ -4175,7 +4177,17 @@ func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion. 
} func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { - out.Sources = *(*[]core.VolumeProjection)(unsafe.Pointer(&in.Sources)) + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]core.VolumeProjection, len(*in)) + for i := range *in { + if err := Convert_v1_VolumeProjection_To_core_VolumeProjection(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Sources = nil + } out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) return nil } @@ -4186,7 +4198,17 @@ func Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.Proje } func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { - out.Sources = *(*[]v1.VolumeProjection)(unsafe.Pointer(&in.Sources)) + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]v1.VolumeProjection, len(*in)) + for i := range *in { + if err := Convert_core_VolumeProjection_To_v1_VolumeProjection(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Sources = nil + } out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) return nil } @@ -5055,6 +5077,34 @@ func Convert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAc return autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) } +func autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *v1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error { + out.Audience = in.Audience + if err := meta_v1.Convert_Pointer_int64_To_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil { + return err + } + out.Path = in.Path + return nil +} + +// Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection is an autogenerated conversion function. +func Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *v1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in, out, s) +} + +func autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *v1.ServiceAccountTokenProjection, s conversion.Scope) error { + out.Audience = in.Audience + if err := meta_v1.Convert_int64_To_Pointer_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil { + return err + } + out.Path = in.Path + return nil +} + +// Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection is an autogenerated conversion function. 
+func Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *v1.ServiceAccountTokenProjection, s conversion.Scope) error { + return autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in, out, s) +} + func autoConvert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { out.ListMeta = in.ListMeta if in.Items != nil { @@ -5487,6 +5537,15 @@ func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProje out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret)) out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) out.ConfigMap = (*core.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + *out = new(core.ServiceAccountTokenProjection) + if err := Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(*in, *out, s); err != nil { + return err + } + } else { + out.ServiceAccountToken = nil + } return nil } @@ -5499,6 +5558,15 @@ func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumePro out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + *out = new(v1.ServiceAccountTokenProjection) + if err := Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(*in, *out, s); err != nil { + return err + } + } else { + out.ServiceAccountToken = nil + } return nil } @@ -5531,7 +5599,15 @@ func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out * out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) out.PhotonPersistentDisk = (*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*core.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(core.ProjectedVolumeSource) + if err := Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Projected = nil + } out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) out.ScaleIO = (*core.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) @@ -5567,7 +5643,15 @@ func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*v1.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(v1.ProjectedVolumeSource) + if err := Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Projected = nil + } out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) out.ScaleIO = 
(*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) diff --git a/pkg/apis/core/v1/zz_generated.defaults.go b/pkg/apis/core/v1/zz_generated.defaults.go index 3c4dc5e17c5..00e0b384aa8 100644 --- a/pkg/apis/core/v1/zz_generated.defaults.go +++ b/pkg/apis/core/v1/zz_generated.defaults.go @@ -213,6 +213,9 @@ func SetObjectDefaults_Pod(in *v1.Pod) { } } } + if b.ServiceAccountToken != nil { + SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -356,6 +359,9 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { } } } + if b.ServiceAccountToken != nil { + SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -501,6 +507,9 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { } } } + if b.ServiceAccountToken != nil { + SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff --git a/pkg/apis/core/zz_generated.deepcopy.go b/pkg/apis/core/zz_generated.deepcopy.go index f4e79ce13bd..b066b5cae84 100644 --- a/pkg/apis/core/zz_generated.deepcopy.go +++ b/pkg/apis/core/zz_generated.deepcopy.go @@ -5147,6 +5147,22 @@ func (in *ServiceAccountList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. +func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { + if in == nil { + return nil + } + out := new(ServiceAccountTokenProjection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = *in @@ -5556,6 +5572,15 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { (*in).DeepCopyInto(*out) } } + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(ServiceAccountTokenProjection) + **out = **in + } + } return } diff --git a/pkg/apis/extensions/v1beta1/zz_generated.defaults.go b/pkg/apis/extensions/v1beta1/zz_generated.defaults.go index 14df117c3ec..0fa4c321c29 100644 --- a/pkg/apis/extensions/v1beta1/zz_generated.defaults.go +++ b/pkg/apis/extensions/v1beta1/zz_generated.defaults.go @@ -88,6 +88,9 @@ func SetObjectDefaults_DaemonSet(in *v1beta1.DaemonSet) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -232,6 +235,9 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { @@ -398,6 +404,9 @@ func SetObjectDefaults_ReplicaSet(in *v1beta1.ReplicaSet) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff --git a/pkg/apis/settings/v1alpha1/zz_generated.defaults.go b/pkg/apis/settings/v1alpha1/zz_generated.defaults.go index a02bfefecad..b8559cf3ef3 100644 --- a/pkg/apis/settings/v1alpha1/zz_generated.defaults.go +++ b/pkg/apis/settings/v1alpha1/zz_generated.defaults.go @@ -86,6 +86,9 @@ func SetObjectDefaults_PodPreset(in *v1alpha1.PodPreset) { } } } + if b.ServiceAccountToken != nil { + v1.SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } } } if a.VolumeSource.ScaleIO != nil { diff --git a/staging/src/k8s.io/api/core/v1/generated.pb.go b/staging/src/k8s.io/api/core/v1/generated.pb.go index 570ea23fddb..a94e6fe0dde 100644 --- a/staging/src/k8s.io/api/core/v1/generated.pb.go +++ b/staging/src/k8s.io/api/core/v1/generated.pb.go @@ -192,6 +192,7 @@ limitations under the License. 
Service ServiceAccount ServiceAccountList + ServiceAccountTokenProjection ServiceList ServicePort ServiceProxyOptions @@ -963,90 +964,96 @@ func (m *ServiceAccountList) Reset() { *m = ServiceAccountLis func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } +func (*ServiceAccountTokenProjection) ProtoMessage() {} +func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{167} +} + func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{173} + return fileDescriptorGenerated, []int{174} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func 
(*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{184} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{185} + return fileDescriptorGenerated, []int{186} } func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{186} + return fileDescriptorGenerated, []int{187} } func init() { @@ -1217,6 +1224,7 @@ func init() { proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") 
proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") @@ -9472,6 +9480,37 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i += copy(dAtA[i:], m.Audience) + if m.ExpirationSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + func (m *ServiceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -10157,6 +10196,16 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { } i += n208 } + if m.ServiceAccountToken != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) + n209, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n209 + } return i, nil } @@ -10179,163 +10228,163 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n209, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n209 - } - if m.EmptyDir != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n210, err := m.EmptyDir.MarshalTo(dAtA[i:]) + n210, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n210 } - if m.GCEPersistentDisk != nil { - dAtA[i] = 0x1a + if m.EmptyDir != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n211, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) + n211, err := m.EmptyDir.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n211 } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x22 + if m.GCEPersistentDisk != nil { + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n212, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n212, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n212 } - if m.GitRepo != nil { - dAtA[i] = 0x2a + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n213, err := m.GitRepo.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n213, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n213 } - if m.Secret != nil { - dAtA[i] = 0x32 + if m.GitRepo != nil { + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n214, err := m.Secret.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) + n214, err := m.GitRepo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n214 } - if m.NFS != nil { - dAtA[i] = 0x3a + if m.Secret != nil { + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.NFS.Size())) - n215, err := m.NFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n215, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n215 } - if m.ISCSI != nil { - dAtA[i] = 0x42 + if m.NFS != nil { + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n216, err := m.ISCSI.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n216, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n216 } - if m.Glusterfs != nil { - dAtA[i] = 0x4a + if m.ISCSI != nil { + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n217, err := m.Glusterfs.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n217, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n217 } - if m.PersistentVolumeClaim != nil { - dAtA[i] = 0x52 + if m.Glusterfs != nil { + dAtA[i] = 0x4a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n218, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n218, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n218 } - if m.RBD != nil { - dAtA[i] = 0x5a + if m.PersistentVolumeClaim != nil { + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n219, err := m.RBD.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) + n219, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n219 } - if m.FlexVolume != nil { - dAtA[i] = 0x62 + if m.RBD != nil { + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n220, err := m.FlexVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n220, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n220 } - if m.Cinder != nil { - dAtA[i] = 0x6a + if m.FlexVolume != nil { + dAtA[i] = 0x62 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n221, err := m.Cinder.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n221, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n221 } - if m.CephFS != nil { - dAtA[i] = 0x72 + if m.Cinder != nil { + dAtA[i] = 0x6a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n222, err := m.CephFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n222, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n222 } - if m.Flocker != nil { - dAtA[i] = 0x7a + if m.CephFS != nil { + dAtA[i] = 0x72 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n223, err := m.Flocker.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n223, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n223 } + if m.Flocker != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n224, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n224 + } if m.DownwardAPI != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n224, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n225, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n224 + i += n225 } if m.FC != nil { dAtA[i] = 
0x8a @@ -10343,11 +10392,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n225, err := m.FC.MarshalTo(dAtA[i:]) + n226, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n225 + i += n226 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -10355,11 +10404,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n226, err := m.AzureFile.MarshalTo(dAtA[i:]) + n227, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n226 + i += n227 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -10367,11 +10416,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n227, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n228, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n227 + i += n228 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -10379,11 +10428,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n228, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n229, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n228 + i += n229 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -10391,11 +10440,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n229, err := m.Quobyte.MarshalTo(dAtA[i:]) + n230, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n229 + i += n230 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -10403,11 +10452,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n230, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n231, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n230 + i += n231 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -10415,11 +10464,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n231, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n232, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n231 + i += n232 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -10427,11 +10476,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n232, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n233, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n232 + i += n233 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -10439,11 +10488,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n233, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n234, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n233 + i += n234 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -10451,11 +10500,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n234, err := m.Projected.MarshalTo(dAtA[i:]) + n235, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil 
{ return 0, err } - i += n234 + i += n235 } if m.StorageOS != nil { dAtA[i] = 0xda @@ -10463,11 +10512,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n235, err := m.StorageOS.MarshalTo(dAtA[i:]) + n236, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n235 + i += n236 } return i, nil } @@ -10527,11 +10576,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n236, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n237, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n236 + i += n237 return i, nil } @@ -13555,6 +13604,19 @@ func (m *ServiceAccountList) Size() (n int) { return n } +func (m *ServiceAccountTokenProjection) Size() (n int) { + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ServiceList) Size() (n int) { var l int _ = l @@ -13808,6 +13870,10 @@ func (m *VolumeProjection) Size() (n int) { l = m.ConfigMap.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.ServiceAccountToken != nil { + l = m.ServiceAccountToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -16351,6 +16417,18 @@ func (this *ServiceAccountList) String() string { }, "") return s } +func (this *ServiceAccountTokenProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountTokenProjection{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} func (this *ServiceList) String() string { if this == nil { return "nil" @@ -16569,6 +16647,7 @@ func (this *VolumeProjection) String() string { `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, `}`, }, "") return s @@ -45560,6 +45639,134 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } return nil } +func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountTokenProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountTokenProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ServiceList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -48158,6 +48365,39 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccountToken == nil { + m.ServiceAccountToken = &ServiceAccountTokenProjection{} + } + if err := m.ServiceAccountToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -49495,778 +49735,782 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 12356 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x5d, 0x6c, 0x24, 0x57, - 0x76, 0x18, 0xbc, 0xd5, 0xdd, 0xfc, 0xe9, 0xc3, 0xff, 0x3b, 0x33, 0x12, 0x87, 0x92, 0xa6, 0x47, - 0xa5, 0xdd, 0xd1, 0x68, 0x25, 0x91, 0xd6, 0x48, 0x5a, 0xc9, 0xab, 0x5d, 0xd9, 0x24, 0x9b, 0x9c, - 0x69, 0xcd, 0x90, 0xd3, 0xba, 0xcd, 0x99, 0xd9, 0x5d, 0x6b, 0xd7, 0x5b, 0xec, 0xbe, 0x24, 0x4b, - 0x2c, 0x56, 0xb5, 0xaa, 0xaa, 0x39, 0x43, 0xc1, 0x06, 0xbe, 0x4f, 0x4e, 0x9c, 0x38, 0xf6, 0xc3, - 0x22, 
0x36, 0x12, 0xc7, 0x36, 0x1c, 0x20, 0x71, 0x60, 0x6f, 0x9c, 0x04, 0x71, 0xec, 0xd8, 0xce, - 0xae, 0x93, 0x38, 0x4e, 0x1e, 0x9c, 0x97, 0x8d, 0x93, 0x97, 0x35, 0x60, 0x84, 0xb1, 0x69, 0x23, - 0x81, 0x1f, 0x12, 0x04, 0x31, 0x10, 0xc0, 0x8c, 0x11, 0x07, 0xf7, 0xb7, 0xee, 0xad, 0xae, 0xea, - 0x6e, 0x8e, 0x38, 0x94, 0x6c, 0xec, 0x5b, 0xf7, 0x3d, 0xe7, 0x9e, 0x7b, 0xeb, 0xfe, 0x9e, 0x73, - 0xee, 0xf9, 0x81, 0x37, 0x76, 0x5f, 0x8f, 0xe6, 0xdd, 0x60, 0x61, 0xb7, 0xb3, 0x49, 0x42, 0x9f, - 0xc4, 0x24, 0x5a, 0xd8, 0x27, 0x7e, 0x2b, 0x08, 0x17, 0x04, 0xc0, 0x69, 0xbb, 0x0b, 0xcd, 0x20, - 0x24, 0x0b, 0xfb, 0x2f, 0x2d, 0x6c, 0x13, 0x9f, 0x84, 0x4e, 0x4c, 0x5a, 0xf3, 0xed, 0x30, 0x88, - 0x03, 0x84, 0x38, 0xce, 0xbc, 0xd3, 0x76, 0xe7, 0x29, 0xce, 0xfc, 0xfe, 0x4b, 0x73, 0x2f, 0x6e, - 0xbb, 0xf1, 0x4e, 0x67, 0x73, 0xbe, 0x19, 0xec, 0x2d, 0x6c, 0x07, 0xdb, 0xc1, 0x02, 0x43, 0xdd, - 0xec, 0x6c, 0xb1, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xad, 0x25, 0xcd, 0x90, 0x07, 0x31, - 0xf1, 0x23, 0x37, 0xf0, 0xa3, 0x17, 0x9d, 0xb6, 0x1b, 0x91, 0x70, 0x9f, 0x84, 0x0b, 0xed, 0xdd, - 0x6d, 0x0a, 0x8b, 0x4c, 0x84, 0x85, 0xfd, 0x97, 0x36, 0x49, 0xec, 0x74, 0xf5, 0x68, 0xee, 0x95, - 0x84, 0xdc, 0x9e, 0xd3, 0xdc, 0x71, 0x7d, 0x12, 0x1e, 0x48, 0x1a, 0x0b, 0x21, 0x89, 0x82, 0x4e, - 0xd8, 0x24, 0x27, 0xaa, 0x15, 0x2d, 0xec, 0x91, 0xd8, 0xc9, 0xf8, 0xfa, 0xb9, 0x85, 0xbc, 0x5a, - 0x61, 0xc7, 0x8f, 0xdd, 0xbd, 0xee, 0x66, 0x3e, 0xd3, 0xaf, 0x42, 0xd4, 0xdc, 0x21, 0x7b, 0x4e, - 0x57, 0xbd, 0x97, 0xf3, 0xea, 0x75, 0x62, 0xd7, 0x5b, 0x70, 0xfd, 0x38, 0x8a, 0xc3, 0x74, 0x25, - 0xfb, 0xdb, 0x16, 0x5c, 0x5e, 0xbc, 0xd7, 0x58, 0xf1, 0x9c, 0x28, 0x76, 0x9b, 0x4b, 0x5e, 0xd0, - 0xdc, 0x6d, 0xc4, 0x41, 0x48, 0xee, 0x06, 0x5e, 0x67, 0x8f, 0x34, 0xd8, 0x40, 0xa0, 0x17, 0x60, - 0x74, 0x9f, 0xfd, 0xaf, 0x55, 0x67, 0xad, 0xcb, 0xd6, 0xd5, 0xf2, 0xd2, 0xf4, 0x6f, 0x1f, 0x56, - 0x3e, 0x71, 0x74, 0x58, 0x19, 0xbd, 0x2b, 0xca, 0xb1, 0xc2, 0x40, 0x57, 0x60, 0x78, 0x2b, 0xda, - 0x38, 0x68, 0x93, 0xd9, 0x02, 0xc3, 0x9d, 0x14, 0xb8, 0xc3, 0xab, 0x0d, 0x5a, 0x8a, 0x05, 0x14, - 0x2d, 0x40, 0xb9, 0xed, 0x84, 0xb1, 0x1b, 0xbb, 0x81, 0x3f, 0x5b, 0xbc, 0x6c, 0x5d, 0x1d, 0x5a, - 0x9a, 0x11, 0xa8, 0xe5, 0xba, 0x04, 0xe0, 0x04, 0x87, 0x76, 0x23, 0x24, 0x4e, 0xeb, 0xb6, 0xef, - 0x1d, 0xcc, 0x96, 0x2e, 0x5b, 0x57, 0x47, 0x93, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x53, - 0x05, 0x18, 0x5d, 0xdc, 0xda, 0x72, 0x7d, 0x37, 0x3e, 0x40, 0x77, 0x61, 0xdc, 0x0f, 0x5a, 0x44, - 0xfe, 0x67, 0x5f, 0x31, 0x76, 0xed, 0xf2, 0x7c, 0xf7, 0xca, 0x9c, 0x5f, 0xd7, 0xf0, 0x96, 0xa6, - 0x8f, 0x0e, 0x2b, 0xe3, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0x63, 0xed, 0xa0, 0xa5, 0xc8, 0x16, - 0x18, 0xd9, 0x4a, 0x16, 0xd9, 0x7a, 0x82, 0xb6, 0x34, 0x75, 0x74, 0x58, 0x19, 0xd3, 0x0a, 0xb0, - 0x4e, 0x04, 0x6d, 0xc2, 0x14, 0xfd, 0xeb, 0xc7, 0xae, 0xa2, 0x5b, 0x64, 0x74, 0x9f, 0xc9, 0xa3, - 0xab, 0xa1, 0x2e, 0x9d, 0x3b, 0x3a, 0xac, 0x4c, 0xa5, 0x0a, 0x71, 0x9a, 0xa0, 0xfd, 0x3e, 0x4c, - 0x2e, 0xc6, 0xb1, 0xd3, 0xdc, 0x21, 0x2d, 0x3e, 0x83, 0xe8, 0x15, 0x28, 0xf9, 0xce, 0x1e, 0x11, - 0xf3, 0x7b, 0x59, 0x0c, 0x6c, 0x69, 0xdd, 0xd9, 0x23, 0xc7, 0x87, 0x95, 0xe9, 0x3b, 0xbe, 0xfb, - 0x5e, 0x47, 0xac, 0x0a, 0x5a, 0x86, 0x19, 0x36, 0xba, 0x06, 0xd0, 0x22, 0xfb, 0x6e, 0x93, 0xd4, - 0x9d, 0x78, 0x47, 0xcc, 0x37, 0x12, 0x75, 0xa1, 0xaa, 0x20, 0x58, 0xc3, 0xb2, 0x1f, 0x40, 0x79, - 0x71, 0x3f, 0x70, 0x5b, 0xf5, 0xa0, 0x15, 0xa1, 0x5d, 0x98, 0x6a, 0x87, 0x64, 0x8b, 0x84, 0xaa, - 0x68, 0xd6, 0xba, 0x5c, 0xbc, 0x3a, 0x76, 0xed, 0x6a, 0xe6, 0xc7, 0x9a, 0xa8, 0x2b, 0x7e, 0x1c, - 0x1e, 0x2c, 0x3d, 0x2e, 0xda, 
0x9b, 0x4a, 0x41, 0x71, 0x9a, 0xb2, 0xfd, 0xef, 0x0a, 0x70, 0x61, - 0xf1, 0xfd, 0x4e, 0x48, 0xaa, 0x6e, 0xb4, 0x9b, 0x5e, 0xe1, 0x2d, 0x37, 0xda, 0x5d, 0x4f, 0x46, - 0x40, 0x2d, 0xad, 0xaa, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x08, 0x23, 0xf4, 0xf7, 0x1d, 0x5c, 0x13, - 0x9f, 0x7c, 0x4e, 0x20, 0x8f, 0x55, 0x9d, 0xd8, 0xa9, 0x72, 0x10, 0x96, 0x38, 0x68, 0x0d, 0xc6, - 0x9a, 0x6c, 0x43, 0x6e, 0xaf, 0x05, 0x2d, 0xc2, 0x26, 0xb3, 0xbc, 0xf4, 0x3c, 0x45, 0x5f, 0x4e, - 0x8a, 0x8f, 0x0f, 0x2b, 0xb3, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, 0xab, 0xfd, - 0x55, 0x62, 0x94, 0x20, 0x63, 0x6f, 0x5d, 0xd5, 0xb6, 0xca, 0x10, 0xdb, 0x2a, 0xe3, 0xd9, 0xdb, - 0x04, 0xbd, 0x04, 0xa5, 0x5d, 0xd7, 0x6f, 0xcd, 0x0e, 0x33, 0x5a, 0x4f, 0xd1, 0x39, 0xbf, 0xe9, - 0xfa, 0xad, 0xe3, 0xc3, 0xca, 0x8c, 0xd1, 0x1d, 0x5a, 0x88, 0x19, 0xaa, 0xfd, 0x27, 0x16, 0x54, - 0x18, 0x6c, 0xd5, 0xf5, 0x48, 0x9d, 0x84, 0x91, 0x1b, 0xc5, 0xc4, 0x8f, 0x8d, 0x01, 0xbd, 0x06, - 0x10, 0x91, 0x66, 0x48, 0x62, 0x6d, 0x48, 0xd5, 0xc2, 0x68, 0x28, 0x08, 0xd6, 0xb0, 0xe8, 0x81, - 0x10, 0xed, 0x38, 0x21, 0x5b, 0x5f, 0x62, 0x60, 0xd5, 0x81, 0xd0, 0x90, 0x00, 0x9c, 0xe0, 0x18, - 0x07, 0x42, 0xb1, 0xdf, 0x81, 0x80, 0x3e, 0x0f, 0x53, 0x49, 0x63, 0x51, 0xdb, 0x69, 0xca, 0x01, - 0x64, 0x5b, 0xa6, 0x61, 0x82, 0x70, 0x1a, 0xd7, 0xfe, 0x87, 0x96, 0x58, 0x3c, 0xf4, 0xab, 0x3f, - 0xe6, 0xdf, 0x6a, 0xff, 0xba, 0x05, 0x23, 0x4b, 0xae, 0xdf, 0x72, 0xfd, 0x6d, 0xf4, 0x55, 0x18, - 0xa5, 0x77, 0x53, 0xcb, 0x89, 0x1d, 0x71, 0xee, 0x7d, 0x97, 0xb6, 0xb7, 0xd4, 0x55, 0x31, 0xdf, - 0xde, 0xdd, 0xa6, 0x05, 0xd1, 0x3c, 0xc5, 0xa6, 0xbb, 0xed, 0xf6, 0xe6, 0xbb, 0xa4, 0x19, 0xaf, - 0x91, 0xd8, 0x49, 0x3e, 0x27, 0x29, 0xc3, 0x8a, 0x2a, 0xba, 0x09, 0xc3, 0xb1, 0x13, 0x6e, 0x93, - 0x58, 0x1c, 0x80, 0x99, 0x07, 0x15, 0xaf, 0x89, 0xe9, 0x8e, 0x24, 0x7e, 0x93, 0x24, 0xd7, 0xc2, - 0x06, 0xab, 0x8a, 0x05, 0x09, 0xfb, 0x6f, 0x0c, 0xc3, 0xc5, 0xe5, 0x46, 0x2d, 0x67, 0x5d, 0x5d, - 0x81, 0xe1, 0x56, 0xe8, 0xee, 0x93, 0x50, 0x8c, 0xb3, 0xa2, 0x52, 0x65, 0xa5, 0x58, 0x40, 0xd1, - 0xeb, 0x30, 0xce, 0x2f, 0xa4, 0x1b, 0x8e, 0xdf, 0xf2, 0xe4, 0x10, 0x9f, 0x17, 0xd8, 0xe3, 0x77, - 0x35, 0x18, 0x36, 0x30, 0x4f, 0xb8, 0xa8, 0xae, 0xa4, 0x36, 0x63, 0xde, 0x65, 0xf7, 0x23, 0x16, - 0x4c, 0xf3, 0x66, 0x16, 0xe3, 0x38, 0x74, 0x37, 0x3b, 0x31, 0x89, 0x66, 0x87, 0xd8, 0x49, 0xb7, - 0x9c, 0x35, 0x5a, 0xb9, 0x23, 0x30, 0x7f, 0x37, 0x45, 0x85, 0x1f, 0x82, 0xb3, 0xa2, 0xdd, 0xe9, - 0x34, 0x18, 0x77, 0x35, 0x8b, 0x7e, 0xc8, 0x82, 0xb9, 0x66, 0xe0, 0xc7, 0x61, 0xe0, 0x79, 0x24, - 0xac, 0x77, 0x36, 0x3d, 0x37, 0xda, 0xe1, 0xeb, 0x14, 0x93, 0x2d, 0x76, 0x12, 0xe4, 0xcc, 0xa1, - 0x42, 0x12, 0x73, 0x78, 0xe9, 0xe8, 0xb0, 0x32, 0xb7, 0x9c, 0x4b, 0x0a, 0xf7, 0x68, 0x06, 0xed, - 0x02, 0xa2, 0x57, 0x69, 0x23, 0x76, 0xb6, 0x49, 0xd2, 0xf8, 0xc8, 0xe0, 0x8d, 0x3f, 0x76, 0x74, - 0x58, 0x41, 0xeb, 0x5d, 0x24, 0x70, 0x06, 0x59, 0xf4, 0x1e, 0x9c, 0xa7, 0xa5, 0x5d, 0xdf, 0x3a, - 0x3a, 0x78, 0x73, 0xb3, 0x47, 0x87, 0x95, 0xf3, 0xeb, 0x19, 0x44, 0x70, 0x26, 0xe9, 0xb9, 0x65, - 0xb8, 0x90, 0x39, 0x55, 0x68, 0x1a, 0x8a, 0xbb, 0x84, 0xb3, 0x20, 0x65, 0x4c, 0x7f, 0xa2, 0xf3, - 0x30, 0xb4, 0xef, 0x78, 0x1d, 0xb1, 0x4a, 0x31, 0xff, 0xf3, 0xd9, 0xc2, 0xeb, 0x96, 0xdd, 0x84, - 0xf1, 0x65, 0xa7, 0xed, 0x6c, 0xba, 0x9e, 0x1b, 0xbb, 0x24, 0x42, 0xcf, 0x42, 0xd1, 0x69, 0xb5, - 0xd8, 0x15, 0x59, 0x5e, 0xba, 0x70, 0x74, 0x58, 0x29, 0x2e, 0xb6, 0xe8, 0x59, 0x0d, 0x0a, 0xeb, - 0x00, 0x53, 0x0c, 0xf4, 0x69, 0x28, 0xb5, 0xc2, 0xa0, 0x3d, 0x5b, 0x60, 0x98, 0x74, 0xa8, 0x4a, - 0xd5, 0x30, 0x68, 0xa7, 0x50, 0x19, 0x8e, 0xfd, 0x9b, 
0x05, 0x78, 0x72, 0x99, 0xb4, 0x77, 0x56, - 0x1b, 0x39, 0x9b, 0xee, 0x2a, 0x8c, 0xee, 0x05, 0xbe, 0x1b, 0x07, 0x61, 0x24, 0x9a, 0x66, 0xb7, - 0xc9, 0x9a, 0x28, 0xc3, 0x0a, 0x8a, 0x2e, 0x43, 0xa9, 0x9d, 0x70, 0x02, 0xe3, 0x92, 0x8b, 0x60, - 0x3c, 0x00, 0x83, 0x50, 0x8c, 0x4e, 0x44, 0x42, 0x71, 0x0b, 0x2a, 0x8c, 0x3b, 0x11, 0x09, 0x31, - 0x83, 0x24, 0xc7, 0x29, 0x3d, 0x68, 0xc5, 0xb6, 0x4a, 0x1d, 0xa7, 0x14, 0x82, 0x35, 0x2c, 0x54, - 0x87, 0x72, 0xa4, 0x26, 0x75, 0x68, 0xf0, 0x49, 0x9d, 0x60, 0xe7, 0xad, 0x9a, 0xc9, 0x84, 0x88, - 0x71, 0x0c, 0x0c, 0xf7, 0x3d, 0x6f, 0xbf, 0x59, 0x00, 0xc4, 0x87, 0xf0, 0x2f, 0xd8, 0xc0, 0xdd, - 0xe9, 0x1e, 0xb8, 0x4c, 0xce, 0xeb, 0x56, 0xd0, 0x74, 0xbc, 0xf4, 0x11, 0x7e, 0x5a, 0xa3, 0xf7, - 0x93, 0x16, 0xa0, 0x65, 0xd7, 0x6f, 0x91, 0xf0, 0x0c, 0xc4, 0x8e, 0x93, 0x5d, 0xa4, 0xb7, 0x60, - 0x72, 0xd9, 0x73, 0x89, 0x1f, 0xd7, 0xea, 0xcb, 0x81, 0xbf, 0xe5, 0x6e, 0xa3, 0xcf, 0xc2, 0x24, - 0x95, 0xc2, 0x82, 0x4e, 0xdc, 0x20, 0xcd, 0xc0, 0x67, 0x0c, 0x2b, 0x95, 0x5d, 0xd0, 0xd1, 0x61, - 0x65, 0x72, 0xc3, 0x80, 0xe0, 0x14, 0xa6, 0xfd, 0x7b, 0xf4, 0x43, 0x83, 0xbd, 0x76, 0xe0, 0x13, - 0x3f, 0x5e, 0x0e, 0xfc, 0x16, 0x17, 0x6c, 0x3e, 0x0b, 0xa5, 0x98, 0x76, 0x9c, 0x7f, 0xe4, 0x15, - 0x39, 0xb5, 0xb4, 0xbb, 0xc7, 0x87, 0x95, 0xc7, 0xba, 0x6b, 0xb0, 0x0f, 0x62, 0x75, 0xd0, 0x77, - 0xc3, 0x70, 0x14, 0x3b, 0x71, 0x27, 0x12, 0x9f, 0xfd, 0xb4, 0xfc, 0xec, 0x06, 0x2b, 0x3d, 0x3e, - 0xac, 0x4c, 0xa9, 0x6a, 0xbc, 0x08, 0x8b, 0x0a, 0xe8, 0x39, 0x18, 0xd9, 0x23, 0x51, 0xe4, 0x6c, - 0x4b, 0x9e, 0x74, 0x4a, 0xd4, 0x1d, 0x59, 0xe3, 0xc5, 0x58, 0xc2, 0xd1, 0x33, 0x30, 0x44, 0xc2, - 0x30, 0x08, 0xc5, 0xaa, 0x9a, 0x10, 0x88, 0x43, 0x2b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x1f, 0x2c, - 0x98, 0x52, 0x7d, 0xe5, 0x6d, 0x9d, 0x01, 0xf3, 0xf1, 0x25, 0x80, 0xa6, 0xfc, 0xc0, 0x88, 0x9d, - 0x77, 0x63, 0xd7, 0xae, 0x64, 0x5e, 0xa9, 0x5d, 0xc3, 0x98, 0x50, 0x56, 0x45, 0x11, 0xd6, 0xa8, - 0xd9, 0xff, 0xd2, 0x82, 0x73, 0xa9, 0x2f, 0xba, 0xe5, 0x46, 0x31, 0x7a, 0xa7, 0xeb, 0xab, 0xe6, - 0x07, 0xfb, 0x2a, 0x5a, 0x9b, 0x7d, 0x93, 0x5a, 0x73, 0xb2, 0x44, 0xfb, 0xa2, 0x1b, 0x30, 0xe4, - 0xc6, 0x64, 0x4f, 0x7e, 0xcc, 0x33, 0x3d, 0x3f, 0x86, 0xf7, 0x2a, 0x99, 0x91, 0x1a, 0xad, 0x89, - 0x39, 0x01, 0xfb, 0xc7, 0x8b, 0x50, 0xe6, 0xcb, 0x76, 0xcd, 0x69, 0x9f, 0xc1, 0x5c, 0xd4, 0xa0, - 0xc4, 0xa8, 0xf3, 0x8e, 0x3f, 0x9b, 0xdd, 0x71, 0xd1, 0x9d, 0x79, 0x2a, 0x59, 0x70, 0xe6, 0x45, - 0x1d, 0x66, 0xb4, 0x08, 0x33, 0x12, 0xc8, 0x01, 0xd8, 0x74, 0x7d, 0x27, 0x3c, 0xa0, 0x65, 0xb3, - 0x45, 0x46, 0xf0, 0xc5, 0xde, 0x04, 0x97, 0x14, 0x3e, 0x27, 0xab, 0xfa, 0x9a, 0x00, 0xb0, 0x46, - 0x74, 0xee, 0x35, 0x28, 0x2b, 0xe4, 0x93, 0xdc, 0xca, 0x73, 0x9f, 0x87, 0xa9, 0x54, 0x5b, 0xfd, - 0xaa, 0x8f, 0xeb, 0x97, 0xfa, 0x37, 0xd8, 0x29, 0x20, 0x7a, 0xbd, 0xe2, 0xef, 0x8b, 0xe3, 0xee, - 0x7d, 0x38, 0xef, 0x65, 0x9c, 0xb2, 0x62, 0xaa, 0x06, 0x3f, 0x95, 0x9f, 0x14, 0x9f, 0x7d, 0x3e, - 0x0b, 0x8a, 0x33, 0xdb, 0xa0, 0x17, 0x55, 0xd0, 0xa6, 0x6b, 0xde, 0xf1, 0x58, 0x7f, 0x85, 0xbc, - 0x78, 0x5b, 0x94, 0x61, 0x05, 0xa5, 0x47, 0xd8, 0x79, 0xd5, 0xf9, 0x9b, 0xe4, 0xa0, 0x41, 0x3c, - 0xd2, 0x8c, 0x83, 0xf0, 0x23, 0xed, 0xfe, 0x53, 0x7c, 0xf4, 0xf9, 0x09, 0x38, 0x26, 0x08, 0x14, - 0x6f, 0x92, 0x03, 0x3e, 0x15, 0xfa, 0xd7, 0x15, 0x7b, 0x7e, 0xdd, 0x2f, 0x59, 0x30, 0xa1, 0xbe, - 0xee, 0x0c, 0xb6, 0xfa, 0x92, 0xb9, 0xd5, 0x9f, 0xea, 0xb9, 0xc0, 0x73, 0x36, 0xf9, 0x37, 0x0b, - 0x70, 0x51, 0xe1, 0x50, 0x06, 0x95, 0xff, 0x11, 0xab, 0x6a, 0x01, 0xca, 0xbe, 0x92, 0x77, 0x2d, - 0x53, 0xd0, 0x4c, 0xa4, 0xdd, 0x04, 0x87, 0xf2, 0x19, 0x7e, 0x22, 0x94, 0x8e, 
0xeb, 0x8a, 0x20, - 0xa1, 0xf4, 0x59, 0x82, 0x62, 0xc7, 0x6d, 0x89, 0x3b, 0xe3, 0xbb, 0xe4, 0x68, 0xdf, 0xa9, 0x55, - 0x8f, 0x0f, 0x2b, 0x4f, 0xe7, 0x29, 0x21, 0xe9, 0x65, 0x15, 0xcd, 0xdf, 0xa9, 0x55, 0x31, 0xad, - 0x8c, 0x16, 0x61, 0x4a, 0xea, 0x59, 0xef, 0x52, 0xa6, 0x33, 0xf0, 0xc5, 0xd5, 0xa2, 0xb4, 0x39, - 0xd8, 0x04, 0xe3, 0x34, 0x3e, 0xaa, 0xc2, 0xf4, 0x6e, 0x67, 0x93, 0x78, 0x24, 0xe6, 0x1f, 0x7c, - 0x93, 0x70, 0x5d, 0x47, 0x39, 0x11, 0x86, 0x6e, 0xa6, 0xe0, 0xb8, 0xab, 0x86, 0xfd, 0xe7, 0xec, - 0x88, 0x17, 0xa3, 0x57, 0x0f, 0x03, 0xba, 0xb0, 0x28, 0xf5, 0x8f, 0x72, 0x39, 0x0f, 0xb2, 0x2a, - 0x6e, 0x92, 0x83, 0x8d, 0x80, 0xb2, 0x87, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0xa5, 0x9e, 0x6b, 0xfe, - 0x57, 0x0a, 0x70, 0x41, 0x8d, 0x80, 0xc1, 0x80, 0xfd, 0x45, 0x1f, 0x83, 0x97, 0x60, 0xac, 0x45, - 0xb6, 0x9c, 0x8e, 0x17, 0x2b, 0xc5, 0xdb, 0x10, 0x57, 0xbe, 0x56, 0x93, 0x62, 0xac, 0xe3, 0x9c, - 0x60, 0xd8, 0x7e, 0x6e, 0x8c, 0xdd, 0xad, 0xb1, 0x43, 0xd7, 0xb8, 0xda, 0x35, 0x56, 0xee, 0xae, - 0x79, 0x06, 0x86, 0xdc, 0x3d, 0xca, 0x6b, 0x15, 0x4c, 0x16, 0xaa, 0x46, 0x0b, 0x31, 0x87, 0xa1, - 0x4f, 0xc1, 0x48, 0x33, 0xd8, 0xdb, 0x73, 0xfc, 0x16, 0xbb, 0xf2, 0xca, 0x4b, 0x63, 0x94, 0x1d, - 0x5b, 0xe6, 0x45, 0x58, 0xc2, 0xd0, 0x93, 0x50, 0x72, 0xc2, 0xed, 0x68, 0xb6, 0xc4, 0x70, 0x46, - 0x69, 0x4b, 0x8b, 0xe1, 0x76, 0x84, 0x59, 0x29, 0x95, 0x03, 0xee, 0x07, 0xe1, 0xae, 0xeb, 0x6f, - 0x57, 0xdd, 0x50, 0x6c, 0x09, 0x75, 0x17, 0xde, 0x53, 0x10, 0xac, 0x61, 0xa1, 0x55, 0x18, 0x6a, - 0x07, 0x61, 0x1c, 0xcd, 0x0e, 0xb3, 0xe1, 0x7e, 0x3a, 0xe7, 0x20, 0xe2, 0x5f, 0x5b, 0x0f, 0xc2, - 0x38, 0xf9, 0x00, 0xfa, 0x2f, 0xc2, 0xbc, 0x3a, 0xfa, 0x6e, 0x28, 0x12, 0x7f, 0x7f, 0x76, 0x84, - 0x51, 0x99, 0xcb, 0xa2, 0xb2, 0xe2, 0xef, 0xdf, 0x75, 0xc2, 0xe4, 0x94, 0x5e, 0xf1, 0xf7, 0x31, - 0xad, 0x83, 0xbe, 0x08, 0x65, 0xb9, 0xc5, 0x23, 0x21, 0x98, 0x67, 0x2e, 0x31, 0x79, 0x30, 0x60, - 0xf2, 0x5e, 0xc7, 0x0d, 0xc9, 0x1e, 0xf1, 0xe3, 0x28, 0x39, 0xd3, 0x24, 0x34, 0xc2, 0x09, 0x35, - 0xf4, 0x45, 0xa9, 0x0d, 0x5a, 0x0b, 0x3a, 0x7e, 0x1c, 0xcd, 0x96, 0x59, 0xf7, 0x32, 0xf5, 0xf4, - 0x77, 0x13, 0xbc, 0xb4, 0xba, 0x88, 0x57, 0xc6, 0x06, 0x29, 0x84, 0x61, 0xc2, 0x73, 0xf7, 0x89, - 0x4f, 0xa2, 0xa8, 0x1e, 0x06, 0x9b, 0x64, 0x16, 0x58, 0xcf, 0x2f, 0x66, 0xab, 0xaf, 0x83, 0x4d, - 0xb2, 0x34, 0x73, 0x74, 0x58, 0x99, 0xb8, 0xa5, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x07, 0x26, 0xa9, - 0x00, 0xe2, 0x26, 0x44, 0xc7, 0xfa, 0x11, 0x65, 0xd2, 0x07, 0x36, 0x2a, 0xe1, 0x14, 0x11, 0xf4, - 0x16, 0x94, 0x3d, 0x77, 0x8b, 0x34, 0x0f, 0x9a, 0x1e, 0x99, 0x1d, 0x67, 0x14, 0x33, 0xb7, 0xd5, - 0x2d, 0x89, 0xc4, 0x05, 0x3c, 0xf5, 0x17, 0x27, 0xd5, 0xd1, 0x5d, 0x78, 0x2c, 0x26, 0xe1, 0x9e, - 0xeb, 0x3b, 0x74, 0x3b, 0x08, 0x79, 0x81, 0x3d, 0x02, 0x4c, 0xb0, 0xf5, 0x76, 0x49, 0x0c, 0xdd, - 0x63, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba, 0x0d, 0x53, 0x6c, 0x27, 0xd4, 0x3b, 0x9e, 0x57, - 0x0f, 0x3c, 0xb7, 0x79, 0x30, 0x3b, 0xc9, 0x08, 0x7e, 0x4a, 0xde, 0x0b, 0x35, 0x13, 0x7c, 0x7c, - 0x58, 0x81, 0xe4, 0x1f, 0x4e, 0xd7, 0x46, 0x9b, 0x4c, 0xeb, 0xdb, 0x09, 0xdd, 0xf8, 0x80, 0xae, - 0x5f, 0xf2, 0x20, 0x9e, 0x9d, 0xea, 0xa9, 0x1f, 0xd0, 0x51, 0x95, 0x6a, 0x58, 0x2f, 0xc4, 0x69, - 0x82, 0x74, 0x6b, 0x47, 0x71, 0xcb, 0xf5, 0x67, 0xa7, 0xd9, 0x89, 0xa1, 0x76, 0x46, 0x83, 0x16, - 0x62, 0x0e, 0x63, 0x1a, 0x5f, 0xfa, 0xe3, 0x36, 0x3d, 0x41, 0x67, 0x18, 0x62, 0xa2, 0xf1, 0x95, - 0x00, 0x9c, 0xe0, 0x50, 0xa6, 0x26, 0x8e, 0x0f, 0x66, 0x11, 0x43, 0x55, 0xdb, 0x65, 0x63, 0xe3, - 0x8b, 0x98, 0x96, 0xa3, 0x5b, 0x30, 0x42, 0xfc, 0xfd, 0xd5, 0x30, 0xd8, 0x9b, 0x3d, 0x97, 0xbf, - 0x67, 
0x57, 0x38, 0x0a, 0x3f, 0xd0, 0x13, 0x01, 0x4f, 0x14, 0x63, 0x49, 0x02, 0x3d, 0x80, 0xd9, - 0x8c, 0x19, 0xe1, 0x13, 0x70, 0x9e, 0x4d, 0xc0, 0xe7, 0x44, 0xdd, 0xd9, 0x8d, 0x1c, 0xbc, 0xe3, - 0x1e, 0x30, 0x9c, 0x4b, 0x1d, 0x7d, 0x19, 0x26, 0xf8, 0x86, 0xe2, 0xcf, 0x45, 0xd1, 0xec, 0x05, - 0xf6, 0x35, 0x97, 0xf3, 0x37, 0x27, 0x47, 0x5c, 0xba, 0x20, 0x3a, 0x34, 0xa1, 0x97, 0x46, 0xd8, - 0xa4, 0x66, 0x6f, 0xc2, 0xa4, 0x3a, 0xb7, 0xd8, 0xd2, 0x41, 0x15, 0x18, 0x62, 0xdc, 0x8e, 0xd0, - 0xc8, 0x94, 0xe9, 0x4c, 0x31, 0x4e, 0x08, 0xf3, 0x72, 0x36, 0x53, 0xee, 0xfb, 0x64, 0xe9, 0x20, - 0x26, 0x5c, 0xaa, 0x2e, 0x6a, 0x33, 0x25, 0x01, 0x38, 0xc1, 0xb1, 0xff, 0x2f, 0xe7, 0x1a, 0x93, - 0xc3, 0x71, 0x80, 0xeb, 0xe0, 0x05, 0x18, 0xdd, 0x09, 0xa2, 0x98, 0x62, 0xb3, 0x36, 0x86, 0x12, - 0x3e, 0xf1, 0x86, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x01, 0x13, 0x4d, 0xbd, 0x01, 0x71, 0x97, 0xa9, - 0x21, 0x30, 0x5a, 0xc7, 0x26, 0x2e, 0x7a, 0x1d, 0x46, 0xd9, 0x63, 0x6f, 0x33, 0xf0, 0x04, 0x93, - 0x25, 0x2f, 0xe4, 0xd1, 0xba, 0x28, 0x3f, 0xd6, 0x7e, 0x63, 0x85, 0x8d, 0xae, 0xc0, 0x30, 0xed, - 0x42, 0xad, 0x2e, 0x6e, 0x11, 0xa5, 0x53, 0xb9, 0xc1, 0x4a, 0xb1, 0x80, 0xda, 0x7f, 0xb3, 0xa0, - 0x8d, 0x32, 0x95, 0x48, 0x09, 0xaa, 0xc3, 0xc8, 0x7d, 0xc7, 0x8d, 0x5d, 0x7f, 0x5b, 0xb0, 0x0b, - 0xcf, 0xf5, 0xbc, 0x52, 0x58, 0xa5, 0x7b, 0xbc, 0x02, 0xbf, 0xf4, 0xc4, 0x1f, 0x2c, 0xc9, 0x50, - 0x8a, 0x61, 0xc7, 0xf7, 0x29, 0xc5, 0xc2, 0xa0, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, 0x3f, 0x58, - 0x92, 0x41, 0xef, 0x00, 0xc8, 0x65, 0x49, 0x5a, 0xe2, 0x91, 0xf5, 0x85, 0xfe, 0x44, 0x37, 0x54, - 0x9d, 0xa5, 0x49, 0x7a, 0xa5, 0x26, 0xff, 0xb1, 0x46, 0xcf, 0x8e, 0x19, 0x5b, 0xd5, 0xdd, 0x19, - 0xf4, 0x7d, 0xf4, 0x24, 0x70, 0xc2, 0x98, 0xb4, 0x16, 0x63, 0x31, 0x38, 0x9f, 0x1e, 0x4c, 0xa6, - 0xd8, 0x70, 0xf7, 0x88, 0x7e, 0x6a, 0x08, 0x22, 0x38, 0xa1, 0x67, 0xff, 0x5a, 0x11, 0x66, 0xf3, - 0xba, 0x4b, 0x17, 0x1d, 0x79, 0xe0, 0xc6, 0xcb, 0x94, 0x1b, 0xb2, 0xcc, 0x45, 0xb7, 0x22, 0xca, - 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, 0xb9, 0xdb, 0x52, 0x24, 0x1c, 0x4a, 0x66, 0xbf, 0xc1, 0x4a, 0xb1, - 0x80, 0x52, 0xbc, 0x90, 0x38, 0x91, 0x78, 0xc5, 0xd7, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, - 0x7d, 0x53, 0xa9, 0x8f, 0xbe, 0xc9, 0x18, 0xa2, 0xa1, 0xd3, 0x1d, 0x22, 0xf4, 0x15, 0x80, 0x2d, - 0xd7, 0x77, 0xa3, 0x1d, 0x46, 0x7d, 0xf8, 0xc4, 0xd4, 0x15, 0x2f, 0xb5, 0xaa, 0xa8, 0x60, 0x8d, - 0x22, 0x7a, 0x15, 0xc6, 0xd4, 0x06, 0xac, 0x55, 0xd9, 0x93, 0x86, 0xf6, 0x44, 0x9c, 0x9c, 0x46, - 0x55, 0xac, 0xe3, 0xd9, 0xef, 0xa6, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0x83, 0x8e, 0x6f, - 0xa1, 0xf7, 0xf8, 0xda, 0xbf, 0x55, 0x84, 0x29, 0xa3, 0xb1, 0x4e, 0x34, 0xc0, 0x99, 0x75, 0x9d, - 0xde, 0x73, 0x4e, 0x4c, 0xc4, 0xfe, 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x2e, 0xa4, 0x3b, 0x80, 0xd7, - 0x47, 0x5f, 0x81, 0xb2, 0xe7, 0x44, 0x4c, 0x77, 0x45, 0xc4, 0xbe, 0x1b, 0x84, 0x58, 0x22, 0x47, - 0x38, 0x51, 0xac, 0x5d, 0x35, 0x9c, 0x76, 0x42, 0x92, 0x5e, 0xc8, 0x94, 0xf7, 0x91, 0x66, 0x22, - 0xaa, 0x13, 0x94, 0x41, 0x3a, 0xc0, 0x1c, 0x86, 0x5e, 0x87, 0xf1, 0x90, 0xb0, 0x55, 0xb1, 0x4c, - 0x59, 0x39, 0xb6, 0xcc, 0x86, 0x12, 0x9e, 0x0f, 0x6b, 0x30, 0x6c, 0x60, 0x26, 0xac, 0xfc, 0x70, - 0x0f, 0x56, 0xfe, 0x39, 0x18, 0x61, 0x3f, 0xd4, 0x0a, 0x50, 0xb3, 0x51, 0xe3, 0xc5, 0x58, 0xc2, - 0xd3, 0x0b, 0x66, 0x74, 0xc0, 0x05, 0xf3, 0x69, 0x98, 0xac, 0x3a, 0x64, 0x2f, 0xf0, 0x57, 0xfc, - 0x56, 0x3b, 0x70, 0xfd, 0x18, 0xcd, 0x42, 0x89, 0xdd, 0x0e, 0x7c, 0x6f, 0x97, 0x28, 0x05, 0x5c, - 0xa2, 0x8c, 0xb9, 0xbd, 0x0d, 0x17, 0xaa, 0xc1, 0x7d, 0xff, 0xbe, 0x13, 0xb6, 0x16, 0xeb, 0x35, - 0x4d, 0xce, 0x5d, 0x97, 0x72, 
0x16, 0x37, 0xbb, 0xc8, 0x3c, 0x53, 0xb5, 0x9a, 0xfc, 0xae, 0x5d, - 0x75, 0x3d, 0x92, 0xa3, 0x8d, 0xf8, 0xdb, 0x05, 0xa3, 0xa5, 0x04, 0x5f, 0x3d, 0x71, 0x58, 0xb9, - 0x4f, 0x1c, 0x6f, 0xc3, 0xe8, 0x96, 0x4b, 0xbc, 0x16, 0x26, 0x5b, 0x62, 0x89, 0x3d, 0x9b, 0xff, - 0x92, 0xbc, 0x4a, 0x31, 0xa5, 0xf6, 0x89, 0x4b, 0x69, 0xab, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x2e, - 0x4c, 0x4b, 0x31, 0x40, 0x42, 0xc5, 0x82, 0x7b, 0xae, 0x97, 0x6c, 0x61, 0x12, 0x3f, 0x7f, 0x74, - 0x58, 0x99, 0xc6, 0x29, 0x32, 0xb8, 0x8b, 0x30, 0x15, 0xcb, 0xf6, 0xe8, 0xd1, 0x5a, 0x62, 0xc3, - 0xcf, 0xc4, 0x32, 0x26, 0x61, 0xb2, 0x52, 0xfb, 0x67, 0x2c, 0x78, 0xbc, 0x6b, 0x64, 0x84, 0xa4, - 0x7d, 0xca, 0xb3, 0x90, 0x96, 0x7c, 0x0b, 0xfd, 0x25, 0x5f, 0xfb, 0x1f, 0x59, 0x70, 0x7e, 0x65, - 0xaf, 0x1d, 0x1f, 0x54, 0x5d, 0xf3, 0x19, 0xe6, 0x35, 0x18, 0xde, 0x23, 0x2d, 0xb7, 0xb3, 0x27, - 0x66, 0xae, 0x22, 0x8f, 0x9f, 0x35, 0x56, 0x7a, 0x7c, 0x58, 0x99, 0x68, 0xc4, 0x41, 0xe8, 0x6c, - 0x13, 0x5e, 0x80, 0x05, 0x3a, 0x3b, 0xc4, 0xdd, 0xf7, 0xc9, 0x2d, 0x77, 0xcf, 0x95, 0x96, 0x01, - 0x3d, 0x75, 0x67, 0xf3, 0x72, 0x40, 0xe7, 0xdf, 0xee, 0x38, 0x7e, 0xec, 0xc6, 0x07, 0xe2, 0x85, - 0x49, 0x12, 0xc1, 0x09, 0x3d, 0xfb, 0xdb, 0x16, 0x4c, 0xc9, 0x75, 0xbf, 0xd8, 0x6a, 0x85, 0x24, - 0x8a, 0xd0, 0x1c, 0x14, 0xdc, 0xb6, 0xe8, 0x25, 0x88, 0x5e, 0x16, 0x6a, 0x75, 0x5c, 0x70, 0xdb, - 0xa8, 0x0e, 0x65, 0x6e, 0x60, 0x90, 0x2c, 0xae, 0x81, 0xcc, 0x14, 0x58, 0x0f, 0x36, 0x64, 0x4d, - 0x9c, 0x10, 0x91, 0x1c, 0x1c, 0x3b, 0x33, 0x8b, 0xe6, 0xf3, 0xd4, 0x0d, 0x51, 0x8e, 0x15, 0x06, - 0xba, 0x0a, 0xa3, 0x7e, 0xd0, 0xe2, 0xf6, 0x1e, 0xfc, 0xf6, 0x63, 0x4b, 0x76, 0x5d, 0x94, 0x61, - 0x05, 0xb5, 0x7f, 0xcc, 0x82, 0x71, 0xf9, 0x65, 0x03, 0x32, 0x93, 0x74, 0x6b, 0x25, 0x8c, 0x64, - 0xb2, 0xb5, 0x28, 0x33, 0xc8, 0x20, 0x06, 0x0f, 0x58, 0x3c, 0x09, 0x0f, 0x68, 0xff, 0x74, 0x01, - 0x26, 0x65, 0x77, 0x1a, 0x9d, 0xcd, 0x88, 0xc4, 0x68, 0x03, 0xca, 0x0e, 0x1f, 0x72, 0x22, 0x57, - 0xec, 0x33, 0xd9, 0xc2, 0x87, 0x31, 0x3f, 0xc9, 0xb5, 0xbc, 0x28, 0x6b, 0xe3, 0x84, 0x10, 0xf2, - 0x60, 0xc6, 0x0f, 0x62, 0x76, 0x44, 0x2b, 0x78, 0xaf, 0x27, 0x90, 0x34, 0xf5, 0x8b, 0x82, 0xfa, - 0xcc, 0x7a, 0x9a, 0x0a, 0xee, 0x26, 0x8c, 0x56, 0xa4, 0xc2, 0xa3, 0x98, 0x2f, 0x6e, 0xe8, 0xb3, - 0x90, 0xad, 0xef, 0xb0, 0x7f, 0xc3, 0x82, 0xb2, 0x44, 0x3b, 0x8b, 0xd7, 0xae, 0x35, 0x18, 0x89, - 0xd8, 0x24, 0xc8, 0xa1, 0xb1, 0x7b, 0x75, 0x9c, 0xcf, 0x57, 0x72, 0xf3, 0xf0, 0xff, 0x11, 0x96, - 0x34, 0x98, 0xbe, 0x5b, 0x75, 0xff, 0x63, 0xa2, 0xef, 0x56, 0xfd, 0xc9, 0xb9, 0x61, 0xfe, 0x1b, - 0xeb, 0xb3, 0x26, 0xd6, 0x52, 0x06, 0xa9, 0x1d, 0x92, 0x2d, 0xf7, 0x41, 0x9a, 0x41, 0xaa, 0xb3, - 0x52, 0x2c, 0xa0, 0xe8, 0x1d, 0x18, 0x6f, 0x4a, 0x45, 0x67, 0x72, 0x0c, 0x5c, 0xe9, 0xa9, 0x74, - 0x57, 0xef, 0x33, 0xdc, 0x16, 0x74, 0x59, 0xab, 0x8f, 0x0d, 0x6a, 0xa6, 0x0d, 0x42, 0xb1, 0x9f, - 0x0d, 0x42, 0x42, 0x37, 0xf7, 0x15, 0xdd, 0xfe, 0x59, 0x0b, 0x86, 0xb9, 0xba, 0x6c, 0x30, 0xfd, - 0xa2, 0xf6, 0x5c, 0x95, 0x8c, 0xdd, 0x5d, 0x5a, 0x28, 0x9e, 0x9f, 0xd0, 0x1a, 0x94, 0xd9, 0x0f, - 0xa6, 0x36, 0x28, 0xe6, 0x1b, 0xc1, 0xf2, 0x56, 0xf5, 0x0e, 0xde, 0x95, 0xd5, 0x70, 0x42, 0xc1, - 0xfe, 0x89, 0x22, 0x3d, 0xaa, 0x12, 0x54, 0xe3, 0x06, 0xb7, 0x1e, 0xdd, 0x0d, 0x5e, 0x78, 0x54, - 0x37, 0xf8, 0x36, 0x4c, 0x35, 0xb5, 0xc7, 0xad, 0x64, 0x26, 0xaf, 0xf6, 0x5c, 0x24, 0xda, 0x3b, - 0x18, 0x57, 0x19, 0x2d, 0x9b, 0x44, 0x70, 0x9a, 0x2a, 0xfa, 0x3e, 0x18, 0xe7, 0xf3, 0x2c, 0x5a, - 0x29, 0xb1, 0x56, 0x3e, 0x95, 0xbf, 0x5e, 0xf4, 0x26, 0xd8, 0x4a, 0x6c, 0x68, 0xd5, 0xb1, 0x41, - 0xcc, 0xfe, 0xb5, 0x51, 0x18, 0x5a, 0xd9, 0x27, 0x7e, 
0x7c, 0x06, 0x07, 0x52, 0x13, 0x26, 0x5d, - 0x7f, 0x3f, 0xf0, 0xf6, 0x49, 0x8b, 0xc3, 0x4f, 0x72, 0xb9, 0x3e, 0x26, 0x48, 0x4f, 0xd6, 0x0c, - 0x12, 0x38, 0x45, 0xf2, 0x51, 0x48, 0x98, 0xd7, 0x61, 0x98, 0xcf, 0xbd, 0x10, 0x2f, 0x33, 0x95, - 0xc1, 0x6c, 0x10, 0xc5, 0x2e, 0x48, 0xa4, 0x5f, 0xae, 0x7d, 0x16, 0xd5, 0xd1, 0xbb, 0x30, 0xb9, - 0xe5, 0x86, 0x51, 0x4c, 0x45, 0xc3, 0x28, 0x76, 0xf6, 0xda, 0x0f, 0x21, 0x51, 0xaa, 0x71, 0x58, - 0x35, 0x28, 0xe1, 0x14, 0x65, 0xb4, 0x0d, 0x13, 0x54, 0xc8, 0x49, 0x9a, 0x1a, 0x39, 0x71, 0x53, - 0x4a, 0x65, 0x74, 0x4b, 0x27, 0x84, 0x4d, 0xba, 0xf4, 0x30, 0x69, 0x32, 0xa1, 0x68, 0x94, 0x71, - 0x14, 0xea, 0x30, 0xe1, 0xd2, 0x10, 0x87, 0xd1, 0x33, 0x89, 0x99, 0xad, 0x94, 0xcd, 0x33, 0x49, - 0x33, 0x4e, 0xf9, 0x2a, 0x94, 0x09, 0x1d, 0x42, 0x4a, 0x58, 0x28, 0xc6, 0x17, 0x06, 0xeb, 0xeb, - 0x9a, 0xdb, 0x0c, 0x03, 0x53, 0x96, 0x5f, 0x91, 0x94, 0x70, 0x42, 0x14, 0x2d, 0xc3, 0x70, 0x44, - 0x42, 0x97, 0x44, 0x42, 0x45, 0xde, 0x63, 0x1a, 0x19, 0x1a, 0xb7, 0x96, 0xe6, 0xbf, 0xb1, 0xa8, - 0x4a, 0x97, 0x97, 0xc3, 0xa4, 0x21, 0xa6, 0x15, 0xd7, 0x96, 0xd7, 0x22, 0x2b, 0xc5, 0x02, 0x8a, - 0xde, 0x82, 0x91, 0x90, 0x78, 0x4c, 0x59, 0x34, 0x31, 0xf8, 0x22, 0xe7, 0xba, 0x27, 0x5e, 0x0f, - 0x4b, 0x02, 0xe8, 0x26, 0xa0, 0x90, 0x50, 0x1e, 0xc2, 0xf5, 0xb7, 0x95, 0x31, 0x87, 0xd0, 0x75, - 0x3f, 0x21, 0xda, 0x3f, 0x87, 0x13, 0x0c, 0x69, 0x47, 0x89, 0x33, 0xaa, 0xa1, 0xeb, 0x30, 0xa3, - 0x4a, 0x6b, 0x7e, 0x14, 0x3b, 0x7e, 0x93, 0x30, 0x35, 0x77, 0x39, 0xe1, 0x8a, 0x70, 0x1a, 0x01, - 0x77, 0xd7, 0xb1, 0xbf, 0x4e, 0xd9, 0x19, 0x3a, 0x5a, 0x67, 0xc0, 0x0b, 0xbc, 0x69, 0xf2, 0x02, - 0x17, 0x73, 0x67, 0x2e, 0x87, 0x0f, 0x38, 0xb2, 0x60, 0x4c, 0x9b, 0xd9, 0x64, 0xcd, 0x5a, 0x3d, - 0xd6, 0x6c, 0x07, 0xa6, 0xe9, 0x4a, 0xbf, 0xbd, 0xc9, 0x1c, 0x87, 0x5a, 0x6c, 0x61, 0x16, 0x1e, - 0x6e, 0x61, 0xaa, 0x57, 0xe6, 0x5b, 0x29, 0x82, 0xb8, 0xab, 0x09, 0xf4, 0x9a, 0xd4, 0x9c, 0x14, - 0x0d, 0x23, 0x2d, 0xae, 0x15, 0x39, 0x3e, 0xac, 0x4c, 0x6b, 0x1f, 0xa2, 0x6b, 0x4a, 0xec, 0xaf, - 0xca, 0x6f, 0x54, 0xaf, 0xf9, 0x4d, 0xb5, 0x58, 0x52, 0xaf, 0xf9, 0x6a, 0x39, 0xe0, 0x04, 0x87, - 0xee, 0x51, 0x2a, 0x82, 0xa4, 0x5f, 0xf3, 0xa9, 0x80, 0x82, 0x19, 0xc4, 0x7e, 0x19, 0x60, 0xe5, - 0x01, 0x69, 0xf2, 0xa5, 0xae, 0x3f, 0x40, 0x5a, 0xf9, 0x0f, 0x90, 0xf6, 0x7f, 0xb2, 0x60, 0x72, - 0x75, 0xd9, 0x10, 0x13, 0xe7, 0x01, 0xb8, 0x6c, 0x74, 0xef, 0xde, 0xba, 0xd4, 0xad, 0x73, 0xf5, - 0xa8, 0x2a, 0xc5, 0x1a, 0x06, 0xba, 0x08, 0x45, 0xaf, 0xe3, 0x0b, 0x91, 0x65, 0xe4, 0xe8, 0xb0, - 0x52, 0xbc, 0xd5, 0xf1, 0x31, 0x2d, 0xd3, 0x4c, 0xf9, 0x8a, 0x03, 0x9b, 0xf2, 0xf5, 0x75, 0x08, - 0x42, 0x15, 0x18, 0xba, 0x7f, 0xdf, 0x6d, 0x71, 0xb3, 0x6b, 0xa1, 0xf7, 0xbf, 0x77, 0xaf, 0x56, - 0x8d, 0x30, 0x2f, 0xb7, 0xbf, 0x56, 0x84, 0xb9, 0x55, 0x8f, 0x3c, 0xf8, 0x90, 0xa6, 0xe7, 0x83, - 0x1a, 0x22, 0x9e, 0x8c, 0x5f, 0x3c, 0xa9, 0xd5, 0x65, 0xff, 0xf1, 0xd8, 0x82, 0x11, 0xfe, 0x98, - 0x2d, 0x0d, 0xd1, 0xdf, 0xc8, 0x6a, 0x3d, 0x7f, 0x40, 0xe6, 0xf9, 0xa3, 0xb8, 0x30, 0x40, 0x57, - 0x37, 0xad, 0x28, 0xc5, 0x92, 0xf8, 0xdc, 0x67, 0x61, 0x5c, 0xc7, 0x3c, 0x91, 0xfd, 0xf3, 0xff, - 0x5f, 0x84, 0x69, 0xda, 0x83, 0x47, 0x3a, 0x11, 0x77, 0xba, 0x27, 0xe2, 0xb4, 0x6d, 0x60, 0xfb, - 0xcf, 0xc6, 0x3b, 0xe9, 0xd9, 0x78, 0x29, 0x6f, 0x36, 0xce, 0x7a, 0x0e, 0x7e, 0xc8, 0x82, 0x73, - 0xab, 0x5e, 0xd0, 0xdc, 0x4d, 0x99, 0xe7, 0xbe, 0x0a, 0x63, 0xf4, 0x1c, 0x8f, 0x0c, 0xbf, 0x17, - 0xc3, 0x13, 0x4a, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0xbb, 0x73, 0xa7, 0x56, 0xcd, 0x72, 0xa0, 0x12, - 0x20, 0xac, 0xe3, 0xd9, 0xdf, 0xb2, 0xe0, 0xa9, 0xeb, 0xcb, 0x2b, 0xc9, 0x52, 
0xec, 0xf2, 0xe1, - 0xa2, 0x52, 0x60, 0x4b, 0xeb, 0x4a, 0x22, 0x05, 0x56, 0x59, 0x2f, 0x04, 0xf4, 0xe3, 0xe2, 0x9f, - 0xf8, 0x0b, 0x16, 0x9c, 0xbb, 0xee, 0xc6, 0xf4, 0x5a, 0x4e, 0x7b, 0x13, 0xd1, 0x7b, 0x39, 0x72, - 0xe3, 0x20, 0x3c, 0x48, 0x7b, 0x13, 0x61, 0x05, 0xc1, 0x1a, 0x16, 0x6f, 0x79, 0xdf, 0x65, 0x66, - 0x54, 0x05, 0x53, 0x15, 0x85, 0x45, 0x39, 0x56, 0x18, 0xf4, 0xc3, 0x5a, 0x6e, 0xc8, 0x44, 0x89, - 0x03, 0x71, 0xc2, 0xaa, 0x0f, 0xab, 0x4a, 0x00, 0x4e, 0x70, 0xec, 0x9f, 0xb1, 0xe0, 0xc2, 0x75, - 0xaf, 0x13, 0xc5, 0x24, 0xdc, 0x8a, 0x8c, 0xce, 0xbe, 0x0c, 0x65, 0x22, 0xc5, 0x75, 0xd1, 0x57, - 0xc5, 0x60, 0x2a, 0x39, 0x9e, 0xbb, 0x32, 0x29, 0xbc, 0x01, 0x6c, 0xdd, 0x4f, 0x66, 0xa3, 0xfd, - 0xcb, 0x05, 0x98, 0xb8, 0xb1, 0xb1, 0x51, 0xbf, 0x4e, 0x62, 0x71, 0x8b, 0xf5, 0x57, 0x35, 0x63, - 0x4d, 0x63, 0xd6, 0x4b, 0x28, 0xea, 0xc4, 0xae, 0x37, 0xcf, 0x7d, 0x67, 0xe7, 0x6b, 0x7e, 0x7c, - 0x3b, 0x6c, 0xc4, 0xa1, 0xeb, 0x6f, 0x67, 0xea, 0xd8, 0xe4, 0x5d, 0x5b, 0xcc, 0xbb, 0x6b, 0xd1, - 0xcb, 0x30, 0xcc, 0x9c, 0x77, 0xa5, 0x78, 0xf2, 0x84, 0x92, 0x29, 0x58, 0xe9, 0xf1, 0x61, 0xa5, - 0x7c, 0x07, 0xd7, 0xf8, 0x1f, 0x2c, 0x50, 0xd1, 0x1d, 0x18, 0xdb, 0x89, 0xe3, 0xf6, 0x0d, 0xe2, - 0xb4, 0x48, 0x28, 0x4f, 0x87, 0x4b, 0x59, 0xa7, 0x03, 0x1d, 0x04, 0x8e, 0x96, 0x6c, 0xa8, 0xa4, - 0x2c, 0xc2, 0x3a, 0x1d, 0xbb, 0x01, 0x90, 0xc0, 0x4e, 0x49, 0xbf, 0x60, 0xff, 0xa1, 0x05, 0x23, - 0xdc, 0x8f, 0x2a, 0x44, 0x9f, 0x83, 0x12, 0x79, 0x40, 0x9a, 0x82, 0x73, 0xcc, 0xec, 0x70, 0xc2, - 0x78, 0x70, 0x6d, 0x39, 0xfd, 0x8f, 0x59, 0x2d, 0x74, 0x03, 0x46, 0x68, 0x6f, 0xaf, 0x2b, 0xa7, - 0xb2, 0xa7, 0xf3, 0xbe, 0x58, 0x4d, 0x3b, 0xe7, 0x55, 0x44, 0x11, 0x96, 0xd5, 0x99, 0xe6, 0xb7, - 0xd9, 0x6e, 0xd0, 0x03, 0x2c, 0xee, 0x75, 0xcf, 0x6e, 0x2c, 0xd7, 0x39, 0x92, 0xa0, 0xc6, 0x35, - 0xbf, 0xb2, 0x10, 0x27, 0x44, 0xec, 0x0d, 0x28, 0xd3, 0x49, 0x5d, 0xf4, 0x5c, 0xa7, 0xb7, 0xd2, - 0xf9, 0x79, 0x28, 0x4b, 0x05, 0x70, 0x24, 0x5c, 0x71, 0x18, 0x55, 0xa9, 0x1f, 0x8e, 0x70, 0x02, - 0xb7, 0xb7, 0xe0, 0x3c, 0x7b, 0xf9, 0x77, 0xe2, 0x1d, 0x63, 0x8f, 0xf5, 0x5f, 0xcc, 0x2f, 0x08, - 0x41, 0x8c, 0xcf, 0xcc, 0xac, 0xe6, 0x3b, 0x30, 0x2e, 0x29, 0x26, 0x42, 0x99, 0xfd, 0xc7, 0x25, - 0x78, 0xa2, 0xd6, 0xc8, 0x77, 0xb1, 0x7b, 0x1d, 0xc6, 0x39, 0x9b, 0x46, 0x97, 0xb6, 0xe3, 0x89, - 0x76, 0xd5, 0xbb, 0xd8, 0x86, 0x06, 0xc3, 0x06, 0x26, 0x7a, 0x0a, 0x8a, 0xee, 0x7b, 0x7e, 0xda, - 0x0c, 0xb7, 0xf6, 0xf6, 0x3a, 0xa6, 0xe5, 0x14, 0x4c, 0x39, 0x3e, 0x7e, 0x94, 0x2a, 0xb0, 0xe2, - 0xfa, 0xde, 0x84, 0x49, 0x37, 0x6a, 0x46, 0x6e, 0xcd, 0xa7, 0xe7, 0x4c, 0xe2, 0x9e, 0x99, 0x28, - 0x09, 0x68, 0xa7, 0x15, 0x14, 0xa7, 0xb0, 0xb5, 0x73, 0x7d, 0x68, 0x60, 0xae, 0xb1, 0xaf, 0x6f, - 0x0a, 0x65, 0x88, 0xdb, 0xec, 0xeb, 0x22, 0x66, 0xd4, 0x26, 0x18, 0x62, 0xfe, 0xc1, 0x11, 0x96, - 0x30, 0x2a, 0x81, 0x35, 0x77, 0x9c, 0xf6, 0x62, 0x27, 0xde, 0xa9, 0xba, 0x51, 0x33, 0xd8, 0x27, - 0xe1, 0x01, 0x13, 0x9e, 0x47, 0x13, 0x09, 0x4c, 0x01, 0x96, 0x6f, 0x2c, 0xd6, 0x29, 0x26, 0xee, - 0xae, 0x63, 0x72, 0x85, 0x70, 0x1a, 0x5c, 0xe1, 0x22, 0x4c, 0xc9, 0x66, 0x1a, 0x24, 0x62, 0x77, - 0xc4, 0x18, 0xeb, 0x98, 0x32, 0xb5, 0x15, 0xc5, 0xaa, 0x5b, 0x69, 0x7c, 0xf4, 0x1a, 0x4c, 0xb8, - 0xbe, 0x1b, 0xbb, 0x4e, 0x1c, 0x84, 0xec, 0x86, 0xe5, 0x72, 0x32, 0xb3, 0x64, 0xab, 0xe9, 0x00, - 0x6c, 0xe2, 0xd9, 0x7f, 0x54, 0x82, 0x19, 0x36, 0x6d, 0xdf, 0x59, 0x61, 0x1f, 0x9b, 0x15, 0x76, - 0xa7, 0x7b, 0x85, 0x9d, 0x06, 0xbb, 0xfb, 0x51, 0x2e, 0xb3, 0x77, 0xa1, 0xac, 0x6c, 0x81, 0xa5, - 0x33, 0x80, 0x95, 0xe3, 0x0c, 0xd0, 0x9f, 0xfb, 0x90, 0xcf, 0xb8, 0xc5, 0xcc, 0x67, 0xdc, 0xbf, - 0x63, 
0x41, 0x62, 0x12, 0x89, 0x6e, 0x40, 0xb9, 0x1d, 0x30, 0xb3, 0x83, 0x50, 0xda, 0xf2, 0x3c, - 0x91, 0x79, 0x51, 0xf1, 0x4b, 0x91, 0x8f, 0x5f, 0x5d, 0xd6, 0xc0, 0x49, 0x65, 0xb4, 0x04, 0x23, - 0xed, 0x90, 0x34, 0x62, 0xe6, 0xb4, 0xd9, 0x97, 0x0e, 0x5f, 0x23, 0x1c, 0x1f, 0xcb, 0x8a, 0xf6, - 0xaf, 0x58, 0x00, 0xfc, 0xa5, 0xd4, 0xf1, 0xb7, 0xc9, 0x19, 0x68, 0x7f, 0xab, 0x50, 0x8a, 0xda, - 0xa4, 0xd9, 0xcb, 0x20, 0x24, 0xe9, 0x4f, 0xa3, 0x4d, 0x9a, 0xc9, 0x80, 0xd3, 0x7f, 0x98, 0xd5, - 0xb6, 0xff, 0x2a, 0xc0, 0x64, 0x82, 0x56, 0x8b, 0xc9, 0x1e, 0x7a, 0xd1, 0x70, 0x89, 0xbb, 0x98, - 0x72, 0x89, 0x2b, 0x33, 0x6c, 0x4d, 0xd1, 0xf8, 0x2e, 0x14, 0xf7, 0x9c, 0x07, 0x42, 0x93, 0xf4, - 0x7c, 0xef, 0x6e, 0x50, 0xfa, 0xf3, 0x6b, 0xce, 0x03, 0x2e, 0x33, 0x3d, 0x2f, 0x17, 0xc8, 0x9a, - 0xf3, 0xe0, 0x98, 0x9b, 0x7d, 0xb0, 0x43, 0xea, 0x96, 0x1b, 0xc5, 0x1f, 0xfc, 0x97, 0xe4, 0x3f, - 0x5b, 0x76, 0xb4, 0x11, 0xd6, 0x96, 0xeb, 0x8b, 0x77, 0xc3, 0x81, 0xda, 0x72, 0xfd, 0x74, 0x5b, - 0xae, 0x3f, 0x40, 0x5b, 0xae, 0x8f, 0xde, 0x87, 0x11, 0xf1, 0x46, 0xcf, 0x6c, 0xbd, 0x4d, 0x2d, - 0x55, 0x5e, 0x7b, 0xe2, 0x89, 0x9f, 0xb7, 0xb9, 0x20, 0x65, 0x42, 0x51, 0xda, 0xb7, 0x5d, 0xd9, - 0x20, 0xfa, 0x5b, 0x16, 0x4c, 0x8a, 0xdf, 0x98, 0xbc, 0xd7, 0x21, 0x51, 0x2c, 0x78, 0xcf, 0xcf, - 0x0c, 0xde, 0x07, 0x51, 0x91, 0x77, 0xe5, 0x33, 0xf2, 0x98, 0x35, 0x81, 0x7d, 0x7b, 0x94, 0xea, - 0x05, 0xfa, 0x27, 0x16, 0x9c, 0xdf, 0x73, 0x1e, 0xf0, 0x16, 0x79, 0x19, 0x76, 0x62, 0x37, 0x10, - 0xb6, 0xeb, 0x9f, 0x1b, 0x6c, 0xfa, 0xbb, 0xaa, 0xf3, 0x4e, 0x4a, 0x33, 0xd7, 0xf3, 0x59, 0x28, - 0x7d, 0xbb, 0x9a, 0xd9, 0xaf, 0xb9, 0x2d, 0x18, 0x95, 0xeb, 0x2d, 0x43, 0xf2, 0xae, 0xea, 0x8c, - 0xf5, 0x89, 0x4d, 0x24, 0x74, 0xbf, 0x34, 0xda, 0x8e, 0x58, 0x6b, 0x8f, 0xb4, 0x9d, 0x77, 0x61, - 0x5c, 0x5f, 0x63, 0x8f, 0xb4, 0xad, 0xf7, 0xe0, 0x5c, 0xc6, 0x5a, 0x7a, 0xa4, 0x4d, 0xde, 0x87, - 0x8b, 0xb9, 0xeb, 0xe3, 0x51, 0x36, 0x6c, 0xff, 0xb2, 0xa5, 0x9f, 0x83, 0x67, 0xa0, 0x82, 0x5f, - 0x36, 0x55, 0xf0, 0x97, 0x7a, 0xef, 0x9c, 0x1c, 0x3d, 0xfc, 0x3b, 0x7a, 0xa7, 0xe9, 0xa9, 0x8e, - 0xde, 0x82, 0x61, 0x8f, 0x96, 0x48, 0xe3, 0x10, 0xbb, 0xff, 0x8e, 0x4c, 0x78, 0x29, 0x56, 0x1e, - 0x61, 0x41, 0xc1, 0xfe, 0x75, 0x0b, 0x4a, 0x67, 0x30, 0x12, 0xd8, 0x1c, 0x89, 0x17, 0x73, 0x49, - 0x8b, 0x20, 0x5c, 0xf3, 0xd8, 0xb9, 0xbf, 0x22, 0x03, 0x8d, 0xe5, 0x0c, 0xcc, 0xf7, 0xc3, 0xb9, - 0x5b, 0x81, 0xd3, 0x5a, 0x72, 0x3c, 0xc7, 0x6f, 0x92, 0xb0, 0xe6, 0x6f, 0xf7, 0xb5, 0x52, 0xd2, - 0x6d, 0x8a, 0x0a, 0xfd, 0x6c, 0x8a, 0xec, 0x1d, 0x40, 0x7a, 0x03, 0xc2, 0x8e, 0x13, 0xc3, 0x88, - 0xcb, 0x9b, 0x12, 0xc3, 0xff, 0x6c, 0x36, 0x77, 0xd7, 0xd5, 0x33, 0xcd, 0x42, 0x91, 0x17, 0x60, - 0x49, 0xc8, 0x7e, 0x1d, 0x32, 0x7d, 0xb7, 0xfa, 0xab, 0x0d, 0xec, 0x57, 0x61, 0x86, 0xd5, 0x3c, - 0x99, 0x48, 0x6b, 0xff, 0x88, 0x05, 0x53, 0xeb, 0xa9, 0x68, 0x0a, 0x57, 0xd8, 0x5b, 0x5f, 0x86, - 0xde, 0xb7, 0xc1, 0x4a, 0xb1, 0x80, 0x9e, 0xba, 0x7e, 0xe9, 0xcf, 0x2d, 0x48, 0x5c, 0x25, 0xcf, - 0x80, 0xa9, 0x5a, 0x36, 0x98, 0xaa, 0x4c, 0xbd, 0x87, 0xea, 0x4e, 0x1e, 0x4f, 0x85, 0x6e, 0xaa, - 0xb8, 0x00, 0x3d, 0x54, 0x1e, 0x09, 0x19, 0xee, 0x45, 0x3e, 0x69, 0x06, 0x0f, 0x90, 0x91, 0x02, - 0x98, 0x99, 0x90, 0xc2, 0xfd, 0x98, 0x98, 0x09, 0xa9, 0xfe, 0xe4, 0xec, 0xbe, 0xba, 0xd6, 0x65, - 0x76, 0x2a, 0x7d, 0x0f, 0x33, 0xfb, 0x76, 0x3c, 0xf7, 0x7d, 0xa2, 0xc2, 0x71, 0x54, 0x84, 0x19, - 0xb7, 0x28, 0x3d, 0x3e, 0xac, 0x4c, 0xa8, 0x7f, 0x3c, 0x66, 0x53, 0x52, 0xc5, 0xbe, 0x01, 0x53, - 0xa9, 0x01, 0x43, 0xaf, 0xc2, 0x50, 0x7b, 0xc7, 0x89, 0x48, 0xca, 0x34, 0x72, 0xa8, 0x4e, 0x0b, - 0x8f, 0x0f, 0x2b, 0x93, 0xaa, 
0x02, 0x2b, 0xc1, 0x1c, 0xdb, 0xfe, 0x9f, 0x16, 0x94, 0xd6, 0x83, - 0xd6, 0x59, 0x2c, 0xa6, 0x37, 0x8d, 0xc5, 0xf4, 0x64, 0x5e, 0xc4, 0xbb, 0xdc, 0x75, 0xb4, 0x9a, - 0x5a, 0x47, 0x97, 0x72, 0x29, 0xf4, 0x5e, 0x42, 0x7b, 0x30, 0xc6, 0xe2, 0xe8, 0x09, 0x53, 0xcd, - 0x97, 0x0d, 0xfe, 0xbe, 0x92, 0xe2, 0xef, 0xa7, 0x34, 0x54, 0x8d, 0xcb, 0x7f, 0x0e, 0x46, 0x84, - 0xb9, 0x60, 0xda, 0xc0, 0x5d, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xb6, 0x08, 0x46, 0xdc, 0x3e, 0xf4, - 0x1b, 0x16, 0xcc, 0x87, 0xdc, 0x63, 0xb0, 0x55, 0xed, 0x84, 0xae, 0xbf, 0xdd, 0x68, 0xee, 0x90, - 0x56, 0xc7, 0x73, 0xfd, 0xed, 0xda, 0xb6, 0x1f, 0xa8, 0xe2, 0x95, 0x07, 0xa4, 0xd9, 0x61, 0x3a, - 0xff, 0x3e, 0x41, 0x02, 0x95, 0x39, 0xce, 0xb5, 0xa3, 0xc3, 0xca, 0x3c, 0x3e, 0x11, 0x6d, 0x7c, - 0xc2, 0xbe, 0xa0, 0x6f, 0x59, 0xb0, 0xc0, 0xc3, 0xd9, 0x0d, 0xde, 0xff, 0x1e, 0xd2, 0x50, 0x5d, - 0x92, 0x4a, 0x88, 0x6c, 0x90, 0x70, 0x6f, 0xe9, 0x35, 0x31, 0xa0, 0x0b, 0xf5, 0x93, 0xb5, 0x85, - 0x4f, 0xda, 0x39, 0xfb, 0xdf, 0x14, 0x61, 0x42, 0x38, 0xab, 0x8b, 0x28, 0x28, 0xaf, 0x1a, 0x4b, - 0xe2, 0xe9, 0xd4, 0x92, 0x98, 0x31, 0x90, 0x4f, 0x27, 0x00, 0x4a, 0x04, 0x33, 0x9e, 0x13, 0xc5, - 0x37, 0x88, 0x13, 0xc6, 0x9b, 0xc4, 0xe1, 0x66, 0x2a, 0xc5, 0x13, 0x9b, 0xd4, 0x28, 0xf5, 0xcb, - 0xad, 0x34, 0x31, 0xdc, 0x4d, 0x1f, 0xed, 0x03, 0x62, 0xb6, 0x36, 0xa1, 0xe3, 0x47, 0xfc, 0x5b, - 0x5c, 0xf1, 0x1e, 0x70, 0xb2, 0x56, 0xe7, 0x44, 0xab, 0xe8, 0x56, 0x17, 0x35, 0x9c, 0xd1, 0x82, - 0x66, 0x43, 0x35, 0x34, 0xa8, 0x0d, 0xd5, 0x70, 0x1f, 0x2f, 0x12, 0x1f, 0xa6, 0xbb, 0xe2, 0x0d, - 0x7c, 0x09, 0xca, 0xca, 0xd6, 0x4d, 0x1c, 0x3a, 0xbd, 0xc3, 0x76, 0xa4, 0x29, 0x70, 0x15, 0x49, - 0x62, 0x67, 0x99, 0x90, 0xb3, 0xff, 0x69, 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x3a, 0x8c, 0x3a, 0x51, - 0xe4, 0x6e, 0xfb, 0xa4, 0x25, 0x76, 0xec, 0x27, 0xf3, 0x76, 0xac, 0xd1, 0x0c, 0xb3, 0x37, 0x5c, - 0x14, 0x35, 0xb1, 0xa2, 0x81, 0x6e, 0x70, 0x63, 0xa0, 0x7d, 0xc9, 0xcf, 0x0f, 0x46, 0x0d, 0xa4, - 0xb9, 0xd0, 0x3e, 0xc1, 0xa2, 0x3e, 0xfa, 0x32, 0xb7, 0xd6, 0xba, 0xe9, 0x07, 0xf7, 0xfd, 0xeb, - 0x41, 0x20, 0x3d, 0xcc, 0x06, 0x23, 0x38, 0x23, 0x6d, 0xb4, 0x54, 0x75, 0x6c, 0x52, 0x1b, 0x2c, - 0x26, 0xcf, 0x0f, 0xc0, 0x39, 0x4a, 0xda, 0xf4, 0x13, 0x89, 0x10, 0x81, 0x29, 0x11, 0x09, 0x41, - 0x96, 0x89, 0xb1, 0xcb, 0x64, 0xd5, 0xcd, 0xda, 0x89, 0x42, 0xef, 0xa6, 0x49, 0x02, 0xa7, 0x69, - 0xda, 0x3f, 0x6f, 0x01, 0xb3, 0x70, 0x3f, 0x03, 0x96, 0xe1, 0xf3, 0x26, 0xcb, 0x30, 0x9b, 0x37, - 0xc8, 0x39, 0xdc, 0xc2, 0x2b, 0x7c, 0x65, 0xd5, 0xc3, 0xe0, 0xc1, 0x81, 0x78, 0x29, 0x1f, 0x80, - 0x4b, 0xfd, 0x3f, 0x16, 0x3f, 0xc4, 0x94, 0xd3, 0x39, 0xfa, 0x41, 0x18, 0x6d, 0x3a, 0x6d, 0xa7, - 0xc9, 0x83, 0xcc, 0xe6, 0x6a, 0x6c, 0x8c, 0x4a, 0xf3, 0xcb, 0xa2, 0x06, 0xd7, 0x40, 0xc8, 0x88, - 0x1a, 0xa3, 0xb2, 0xb8, 0xaf, 0xd6, 0x41, 0x35, 0x39, 0xb7, 0x0b, 0x13, 0x06, 0xb1, 0x47, 0x2a, - 0xae, 0xfe, 0x20, 0xbf, 0x62, 0x55, 0x04, 0x98, 0x3d, 0x98, 0xf1, 0xb5, 0xff, 0xf4, 0x42, 0x91, - 0x22, 0xc8, 0x27, 0xfb, 0x5d, 0xa2, 0xec, 0xf6, 0xd1, 0x2c, 0xf8, 0x53, 0x64, 0x70, 0x37, 0x65, - 0xfb, 0xef, 0x59, 0xf0, 0xb8, 0x8e, 0xa8, 0xc5, 0x03, 0xe8, 0xa7, 0x03, 0xae, 0xc2, 0x68, 0xd0, - 0x26, 0xa1, 0x13, 0x07, 0xa1, 0xb8, 0x35, 0xae, 0xca, 0x41, 0xbf, 0x2d, 0xca, 0x8f, 0x45, 0xb4, - 0x3f, 0x49, 0x5d, 0x96, 0x63, 0x55, 0x13, 0xd9, 0x30, 0xcc, 0x06, 0x23, 0x12, 0xb1, 0x1a, 0xd8, - 0x19, 0xc0, 0x9e, 0x43, 0x23, 0x2c, 0x20, 0xf6, 0x1f, 0x5b, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0xef, - 0xc1, 0xf4, 0x9e, 0x13, 0x37, 0x77, 0x56, 0x1e, 0xb4, 0x43, 0xae, 0xfa, 0x96, 0xe3, 0xf4, 0x7c, - 0xbf, 0x71, 0xd2, 0x3e, 0x32, 0x31, 0x40, 0x5b, 0x4b, 
0x11, 0xc3, 0x5d, 0xe4, 0xd1, 0x26, 0x8c, - 0xb1, 0x32, 0x66, 0xe9, 0x1c, 0xf5, 0x62, 0x0d, 0xf2, 0x5a, 0x53, 0x2f, 0xca, 0x6b, 0x09, 0x1d, - 0xac, 0x13, 0xb5, 0x3f, 0x28, 0xf2, 0xdd, 0xce, 0xb8, 0xed, 0xe7, 0x60, 0xa4, 0x1d, 0xb4, 0x96, - 0x6b, 0x55, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0xbd, 0x01, 0x40, 0x1e, - 0xc4, 0x24, 0xf4, 0x1d, 0x4f, 0x19, 0x84, 0x28, 0x13, 0xc8, 0x6a, 0xb0, 0x1e, 0xc4, 0x77, 0x22, - 0xf2, 0xfd, 0x2b, 0x0a, 0x05, 0x6b, 0xe8, 0xe8, 0x1a, 0x40, 0x3b, 0x0c, 0xf6, 0xdd, 0x16, 0x73, - 0x9d, 0x2b, 0x9a, 0xe6, 0x12, 0x75, 0x05, 0xc1, 0x1a, 0x16, 0x7a, 0x03, 0x26, 0x3a, 0x7e, 0xc4, - 0x39, 0x14, 0x67, 0x53, 0xc4, 0xca, 0x1b, 0x4d, 0x2c, 0x17, 0xee, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, - 0x45, 0x18, 0x8e, 0x1d, 0x66, 0xef, 0x30, 0x94, 0x6f, 0xb7, 0xb8, 0x41, 0x31, 0xf4, 0x10, 0xa7, - 0xb4, 0x02, 0x16, 0x15, 0xd1, 0x97, 0xa4, 0x1f, 0x02, 0x3f, 0xeb, 0x85, 0xc1, 0xf0, 0x60, 0xf7, - 0x82, 0xe6, 0x85, 0x20, 0x0c, 0x91, 0x0d, 0x5a, 0xf6, 0xb7, 0xca, 0x00, 0x09, 0x3b, 0x8e, 0xde, - 0xef, 0x3a, 0x8f, 0x5e, 0xe8, 0xcd, 0xc0, 0x9f, 0xde, 0x61, 0x84, 0x7e, 0xd8, 0x82, 0x31, 0xc7, - 0xf3, 0x82, 0xa6, 0x13, 0xb3, 0x51, 0x2e, 0xf4, 0x3e, 0x0f, 0x45, 0xfb, 0x8b, 0x49, 0x0d, 0xde, - 0x85, 0x97, 0xe5, 0xc2, 0xd3, 0x20, 0x7d, 0x7b, 0xa1, 0x37, 0x8c, 0xbe, 0x4b, 0x4a, 0x69, 0x7c, - 0x79, 0xcc, 0xa5, 0xa5, 0xb4, 0x32, 0x3b, 0xfa, 0x35, 0x01, 0x0d, 0xdd, 0x31, 0x82, 0xca, 0x95, - 0xf2, 0xe3, 0x2b, 0x18, 0x5c, 0x69, 0xbf, 0x78, 0x72, 0xa8, 0xae, 0x3b, 0x4e, 0x0d, 0xe5, 0x07, - 0x21, 0xd1, 0xc4, 0x9f, 0x3e, 0x4e, 0x53, 0xef, 0xc2, 0x54, 0xcb, 0xbc, 0xdb, 0xc5, 0x6a, 0x7a, - 0x36, 0x8f, 0x6e, 0x8a, 0x15, 0x48, 0x6e, 0xf3, 0x14, 0x00, 0xa7, 0x09, 0xa3, 0x3a, 0x77, 0x61, - 0xab, 0xf9, 0x5b, 0x81, 0x30, 0x3c, 0xb7, 0x73, 0xe7, 0xf2, 0x20, 0x8a, 0xc9, 0x1e, 0xc5, 0x4c, - 0x2e, 0xed, 0x75, 0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x2d, 0x18, 0x66, 0x3e, 0xb0, 0xd1, 0xec, 0x68, - 0xbe, 0xa2, 0xd0, 0x0c, 0xdf, 0x90, 0x6c, 0x2a, 0xf6, 0x37, 0xc2, 0x82, 0x02, 0xba, 0x21, 0x63, - 0xbc, 0x44, 0x35, 0xff, 0x4e, 0x44, 0x58, 0x8c, 0x97, 0xf2, 0xd2, 0x27, 0x93, 0xf0, 0x2d, 0xbc, - 0x3c, 0x33, 0x98, 0xb9, 0x51, 0x93, 0x32, 0x47, 0xe2, 0xbf, 0x8c, 0x91, 0x3e, 0x0b, 0xf9, 0xdd, - 0x33, 0xe3, 0xa8, 0x27, 0xc3, 0x79, 0xd7, 0x24, 0x81, 0xd3, 0x34, 0x29, 0xa3, 0xc9, 0x77, 0xae, - 0x30, 0x5d, 0xef, 0xb7, 0xff, 0xb9, 0x7c, 0xcd, 0x2e, 0x19, 0x5e, 0x82, 0x45, 0xfd, 0x33, 0xbd, - 0xf5, 0xe7, 0x7c, 0x98, 0x4e, 0x6f, 0xd1, 0x47, 0xca, 0x65, 0xfc, 0x61, 0x09, 0x26, 0xcd, 0x25, - 0x85, 0x16, 0xa0, 0x2c, 0x88, 0xa8, 0xc8, 0xa0, 0x6a, 0x97, 0xac, 0x49, 0x00, 0x4e, 0x70, 0x58, - 0x64, 0x54, 0x56, 0x5d, 0x33, 0x39, 0x4c, 0x22, 0xa3, 0x2a, 0x08, 0xd6, 0xb0, 0xa8, 0xbc, 0xb4, - 0x19, 0x04, 0xb1, 0xba, 0x54, 0xd4, 0xba, 0x5b, 0x62, 0xa5, 0x58, 0x40, 0xe9, 0x65, 0xb2, 0x4b, - 0x42, 0x9f, 0x78, 0x66, 0x1c, 0x33, 0x75, 0x99, 0xdc, 0xd4, 0x81, 0xd8, 0xc4, 0xa5, 0xb7, 0x64, - 0x10, 0xb1, 0x85, 0x2c, 0xa4, 0xb2, 0xc4, 0x84, 0xb3, 0xc1, 0xbd, 0xc9, 0x25, 0x1c, 0x7d, 0x11, - 0x1e, 0x57, 0xce, 0xdf, 0x98, 0x2b, 0xa1, 0x65, 0x8b, 0xc3, 0x86, 0x12, 0xe5, 0xf1, 0xe5, 0x6c, - 0x34, 0x9c, 0x57, 0x1f, 0xbd, 0x09, 0x93, 0x82, 0x73, 0x97, 0x14, 0x47, 0x4c, 0xbb, 0x88, 0x9b, - 0x06, 0x14, 0xa7, 0xb0, 0x65, 0x24, 0x36, 0xc6, 0x3c, 0x4b, 0x0a, 0xa3, 0xdd, 0x91, 0xd8, 0x74, - 0x38, 0xee, 0xaa, 0x81, 0x16, 0x61, 0x8a, 0xb3, 0x56, 0xae, 0xbf, 0xcd, 0xe7, 0x44, 0x78, 0x96, - 0xa8, 0x2d, 0x75, 0xdb, 0x04, 0xe3, 0x34, 0x3e, 0x7a, 0x1d, 0xc6, 0x9d, 0xb0, 0xb9, 0xe3, 0xc6, - 0xa4, 0x19, 0x77, 0x42, 0xee, 0x72, 0xa2, 0x19, 0x96, 0x2c, 0x6a, 0x30, 0x6c, 
0x60, 0xda, 0xef, - 0xc3, 0xb9, 0x0c, 0xa7, 0x34, 0xba, 0x70, 0x9c, 0xb6, 0x2b, 0xbf, 0x29, 0x65, 0x8c, 0xb9, 0x58, - 0xaf, 0xc9, 0xaf, 0xd1, 0xb0, 0xe8, 0xea, 0x64, 0xce, 0x6b, 0x5a, 0x4a, 0x04, 0xb5, 0x3a, 0x57, - 0x25, 0x00, 0x27, 0x38, 0xf6, 0xff, 0x2a, 0xc0, 0x54, 0x86, 0x62, 0x9d, 0x85, 0xe5, 0x4f, 0xc9, - 0x1e, 0x49, 0x14, 0x7e, 0x33, 0xb0, 0x5f, 0xe1, 0x04, 0x81, 0xfd, 0x8a, 0xfd, 0x02, 0xfb, 0x95, - 0x3e, 0x4c, 0x60, 0x3f, 0x73, 0xc4, 0x86, 0x06, 0x1a, 0xb1, 0x8c, 0x60, 0x80, 0xc3, 0x27, 0x0c, - 0x06, 0x68, 0x0c, 0xfa, 0xc8, 0x00, 0x83, 0xfe, 0x13, 0x05, 0x98, 0x4e, 0x1b, 0xc0, 0x9d, 0x81, - 0x3a, 0xf6, 0x2d, 0x43, 0x1d, 0x9b, 0x9d, 0xe4, 0x22, 0x6d, 0x96, 0x97, 0xa7, 0x9a, 0xc5, 0x29, - 0xd5, 0xec, 0xa7, 0x07, 0xa2, 0xd6, 0x5b, 0x4d, 0xfb, 0xf7, 0x0b, 0x70, 0x21, 0x5d, 0x65, 0xd9, - 0x73, 0xdc, 0xbd, 0x33, 0x18, 0x9b, 0xdb, 0xc6, 0xd8, 0xbc, 0x38, 0xc8, 0xd7, 0xb0, 0xae, 0xe5, - 0x0e, 0xd0, 0xbd, 0xd4, 0x00, 0x2d, 0x0c, 0x4e, 0xb2, 0xf7, 0x28, 0x7d, 0xbb, 0x08, 0x97, 0x32, - 0xeb, 0x25, 0xda, 0xcc, 0x55, 0x43, 0x9b, 0x79, 0x2d, 0xa5, 0xcd, 0xb4, 0x7b, 0xd7, 0x3e, 0x1d, - 0xf5, 0xa6, 0xf0, 0x16, 0x64, 0xc1, 0xdf, 0x1e, 0x52, 0xb5, 0x69, 0x78, 0x0b, 0x2a, 0x42, 0xd8, - 0xa4, 0xfb, 0x97, 0x49, 0xa5, 0xf9, 0xef, 0x2d, 0xb8, 0x98, 0x39, 0x37, 0x67, 0xa0, 0xc2, 0x5a, - 0x37, 0x55, 0x58, 0xcf, 0x0d, 0xbc, 0x5a, 0x73, 0x74, 0x5a, 0x7f, 0x54, 0xcc, 0xf9, 0x16, 0x26, - 0xa0, 0xdf, 0x86, 0x31, 0xa7, 0xd9, 0x24, 0x51, 0xb4, 0x16, 0xb4, 0x54, 0x30, 0xb4, 0x17, 0x99, - 0x9c, 0x95, 0x14, 0x1f, 0x1f, 0x56, 0xe6, 0xd2, 0x24, 0x12, 0x30, 0xd6, 0x29, 0x98, 0xf1, 0x1b, - 0x0b, 0xa7, 0x1a, 0xbf, 0xf1, 0x1a, 0xc0, 0xbe, 0xe2, 0xd6, 0xd3, 0x42, 0xbe, 0xc6, 0xc7, 0x6b, - 0x58, 0xe8, 0xcb, 0x30, 0x1a, 0x89, 0x6b, 0x5c, 0x2c, 0xc5, 0x97, 0x07, 0x9c, 0x2b, 0x67, 0x93, - 0x78, 0xa6, 0x5b, 0xba, 0xd2, 0x87, 0x28, 0x92, 0xe8, 0x7b, 0x61, 0x3a, 0xe2, 0x51, 0x4f, 0x96, - 0x3d, 0x27, 0x62, 0x3e, 0x0e, 0x62, 0x15, 0x32, 0x5f, 0xf3, 0x46, 0x0a, 0x86, 0xbb, 0xb0, 0xd1, - 0xaa, 0xfc, 0x28, 0x16, 0xa2, 0x85, 0x2f, 0xcc, 0x2b, 0xc9, 0x07, 0x89, 0xa4, 0x40, 0xe7, 0xd3, - 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x69, 0xff, 0x44, 0x09, 0x9e, 0xe8, 0x71, 0x88, 0xa1, 0x45, 0xf3, - 0x8d, 0xf2, 0xf9, 0xb4, 0xf4, 0x3b, 0x97, 0x59, 0xd9, 0x10, 0x87, 0x53, 0x6b, 0xa5, 0xf0, 0xa1, - 0xd7, 0xca, 0x8f, 0x5a, 0x9a, 0x5e, 0x82, 0x5b, 0xd2, 0x7d, 0xfe, 0x84, 0x87, 0xf3, 0x29, 0x2a, - 0x2a, 0xb6, 0x32, 0xa4, 0xfd, 0x6b, 0x03, 0x77, 0x67, 0x60, 0xf1, 0xff, 0x6c, 0xb5, 0xb3, 0x1f, - 0x58, 0xf0, 0x74, 0x66, 0x7f, 0x0d, 0x9b, 0x8a, 0x05, 0x28, 0x37, 0x69, 0xa1, 0xe6, 0x37, 0x95, - 0x38, 0x94, 0x4a, 0x00, 0x4e, 0x70, 0x0c, 0xd3, 0x89, 0x42, 0x5f, 0xd3, 0x89, 0x7f, 0x6d, 0x41, - 0xd7, 0x02, 0x3e, 0x83, 0x93, 0xb4, 0x66, 0x9e, 0xa4, 0x9f, 0x1c, 0x64, 0x2e, 0x73, 0x0e, 0xd1, - 0xdf, 0x9b, 0x82, 0xc7, 0x72, 0x1c, 0x25, 0xf6, 0x61, 0x66, 0xbb, 0x49, 0x4c, 0x8f, 0x34, 0xf1, - 0x31, 0x99, 0xce, 0x7b, 0x3d, 0xdd, 0xd7, 0x58, 0x36, 0x97, 0x99, 0x2e, 0x14, 0xdc, 0xdd, 0x04, - 0xfa, 0xc0, 0x82, 0xf3, 0xce, 0xfd, 0xa8, 0x2b, 0x67, 0x9f, 0x58, 0x33, 0xaf, 0x64, 0x6a, 0x29, - 0xfa, 0xe4, 0xf8, 0xe3, 0xe9, 0x6d, 0xb2, 0xb0, 0x70, 0x66, 0x5b, 0x08, 0x8b, 0xf8, 0x95, 0x94, - 0xdf, 0xee, 0xe1, 0x33, 0x99, 0xe5, 0xd1, 0xc2, 0xcf, 0x54, 0x09, 0xc1, 0x8a, 0x0e, 0xba, 0x0b, - 0xe5, 0x6d, 0xe9, 0x66, 0x26, 0xce, 0xec, 0xcc, 0x4b, 0x30, 0xd3, 0x17, 0x8d, 0xbf, 0x1b, 0x2a, - 0x10, 0x4e, 0x48, 0xa1, 0x37, 0xa1, 0xe8, 0x6f, 0x45, 0xbd, 0xf2, 0xc2, 0xa4, 0x4c, 0x8d, 0xb8, - 0x3f, 0xf2, 0xfa, 0x6a, 0x03, 0xd3, 0x8a, 0xe8, 0x06, 0x14, 0xc3, 0xcd, 0x96, 0x50, 0xac, 0x65, - 0xf2, 
0xa5, 0x78, 0xa9, 0x9a, 0xbd, 0x48, 0x38, 0x25, 0xbc, 0x54, 0xc5, 0x94, 0x04, 0xaa, 0xc3, - 0x10, 0xf3, 0x29, 0x10, 0xfa, 0xb3, 0x4c, 0x86, 0xb4, 0x87, 0x6f, 0x0e, 0x77, 0x5a, 0x66, 0x08, - 0x98, 0x13, 0x42, 0x6f, 0xc1, 0x70, 0x93, 0xa5, 0x4e, 0x11, 0x21, 0x93, 0xb3, 0xa3, 0xd9, 0x74, - 0x25, 0x57, 0x11, 0x7a, 0x24, 0x56, 0x8e, 0x05, 0x05, 0xb4, 0x01, 0xc3, 0x4d, 0xd2, 0xde, 0xd9, - 0x8a, 0x98, 0xe0, 0x6d, 0x32, 0xf8, 0x09, 0xad, 0x1e, 0x99, 0x82, 0x04, 0x55, 0x86, 0x81, 0x05, - 0x2d, 0xf4, 0x59, 0x28, 0x6c, 0x35, 0x85, 0xa3, 0x41, 0xa6, 0x06, 0xcd, 0x74, 0x24, 0x5f, 0x1a, - 0x3e, 0x3a, 0xac, 0x14, 0x56, 0x97, 0x71, 0x61, 0xab, 0x89, 0xd6, 0x61, 0x64, 0x8b, 0xbb, 0x9e, - 0x0a, 0x25, 0xd9, 0xb3, 0xd9, 0x5e, 0xb1, 0x5d, 0xde, 0xa9, 0xdc, 0x40, 0x5e, 0x00, 0xb0, 0x24, - 0xc2, 0x42, 0x3f, 0x2a, 0x17, 0x5a, 0x11, 0x03, 0x79, 0xfe, 0x64, 0x6e, 0xcf, 0xdc, 0xa9, 0x3d, - 0x71, 0xc4, 0xc5, 0x1a, 0x45, 0xf4, 0x55, 0x28, 0x3b, 0x32, 0x47, 0x9c, 0x88, 0x11, 0xf1, 0x72, - 0xe6, 0x76, 0xec, 0x9d, 0x3e, 0x8f, 0xaf, 0x65, 0x85, 0x84, 0x13, 0xa2, 0x68, 0x17, 0x26, 0xf6, - 0xa3, 0xf6, 0x0e, 0x91, 0xdb, 0x97, 0x85, 0x8c, 0xc8, 0xb9, 0xae, 0xee, 0x0a, 0x44, 0x37, 0x8c, - 0x3b, 0x8e, 0xd7, 0x75, 0xe2, 0xb0, 0x27, 0xe6, 0xbb, 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0xc3, 0xff, - 0x5e, 0x27, 0xd8, 0x3c, 0x88, 0x89, 0x08, 0x9a, 0x9c, 0x39, 0xfc, 0x6f, 0x73, 0x94, 0xee, 0xe1, - 0x17, 0x00, 0x2c, 0x89, 0xd0, 0x0d, 0xee, 0xc8, 0xfc, 0x8b, 0x2c, 0x58, 0x72, 0xce, 0x06, 0xcf, - 0x4c, 0xd2, 0xa8, 0x0d, 0x0a, 0x3b, 0x19, 0x13, 0x52, 0xec, 0x44, 0x6c, 0xef, 0x04, 0x71, 0xe0, - 0xa7, 0x4e, 0xe3, 0x99, 0xfc, 0x13, 0xb1, 0x9e, 0x81, 0xdf, 0x7d, 0x22, 0x66, 0x61, 0xe1, 0xcc, - 0xb6, 0x50, 0x0b, 0x26, 0xdb, 0x41, 0x18, 0xdf, 0x0f, 0x42, 0xb9, 0xbe, 0x50, 0x0f, 0x21, 0xdf, - 0xc0, 0x14, 0x2d, 0xb2, 0x20, 0xde, 0x26, 0x04, 0xa7, 0x68, 0xa2, 0x2f, 0xc0, 0x48, 0xd4, 0x74, - 0x3c, 0x52, 0xbb, 0x3d, 0x7b, 0x2e, 0xff, 0xaa, 0x69, 0x70, 0x94, 0x9c, 0xd5, 0xc5, 0x26, 0x47, - 0xa0, 0x60, 0x49, 0x0e, 0xad, 0xc2, 0x10, 0x8b, 0xc4, 0xcf, 0xe2, 0x3d, 0xe7, 0xc4, 0x22, 0xea, - 0x32, 0xe5, 0xe4, 0x27, 0x12, 0x2b, 0xc6, 0xbc, 0x3a, 0xdd, 0x03, 0x82, 0xd7, 0x0d, 0xa2, 0xd9, - 0x0b, 0xf9, 0x7b, 0x40, 0xb0, 0xc8, 0xb7, 0x1b, 0xbd, 0xf6, 0x80, 0x42, 0xc2, 0x09, 0x51, 0x7a, - 0x1e, 0xd3, 0x33, 0xf4, 0xb1, 0x1e, 0xd6, 0x25, 0xb9, 0x27, 0x28, 0x3b, 0x8f, 0xe9, 0xf9, 0x49, - 0x49, 0xd8, 0xbf, 0x3f, 0xd2, 0xcd, 0x9f, 0x30, 0xe9, 0xe8, 0xaf, 0x58, 0x5d, 0x0f, 0x67, 0x9f, - 0x19, 0x54, 0x59, 0x73, 0x8a, 0x9c, 0xe9, 0x07, 0x16, 0x3c, 0xd6, 0xce, 0xfc, 0x10, 0x71, 0xd9, - 0x0f, 0xa6, 0xf3, 0xe1, 0x9f, 0xae, 0x62, 0xb2, 0x67, 0xc3, 0x71, 0x4e, 0x4b, 0x69, 0xee, 0xbf, - 0xf8, 0xa1, 0xb9, 0xff, 0x35, 0x18, 0x65, 0x0c, 0x65, 0x12, 0xf8, 0x6a, 0xa0, 0x40, 0x3a, 0x8c, - 0x6d, 0x58, 0x16, 0x15, 0xb1, 0x22, 0x81, 0x7e, 0xcc, 0x82, 0xa7, 0xd2, 0x5d, 0xc7, 0x84, 0x81, - 0x45, 0x04, 0x73, 0x2e, 0x98, 0xad, 0x8a, 0xef, 0x7f, 0xaa, 0xde, 0x0b, 0xf9, 0xb8, 0x1f, 0x02, - 0xee, 0xdd, 0x18, 0xaa, 0x66, 0x48, 0x86, 0xc3, 0xa6, 0x36, 0x7c, 0x00, 0xe9, 0xf0, 0x15, 0x18, - 0xdf, 0x0b, 0x3a, 0x7e, 0x2c, 0x8c, 0x51, 0x84, 0x6b, 0x20, 0x7b, 0xfd, 0x5d, 0xd3, 0xca, 0xb1, - 0x81, 0x95, 0x92, 0x29, 0x47, 0x1f, 0x56, 0xa6, 0x44, 0xef, 0xa4, 0xf2, 0x25, 0x97, 0xf3, 0x79, - 0x0b, 0x21, 0x7e, 0x9f, 0x20, 0x6b, 0xf2, 0xd9, 0xca, 0x41, 0x5f, 0xb7, 0x32, 0x18, 0x78, 0x2e, - 0x19, 0x7f, 0xce, 0x94, 0x8c, 0xaf, 0xa4, 0x25, 0xe3, 0x2e, 0x4d, 0xa8, 0x21, 0x14, 0x0f, 0x1e, - 0x6e, 0x79, 0xd0, 0xf8, 0x65, 0xb6, 0x07, 0x97, 0xfb, 0x5d, 0x4b, 0xcc, 0x2a, 0xa9, 0xa5, 0xde, - 0xbd, 0x12, 0xab, 0xa4, 0x56, 
-	// (previous gzipped FileDescriptorProto bytes elided)
+	// 12432 bytes of a gzipped FileDescriptorProto
+	// (regenerated gzipped FileDescriptorProto bytes elided)
0xc3, 0xd9, 0xa7, + 0x06, 0x55, 0xd6, 0x9c, 0x22, 0x67, 0xfa, 0x15, 0x0b, 0x1e, 0x6b, 0x67, 0x7e, 0x88, 0xb8, 0xec, + 0x07, 0xd3, 0xf9, 0xf0, 0x4f, 0x57, 0x31, 0xd9, 0xb3, 0xe1, 0x38, 0xa7, 0xa5, 0x34, 0xf7, 0x5f, + 0xfc, 0xc0, 0xdc, 0xff, 0x1a, 0x94, 0x19, 0x43, 0x99, 0x04, 0xbe, 0x1a, 0x28, 0x90, 0x0e, 0x63, + 0x1b, 0x96, 0x44, 0x45, 0xac, 0x48, 0xa0, 0x1f, 0xb3, 0xe0, 0x52, 0xba, 0xeb, 0x98, 0x30, 0xb0, + 0x88, 0x60, 0xce, 0x05, 0xb3, 0x15, 0xf1, 0xfd, 0x97, 0xea, 0xbd, 0x90, 0x8f, 0xfb, 0x21, 0xe0, + 0xde, 0x8d, 0xa1, 0x5a, 0x86, 0x64, 0x38, 0x6c, 0x6a, 0xc3, 0x07, 0x90, 0x0e, 0x5f, 0x86, 0xb1, + 0xbd, 0xa0, 0xe3, 0xc7, 0xc2, 0x18, 0x45, 0xb8, 0x06, 0xb2, 0xd7, 0xdf, 0x35, 0xad, 0x1c, 0x1b, + 0x58, 0x29, 0x99, 0xb2, 0xfc, 0xb0, 0x32, 0x25, 0x7a, 0x27, 0x95, 0x2f, 0xb9, 0x92, 0xcf, 0x5b, + 0x08, 0xf1, 0xfb, 0x04, 0x59, 0x93, 0xcf, 0x56, 0x0e, 0xfa, 0x9a, 0x95, 0xc1, 0xc0, 0x73, 0xc9, + 0xf8, 0x33, 0xa6, 0x64, 0x7c, 0x35, 0x2d, 0x19, 0x77, 0x69, 0x42, 0x0d, 0xa1, 0x78, 0xf0, 0x70, + 0xcb, 0x83, 0xc6, 0x2f, 0xb3, 0x3d, 0xb8, 0xd2, 0xef, 0x5a, 0x62, 0x56, 0x49, 0x2d, 0xf5, 0xee, + 0x95, 0x58, 0x25, 0xb5, 0x56, 0x6b, 0x98, 0x41, 0x06, 0x0d, 0x70, 0x61, 0xff, 0x0f, 0x0b, 0x8a, + 0xf5, 0xa0, 0x75, 0x06, 0x9a, 0xdd, 0xcf, 0x1a, 0x9a, 0xdd, 0x27, 0x72, 0xf2, 0x58, 0xe7, 0xea, + 0x71, 0x97, 0x53, 0x7a, 0xdc, 0x4b, 0x79, 0x04, 0x7a, 0x6b, 0x6d, 0x7f, 0xae, 0x08, 0x7a, 0xd6, + 0x6d, 0xf4, 0xef, 0x1e, 0xc6, 0x24, 0xb8, 0xd8, 0x2b, 0x11, 0xb7, 0xa0, 0xcc, 0x8c, 0x99, 0xa4, + 0xb7, 0xdb, 0x5f, 0x30, 0xcb, 0xe0, 0xb7, 0x89, 0xbb, 0xbd, 0x13, 0x93, 0x56, 0xfa, 0x73, 0xce, + 0xce, 0x32, 0xf8, 0xbf, 0x59, 0x30, 0x99, 0x6a, 0x1d, 0x79, 0x30, 0xee, 0xe9, 0x6a, 0x39, 0xb1, + 0x4e, 0x1f, 0x4a, 0xa3, 0x27, 0x2c, 0x2b, 0xb5, 0x22, 0x6c, 0x12, 0x47, 0x73, 0x00, 0xea, 0xd9, + 0x4c, 0x6a, 0xbb, 0x18, 0xd7, 0xaf, 0xde, 0xd5, 0x22, 0xac, 0x61, 0xa0, 0x57, 0x60, 0x34, 0x0e, + 0xda, 0x81, 0x17, 0x6c, 0x1f, 0xdc, 0x22, 0x32, 0xa4, 0x8a, 0xb2, 0x97, 0xda, 0x48, 0x40, 0x58, + 0xc7, 0xb3, 0x7f, 0xa1, 0x08, 0xe9, 0x4c, 0xed, 0xdf, 0x59, 0x93, 0x1f, 0xcd, 0x35, 0xf9, 0x2d, + 0x0b, 0xa6, 0x68, 0xeb, 0xcc, 0x76, 0x43, 0x5e, 0xb6, 0x2a, 0xed, 0x8b, 0xd5, 0x23, 0xed, 0xcb, + 0x55, 0x7a, 0x76, 0xb5, 0x82, 0x4e, 0x2c, 0xb4, 0x65, 0xda, 0xe1, 0x44, 0x4b, 0xb1, 0x80, 0x0a, + 0x3c, 0x12, 0x86, 0xc2, 0x21, 0x49, 0xc7, 0x23, 0x61, 0x88, 0x05, 0x54, 0x66, 0x85, 0x29, 0xe5, + 0x64, 0x85, 0x61, 0x01, 0xe2, 0xc4, 0x2b, 0xbf, 0x60, 0x7b, 0xb4, 0x00, 0x71, 0xf2, 0xf9, 0x3f, + 0xc1, 0xb1, 0x7f, 0xb9, 0x08, 0x63, 0xf5, 0xa0, 0x95, 0x3c, 0x5c, 0xbd, 0x6c, 0x3c, 0x5c, 0x5d, + 0x49, 0x3d, 0x5c, 0x4d, 0xe9, 0xb8, 0xdf, 0x79, 0xa6, 0xfa, 0xb0, 0x9e, 0xa9, 0xfe, 0xb5, 0xc5, + 0x66, 0xad, 0xb6, 0xde, 0x10, 0x59, 0x69, 0x5f, 0x84, 0x51, 0x76, 0x20, 0x31, 0x0f, 0x38, 0xf9, + 0x9a, 0xc3, 0x02, 0xbe, 0xaf, 0x27, 0xc5, 0x58, 0xc7, 0x41, 0xd7, 0xa0, 0x1c, 0x11, 0x27, 0x6c, + 0xee, 0xa8, 0x33, 0x4e, 0xbc, 0x75, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x24, 0x36, 0x59, 0x31, + 0x3f, 0xbf, 0xaa, 0xde, 0x1f, 0xbe, 0x45, 0xf2, 0x03, 0x92, 0xd9, 0x6f, 0x03, 0xea, 0xc6, 0x1f, + 0x20, 0x0a, 0x51, 0xd5, 0x8c, 0x42, 0x54, 0xe9, 0x8a, 0x40, 0xf4, 0x67, 0x16, 0x4c, 0xd4, 0x83, + 0x16, 0xdd, 0xba, 0xdf, 0x4e, 0xfb, 0x54, 0x0f, 0xcc, 0x38, 0xdc, 0x23, 0x30, 0xe3, 0x3f, 0xb4, + 0x60, 0xa4, 0x1e, 0xb4, 0xce, 0x40, 0xc7, 0xfe, 0x19, 0x53, 0xc7, 0xfe, 0x78, 0xce, 0x92, 0xc8, + 0x51, 0xab, 0xff, 0x6a, 0x11, 0xc6, 0x69, 0x3f, 0x83, 0x6d, 0x39, 0x4b, 0xc6, 0x88, 0x58, 0x03, + 0x8c, 0x08, 0x65, 0x73, 0x03, 0xcf, 0x0b, 0xee, 0xa7, 0x67, 0x6c, 0x85, 0x95, 0x62, 0x01, 0x45, + 0xcf, 
0x43, 0xb9, 0x1d, 0x92, 0x7d, 0x37, 0x10, 0xfc, 0xa3, 0xf6, 0x62, 0x51, 0x17, 0xe5, 0x58, + 0x61, 0x50, 0xb9, 0x2b, 0x72, 0xfd, 0x26, 0x91, 0xc9, 0x9d, 0x4b, 0x2c, 0xff, 0x13, 0x8f, 0xb8, + 0xac, 0x95, 0x63, 0x03, 0x0b, 0xbd, 0x0d, 0x15, 0xf6, 0x9f, 0x9d, 0x28, 0x27, 0xcf, 0x57, 0x23, + 0xd2, 0x1c, 0x08, 0x02, 0x38, 0xa1, 0x85, 0xae, 0x03, 0xc4, 0x32, 0x2a, 0x6f, 0x24, 0x82, 0xc9, + 0x28, 0x5e, 0x5b, 0xc5, 0xeb, 0x8d, 0xb0, 0x86, 0x85, 0x9e, 0x83, 0x4a, 0xec, 0xb8, 0xde, 0x6d, + 0xd7, 0x27, 0x11, 0x53, 0x34, 0x17, 0x65, 0x16, 0x03, 0x51, 0x88, 0x13, 0x38, 0xe5, 0x75, 0x98, + 0xa7, 0x35, 0xcf, 0x76, 0x55, 0x66, 0xd8, 0x8c, 0xd7, 0xb9, 0xad, 0x4a, 0xb1, 0x86, 0x61, 0xbf, + 0x06, 0x17, 0xea, 0x41, 0xab, 0x1e, 0x84, 0xf1, 0x4a, 0x10, 0xde, 0x77, 0xc2, 0x96, 0x9c, 0xbf, + 0xaa, 0x0c, 0xa8, 0x4f, 0xcf, 0x9e, 0x21, 0xbe, 0x33, 0x8d, 0x50, 0xf9, 0x2f, 0x31, 0x6e, 0xe7, + 0x84, 0x1e, 0x16, 0xff, 0xa7, 0xc0, 0x0e, 0x8a, 0x54, 0x0a, 0x36, 0xf4, 0x25, 0x98, 0x88, 0xc8, + 0x6d, 0xd7, 0xef, 0x3c, 0x90, 0xf2, 0x71, 0x0f, 0xf7, 0x95, 0xc6, 0xb2, 0x8e, 0xc9, 0xb5, 0x6c, + 0x66, 0x19, 0x4e, 0x51, 0xa3, 0x43, 0x18, 0x76, 0xfc, 0x85, 0xe8, 0x6e, 0x44, 0x42, 0x91, 0x02, + 0x8c, 0x0d, 0x21, 0x96, 0x85, 0x38, 0x81, 0xd3, 0x25, 0xc3, 0xfe, 0xac, 0x07, 0x3e, 0x0e, 0x82, + 0x58, 0x2e, 0x32, 0x96, 0x44, 0x46, 0x2b, 0xc7, 0x06, 0x16, 0x5a, 0x01, 0x14, 0x75, 0xda, 0x6d, + 0x8f, 0x3d, 0x80, 0x3b, 0xde, 0x8d, 0x30, 0xe8, 0xb4, 0xf9, 0xe3, 0x63, 0x91, 0xe5, 0xeb, 0x47, + 0x8d, 0x2e, 0x28, 0xce, 0xa8, 0x41, 0x0f, 0x86, 0xad, 0x88, 0xfd, 0x66, 0x0b, 0xaf, 0x28, 0x34, + 0xdf, 0x0d, 0x56, 0x84, 0x25, 0x8c, 0xce, 0x33, 0x6b, 0x9e, 0x63, 0x0e, 0x27, 0xf3, 0x8c, 0x55, + 0x29, 0xd6, 0x30, 0xec, 0x1f, 0x64, 0x17, 0x0c, 0xcb, 0xf4, 0x14, 0x77, 0x42, 0x82, 0xf6, 0x60, + 0xbc, 0xcd, 0xae, 0x7e, 0x11, 0x87, 0x58, 0x0c, 0xf8, 0xcb, 0x03, 0x4a, 0x8a, 0xf7, 0xe9, 0xe6, + 0x55, 0x9a, 0x1c, 0xc6, 0x82, 0xd7, 0x75, 0x72, 0xd8, 0xa4, 0x6e, 0xff, 0xdc, 0x14, 0x3b, 0xc7, + 0x1a, 0x5c, 0xfc, 0x1b, 0x11, 0xb6, 0xb3, 0x82, 0xd7, 0x9d, 0xcd, 0xd7, 0x43, 0x24, 0x57, 0x8e, + 0xb0, 0xbf, 0xc5, 0xb2, 0x2e, 0x7a, 0x8b, 0xbd, 0xf2, 0xf2, 0xc3, 0xa3, 0x5f, 0xc2, 0x5d, 0x8e, + 0x65, 0x3c, 0xe8, 0x8a, 0x8a, 0x58, 0x23, 0x82, 0x6e, 0xc3, 0xb8, 0x48, 0x0c, 0x24, 0x14, 0x4d, + 0x45, 0x43, 0x91, 0x30, 0x8e, 0x75, 0xe0, 0x71, 0xba, 0x00, 0x9b, 0x95, 0xd1, 0x36, 0x5c, 0xd2, + 0xb2, 0xe4, 0xdd, 0x08, 0x1d, 0xf6, 0xf2, 0xe7, 0xb2, 0xd5, 0xaf, 0x9d, 0x45, 0x4f, 0x1d, 0x1d, + 0x56, 0x2f, 0x6d, 0xf4, 0x42, 0xc4, 0xbd, 0xe9, 0xa0, 0x3b, 0x70, 0x81, 0xbb, 0xa8, 0xd5, 0x88, + 0xd3, 0xf2, 0x5c, 0x5f, 0x1d, 0x76, 0x7c, 0x01, 0x5d, 0x3c, 0x3a, 0xac, 0x5e, 0x58, 0xc8, 0x42, + 0xc0, 0xd9, 0xf5, 0xd0, 0x67, 0xa0, 0xd2, 0xf2, 0x23, 0x31, 0x06, 0xc3, 0x46, 0x02, 0xc8, 0x4a, + 0x6d, 0xbd, 0xa1, 0xbe, 0x3f, 0xf9, 0x83, 0x93, 0x0a, 0x68, 0x9b, 0x2b, 0x9b, 0x94, 0x6c, 0x37, + 0x92, 0x9f, 0xec, 0x5b, 0x2c, 0x09, 0xc3, 0x49, 0x85, 0x6b, 0x59, 0x95, 0x91, 0xa7, 0xe1, 0xbf, + 0x62, 0x10, 0x46, 0x6f, 0x02, 0xa2, 0xcc, 0x8f, 0xdb, 0x24, 0x0b, 0x4d, 0x16, 0x0e, 0x9a, 0xe9, + 0xe6, 0xca, 0x86, 0x53, 0x00, 0x6a, 0x74, 0x61, 0xe0, 0x8c, 0x5a, 0xe8, 0x26, 0x3d, 0x81, 0xf4, + 0x52, 0x61, 0xac, 0x2a, 0x19, 0xe6, 0x99, 0x1a, 0x69, 0x87, 0xa4, 0xe9, 0xc4, 0xa4, 0x65, 0x52, + 0xc4, 0xa9, 0x7a, 0xf4, 0x7e, 0x52, 0x99, 0x61, 0xc0, 0x8c, 0xf9, 0xd0, 0x9d, 0x1d, 0x86, 0xca, + 0x9a, 0x3b, 0x41, 0x14, 0xaf, 0x93, 0xf8, 0x7e, 0x10, 0xee, 0x8a, 0x10, 0x5b, 0x49, 0xb4, 0xc7, + 0x04, 0x84, 0x75, 0x3c, 0xca, 0x5b, 0xb2, 0x67, 0xd6, 0xd5, 0x1a, 0x7b, 0xf5, 0x2a, 0x27, 0xfb, + 0xe4, 0x26, 0x2f, 0xc6, 0x12, 
0x2e, 0x51, 0x57, 0xeb, 0x4b, 0xec, 0x05, 0x2b, 0x85, 0xba, 0x5a, + 0x5f, 0xc2, 0x12, 0x8e, 0x48, 0x77, 0x72, 0xcd, 0x89, 0x7c, 0x4d, 0x61, 0xf7, 0x39, 0x3e, 0x60, + 0x7e, 0x4d, 0x1f, 0xa6, 0x54, 0x5a, 0x4f, 0x1e, 0x7b, 0x2c, 0x9a, 0x99, 0x64, 0x8b, 0x64, 0xf0, + 0xc0, 0x65, 0x4a, 0xf7, 0xba, 0x9a, 0xa2, 0x84, 0xbb, 0x68, 0x1b, 0x51, 0x38, 0xa6, 0xfa, 0x66, + 0xf6, 0x99, 0x87, 0x4a, 0xd4, 0xd9, 0x6c, 0x05, 0x7b, 0x8e, 0xeb, 0xb3, 0x07, 0x27, 0x8d, 0x71, + 0x69, 0x48, 0x00, 0x4e, 0x70, 0xd0, 0x0a, 0x94, 0x1d, 0xa9, 0x58, 0x45, 0xf9, 0x6e, 0xf9, 0x4a, + 0x9d, 0xca, 0x3d, 0x55, 0xa5, 0x2a, 0x55, 0xd5, 0x45, 0xaf, 0xc3, 0xb8, 0x70, 0x4c, 0xe2, 0xc1, + 0x0a, 0xd8, 0x83, 0x90, 0x66, 0x79, 0xde, 0xd0, 0x81, 0xd8, 0xc4, 0x45, 0x5f, 0x84, 0x09, 0x4a, + 0x25, 0x39, 0xd8, 0x66, 0xce, 0x0f, 0x72, 0x22, 0x6a, 0x19, 0x1b, 0xf4, 0xca, 0x38, 0x45, 0x0c, + 0xb5, 0xe0, 0x49, 0xa7, 0x13, 0x07, 0x4c, 0x39, 0x6d, 0xae, 0xff, 0x8d, 0x60, 0x97, 0xf8, 0xec, + 0x5d, 0xa8, 0xbc, 0x78, 0xe5, 0xe8, 0xb0, 0xfa, 0xe4, 0x42, 0x0f, 0x3c, 0xdc, 0x93, 0x0a, 0xba, + 0x0b, 0xa3, 0x71, 0xe0, 0x31, 0x1b, 0x70, 0xca, 0x03, 0x3c, 0x96, 0x1f, 0xc5, 0x66, 0x43, 0xa1, + 0xe9, 0x8a, 0x19, 0x55, 0x15, 0xeb, 0x74, 0xd0, 0x06, 0xdf, 0x63, 0x2c, 0xbe, 0x27, 0x89, 0x66, + 0x1e, 0xcf, 0x1f, 0x18, 0x15, 0x06, 0xd4, 0xdc, 0x82, 0xa2, 0x26, 0xd6, 0xc9, 0xa0, 0x1b, 0x30, + 0xdd, 0x0e, 0xdd, 0x80, 0x2d, 0x6c, 0xf5, 0x30, 0x30, 0x63, 0x06, 0xe9, 0xaf, 0xa7, 0x11, 0x70, + 0x77, 0x1d, 0x2a, 0xb8, 0xc9, 0xc2, 0x99, 0x8b, 0x3c, 0xe3, 0x13, 0x67, 0x66, 0x79, 0x19, 0x56, + 0x50, 0xb4, 0xc6, 0xce, 0x65, 0x2e, 0x62, 0xcd, 0xcc, 0xe6, 0x87, 0x33, 0xd0, 0x45, 0x31, 0xce, + 0xe8, 0xa8, 0xbf, 0x38, 0xa1, 0x40, 0xef, 0x8d, 0x68, 0xc7, 0x09, 0x49, 0x3d, 0x0c, 0x9a, 0x84, + 0x77, 0x86, 0x9b, 0x9f, 0x3f, 0xc1, 0xc3, 0x10, 0xd2, 0x7b, 0xa3, 0x91, 0x85, 0x80, 0xb3, 0xeb, + 0xcd, 0x7e, 0x0f, 0x4c, 0x77, 0x9d, 0xe4, 0x27, 0x8a, 0x4d, 0xfd, 0xa7, 0x43, 0x50, 0x51, 0x8a, + 0x5f, 0x34, 0x6f, 0xea, 0xf3, 0x2f, 0xa6, 0xf5, 0xf9, 0x65, 0xca, 0x7f, 0xea, 0x2a, 0xfc, 0x0d, + 0xc3, 0xf0, 0xab, 0x90, 0x9f, 0x09, 0x4a, 0xd7, 0x7a, 0xf4, 0xf5, 0xf2, 0xd2, 0xe4, 0xf8, 0xe2, + 0xc0, 0x0f, 0x03, 0xa5, 0x9e, 0xaa, 0x81, 0x01, 0x13, 0xb1, 0x52, 0x51, 0xb7, 0x1d, 0xb4, 0x56, + 0xeb, 0xe9, 0xcc, 0x84, 0x75, 0x5a, 0x88, 0x39, 0x8c, 0x09, 0x2b, 0x94, 0xed, 0x60, 0xc2, 0xca, + 0xc8, 0x43, 0x0a, 0x2b, 0x92, 0x00, 0x4e, 0x68, 0x21, 0x0f, 0xa6, 0x9b, 0x66, 0x52, 0x49, 0xe5, + 0xd9, 0xf5, 0x74, 0xdf, 0xf4, 0x8e, 0x1d, 0x2d, 0x83, 0xd7, 0x52, 0x9a, 0x0a, 0xee, 0x26, 0x8c, + 0x5e, 0x87, 0xf2, 0x7b, 0x41, 0xc4, 0xb6, 0x85, 0xb8, 0x7b, 0xa5, 0x07, 0x4c, 0xf9, 0xad, 0x3b, + 0x0d, 0x56, 0x7e, 0x7c, 0x58, 0x1d, 0xad, 0x07, 0x2d, 0xf9, 0x17, 0xab, 0x0a, 0xe8, 0x01, 0x5c, + 0x30, 0x4e, 0x2c, 0xd5, 0x5d, 0x18, 0xbc, 0xbb, 0x97, 0x44, 0x73, 0x17, 0x56, 0xb3, 0x28, 0xe1, + 0xec, 0x06, 0xe8, 0x31, 0xe0, 0x07, 0x22, 0x21, 0xab, 0xbc, 0xdf, 0xd9, 0x35, 0x5e, 0xd1, 0xfd, + 0x9f, 0x53, 0x08, 0xb8, 0xbb, 0x8e, 0xfd, 0x75, 0xae, 0x27, 0x17, 0xda, 0x34, 0x12, 0x75, 0xbc, + 0xb3, 0xc8, 0xf7, 0xb3, 0x6c, 0x28, 0xfa, 0x1e, 0xfa, 0x2d, 0xe6, 0xb7, 0x2c, 0xf6, 0x16, 0xb3, + 0x41, 0xf6, 0xda, 0x9e, 0x13, 0x9f, 0x85, 0xe7, 0xc5, 0x5b, 0x50, 0x8e, 0x45, 0x6b, 0xbd, 0x52, + 0x14, 0x69, 0x9d, 0x62, 0xef, 0x51, 0xea, 0xe6, 0x97, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x05, 0x9f, + 0x01, 0x09, 0x39, 0x03, 0xa5, 0x4b, 0xcd, 0x54, 0xba, 0x54, 0xfb, 0x7c, 0x41, 0x8e, 0xf2, 0xe5, + 0x9f, 0x9b, 0xfd, 0x66, 0x42, 0xd6, 0x47, 0xfd, 0x11, 0xd0, 0xfe, 0x29, 0x0b, 0xce, 0x67, 0x59, + 0xcd, 0x50, 0x6e, 0x8d, 0x8b, 0x78, 0xea, 0x51, 0x54, 
0x8d, 0xe0, 0x3d, 0x51, 0x8e, 0x15, 0xc6, + 0xc0, 0xd1, 0xff, 0x4f, 0x16, 0x22, 0xec, 0x0e, 0x8c, 0xd7, 0x43, 0xa2, 0xdd, 0x01, 0x6f, 0x70, + 0x57, 0x2a, 0xde, 0x9f, 0xe7, 0x4f, 0xec, 0x46, 0x65, 0xff, 0x62, 0x01, 0xce, 0xf3, 0x57, 0x8d, + 0x85, 0xfd, 0xc0, 0x6d, 0xd5, 0x83, 0x96, 0xc8, 0xdc, 0xf0, 0x05, 0x18, 0x6b, 0x6b, 0x72, 0x79, + 0xaf, 0x20, 0x45, 0xba, 0xfc, 0x9e, 0xc8, 0x47, 0x7a, 0x29, 0x36, 0x68, 0xa1, 0x16, 0x8c, 0x91, + 0x7d, 0xb7, 0xa9, 0x54, 0xe3, 0x85, 0x13, 0xdf, 0x0d, 0xaa, 0x95, 0x65, 0x8d, 0x0e, 0x36, 0xa8, + 0x3e, 0x82, 0x64, 0x5e, 0xf6, 0x4f, 0x5b, 0xf0, 0x78, 0x4e, 0x48, 0x23, 0xda, 0xdc, 0x7d, 0xf6, + 0x7e, 0x24, 0xf2, 0x02, 0xa9, 0xe6, 0xf8, 0xab, 0x12, 0x16, 0x50, 0xf4, 0x39, 0x00, 0xfe, 0x2a, + 0x44, 0xc5, 0x85, 0x7e, 0xb1, 0x5f, 0x8c, 0xb0, 0x15, 0x5a, 0xb8, 0x01, 0x59, 0x1f, 0x6b, 0xb4, + 0xec, 0x9f, 0x2f, 0xc2, 0x10, 0x7b, 0x85, 0x40, 0x2b, 0x30, 0xb2, 0xc3, 0x03, 0xf8, 0x0e, 0x12, + 0x2b, 0x38, 0x91, 0xbb, 0x78, 0x01, 0x96, 0x95, 0xd1, 0x1a, 0x9c, 0xe3, 0x01, 0x90, 0xbd, 0x1a, + 0xf1, 0x9c, 0x03, 0x29, 0xbe, 0xf3, 0x5c, 0x3a, 0x2a, 0x74, 0xc2, 0x6a, 0x37, 0x0a, 0xce, 0xaa, + 0x87, 0xde, 0x80, 0x89, 0xd8, 0xdd, 0x23, 0x41, 0x27, 0x96, 0x94, 0x78, 0xe8, 0x63, 0xc5, 0xec, + 0x6f, 0x18, 0x50, 0x9c, 0xc2, 0xa6, 0x82, 0x48, 0xbb, 0x4b, 0x51, 0xa1, 0x65, 0xa7, 0x37, 0x95, + 0x13, 0x26, 0x2e, 0x33, 0x97, 0xe9, 0x30, 0xe3, 0xa0, 0x8d, 0x9d, 0x90, 0x44, 0x3b, 0x81, 0xd7, + 0x12, 0xa9, 0x98, 0x13, 0x73, 0x99, 0x14, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0xb6, 0x1c, 0xd7, 0xeb, + 0x84, 0x24, 0xa1, 0x32, 0x6c, 0x52, 0x59, 0x49, 0xc1, 0x71, 0x57, 0x0d, 0xba, 0x8e, 0x2e, 0x88, + 0xdc, 0xc8, 0xd2, 0xa1, 0x5b, 0xd9, 0x40, 0x8d, 0x48, 0xd7, 0x96, 0x1e, 0x11, 0x4d, 0x84, 0x95, + 0x88, 0xca, 0xae, 0xac, 0x65, 0xde, 0x14, 0x4e, 0x2d, 0x92, 0xca, 0xc3, 0x64, 0xe8, 0xfd, 0x7d, + 0x0b, 0xce, 0x65, 0xd8, 0x5a, 0xf2, 0xa3, 0x6a, 0xdb, 0x8d, 0x62, 0x95, 0x2f, 0x44, 0x3b, 0xaa, + 0x78, 0x39, 0x56, 0x18, 0x74, 0x3f, 0xf0, 0xc3, 0x30, 0x7d, 0x00, 0x0a, 0x5b, 0x26, 0x01, 0x3d, + 0xd9, 0x01, 0x88, 0xae, 0x40, 0xa9, 0x13, 0x11, 0x19, 0x8b, 0x48, 0x9d, 0xdf, 0x4c, 0x55, 0xca, + 0x20, 0x94, 0x35, 0xdd, 0x56, 0x5a, 0x4a, 0x8d, 0x35, 0xe5, 0xaa, 0x47, 0x0e, 0xb3, 0xbf, 0x5a, + 0x84, 0x8b, 0xb9, 0xb6, 0xd4, 0xb4, 0x4b, 0x7b, 0x81, 0xef, 0xc6, 0x81, 0x7a, 0xe1, 0xe2, 0xd1, + 0x30, 0x48, 0x7b, 0x67, 0x4d, 0x94, 0x63, 0x85, 0x81, 0xae, 0xca, 0x2c, 0xdd, 0xe9, 0x8c, 0x28, + 0x8b, 0x35, 0x23, 0x51, 0xf7, 0xa0, 0xd9, 0xa6, 0x9e, 0x86, 0x52, 0x3b, 0x08, 0xbc, 0xf4, 0x61, + 0x44, 0xbb, 0x1b, 0x04, 0x1e, 0x66, 0x40, 0xf4, 0x09, 0x31, 0x0e, 0xa9, 0x27, 0x1d, 0xec, 0xb4, + 0x82, 0x48, 0x1b, 0x8c, 0x67, 0x61, 0x64, 0x97, 0x1c, 0x84, 0xae, 0xbf, 0x9d, 0x7e, 0xea, 0xbb, + 0xc5, 0x8b, 0xb1, 0x84, 0x9b, 0x09, 0x01, 0x46, 0x4e, 0x3b, 0x4d, 0x54, 0xb9, 0xef, 0xd5, 0xf6, + 0xa3, 0x45, 0x98, 0xc4, 0x8b, 0xb5, 0xef, 0x4c, 0xc4, 0xdd, 0xee, 0x89, 0x38, 0xed, 0x34, 0x51, + 0xfd, 0x67, 0xe3, 0x57, 0x2d, 0x98, 0x64, 0x41, 0x73, 0x45, 0x0c, 0x06, 0x37, 0xf0, 0xcf, 0x80, + 0x75, 0x7b, 0x1a, 0x86, 0x42, 0xda, 0x68, 0x3a, 0xf7, 0x0b, 0xeb, 0x09, 0xe6, 0x30, 0xf4, 0x24, + 0x94, 0x58, 0x17, 0xe8, 0xe4, 0x8d, 0xf1, 0xb0, 0xf9, 0x35, 0x27, 0x76, 0x30, 0x2b, 0x65, 0x8e, + 0xc5, 0x98, 0xb4, 0x3d, 0x97, 0x77, 0x3a, 0x51, 0xf5, 0x7f, 0x34, 0x1c, 0x8b, 0x33, 0xbb, 0xf6, + 0xc1, 0x1c, 0x8b, 0xb3, 0x49, 0xf6, 0x16, 0x8b, 0xfe, 0x67, 0x01, 0x2e, 0x67, 0xd6, 0x1b, 0xd8, + 0xb1, 0xb8, 0x77, 0xed, 0xd3, 0xb1, 0xd8, 0xc8, 0x36, 0xa4, 0x28, 0x9e, 0xa1, 0x21, 0x45, 0x69, + 0x50, 0xce, 0x71, 0x68, 0x00, 0x7f, 0xdf, 0xcc, 0x21, 0xfb, 0x88, 0xf8, 0xfb, 
0x66, 0xf6, 0x2d, + 0x47, 0xac, 0xfb, 0xf3, 0x42, 0xce, 0xb7, 0x30, 0x01, 0xef, 0x1a, 0x3d, 0x67, 0x18, 0x30, 0x12, + 0x9c, 0xf0, 0x18, 0x3f, 0x63, 0x78, 0x19, 0x56, 0x50, 0xe4, 0x6a, 0x9e, 0xb3, 0x85, 0xfc, 0xcc, + 0x80, 0xb9, 0x4d, 0xcd, 0x99, 0x2f, 0x33, 0x6a, 0x08, 0x32, 0xbc, 0x68, 0xd7, 0x34, 0xa1, 0xbc, + 0x38, 0xb8, 0x50, 0x3e, 0x96, 0x2d, 0x90, 0xa3, 0x05, 0x98, 0xdc, 0x73, 0x7d, 0x96, 0xe9, 0xdd, + 0x64, 0x45, 0x55, 0x20, 0x89, 0x35, 0x13, 0x8c, 0xd3, 0xf8, 0xb3, 0xaf, 0xc3, 0xf8, 0xc3, 0xab, + 0x23, 0xbf, 0x55, 0x84, 0x27, 0x7a, 0x6c, 0x7b, 0x7e, 0xd6, 0x1b, 0x73, 0xa0, 0x9d, 0xf5, 0x5d, + 0xf3, 0x50, 0x87, 0xf3, 0x5b, 0x1d, 0xcf, 0x3b, 0x60, 0xb6, 0x8a, 0xa4, 0x25, 0x31, 0x04, 0xaf, + 0x28, 0xf3, 0xf7, 0x9f, 0x5f, 0xc9, 0xc0, 0xc1, 0x99, 0x35, 0xd1, 0x9b, 0x80, 0x02, 0x91, 0x96, + 0xf4, 0x06, 0xf1, 0x85, 0xbe, 0x9b, 0x0d, 0x7c, 0x31, 0xd9, 0x8c, 0x77, 0xba, 0x30, 0x70, 0x46, + 0x2d, 0xca, 0xf4, 0xd3, 0x5b, 0xe9, 0x40, 0x75, 0x2b, 0xc5, 0xf4, 0x63, 0x1d, 0x88, 0x4d, 0x5c, + 0x74, 0x03, 0xa6, 0x9d, 0x7d, 0xc7, 0xe5, 0x01, 0xd6, 0x24, 0x01, 0xce, 0xf5, 0x2b, 0x25, 0xd8, + 0x42, 0x1a, 0x01, 0x77, 0xd7, 0x49, 0xb9, 0xee, 0x0e, 0xe7, 0xbb, 0xee, 0xf6, 0x3e, 0x17, 0xfb, + 0xe9, 0x74, 0xed, 0xff, 0x62, 0xd1, 0xeb, 0x2b, 0x23, 0xb5, 0x38, 0x1d, 0x07, 0xa5, 0x9b, 0xd4, + 0xbc, 0x68, 0xd5, 0x38, 0x2c, 0xe9, 0x40, 0x6c, 0xe2, 0xf2, 0x05, 0x11, 0x25, 0x0e, 0x1d, 0x06, + 0xeb, 0x2e, 0xdc, 0xe4, 0x15, 0x06, 0xfa, 0x3c, 0x8c, 0xb4, 0xdc, 0x7d, 0x37, 0x0a, 0x42, 0xb1, + 0x59, 0x4e, 0x68, 0x16, 0x9f, 0x9c, 0x83, 0x35, 0x4e, 0x06, 0x4b, 0x7a, 0xf6, 0x8f, 0x16, 0x60, + 0x5c, 0xb6, 0xf8, 0x56, 0x27, 0x88, 0x9d, 0x33, 0xb8, 0x96, 0x6f, 0x18, 0xd7, 0xf2, 0x27, 0x7a, + 0xc5, 0x0a, 0x60, 0x5d, 0xca, 0xbd, 0x8e, 0xef, 0xa4, 0xae, 0xe3, 0x67, 0xfa, 0x93, 0xea, 0x7d, + 0x0d, 0xff, 0x4b, 0x0b, 0xa6, 0x0d, 0xfc, 0x33, 0xb8, 0x0d, 0x56, 0xcc, 0xdb, 0xe0, 0xa9, 0xbe, + 0xdf, 0x90, 0x73, 0x0b, 0x7c, 0xad, 0x90, 0xea, 0x3b, 0x3b, 0xfd, 0xdf, 0x83, 0xd2, 0x8e, 0x13, + 0xb6, 0x7a, 0xc5, 0x24, 0xed, 0xaa, 0x34, 0x77, 0xd3, 0x09, 0x5b, 0xfc, 0x0c, 0x7f, 0x5e, 0x25, + 0x33, 0x74, 0xc2, 0x56, 0x5f, 0xff, 0x25, 0xd6, 0x14, 0x7a, 0x0d, 0x86, 0xa3, 0x66, 0xd0, 0x56, + 0xd6, 0x85, 0x57, 0x78, 0xa2, 0x43, 0x5a, 0x72, 0x7c, 0x58, 0x45, 0x66, 0x73, 0xb4, 0x18, 0x0b, + 0xfc, 0xd9, 0x6d, 0xa8, 0xa8, 0xa6, 0x1f, 0xa9, 0x6f, 0xc8, 0x7f, 0x2a, 0xc2, 0xb9, 0x8c, 0x75, + 0x81, 0x22, 0x63, 0xb4, 0x5e, 0x1c, 0x70, 0x39, 0x7d, 0xc0, 0xf1, 0x8a, 0x98, 0xc4, 0xd2, 0x12, + 0xf3, 0x3f, 0x70, 0xa3, 0x77, 0x23, 0x92, 0x6e, 0x94, 0x16, 0xf5, 0x6f, 0x94, 0x36, 0x76, 0x66, + 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa4, 0x73, 0xfa, 0x27, 0x45, 0x38, 0x9f, 0x15, 0x62, 0x04, + 0xfd, 0x40, 0x2a, 0x2b, 0xc9, 0xcb, 0x83, 0x06, 0x27, 0xe1, 0xa9, 0x4a, 0x44, 0x8e, 0xdd, 0x39, + 0x33, 0x4f, 0x49, 0xdf, 0x61, 0x16, 0x6d, 0x32, 0x87, 0xc2, 0x90, 0x67, 0x93, 0x91, 0x5b, 0xfc, + 0x53, 0x03, 0x77, 0x40, 0xa4, 0xa1, 0x89, 0x52, 0x0e, 0x85, 0xb2, 0xb8, 0xbf, 0x43, 0xa1, 0x6c, + 0x79, 0xd6, 0x85, 0x51, 0xed, 0x6b, 0x1e, 0xe9, 0x8c, 0xef, 0xd2, 0x1b, 0x45, 0xeb, 0xf7, 0x23, + 0x9d, 0xf5, 0x9f, 0xb6, 0x20, 0x65, 0xd3, 0xa7, 0x54, 0x52, 0x56, 0xae, 0x4a, 0xea, 0x0a, 0x94, + 0xc2, 0xc0, 0x23, 0xe9, 0x44, 0x21, 0x38, 0xf0, 0x08, 0x66, 0x10, 0x8a, 0x11, 0x27, 0x0a, 0x89, + 0x31, 0x5d, 0xd8, 0x12, 0x62, 0xd4, 0xd3, 0x30, 0xe4, 0x91, 0x7d, 0xe2, 0xa5, 0xa3, 0x70, 0xdf, + 0xa6, 0x85, 0x98, 0xc3, 0xec, 0x5f, 0x2d, 0xc1, 0xa5, 0x9e, 0x2e, 0xb9, 0x54, 0x64, 0xd9, 0x76, + 0x62, 0x72, 0xdf, 0x39, 0x48, 0x87, 0xcb, 0xbd, 0xc1, 0x8b, 0xb1, 0x84, 0x33, 0x0b, 0x64, 0x1e, + 0x1e, 
0x2f, 0xa5, 0xc0, 0x13, 0x51, 0xf1, 0x04, 0xf4, 0x11, 0xe4, 0x17, 0xbf, 0x0e, 0x10, 0x45, + 0xde, 0xb2, 0x4f, 0x39, 0xb0, 0x96, 0x30, 0x6d, 0x4e, 0xc2, 0x28, 0x36, 0x6e, 0x0b, 0x08, 0xd6, + 0xb0, 0x50, 0x0d, 0xa6, 0xda, 0x61, 0x10, 0x73, 0x7d, 0x68, 0x8d, 0x1b, 0xc9, 0x0c, 0x99, 0xde, + 0x90, 0xf5, 0x14, 0x1c, 0x77, 0xd5, 0x40, 0xaf, 0xc0, 0xa8, 0xf0, 0x90, 0xac, 0x07, 0x81, 0x27, + 0x54, 0x35, 0xca, 0xe4, 0xa2, 0x91, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0x4a, 0xd6, 0x91, 0xcc, + 0x6a, 0x5c, 0xd1, 0xaa, 0xe1, 0xa5, 0xc2, 0x0d, 0x95, 0x07, 0x0a, 0x37, 0x94, 0x28, 0xaf, 0x2a, + 0x03, 0xbf, 0x2b, 0x41, 0x5f, 0x75, 0xcf, 0x2f, 0x95, 0xe0, 0x9c, 0x58, 0x38, 0x8f, 0x7a, 0xb9, + 0x3c, 0xa2, 0x2c, 0xe8, 0xdf, 0x59, 0x33, 0x67, 0xbd, 0x66, 0xbe, 0x5e, 0x84, 0x61, 0x3e, 0x15, + 0x67, 0xc0, 0xc3, 0xaf, 0x08, 0xa5, 0x5f, 0x8f, 0x40, 0x3b, 0xbc, 0x2f, 0x73, 0x35, 0x27, 0x76, + 0xf8, 0xfd, 0xa5, 0x8e, 0xd1, 0x44, 0x3d, 0x88, 0xe6, 0x8c, 0x83, 0x76, 0x36, 0xa5, 0xd5, 0x02, + 0x4e, 0x43, 0x3b, 0x76, 0xbf, 0x04, 0x10, 0xb1, 0x4c, 0xdc, 0x94, 0x86, 0x08, 0xd9, 0xf4, 0xc9, + 0x1e, 0xad, 0x37, 0x14, 0x32, 0xef, 0x43, 0xb2, 0x04, 0x15, 0x00, 0x6b, 0x14, 0x67, 0x5f, 0x85, + 0x8a, 0x42, 0xee, 0xa7, 0x02, 0x18, 0xd3, 0x6f, 0xbd, 0xcf, 0xc2, 0x64, 0xaa, 0xad, 0x13, 0x69, + 0x10, 0x7e, 0xcd, 0x82, 0x49, 0xde, 0xe5, 0x65, 0x7f, 0x5f, 0x6c, 0xf6, 0xf7, 0xe1, 0xbc, 0x97, + 0xb1, 0xe9, 0xc4, 0x8c, 0x0e, 0xbe, 0x49, 0x95, 0xc6, 0x20, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0xd7, + 0xa0, 0xcc, 0x5d, 0x76, 0x1c, 0x4f, 0xb8, 0x59, 0x8c, 0xf1, 0x28, 0xf9, 0xbc, 0x0c, 0x2b, 0xa8, + 0xfd, 0xbb, 0x16, 0x4c, 0xf3, 0x9e, 0xdf, 0x22, 0x07, 0x4a, 0x3a, 0xfe, 0x30, 0xfb, 0x2e, 0x92, + 0x00, 0x14, 0x72, 0x92, 0x00, 0xe8, 0x9f, 0x56, 0xec, 0xf9, 0x69, 0xbf, 0x68, 0x81, 0x58, 0x81, + 0x67, 0x20, 0x07, 0x7e, 0x8f, 0x29, 0x07, 0xce, 0xe6, 0x2f, 0xea, 0x1c, 0x01, 0xf0, 0xcf, 0x2c, + 0x98, 0xe2, 0x08, 0xc9, 0x43, 0xe4, 0x87, 0x3a, 0x0f, 0x83, 0x64, 0xf3, 0x52, 0xe9, 0x7b, 0xb3, + 0x3f, 0xca, 0x98, 0xac, 0x52, 0xcf, 0xc9, 0x6a, 0xc9, 0x0d, 0x74, 0x82, 0x2c, 0x75, 0x27, 0x0e, + 0xa6, 0x6b, 0xff, 0xb1, 0x05, 0x88, 0x37, 0x63, 0xdc, 0xcb, 0xf4, 0xb6, 0x63, 0xa5, 0x9a, 0x26, + 0x28, 0x39, 0x6a, 0x14, 0x04, 0x6b, 0x58, 0xa7, 0x32, 0x3c, 0xa9, 0xd7, 0xe4, 0x62, 0xff, 0xd7, + 0xe4, 0x13, 0x8c, 0xe8, 0xd7, 0x4b, 0x90, 0xb6, 0xd1, 0x46, 0xf7, 0x60, 0xac, 0xe9, 0xb4, 0x9d, + 0x4d, 0xd7, 0x73, 0x63, 0x97, 0x44, 0xbd, 0xcc, 0x50, 0x96, 0x34, 0x3c, 0xf1, 0x4e, 0xa8, 0x95, + 0x60, 0x83, 0x0e, 0x9a, 0x03, 0x68, 0x87, 0xee, 0xbe, 0xeb, 0x91, 0x6d, 0x26, 0x0a, 0x33, 0xc7, + 0x2e, 0x6e, 0x5b, 0x21, 0x4b, 0xb1, 0x86, 0x91, 0xe1, 0x08, 0x54, 0x7c, 0x74, 0x8e, 0x40, 0xa5, + 0x13, 0x3a, 0x02, 0x0d, 0x0d, 0xe4, 0x08, 0x84, 0xe1, 0x31, 0x79, 0x77, 0xd3, 0xff, 0x2b, 0xae, + 0x47, 0x04, 0xc3, 0xc6, 0xdd, 0xbd, 0x66, 0x8f, 0x0e, 0xab, 0x8f, 0xe1, 0x4c, 0x0c, 0x9c, 0x53, + 0x13, 0x7d, 0x0e, 0x66, 0x1c, 0xcf, 0x0b, 0xee, 0xab, 0x51, 0x5b, 0x8e, 0x9a, 0x8e, 0xc7, 0xd5, + 0xbd, 0x23, 0x8c, 0xea, 0x93, 0x47, 0x87, 0xd5, 0x99, 0x85, 0x1c, 0x1c, 0x9c, 0x5b, 0x3b, 0xe5, + 0x47, 0x54, 0xee, 0xeb, 0x47, 0xb4, 0x0b, 0xe7, 0x1a, 0x24, 0x74, 0x59, 0x0e, 0xbd, 0x56, 0xb2, + 0x25, 0x37, 0xa0, 0x12, 0xa6, 0x0e, 0xa1, 0x81, 0x62, 0xc0, 0x68, 0x71, 0x42, 0xe5, 0xa1, 0x93, + 0x10, 0xb2, 0xff, 0xd4, 0x82, 0x11, 0x61, 0x27, 0x7e, 0x06, 0xbc, 0xcf, 0x82, 0xa1, 0xbf, 0xac, + 0x66, 0x1f, 0xd4, 0xac, 0x33, 0xb9, 0x9a, 0xcb, 0xd5, 0x94, 0xe6, 0xf2, 0xa9, 0x5e, 0x44, 0x7a, + 0xeb, 0x2c, 0x7f, 0xb2, 0x08, 0x13, 0xa6, 0x8d, 0xfc, 0x19, 0x0c, 0xc1, 0x3a, 0x8c, 0x44, 0xc2, + 0x21, 0xa3, 0x90, 0x6f, 0x38, 
0x9b, 0x9e, 0xc4, 0xc4, 0x2a, 0x46, 0xb8, 0x60, 0x48, 0x22, 0x99, + 0x9e, 0x1e, 0xc5, 0x47, 0xe8, 0xe9, 0xd1, 0xcf, 0x4d, 0xa1, 0x74, 0x1a, 0x6e, 0x0a, 0xf6, 0x37, + 0xd8, 0x65, 0xa1, 0x97, 0x9f, 0x01, 0x1f, 0x71, 0xc3, 0xbc, 0x56, 0xec, 0x1e, 0x2b, 0x4b, 0x74, + 0x2a, 0x87, 0x9f, 0xf8, 0x15, 0x0b, 0x2e, 0x65, 0x7c, 0x95, 0xc6, 0x5c, 0x3c, 0x0f, 0x65, 0xa7, + 0xd3, 0x72, 0xd5, 0x5e, 0xd6, 0x5e, 0x31, 0x16, 0x44, 0x39, 0x56, 0x18, 0x68, 0x09, 0xa6, 0xc9, + 0x83, 0xb6, 0xcb, 0x9f, 0x91, 0x74, 0xd3, 0xb5, 0x22, 0x8f, 0x81, 0xb9, 0x9c, 0x06, 0xe2, 0x6e, + 0x7c, 0xe5, 0x39, 0x5a, 0xcc, 0xf5, 0x1c, 0xfd, 0x27, 0x16, 0x8c, 0x8a, 0x6e, 0x9f, 0xc1, 0x68, + 0x7f, 0xaf, 0x39, 0xda, 0x4f, 0xf4, 0x18, 0xed, 0x9c, 0x61, 0xfe, 0x7b, 0x05, 0xd5, 0xdf, 0x7a, + 0x10, 0xc6, 0x03, 0x30, 0x2d, 0xaf, 0x41, 0x99, 0x0a, 0xbd, 0x41, 0x33, 0xf0, 0x04, 0xcf, 0xf2, + 0x64, 0xe2, 0xd8, 0xcc, 0xcb, 0x8f, 0xb5, 0xdf, 0x58, 0x61, 0xb3, 0xd1, 0x0b, 0xc2, 0x58, 0xf0, + 0x09, 0xc9, 0xe8, 0x05, 0x61, 0x8c, 0x19, 0x04, 0xb5, 0x00, 0x62, 0x27, 0xdc, 0x26, 0x31, 0x2d, + 0x13, 0x31, 0x12, 0xf2, 0x0f, 0x8f, 0x4e, 0xec, 0x7a, 0x73, 0xae, 0x1f, 0x47, 0x71, 0x38, 0xb7, + 0xea, 0xc7, 0x77, 0x42, 0x2e, 0x02, 0x69, 0x9e, 0xca, 0x8a, 0x16, 0xd6, 0xe8, 0x4a, 0xd7, 0x37, + 0xd6, 0xc6, 0x90, 0xf9, 0x1e, 0xba, 0x2e, 0xca, 0xb1, 0xc2, 0xb0, 0x5f, 0x65, 0x57, 0x09, 0x1b, + 0xa0, 0x93, 0x39, 0x11, 0x7f, 0xb3, 0xac, 0x86, 0x96, 0x3d, 0x86, 0xd4, 0x74, 0x57, 0xe5, 0xde, + 0x27, 0x37, 0x6d, 0x58, 0x77, 0xa3, 0x48, 0xfc, 0x99, 0xd1, 0xf7, 0x75, 0x3d, 0x93, 0xbf, 0xd0, + 0xe7, 0x0a, 0x38, 0xc1, 0xc3, 0x38, 0x8b, 0xcb, 0xcb, 0xe2, 0x97, 0xae, 0xd6, 0xc5, 0x22, 0xd7, + 0xe2, 0xf2, 0x0a, 0x00, 0x4e, 0x70, 0xd0, 0xbc, 0x10, 0xa0, 0x4b, 0x46, 0xfa, 0x2c, 0x29, 0x40, + 0xcb, 0xcf, 0xd7, 0x24, 0xe8, 0x17, 0x61, 0x54, 0xa5, 0xd1, 0xaa, 0xf3, 0x6c, 0x44, 0x22, 0x62, + 0xc4, 0x72, 0x52, 0x8c, 0x75, 0x1c, 0xb4, 0x01, 0x93, 0x11, 0x4f, 0x28, 0xa6, 0x02, 0x83, 0x71, + 0xf5, 0xc8, 0x27, 0xe5, 0xf3, 0x7a, 0xc3, 0x04, 0x1f, 0xb3, 0x22, 0x7e, 0x74, 0x48, 0xff, 0xb5, + 0x34, 0x09, 0xf4, 0x06, 0x4c, 0x78, 0x7a, 0x32, 0xea, 0xba, 0xd0, 0x9e, 0x28, 0xeb, 0x53, 0x23, + 0x55, 0x75, 0x1d, 0xa7, 0xb0, 0x29, 0xaf, 0xa3, 0x97, 0x88, 0x60, 0x76, 0x8e, 0xbf, 0x4d, 0x22, + 0x91, 0x04, 0x88, 0xf1, 0x3a, 0xb7, 0x73, 0x70, 0x70, 0x6e, 0x6d, 0xf4, 0x1a, 0x8c, 0xc9, 0xcf, + 0xd7, 0xbc, 0x33, 0x13, 0x1b, 0x67, 0x0d, 0x86, 0x0d, 0x4c, 0x74, 0x1f, 0x2e, 0xc8, 0xff, 0x1b, + 0xa1, 0xb3, 0xb5, 0xe5, 0x36, 0x85, 0x73, 0x2c, 0x77, 0xf4, 0x58, 0x90, 0x9e, 0x23, 0xcb, 0x59, + 0x48, 0xc7, 0x87, 0xd5, 0x2b, 0x62, 0xd4, 0x32, 0xe1, 0x6c, 0x12, 0xb3, 0xe9, 0xa3, 0x35, 0x38, + 0xb7, 0x43, 0x1c, 0x2f, 0xde, 0x59, 0xda, 0x21, 0xcd, 0x5d, 0xb9, 0x89, 0x98, 0xcf, 0xa7, 0x66, + 0x19, 0x7c, 0xb3, 0x1b, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc0, 0x4c, 0xbb, 0xb3, 0xe9, 0xb9, 0xd1, + 0xce, 0x7a, 0x10, 0xb3, 0x17, 0x7d, 0x95, 0x85, 0x4a, 0x38, 0x87, 0x2a, 0x7f, 0xd7, 0x7a, 0x0e, + 0x1e, 0xce, 0xa5, 0x80, 0xde, 0x87, 0x0b, 0xa9, 0xc5, 0x20, 0x5c, 0xd5, 0x26, 0xf2, 0x43, 0x83, + 0x36, 0xb2, 0x2a, 0x08, 0xd7, 0xb3, 0x2c, 0x10, 0xce, 0x6e, 0xe2, 0x83, 0xd9, 0x79, 0xbc, 0x47, + 0x2b, 0x6b, 0x4c, 0x19, 0xfa, 0x32, 0x8c, 0xe9, 0xab, 0x48, 0x5c, 0x30, 0x57, 0xfb, 0x25, 0x5e, + 0x17, 0x2c, 0x9d, 0x5a, 0x51, 0x3a, 0x0c, 0x1b, 0x14, 0x6d, 0x02, 0xd9, 0xdf, 0x87, 0x6e, 0x43, + 0xb9, 0xe9, 0xb9, 0xc4, 0x8f, 0x57, 0xeb, 0xbd, 0x82, 0x20, 0x2c, 0x09, 0x1c, 0x31, 0x60, 0x22, + 0x96, 0x22, 0x2f, 0xc3, 0x8a, 0x82, 0xfd, 0x9b, 0x05, 0xa8, 0xf6, 0x09, 0xcc, 0x99, 0x52, 0x75, + 0x5a, 0x03, 0xa9, 0x3a, 0x17, 0x64, 0x4e, 0xad, 0xf5, 
0x94, 0x98, 0x9d, 0xca, 0x97, 0x95, 0x08, + 0xdb, 0x69, 0xfc, 0x81, 0xcd, 0x43, 0x75, 0x6d, 0x69, 0xa9, 0xaf, 0xe1, 0xb2, 0xf1, 0x4a, 0x32, + 0x34, 0xb8, 0x20, 0x92, 0xab, 0xf1, 0xb6, 0xbf, 0x51, 0x80, 0x0b, 0x6a, 0x08, 0xbf, 0x7d, 0x07, + 0xee, 0x6e, 0xf7, 0xc0, 0x9d, 0xc2, 0x7b, 0x81, 0x7d, 0x07, 0x86, 0x1b, 0x07, 0x51, 0x33, 0xf6, + 0x06, 0x60, 0x80, 0x9e, 0x36, 0x83, 0x01, 0xa9, 0x6b, 0xda, 0x08, 0x08, 0xf4, 0xd7, 0x2c, 0x98, + 0xdc, 0x58, 0xaa, 0x37, 0x82, 0xe6, 0x2e, 0x89, 0x17, 0x38, 0xc3, 0x8a, 0x05, 0xff, 0x63, 0x3d, + 0x24, 0x5f, 0x93, 0xc5, 0x31, 0x5d, 0x81, 0xd2, 0x4e, 0x10, 0xc5, 0xe9, 0xc7, 0xc4, 0x9b, 0x41, + 0x14, 0x63, 0x06, 0xb1, 0x7f, 0xcf, 0x82, 0x21, 0x96, 0x09, 0xb2, 0x5f, 0x7a, 0xd2, 0x41, 0xbe, + 0x0b, 0xbd, 0x02, 0xc3, 0x64, 0x6b, 0x8b, 0x34, 0x63, 0x31, 0xab, 0xd2, 0x1b, 0x71, 0x78, 0x99, + 0x95, 0xd2, 0x4b, 0x9f, 0x35, 0xc6, 0xff, 0x62, 0x81, 0x8c, 0xde, 0x86, 0x4a, 0xec, 0xee, 0x91, + 0x85, 0x56, 0x4b, 0x3c, 0xc7, 0x3c, 0x84, 0xf3, 0xe7, 0x86, 0x24, 0x80, 0x13, 0x5a, 0xf6, 0x57, + 0x0b, 0x00, 0x89, 0x47, 0x75, 0xbf, 0x4f, 0x5c, 0xec, 0xca, 0xc0, 0x7a, 0x35, 0x23, 0x03, 0x2b, + 0x4a, 0x08, 0x66, 0xe4, 0x5f, 0x55, 0xc3, 0x54, 0x1c, 0x68, 0x98, 0x4a, 0x27, 0x19, 0xa6, 0x25, + 0x98, 0x4e, 0x3c, 0xc2, 0xcd, 0xf0, 0x18, 0x4c, 0x48, 0xd9, 0x48, 0x03, 0x71, 0x37, 0xbe, 0xfd, + 0xc3, 0x16, 0x08, 0xaf, 0x8a, 0x01, 0x16, 0xf3, 0x17, 0x64, 0xfe, 0x42, 0x23, 0xbe, 0xef, 0x95, + 0x7c, 0x37, 0x13, 0x11, 0xd5, 0x57, 0x5d, 0x1e, 0x46, 0x2c, 0x5f, 0x83, 0x96, 0xdd, 0x02, 0x01, + 0xad, 0x11, 0xa6, 0x1b, 0xe9, 0xdf, 0x9b, 0xeb, 0x00, 0x2d, 0x86, 0xab, 0x65, 0x31, 0x53, 0x47, + 0x55, 0x4d, 0x41, 0xb0, 0x86, 0x65, 0xff, 0x78, 0x01, 0x46, 0x65, 0x3c, 0xd9, 0x8e, 0x3f, 0x88, + 0x04, 0x73, 0xa2, 0x64, 0x12, 0x2c, 0xed, 0x1f, 0x25, 0x5c, 0x4f, 0x04, 0xbf, 0x24, 0xed, 0x9f, + 0x04, 0xe0, 0x04, 0x07, 0x3d, 0x0b, 0x23, 0x51, 0x67, 0x93, 0xa1, 0xa7, 0x7c, 0x05, 0x1a, 0xbc, + 0x18, 0x4b, 0x38, 0xfa, 0x1c, 0x4c, 0xf1, 0x7a, 0x61, 0xd0, 0x76, 0xb6, 0xb9, 0xa2, 0x6c, 0x48, + 0x39, 0xef, 0x4d, 0xad, 0xa5, 0x60, 0xc7, 0x87, 0xd5, 0xf3, 0xe9, 0x32, 0xa6, 0x62, 0xed, 0xa2, + 0x62, 0x7f, 0x19, 0x50, 0x77, 0x88, 0x5c, 0xf4, 0x26, 0xb7, 0x06, 0x71, 0x43, 0x95, 0xb5, 0xfc, + 0x4a, 0x3f, 0x5f, 0x33, 0x69, 0xaf, 0xcb, 0x6b, 0x61, 0x55, 0xdf, 0xfe, 0x9b, 0x45, 0x98, 0x4a, + 0x7b, 0x1e, 0xa1, 0x9b, 0x30, 0xcc, 0x0f, 0xd5, 0x5e, 0x49, 0xd1, 0xd3, 0xcf, 0x04, 0x3c, 0x7e, + 0xbf, 0x38, 0x97, 0x45, 0x7d, 0xf4, 0x0e, 0x8c, 0xb6, 0x82, 0xfb, 0xfe, 0x7d, 0x27, 0x6c, 0x2d, + 0xd4, 0x57, 0xc5, 0xba, 0xcc, 0xe4, 0xcd, 0x6a, 0x09, 0x9a, 0xee, 0x03, 0xc5, 0xd4, 0xd0, 0x09, + 0x08, 0xeb, 0xe4, 0xd0, 0x86, 0x9e, 0x2f, 0xbe, 0x87, 0xf9, 0x9e, 0x4a, 0x08, 0xaf, 0x51, 0xce, + 0xcd, 0x14, 0x8f, 0x7e, 0x00, 0xce, 0x45, 0x39, 0xea, 0x9c, 0xbc, 0x88, 0xe9, 0xbd, 0x34, 0x1c, + 0x8b, 0x8f, 0x53, 0xae, 0x39, 0x4b, 0xf1, 0x93, 0xd5, 0x8c, 0xfd, 0x95, 0x73, 0x60, 0xec, 0x46, + 0x23, 0x59, 0x86, 0x75, 0x4a, 0xc9, 0x32, 0x30, 0x94, 0xc9, 0x5e, 0x3b, 0x3e, 0xa8, 0xb9, 0x61, + 0xaf, 0x6c, 0x4b, 0xcb, 0x02, 0xa7, 0x9b, 0xa6, 0x84, 0x60, 0x45, 0x27, 0x3b, 0xa3, 0x49, 0xf1, + 0x43, 0xcc, 0x68, 0x52, 0x3a, 0xc3, 0x8c, 0x26, 0xeb, 0x30, 0xb2, 0xed, 0xc6, 0x98, 0xb4, 0x03, + 0xc1, 0xce, 0x64, 0xae, 0xc3, 0x1b, 0x1c, 0xa5, 0x3b, 0x9e, 0xbe, 0x00, 0x60, 0x49, 0x04, 0xbd, + 0xa9, 0x76, 0xe0, 0x70, 0xbe, 0x34, 0xd0, 0xfd, 0x86, 0x94, 0xb9, 0x07, 0x45, 0x06, 0x93, 0x91, + 0x87, 0xcd, 0x60, 0xb2, 0x22, 0xf3, 0x8e, 0x94, 0xf3, 0x6d, 0x6d, 0x59, 0x5a, 0x91, 0x3e, 0xd9, + 0x46, 0x8c, 0x0c, 0x2d, 0x95, 0xd3, 0xcb, 0xd0, 0xf2, 0xc3, 0x16, 0x5c, 0x68, 
0x67, 0x25, 0x2b, + 0x12, 0x79, 0x43, 0x5e, 0x19, 0x38, 0x1b, 0x93, 0xd1, 0x20, 0x13, 0x0b, 0x33, 0xd1, 0x70, 0x76, + 0x73, 0x74, 0xa0, 0xc3, 0xcd, 0x96, 0x48, 0x36, 0xf2, 0x74, 0x4e, 0xaa, 0x97, 0x1e, 0x09, 0x5e, + 0x36, 0x32, 0x12, 0x8c, 0x7c, 0x3c, 0x2f, 0xc1, 0xc8, 0xc0, 0x69, 0x45, 0x92, 0x24, 0x2f, 0xe3, + 0x1f, 0x38, 0xc9, 0xcb, 0x9b, 0x2a, 0xc9, 0x4b, 0x8f, 0x50, 0x4d, 0x3c, 0x85, 0x4b, 0xdf, 0xd4, + 0x2e, 0x5a, 0x7a, 0x96, 0xc9, 0xd3, 0x49, 0xcf, 0x62, 0x5c, 0x35, 0x3c, 0x43, 0xc8, 0x73, 0x7d, + 0xae, 0x1a, 0x83, 0x6e, 0xef, 0xcb, 0x86, 0xa7, 0xa2, 0x99, 0x7e, 0xa8, 0x54, 0x34, 0xf7, 0xf4, + 0xd4, 0x2e, 0xa8, 0x4f, 0xee, 0x12, 0x8a, 0x34, 0x60, 0x42, 0x97, 0x7b, 0xfa, 0x05, 0x78, 0x2e, + 0x9f, 0xae, 0xba, 0xe7, 0xba, 0xe9, 0x66, 0x5e, 0x81, 0x5d, 0x89, 0x62, 0xce, 0x9f, 0x4d, 0xa2, + 0x98, 0x0b, 0xa7, 0x9e, 0x28, 0xe6, 0xb1, 0x33, 0x48, 0x14, 0xf3, 0xf8, 0x87, 0x9a, 0x28, 0x66, + 0xe6, 0x11, 0x24, 0x8a, 0x59, 0x4f, 0x12, 0xc5, 0x5c, 0xcc, 0x9f, 0x92, 0x0c, 0xe3, 0xc2, 0x9c, + 0xf4, 0x30, 0xf7, 0xa0, 0xd2, 0x96, 0xae, 0xf1, 0x22, 0x96, 0x54, 0x76, 0x86, 0xca, 0x2c, 0xff, + 0x79, 0x3e, 0x25, 0x0a, 0x84, 0x13, 0x52, 0x94, 0x6e, 0x92, 0x2e, 0xe6, 0x89, 0x1e, 0x8a, 0xbf, + 0x2c, 0x95, 0x4a, 0x7e, 0x92, 0x18, 0xfb, 0xaf, 0x17, 0xe0, 0x72, 0xef, 0x75, 0x9d, 0xe8, 0x63, + 0xea, 0xc9, 0xfb, 0x41, 0x4a, 0x1f, 0xc3, 0x85, 0x9c, 0x04, 0x6b, 0xe0, 0xf8, 0x21, 0x37, 0x60, + 0x5a, 0x59, 0x15, 0x7a, 0x6e, 0xf3, 0x40, 0xcb, 0x5c, 0xa9, 0x3c, 0x9c, 0x1a, 0x69, 0x04, 0xdc, + 0x5d, 0x07, 0x2d, 0xc0, 0xa4, 0x51, 0xb8, 0x5a, 0x13, 0xc2, 0x8c, 0x52, 0x00, 0x35, 0x4c, 0x30, + 0x4e, 0xe3, 0xdb, 0x5f, 0xb3, 0xe0, 0xf1, 0x9c, 0x18, 0xea, 0x03, 0x87, 0xc7, 0xd8, 0x82, 0xc9, + 0xb6, 0x59, 0xb5, 0x4f, 0x14, 0x1d, 0x23, 0x52, 0xbb, 0xea, 0x6b, 0x0a, 0x80, 0xd3, 0x44, 0x17, + 0xaf, 0xfd, 0xf6, 0x1f, 0x5c, 0xfe, 0xd8, 0xef, 0xfc, 0xc1, 0xe5, 0x8f, 0xfd, 0xee, 0x1f, 0x5c, + 0xfe, 0xd8, 0x5f, 0x3a, 0xba, 0x6c, 0xfd, 0xf6, 0xd1, 0x65, 0xeb, 0x77, 0x8e, 0x2e, 0x5b, 0xbf, + 0x7b, 0x74, 0xd9, 0xfa, 0xfd, 0xa3, 0xcb, 0xd6, 0x57, 0xff, 0xf0, 0xf2, 0xc7, 0xbe, 0x50, 0xd8, + 0x7f, 0xf1, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x30, 0xb0, 0xa9, 0x66, 0x94, 0xe0, 0x00, 0x00, } diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index a92d7392c6e..fcdba32143d 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -3986,6 +3986,32 @@ message ServiceAccountList { repeated ServiceAccount items = 2; } +// ServiceAccountTokenProjection represents a projected service account token +// volume. This projection can be used to insert a service account token into +// the pods runtime filesystem for use against APIs (Kubernetes API Server or +// otherwise). +message ServiceAccountTokenProjection { + // Audience is the intended audience of the token. A recipient of a token + // must identify itself with an identifier specified in the audience of the + // token, and otherwise should reject the token. The audience defaults to the + // identifier of the apiserver. + // +optional + optional string audience = 1; + + // ExpirationSeconds is the requested duration of validity of the service + // account token. As the token approaches expiration, the kubelet volume + // plugin will proactively rotate the service account token. 
The kubelet will + // start trying to rotate the token if the token is older than 80 percent of + // its time to live or if the token is older than 24 hours. Defaults to 1 hour + // and must be at least 10 minutes. + // +optional + optional int64 expirationSeconds = 2; + + // Path is the path relative to the mount point of the file to project the + // token into. + optional string path = 3; +} + // ServiceList holds a list of services. message ServiceList { // Standard list metadata. @@ -4372,13 +4398,20 @@ message VolumeNodeAffinity { // Projection that may be projected along with other supported volume types message VolumeProjection { // information about the secret data to project + // +optional optional SecretProjection secret = 1; // information about the downwardAPI data to project + // +optional optional DownwardAPIProjection downwardAPI = 2; // information about the configMap data to project + // +optional optional ConfigMapProjection configMap = 3; + + // information about the serviceAccountToken data to project + // +optional + optional ServiceAccountTokenProjection serviceAccountToken = 4; } // Represents the source of a volume to mount. diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 567f37dd22d..3df3461c202 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1988,6 +1988,17 @@ func (ServiceAccountList) SwaggerDoc() map[string]string { return map_ServiceAccountList } +var map_ServiceAccountTokenProjection = map[string]string{ + "": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", + "audience": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", + "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.", + "path": "Path is the path relative to the mount point of the file to project the token into.", +} + +func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { + return map_ServiceAccountTokenProjection +} + var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", "metadata": "Standard list metadata.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", @@ -2172,10 +2183,11 @@ func (VolumeNodeAffinity) SwaggerDoc() map[string]string { } var map_VolumeProjection = map[string]string{ - "": "Projection that may be projected along with other supported volume types", - "secret": "information about the secret data to project", - "downwardAPI": "information about the downwardAPI data to project", - "configMap": "information about the configMap data to project", + "": "Projection that may be projected along with other supported volume types", + "secret": "information about the secret data to project", + "downwardAPI": "information about the downwardAPI data to project", + "configMap": "information about the configMap data to project", + "serviceAccountToken": "information about the serviceAccountToken data to project", } func (VolumeProjection) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go index 0fbcc9a7f31..907dcccea15 100644 --- a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -5159,6 +5159,31 @@ func (in *ServiceAccountList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. +func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { + if in == nil { + return nil + } + out := new(ServiceAccountTokenProjection) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = *in @@ -5568,6 +5593,15 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { (*in).DeepCopyInto(*out) } } + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + if *in == nil { + *out = nil + } else { + *out = new(ServiceAccountTokenProjection) + (*in).DeepCopyInto(*out) + } + } return } From 212a16eccc7de3f90c0b2b86bee11b8b602d5c41 Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Mon, 9 Apr 2018 17:36:08 -0700 Subject: [PATCH 280/307] add utils to patch pod status --- pkg/util/BUILD | 1 + pkg/util/pod/BUILD | 39 +++++++++++++ pkg/util/pod/pod.go | 63 +++++++++++++++++++++ pkg/util/pod/pod_test.go | 116 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 219 insertions(+) create mode 100644 pkg/util/pod/BUILD create mode 100644 pkg/util/pod/pod.go create mode 100644 pkg/util/pod/pod_test.go diff --git a/pkg/util/BUILD b/pkg/util/BUILD index 1622fe7150d..27bee9d6cf6 100644 --- a/pkg/util/BUILD +++ b/pkg/util/BUILD @@ -45,6 +45,7 @@ filegroup( "//pkg/util/nsenter:all-srcs", "//pkg/util/oom:all-srcs", "//pkg/util/parsers:all-srcs", + "//pkg/util/pod:all-srcs", "//pkg/util/pointer:all-srcs", "//pkg/util/procfs:all-srcs", "//pkg/util/reflector/prometheus:all-srcs", diff --git a/pkg/util/pod/BUILD b/pkg/util/pod/BUILD new file mode 100644 index 00000000000..57a081ab7c6 --- /dev/null +++ b/pkg/util/pod/BUILD @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["pod.go"], + importpath = "k8s.io/kubernetes/pkg/util/pod", + visibility = ["//visibility:public"], + deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["pod_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/util/pod/pod.go b/pkg/util/pod/pod.go new file mode 100644 index 00000000000..81d4304fa2b --- /dev/null +++ b/pkg/util/pod/pod.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "encoding/json" + "fmt" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + clientset "k8s.io/client-go/kubernetes" +) + +// PatchPodStatus patches pod status. 
+func PatchPodStatus(c clientset.Interface, namespace, name string, oldPodStatus, newPodStatus v1.PodStatus) (*v1.Pod, []byte, error) { + patchBytes, err := preparePatchBytesforPodStatus(namespace, name, oldPodStatus, newPodStatus) + if err != nil { + return nil, nil, err + } + + updatedPod, err := c.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, patchBytes, "status") + if err != nil { + return nil, nil, fmt.Errorf("failed to patch status %q for pod %q/%q: %v", patchBytes, namespace, name, err) + } + return updatedPod, patchBytes, nil +} + +func preparePatchBytesforPodStatus(namespace, name string, oldPodStatus, newPodStatus v1.PodStatus) ([]byte, error) { + oldData, err := json.Marshal(v1.Pod{ + Status: oldPodStatus, + }) + if err != nil { + return nil, fmt.Errorf("failed to Marshal oldData for pod %q/%q: %v", namespace, name, err) + } + + newData, err := json.Marshal(v1.Pod{ + Status: newPodStatus, + }) + if err != nil { + return nil, fmt.Errorf("failed to Marshal newData for pod %q/%q: %v", namespace, name, err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{}) + if err != nil { + return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for pod %q/%q: %v", namespace, name, err) + } + return patchBytes, nil +} diff --git a/pkg/util/pod/pod_test.go b/pkg/util/pod/pod_test.go new file mode 100644 index 00000000000..af0278fa090 --- /dev/null +++ b/pkg/util/pod/pod_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pod + +import ( + "testing" + + "fmt" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + "reflect" +) + +func TestPatchPodStatus(t *testing.T) { + ns := "ns" + name := "name" + client := &fake.Clientset{} + client.CoreV1().Pods(ns).Create(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }) + + testCases := []struct { + description string + mutate func(input v1.PodStatus) v1.PodStatus + expectedPatchBytes []byte + }{ + { + "no change", + func(input v1.PodStatus) v1.PodStatus { return input }, + []byte(fmt.Sprintf(`{}`)), + }, + { + "message change", + func(input v1.PodStatus) v1.PodStatus { + input.Message = "random message" + return input + }, + []byte(fmt.Sprintf(`{"status":{"message":"random message"}}`)), + }, + { + "pod condition change", + func(input v1.PodStatus) v1.PodStatus { + input.Conditions[0].Status = v1.ConditionFalse + return input + }, + []byte(fmt.Sprintf(`{"status":{"$setElementOrder/conditions":[{"type":"Ready"},{"type":"PodScheduled"}],"conditions":[{"status":"False","type":"Ready"}]}}`)), + }, + { + "additional init container condition", + func(input v1.PodStatus) v1.PodStatus { + input.InitContainerStatuses = []v1.ContainerStatus{ + { + Name: "init-container", + Ready: true, + }, + } + return input + }, + []byte(fmt.Sprintf(`{"status":{"initContainerStatuses":[{"image":"","imageID":"","lastState":{},"name":"init-container","ready":true,"restartCount":0,"state":{}}]}}`)), + }, + } + for _, tc := range testCases { + _, patchBytes, err := PatchPodStatus(client, ns, name, getPodStatus(), tc.mutate(getPodStatus())) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(patchBytes, tc.expectedPatchBytes) { + t.Errorf("for test case %q, expect patchBytes: %q, got: %q\n", tc.description, tc.expectedPatchBytes, patchBytes) + } + } +} + +func getPodStatus() v1.PodStatus { + return v1.PodStatus{ + Phase: v1.PodRunning, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + { + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + }, + }, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "container1", + Ready: true, + }, + { + Name: "container2", + Ready: true, + }, + }, + Message: "Message", + } +} From 9fe2c536249d7ec3010d71d6b610f35754e478b6 Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Mon, 9 Apr 2018 17:36:25 -0700 Subject: [PATCH 281/307] include patch permission for kubelets --- plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go | 2 +- .../authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 912ab05a1b3..284f91a105e 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -119,7 +119,7 @@ func NodeRules() []rbacv1.PolicyRule { rbacv1helpers.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), // Needed for the node to report status of pods it is running. // Use the NodeRestriction admission plugin to limit a node to updating status of pods bound to itself. 
- rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), + rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), // Needed for the node to create pod evictions. // Use the NodeRestriction admission plugin to limit a node to creating evictions for pods bound to itself. rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(), diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 300b66485ca..287440dbaa5 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -1098,6 +1098,7 @@ items: resources: - pods/status verbs: + - patch - update - apiGroups: - "" From 35777c31ea5f7973d8f69b22a81377f5d7ea9c71 Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Mon, 9 Apr 2018 17:37:14 -0700 Subject: [PATCH 282/307] change kubelet status manager to use patch instead of put to update pod status --- pkg/kubelet/status/BUILD | 1 + pkg/kubelet/status/status_manager.go | 57 ++++- pkg/kubelet/status/status_manager_test.go | 247 ++++++++++++++++++++-- 3 files changed, 278 insertions(+), 27 deletions(-) diff --git a/pkg/kubelet/status/BUILD b/pkg/kubelet/status/BUILD index ccc78651f0f..63e32a391d9 100644 --- a/pkg/kubelet/status/BUILD +++ b/pkg/kubelet/status/BUILD @@ -19,6 +19,7 @@ go_library( "//pkg/kubelet/pod:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/format:go_default_library", + "//pkg/util/pod:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index eaf5b9a0512..93eef28918a 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -37,6 +37,7 @@ import ( kubepod "k8s.io/kubernetes/pkg/kubelet/pod" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" + statusutil "k8s.io/kubernetes/pkg/util/pod" ) // A wrapper around v1.PodStatus that includes a version to enforce that stale pod statuses are @@ -121,11 +122,22 @@ func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podD } } -// isStatusEqual returns true if the given pod statuses are equal, false otherwise. +// isPodStatusByKubeletEqual returns true if the given pod statuses are equal when non-kubelet-owned +// pod conditions are excluded. // This method normalizes the status before comparing so as to make sure that meaningless // changes will be ignored. 
-func isStatusEqual(oldStatus, status *v1.PodStatus) bool { - return apiequality.Semantic.DeepEqual(status, oldStatus) +func isPodStatusByKubeletEqual(oldStatus, status *v1.PodStatus) bool { + oldCopy := oldStatus.DeepCopy() + for _, c := range status.Conditions { + if kubetypes.PodConditionByKubelet(c.Type) { + _, oc := podutil.GetPodCondition(oldCopy, c.Type) + if oc == nil || oc.Status != c.Status { + return false + } + } + } + oldCopy.Conditions = status.Conditions + return apiequality.Semantic.DeepEqual(oldCopy, status) } func (m *manager) Start() { @@ -162,6 +174,13 @@ func (m *manager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) { func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) { m.podStatusesLock.Lock() defer m.podStatusesLock.Unlock() + + for _, c := range pod.Status.Conditions { + if !kubetypes.PodConditionByKubelet(c.Type) { + glog.Errorf("Kubelet is trying to update pod condition %q for pod %q. "+ + "But it is not owned by kubelet.", string(c.Type), format.Pod(pod)) + } + } // Make sure we're caching a deep copy. status = *status.DeepCopy() @@ -336,7 +355,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp normalizeStatus(pod, &status) // The intent here is to prevent concurrent updates to a pod's status from // clobbering each other so the phase of a pod progresses monotonically. - if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate { + if isCached && isPodStatusByKubeletEqual(&cachedStatus.status, &status) && !forceUpdate { glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status) return false // No new status. } @@ -469,9 +488,10 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { m.deletePodStatus(uid) return } - pod.Status = status.status - // TODO: handle conflict as a retry, make that easier too. - newPod, err := m.kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) + + oldStatus := pod.Status.DeepCopy() + newPod, patchBytes, err := statusutil.PatchPodStatus(m.kubeClient, pod.Namespace, pod.Name, *oldStatus, mergePodStatus(*oldStatus, status.status)) + glog.V(3).Infof("Patch status for pod %q with %q", format.Pod(pod), patchBytes) if err != nil { glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err) return @@ -546,7 +566,7 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { podStatus := pod.Status.DeepCopy() normalizeStatus(pod, podStatus) - if isStatusEqual(podStatus, &status) { + if isPodStatusByKubeletEqual(podStatus, &status) { // If the status from the source is the same with the cached status, // reconcile is not needed. Just return. return false @@ -559,7 +579,7 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { // We add this function, because apiserver only supports *RFC3339* now, which means that the timestamp returned by // apiserver has no nanosecond information. However, the timestamp returned by metav1.Now() contains nanosecond, -// so when we do comparison between status from apiserver and cached status, isStatusEqual() will always return false. +// so when we do comparison between status from apiserver and cached status, isPodStatusByKubeletEqual() will always return false. // There is related issue #15262 and PR #15263 about this. // In fact, the best way to solve this is to do it on api side. However, for now, we normalize the status locally in // kubelet temporarily. 
@@ -613,3 +633,22 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus { kubetypes.SortInitContainerStatuses(pod, status.InitContainerStatuses) return status } + +// mergePodStatus merges oldPodStatus and newPodStatus where pod conditions +// not owned by kubelet is preserved from oldPodStatus +func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus) v1.PodStatus { + podConditions := []v1.PodCondition{} + for _, c := range oldPodStatus.Conditions { + if !kubetypes.PodConditionByKubelet(c.Type) { + podConditions = append(podConditions, c) + } + } + + for _, c := range newPodStatus.Conditions { + if kubetypes.PodConditionByKubelet(c.Type) { + podConditions = append(podConditions, c) + } + } + newPodStatus.Conditions = podConditions + return newPodStatus +} diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index 84ddec36e3b..03f79b2a1bb 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -19,6 +19,7 @@ package status import ( "fmt" "math/rand" + "reflect" "strconv" "strings" "testing" @@ -48,6 +49,10 @@ import ( // Generate new instance of test pod with the same initial value. func getTestPod() *v1.Pod { return &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, ObjectMeta: metav1.ObjectMeta{ UID: "12345678", Name: "foo", @@ -303,7 +308,7 @@ func TestSyncPod(t *testing.T) { testPod := getTestPod() syncer.kubeClient = fake.NewSimpleClientset(testPod) syncer.SetPodStatus(testPod, getRandomPodStatus()) - verifyActions(t, syncer, []core.Action{getAction(), updateAction()}) + verifyActions(t, syncer, []core.Action{getAction(), patchAction()}) } func TestSyncPodChecksMismatchedUID(t *testing.T) { @@ -357,18 +362,18 @@ func TestSyncPodNoDeadlock(t *testing.T) { t.Logf("Pod not deleted (success case).") ret = getTestPod() m.SetPodStatus(pod, getRandomPodStatus()) - verifyActions(t, m, []core.Action{getAction(), updateAction()}) + verifyActions(t, m, []core.Action{getAction(), patchAction()}) t.Logf("Pod is terminated, but still running.") - pod.DeletionTimestamp = new(metav1.Time) + pod.DeletionTimestamp = &metav1.Time{Time: time.Now()} m.SetPodStatus(pod, getRandomPodStatus()) - verifyActions(t, m, []core.Action{getAction(), updateAction()}) + verifyActions(t, m, []core.Action{getAction(), patchAction()}) t.Logf("Pod is terminated successfully.") pod.Status.ContainerStatuses[0].State.Running = nil pod.Status.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{} m.SetPodStatus(pod, getRandomPodStatus()) - verifyActions(t, m, []core.Action{getAction(), updateAction()}) + verifyActions(t, m, []core.Action{getAction(), patchAction()}) t.Logf("Error case.") ret = nil @@ -392,7 +397,7 @@ func TestStaleUpdates(t *testing.T) { t.Logf("sync batch before syncPods pushes latest status, so we should see three statuses in the channel, but only one update") m.syncBatch() verifyUpdates(t, m, 3) - verifyActions(t, m, []core.Action{getAction(), updateAction()}) + verifyActions(t, m, []core.Action{getAction(), patchAction()}) t.Logf("Nothing left in the channel to sync") verifyActions(t, m, []core.Action{}) @@ -406,7 +411,7 @@ func TestStaleUpdates(t *testing.T) { m.SetPodStatus(pod, status) m.syncBatch() - verifyActions(t, m, []core.Action{getAction(), updateAction()}) + verifyActions(t, m, []core.Action{getAction(), patchAction()}) t.Logf("Nothing stuck in the pipe.") verifyUpdates(t, m, 0) @@ -443,10 +448,27 @@ func TestStatusEquality(t 
*testing.T) { } normalizeStatus(&pod, &oldPodStatus) normalizeStatus(&pod, &podStatus) - if !isStatusEqual(&oldPodStatus, &podStatus) { + if !isPodStatusByKubeletEqual(&oldPodStatus, &podStatus) { t.Fatalf("Order of container statuses should not affect normalized equality.") } } + + oldPodStatus := podStatus + podStatus.Conditions = append(podStatus.Conditions, v1.PodCondition{ + Type: v1.PodConditionType("www.example.com/feature"), + Status: v1.ConditionTrue, + }) + + oldPodStatus.Conditions = append(podStatus.Conditions, v1.PodCondition{ + Type: v1.PodConditionType("www.example.com/feature"), + Status: v1.ConditionFalse, + }) + + normalizeStatus(&pod, &oldPodStatus) + normalizeStatus(&pod, &podStatus) + if !isPodStatusByKubeletEqual(&oldPodStatus, &podStatus) { + t.Fatalf("Differences in pod condition not owned by kubelet should not affect normalized equality.") + } } func TestStatusNormalizationEnforcesMaxBytes(t *testing.T) { @@ -507,7 +529,7 @@ func TestStaticPod(t *testing.T) { t.Logf("Should be able to get the static pod status from status manager") retrievedStatus := expectPodStatus(t, m, staticPod) normalizeStatus(staticPod, &status) - assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus) + assert.True(t, isPodStatusByKubeletEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus) t.Logf("Should not sync pod in syncBatch because there is no corresponding mirror pod for the static pod.") m.syncBatch() @@ -520,10 +542,10 @@ func TestStaticPod(t *testing.T) { t.Logf("Should be able to get the mirror pod status from status manager") retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID) - assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus) + assert.True(t, isPodStatusByKubeletEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus) t.Logf("Should sync pod because the corresponding mirror pod is created") - verifyActions(t, m, []core.Action{getAction(), updateAction()}) + verifyActions(t, m, []core.Action{getAction(), patchAction()}) t.Logf("syncBatch should not sync any pods because nothing is changed.") m.testSyncBatch() @@ -741,7 +763,7 @@ func TestReconcilePodStatus(t *testing.T) { t.Errorf("Pod status is different, a reconciliation is needed") } syncer.syncBatch() - verifyActions(t, syncer, []core.Action{getAction(), updateAction()}) + verifyActions(t, syncer, []core.Action{getAction(), patchAction()}) } func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus { @@ -755,18 +777,16 @@ func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus { func TestDeletePods(t *testing.T) { pod := getTestPod() t.Logf("Set the deletion timestamp.") - pod.DeletionTimestamp = new(metav1.Time) + pod.DeletionTimestamp = &metav1.Time{Time: time.Now()} client := fake.NewSimpleClientset(pod) m := newTestManager(client) m.podManager.AddPod(pod) - status := getRandomPodStatus() now := metav1.Now() status.StartTime = &now m.SetPodStatus(pod, status) - t.Logf("Expect to see a delete action.") - verifyActions(t, m, []core.Action{getAction(), updateAction(), deleteAction()}) + verifyActions(t, m, []core.Action{getAction(), patchAction(), deleteAction()}) } func TestDoNotDeleteMirrorPods(t *testing.T) { @@ -779,7 +799,7 @@ func TestDoNotDeleteMirrorPods(t *testing.T) { kubetypes.ConfigMirrorAnnotationKey: "mirror", } t.Logf("Set the deletion timestamp.") - mirrorPod.DeletionTimestamp = new(metav1.Time) + 
mirrorPod.DeletionTimestamp = &metav1.Time{Time: time.Now()} client := fake.NewSimpleClientset(mirrorPod) m := newTestManager(client) m.podManager.AddPod(staticPod) @@ -795,7 +815,7 @@ func TestDoNotDeleteMirrorPods(t *testing.T) { m.SetPodStatus(staticPod, status) t.Logf("Expect not to see a delete action.") - verifyActions(t, m, []core.Action{getAction(), updateAction()}) + verifyActions(t, m, []core.Action{getAction(), patchAction()}) } func TestUpdateLastTransitionTime(t *testing.T) { @@ -867,6 +887,197 @@ func updateAction() core.UpdateAction { return core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}} } +func patchAction() core.PatchAction { + return core.PatchActionImpl{ActionImpl: core.ActionImpl{Verb: "patch", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}} +} + func deleteAction() core.DeleteAction { return core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: schema.GroupVersionResource{Resource: "pods"}}} } + +func TestMergePodStatus(t *testing.T) { + useCases := []struct { + desc string + oldPodStatus func(input v1.PodStatus) v1.PodStatus + newPodStatus func(input v1.PodStatus) v1.PodStatus + expectPodStatus v1.PodStatus + }{ + { + "no change", + func(input v1.PodStatus) v1.PodStatus { return input }, + func(input v1.PodStatus) v1.PodStatus { return input }, + getPodStatus(), + }, + { + "readiness changes", + func(input v1.PodStatus) v1.PodStatus { return input }, + func(input v1.PodStatus) v1.PodStatus { + input.Conditions[0].Status = v1.ConditionFalse + return input + }, + v1.PodStatus{ + Phase: v1.PodRunning, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionFalse, + }, + { + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + }, + }, + Message: "Message", + }, + }, + { + "additional pod condition", + func(input v1.PodStatus) v1.PodStatus { + input.Conditions = append(input.Conditions, v1.PodCondition{ + Type: v1.PodConditionType("example.com/feature"), + Status: v1.ConditionTrue, + }) + return input + }, + func(input v1.PodStatus) v1.PodStatus { return input }, + v1.PodStatus{ + Phase: v1.PodRunning, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + { + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + }, + { + Type: v1.PodConditionType("example.com/feature"), + Status: v1.ConditionTrue, + }, + }, + Message: "Message", + }, + }, + { + "additional pod condition and readiness changes", + func(input v1.PodStatus) v1.PodStatus { + input.Conditions = append(input.Conditions, v1.PodCondition{ + Type: v1.PodConditionType("example.com/feature"), + Status: v1.ConditionTrue, + }) + return input + }, + func(input v1.PodStatus) v1.PodStatus { + input.Conditions[0].Status = v1.ConditionFalse + return input + }, + v1.PodStatus{ + Phase: v1.PodRunning, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionFalse, + }, + { + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + }, + { + Type: v1.PodConditionType("example.com/feature"), + Status: v1.ConditionTrue, + }, + }, + Message: "Message", + }, + }, + { + "additional pod condition changes", + func(input v1.PodStatus) v1.PodStatus { + input.Conditions = append(input.Conditions, v1.PodCondition{ + Type: v1.PodConditionType("example.com/feature"), + Status: v1.ConditionTrue, + }) + return input + }, + func(input v1.PodStatus) v1.PodStatus { + input.Conditions = append(input.Conditions, 
v1.PodCondition{ + Type: v1.PodConditionType("example.com/feature"), + Status: v1.ConditionFalse, + }) + return input + }, + v1.PodStatus{ + Phase: v1.PodRunning, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + { + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + }, + { + Type: v1.PodConditionType("example.com/feature"), + Status: v1.ConditionTrue, + }, + }, + Message: "Message", + }, + }, + } + + for _, tc := range useCases { + output := mergePodStatus(tc.oldPodStatus(getPodStatus()), tc.newPodStatus(getPodStatus())) + if !conditionsEqual(output.Conditions, tc.expectPodStatus.Conditions) || !statusEqual(output, tc.expectPodStatus) { + t.Errorf("test case %q failed, expect: %+v, got %+v", tc.desc, tc.expectPodStatus, output) + } + } + +} + +func statusEqual(left, right v1.PodStatus) bool { + left.Conditions = nil + right.Conditions = nil + return reflect.DeepEqual(left, right) +} + +func conditionsEqual(left, right []v1.PodCondition) bool { + if len(left) != len(right) { + return false + } + + for _, l := range left { + found := false + for _, r := range right { + if l.Type == r.Type { + found = true + if l.Status != r.Status { + return false + } + } + } + if !found { + return false + } + } + return true +} + +func getPodStatus() v1.PodStatus { + return v1.PodStatus{ + Phase: v1.PodRunning, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + { + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + }, + }, + Message: "Message", + } +} From 8b3b4e4deabe4cf922eee752df2fad189b2c1471 Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Wed, 11 Apr 2018 11:37:30 -0700 Subject: [PATCH 283/307] add Patch support in fake kubeClient --- staging/src/k8s.io/client-go/testing/BUILD | 2 ++ .../src/k8s.io/client-go/testing/fixture.go | 31 ++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/staging/src/k8s.io/client-go/testing/BUILD b/staging/src/k8s.io/client-go/testing/BUILD index d6821abfb37..5b8684c2653 100644 --- a/staging/src/k8s.io/client-go/testing/BUILD +++ b/staging/src/k8s.io/client-go/testing/BUILD @@ -22,6 +22,8 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", ], diff --git a/staging/src/k8s.io/client-go/testing/fixture.go b/staging/src/k8s.io/client-go/testing/fixture.go index 13192f92d16..00c4c49fce4 100644 --- a/staging/src/k8s.io/client-go/testing/fixture.go +++ b/staging/src/k8s.io/client-go/testing/fixture.go @@ -25,6 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" ) @@ -72,7 +74,6 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { return func(action Action) (bool, runtime.Object, error) { ns := action.GetNamespace() gvr := action.GetResource() - // Here and below we need to switch on implementation types, // not on interfaces, as some interfaces are identical // (e.g. 
UpdateAction and CreateAction), so if we use them, @@ -125,6 +126,34 @@ func ObjectReaction(tracker ObjectTracker) ReactionFunc { } return true, nil, nil + case PatchActionImpl: + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + // object is not registered + return false, nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return true, nil, err + } + // Only supports strategic merge patch + // TODO: Add support for other Patch types + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + + if err = tracker.Update(gvr, obj, ns); err != nil { + return true, nil, err + } + + return true, obj, nil + default: return false, nil, fmt.Errorf("no reaction implemented for %s", action) } From cb9ac047773dddf8a55c277f682d9dbfea04b55d Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Wed, 11 Apr 2018 17:04:19 -0700 Subject: [PATCH 284/307] fix unit tests using Patch in fake client --- pkg/controller/statefulset/stateful_set_test.go | 3 ++- pkg/kubelet/kubelet_node_status_test.go | 3 +-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/controller/statefulset/stateful_set_test.go b/pkg/controller/statefulset/stateful_set_test.go index 3893753d1df..7d9a7cc1629 100644 --- a/pkg/controller/statefulset/stateful_set_test.go +++ b/pkg/controller/statefulset/stateful_set_test.go @@ -496,7 +496,6 @@ func TestStatefulSetControllerGetStatefulSetsForPod(t *testing.T) { func TestGetPodsForStatefulSetAdopt(t *testing.T) { set := newStatefulSet(5) - ssc, spc := newFakeStatefulSetController(set) pod1 := newStatefulSetPod(set, 1) // pod2 is an orphan with matching labels and name. pod2 := newStatefulSetPod(set, 2) @@ -510,6 +509,8 @@ func TestGetPodsForStatefulSetAdopt(t *testing.T) { pod4.OwnerReferences = nil pod4.Name = "x" + pod4.Name + ssc, spc := newFakeStatefulSetController(set, pod1, pod2, pod3, pod4) + spc.podsIndexer.Add(pod1) spc.podsIndexer.Add(pod2) spc.podsIndexer.Add(pod3) diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index a657d3f89ba..a9ffe58605e 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -879,7 +879,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { require.True(t, actions[1].Matches("patch", "nodes")) require.Equal(t, actions[1].GetSubresource(), "status") - updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch()) + updatedNode, err := kubeClient.CoreV1().Nodes().Get(testKubeletHostname, metav1.GetOptions{}) require.NoError(t, err, "can't apply node status patch") for i, cond := range updatedNode.Status.Conditions { @@ -891,7 +891,6 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { // Version skew workaround. 
See: https://github.com/kubernetes/kubernetes/issues/16961 lastIndex := len(updatedNode.Status.Conditions) - 1 - assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition") assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message) From 78b86333c10af0927c9b9c3a21638d9588fdf159 Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Wed, 11 Apr 2018 17:44:19 -0700 Subject: [PATCH 285/307] make update --- pkg/kubelet/types/BUILD | 2 ++ staging/src/k8s.io/metrics/Godeps/Godeps.json | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/pkg/kubelet/types/BUILD b/pkg/kubelet/types/BUILD index c362d096de2..b1d5ffd1ae5 100644 --- a/pkg/kubelet/types/BUILD +++ b/pkg/kubelet/types/BUILD @@ -12,6 +12,7 @@ go_library( "constants.go", "doc.go", "labels.go", + "pod_status.go", "pod_update.go", "types.go", ], @@ -29,6 +30,7 @@ go_test( name = "go_default_test", srcs = [ "labels_test.go", + "pod_status_test.go", "pod_update_test.go", "types_test.go", ], diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index 9415da5b540..250fe79ac28 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -6,6 +6,10 @@ "./..." ], "Deps": [ + { + "ImportPath": "github.com/davecgh/go-spew/spew", + "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8" + }, { "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee" @@ -362,6 +366,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/util/json", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/util/net", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -374,6 +382,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/util/sets", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -398,6 +410,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/watch", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -465,6 +481,10 @@ { "ImportPath": "k8s.io/client-go/util/integer", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", + "Rev": "86e28c192d2743f0232b9bc5f0a531568ef9f2a5" } ] } From 85e0d05ac76b943acb208ea70c6b806649706ea5 Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Mon, 21 May 2018 15:22:43 -0700 Subject: [PATCH 286/307] add utils for pod condition --- pkg/kubelet/types/pod_status.go | 39 +++++++++++++++++++++++ pkg/kubelet/types/pod_status_test.go | 47 ++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 pkg/kubelet/types/pod_status.go create mode 100644 pkg/kubelet/types/pod_status_test.go diff --git a/pkg/kubelet/types/pod_status.go b/pkg/kubelet/types/pod_status.go new file mode 100644 index 00000000000..a7756382b3c --- /dev/null +++ b/pkg/kubelet/types/pod_status.go @@ -0,0 +1,39 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "k8s.io/api/core/v1" +) + +// PodConditionsByKubelet is the list of pod conditions owned by kubelet +var PodConditionsByKubelet = []v1.PodConditionType{ + v1.PodScheduled, + v1.PodReady, + v1.PodInitialized, + v1.PodReasonUnschedulable, +} + +// PodConditionByKubelet returns if the pod condition type is owned by kubelet +func PodConditionByKubelet(conditionType v1.PodConditionType) bool { + for _, c := range PodConditionsByKubelet { + if c == conditionType { + return true + } + } + return false +} diff --git a/pkg/kubelet/types/pod_status_test.go b/pkg/kubelet/types/pod_status_test.go new file mode 100644 index 00000000000..61c837748d3 --- /dev/null +++ b/pkg/kubelet/types/pod_status_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import ( + "k8s.io/api/core/v1" + "testing" +) + +func TestPodConditionByKubelet(t *testing.T) { + trueCases := []v1.PodConditionType{ + v1.PodScheduled, + v1.PodReady, + v1.PodInitialized, + v1.PodReasonUnschedulable, + } + + for _, tc := range trueCases { + if !PodConditionByKubelet(tc) { + t.Errorf("Expect %q to be condition owned by kubelet.", tc) + } + } + + falseCases := []v1.PodConditionType{ + v1.PodConditionType("abcd"), + } + + for _, tc := range falseCases { + if PodConditionByKubelet(tc) { + t.Errorf("Expect %q NOT to be condition owned by kubelet.", tc) + } + } +} From fdc58eb8915cd48c572e7a9e15e2d1a03d4e3e70 Mon Sep 17 00:00:00 2001 From: Zihong Zheng Date: Tue, 29 May 2018 20:16:02 -0700 Subject: [PATCH 287/307] [gce provider] More wrappers for alpha/beta backend service --- pkg/cloudprovider/providers/gce/cloud/gen.go | 53 +++++++++++++++++-- .../providers/gce/cloud/meta/meta.go | 17 +++--- .../providers/gce/gce_backendservice.go | 14 +++++ 3 files changed, 72 insertions(+), 12 deletions(-) diff --git a/pkg/cloudprovider/providers/gce/cloud/gen.go b/pkg/cloudprovider/providers/gce/cloud/gen.go index 9b08d3c8d3a..f84a4bda326 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -3420,6 +3420,7 @@ type AlphaBackendServices interface { List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error Delete(ctx context.Context, key *meta.Key) error + SetSecurityPolicy(context.Context, *meta.Key, *alpha.SecurityPolicyReference) error Update(context.Context, *meta.Key, *alpha.BackendService) error } @@ -3456,11 +3457,12 @@ type MockAlphaBackendServices struct { // order to add your own logic. Return (true, _, _) to prevent the normal // execution flow of the mock. Return (false, nil, nil) to continue with // normal mock behavior/ after the hook function executes. - GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, *alpha.BackendService, error) - ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaBackendServices) (bool, []*alpha.BackendService, error) - InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *MockAlphaBackendServices) (bool, error) - DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, error) - UpdateHook func(context.Context, *meta.Key, *alpha.BackendService, *MockAlphaBackendServices) error + GetHook func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, *alpha.BackendService, error) + ListHook func(ctx context.Context, fl *filter.F, m *MockAlphaBackendServices) (bool, []*alpha.BackendService, error) + InsertHook func(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *MockAlphaBackendServices) (bool, error) + DeleteHook func(ctx context.Context, key *meta.Key, m *MockAlphaBackendServices) (bool, error) + SetSecurityPolicyHook func(context.Context, *meta.Key, *alpha.SecurityPolicyReference, *MockAlphaBackendServices) error + UpdateHook func(context.Context, *meta.Key, *alpha.BackendService, *MockAlphaBackendServices) error // X is extra state that can be used as part of the mock. Generated code // will not use this field. @@ -3606,6 +3608,14 @@ func (m *MockAlphaBackendServices) Obj(o *alpha.BackendService) *MockBackendServ return &MockBackendServicesObj{o} } +// SetSecurityPolicy is a mock for the corresponding method. 
+func (m *MockAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { + if m.SetSecurityPolicyHook != nil { + return m.SetSecurityPolicyHook(ctx, key, arg0, m) + } + return nil +} + // Update is a mock for the corresponding method. func (m *MockAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { if m.UpdateHook != nil { @@ -3756,6 +3766,39 @@ func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) err return err } +// SetSecurityPolicy is a method on GCEAlphaBackendServices. +func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { + glog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) + + if !key.Valid() { + glog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + return fmt.Errorf("invalid GCE key (%+v)", key) + } + projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") + rk := &RateLimitKey{ + ProjectID: projectID, + Operation: "SetSecurityPolicy", + Version: meta.Version("alpha"), + Service: "BackendServices", + } + glog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + + if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { + glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + return err + } + call := g.s.Alpha.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) + call.Context(ctx) + op, err := call.Do() + if err != nil { + glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + return err + } + err = g.s.WaitForCompletion(ctx, op) + glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + return err +} + // Update is a method on GCEAlphaBackendServices. 
func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { glog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) diff --git a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go index 7c1139b9400..0a2f7253915 100644 --- a/pkg/cloudprovider/providers/gce/cloud/meta/meta.go +++ b/pkg/cloudprovider/providers/gce/cloud/meta/meta.go @@ -119,13 +119,16 @@ var AllServices = []*ServiceInfo{ }, }, { - Object: "BackendService", - Service: "BackendServices", - Resource: "backendServices", - version: VersionAlpha, - keyType: Global, - serviceType: reflect.TypeOf(&alpha.BackendServicesService{}), - additionalMethods: []string{"Update"}, + Object: "BackendService", + Service: "BackendServices", + Resource: "backendServices", + version: VersionAlpha, + keyType: Global, + serviceType: reflect.TypeOf(&alpha.BackendServicesService{}), + additionalMethods: []string{ + "Update", + "SetSecurityPolicy", + }, }, { Object: "BackendService", diff --git a/pkg/cloudprovider/providers/gce/gce_backendservice.go b/pkg/cloudprovider/providers/gce/gce_backendservice.go index a059b2c9bae..d63728038ec 100644 --- a/pkg/cloudprovider/providers/gce/gce_backendservice.go +++ b/pkg/cloudprovider/providers/gce/gce_backendservice.go @@ -42,6 +42,13 @@ func (gce *GCECloud) GetGlobalBackendService(name string) (*compute.BackendServi return v, mc.Observe(err) } +// GetBetaGlobalBackendService retrieves beta backend by name. +func (gce *GCECloud) GetBetaGlobalBackendService(name string) (*computebeta.BackendService, error) { + mc := newBackendServiceMetricContextWithVersion("get", "", computeBetaVersion) + v, err := gce.c.BetaBackendServices().Get(context.Background(), meta.GlobalKey(name)) + return v, mc.Observe(err) +} + // GetAlphaGlobalBackendService retrieves alpha backend by name. func (gce *GCECloud) GetAlphaGlobalBackendService(name string) (*computealpha.BackendService, error) { mc := newBackendServiceMetricContextWithVersion("get", "", computeAlphaVersion) @@ -147,3 +154,10 @@ func (gce *GCECloud) SetSecurityPolicyForBetaGlobalBackendService(backendService mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeBetaVersion) return mc.Observe(gce.c.BetaBackendServices().SetSecurityPolicy(context.Background(), meta.GlobalKey(backendServiceName), securityPolicyReference)) } + +// SetSecurityPolicyForAlphaGlobalBackendService sets the given +// SecurityPolicyReference for the BackendService identified by the given name. +func (gce *GCECloud) SetSecurityPolicyForAlphaGlobalBackendService(backendServiceName string, securityPolicyReference *computealpha.SecurityPolicyReference) error { + mc := newBackendServiceMetricContextWithVersion("set_security_policy", "", computeAlphaVersion) + return mc.Observe(gce.c.AlphaBackendServices().SetSecurityPolicy(context.Background(), meta.GlobalKey(backendServiceName), securityPolicyReference)) +} From 0539086ff3f4a93c2283b1328e4a8fe64bfe174d Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Tue, 22 May 2018 15:56:02 -0700 Subject: [PATCH 288/307] add a flag to control the cap on images reported in node status While I normally try to avoid adding flags, this is a short term scalability fix for v1.11, and there are other long-term solutions in the works, so we shouldn't commit to this in the v1beta1 Kubelet config. Flags are our escape hatch. 
--- cmd/kubelet/app/options/options.go | 9 + cmd/kubelet/app/server.go | 3 + pkg/kubelet/kubelet.go | 9 +- pkg/kubelet/kubelet_node_status.go | 8 +- pkg/kubelet/kubelet_node_status_test.go | 347 +++++++++++++----------- 5 files changed, 215 insertions(+), 161 deletions(-) diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index 82d319e21d1..7b5a5a3554c 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -169,6 +169,9 @@ type KubeletFlags struct { // bootstrapCheckpointPath is the path to the directory containing pod checkpoints to // run on restore BootstrapCheckpointPath string + // NodeStatusMaxImages caps the number of images reported in Node.Status.Images. + // This is an experimental, short-term flag to help with node scalability. + NodeStatusMaxImages int32 // DEPRECATED FLAGS // minimumGCAge is the minimum age for a finished container before it is @@ -244,6 +247,8 @@ func NewKubeletFlags() *KubeletFlags { CAdvisorPort: 0, // TODO(#58010:v1.13.0): Remove --allow-privileged, it is deprecated AllowPrivileged: true, + // prior to the introduction of this flag, there was a hardcoded cap of 50 images + NodeStatusMaxImages: 50, } } @@ -255,6 +260,9 @@ func ValidateKubeletFlags(f *KubeletFlags) error { if f.CAdvisorPort != 0 && utilvalidation.IsValidPortNum(int(f.CAdvisorPort)) != nil { return fmt.Errorf("invalid configuration: CAdvisorPort (--cadvisor-port) %v must be between 0 and 65535, inclusive", f.CAdvisorPort) } + if f.NodeStatusMaxImages < -1 { + return fmt.Errorf("invalid configuration: NodeStatusMaxImages (--node-status-max-images) must be -1 or greater") + } return nil } @@ -392,6 +400,7 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) { fs.BoolVar(&f.ExitOnLockContention, "exit-on-lock-contention", f.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.") fs.StringVar(&f.SeccompProfileRoot, "seccomp-profile-root", f.SeccompProfileRoot, " Directory path for seccomp profiles.") fs.StringVar(&f.BootstrapCheckpointPath, "bootstrap-checkpoint-path", f.BootstrapCheckpointPath, " Path to to the directory where the checkpoints are stored") + fs.Int32Var(&f.NodeStatusMaxImages, "node-status-max-images", f.NodeStatusMaxImages, " The maximum number of images to report in Node.Status.Images. If -1 is specified, no cap will be applied. Default: 50") // DEPRECATED FLAGS fs.StringVar(&f.BootstrapKubeconfig, "experimental-bootstrap-kubeconfig", f.BootstrapKubeconfig, "") diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index ca13dc51d30..cfa23f2f7f4 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -958,6 +958,7 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *kubeletconfiginternal. 
kubeFlags.NodeLabels, kubeFlags.SeccompProfileRoot, kubeFlags.BootstrapCheckpointPath, + kubeFlags.NodeStatusMaxImages, stopCh) if err != nil { return fmt.Errorf("failed to create kubelet: %v", err) @@ -1043,6 +1044,7 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, nodeLabels map[string]string, seccompProfileRoot string, bootstrapCheckpointPath string, + nodeStatusMaxImages int32, stopCh <-chan struct{}) (k kubelet.Bootstrap, err error) { // TODO: block until all sources have delivered at least one update to the channel, or break the sync loop // up into "per source" synchronizations @@ -1077,6 +1079,7 @@ func CreateAndInitKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, nodeLabels, seccompProfileRoot, bootstrapCheckpointPath, + nodeStatusMaxImages, stopCh) if err != nil { return nil, err diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 9194c1bbb9c..a5459480eeb 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -219,7 +219,9 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration, keepTerminatedPodVolumes bool, nodeLabels map[string]string, seccompProfileRoot string, - bootstrapCheckpointPath string) (Bootstrap, error) + bootstrapCheckpointPath string, + nodeStatusMaxImages int32, + stopCh <-chan struct{}) (Bootstrap, error) // Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping @@ -345,6 +347,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, nodeLabels map[string]string, seccompProfileRoot string, bootstrapCheckpointPath string, + nodeStatusMaxImages int32, stopCh <-chan struct{}) (*Kubelet, error) { if rootDirectory == "" { return nil, fmt.Errorf("invalid root directory %q", rootDirectory) @@ -535,6 +538,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, iptablesDropBit: int(kubeCfg.IPTablesDropBit), experimentalHostUserNamespaceDefaulting: utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalHostUserNamespaceDefaultingGate), keepTerminatedPodVolumes: keepTerminatedPodVolumes, + nodeStatusMaxImages: nodeStatusMaxImages, } if klet.cloud != nil { @@ -1157,6 +1161,9 @@ type Kubelet struct { // such as device plugins or CSI plugins. It discovers plugins by monitoring inotify events under the // directory returned by kubelet.getPluginsDir() pluginWatcher pluginwatcher.Watcher + + // This flag sets a maximum number of images to report in the node status. + nodeStatusMaxImages int32 } func allGlobalUnicastIPs() ([]net.IP, error) { diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 9a1e6214b6a..47400d3baf5 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -49,9 +49,6 @@ import ( ) const ( - // maxImagesInNodeStatus is the number of max images we store in image status. - maxImagesInNodeStatus = 50 - // maxNamesPerImageInNodeStatus is max number of names per image stored in // the node status. maxNamesPerImageInNodeStatus = 5 @@ -721,8 +718,9 @@ func (kl *Kubelet) setNodeStatusImages(node *v1.Node) { return } // sort the images from max to min, and only set top N images into the node status. 
- if maxImagesInNodeStatus < len(containerImages) { - containerImages = containerImages[0:maxImagesInNodeStatus] + if int(kl.nodeStatusMaxImages) > -1 && + int(kl.nodeStatusMaxImages) < len(containerImages) { + containerImages = containerImages[0:kl.nodeStatusMaxImages] } for _, image := range containerImages { diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index a657d3f89ba..f220bd669f3 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -60,8 +60,8 @@ const ( maxImageTagsForTest = 20 ) -// generateTestingImageList generate randomly generated image list and corresponding expectedImageList. -func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) { +// generateTestingImageLists generate randomly generated image list and corresponding expectedImageList. +func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) { // imageList is randomly generated image list var imageList []kubecontainer.Image for ; count > 0; count-- { @@ -73,7 +73,12 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerI imageList = append(imageList, imageItem) } - // expectedImageList is generated by imageList according to size and maxImagesInNodeStatus + expectedImageList := makeExpectedImageList(imageList, maxImages) + return imageList, expectedImageList +} + +func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage { + // expectedImageList is generated by imageList according to size and maxImages // 1. sort the imageList by size sort.Sort(sliceutils.ByImageSize(imageList)) // 2. convert sorted imageList to v1.ContainerImage list @@ -86,8 +91,11 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerI expectedImageList = append(expectedImageList, apiImage) } - // 3. only returns the top maxImagesInNodeStatus images in expectedImageList - return imageList, expectedImageList[0:maxImagesInNodeStatus] + // 3. 
only returns the top maxImages images in expectedImageList + if maxImages == -1 { // -1 means no limit + return expectedImageList + } + return expectedImageList[0:maxImages] } func generateImageTags() []string { @@ -299,165 +307,190 @@ func sortNodeAddresses(addrs sortableNodeAddress) { } func TestUpdateNewNodeStatus(t *testing.T) { - // generate one more than maxImagesInNodeStatus in inputImageList - inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1) - testKubelet := newTestKubeletWithImageList( - t, inputImageList, false /* controllerAttachDetachEnabled */) - defer testKubelet.Cleanup() - kubelet := testKubelet.kubelet - kubelet.kubeClient = nil // ensure only the heartbeat client is used - kubelet.containerManager = &localCM{ - ContainerManager: cm.NewStubContainerManager(), - allocatableReservation: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), - v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI), + cases := []struct { + desc string + nodeStatusMaxImages int32 + }{ + { + desc: "5 image limit", + nodeStatusMaxImages: 5, }, - capacity: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), - v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), - }, - } - kubeClient := testKubelet.fakeKubeClient - existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} - kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain - machineInfo := &cadvisorapi.MachineInfo{ - MachineID: "123", - SystemUUID: "abc", - BootID: "1b3", - NumCores: 2, - MemoryCapacity: 10E9, // 10G - } - mockCadvisor := testKubelet.fakeCadvisor - mockCadvisor.On("Start").Return(nil) - mockCadvisor.On("MachineInfo").Return(machineInfo, nil) - versionInfo := &cadvisorapi.VersionInfo{ - KernelVersion: "3.16.0-0.bpo.4-amd64", - ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", - } - mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ - Usage: 400, - Capacity: 5000, - Available: 600, - }, nil) - mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ - Usage: 400, - Capacity: 5000, - Available: 600, - }, nil) - mockCadvisor.On("VersionInfo").Return(versionInfo, nil) - maxAge := 0 * time.Second - options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge} - mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil) - kubelet.machineInfo = machineInfo - - expectedNode := &v1.Node{ - ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, - Spec: v1.NodeSpec{}, - Status: v1.NodeStatus{ - Conditions: []v1.NodeCondition{ - { - Type: v1.NodeOutOfDisk, - Status: v1.ConditionFalse, - Reason: "KubeletHasSufficientDisk", - Message: fmt.Sprintf("kubelet has sufficient disk space available"), - LastHeartbeatTime: metav1.Time{}, - LastTransitionTime: metav1.Time{}, - }, - { - Type: v1.NodeMemoryPressure, - Status: v1.ConditionFalse, - Reason: "KubeletHasSufficientMemory", - Message: fmt.Sprintf("kubelet has sufficient memory available"), - LastHeartbeatTime: metav1.Time{}, - LastTransitionTime: metav1.Time{}, - }, - { - Type: v1.NodeDiskPressure, - Status: v1.ConditionFalse, - Reason: "KubeletHasNoDiskPressure", - Message: fmt.Sprintf("kubelet has no disk 
pressure"), - LastHeartbeatTime: metav1.Time{}, - LastTransitionTime: metav1.Time{}, - }, - { - Type: v1.NodePIDPressure, - Status: v1.ConditionFalse, - Reason: "KubeletHasSufficientPID", - Message: fmt.Sprintf("kubelet has sufficient PID available"), - LastHeartbeatTime: metav1.Time{}, - LastTransitionTime: metav1.Time{}, - }, - { - Type: v1.NodeReady, - Status: v1.ConditionTrue, - Reason: "KubeletReady", - Message: fmt.Sprintf("kubelet is posting ready status"), - LastHeartbeatTime: metav1.Time{}, - LastTransitionTime: metav1.Time{}, - }, - }, - NodeInfo: v1.NodeSystemInfo{ - MachineID: "123", - SystemUUID: "abc", - BootID: "1b3", - KernelVersion: "3.16.0-0.bpo.4-amd64", - OSImage: "Debian GNU/Linux 7 (wheezy)", - OperatingSystem: goruntime.GOOS, - Architecture: goruntime.GOARCH, - ContainerRuntimeVersion: "test://1.5.0", - KubeletVersion: version.Get().String(), - KubeProxyVersion: version.Get().String(), - }, - Capacity: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), - v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), - v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), - }, - Allocatable: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), - v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), - v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI), - }, - Addresses: []v1.NodeAddress{ - {Type: v1.NodeInternalIP, Address: "127.0.0.1"}, - {Type: v1.NodeHostName, Address: testKubeletHostname}, - }, - Images: expectedImageList, + { + desc: "no image limit", + nodeStatusMaxImages: -1, }, } - kubelet.updateRuntimeUp() - assert.NoError(t, kubelet.updateNodeStatus()) - actions := kubeClient.Actions() - require.Len(t, actions, 2) - require.True(t, actions[1].Matches("patch", "nodes")) - require.Equal(t, actions[1].GetSubresource(), "status") + for _, tc := range cases { + t.Run(tc.desc, func(t *testing.T) { + // generate one more in inputImageList than we configure the Kubelet to report, + // or 5 images if unlimited + numTestImages := int(tc.nodeStatusMaxImages) + 1 + if tc.nodeStatusMaxImages == -1 { + numTestImages = 5 + } + inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages)) + testKubelet := newTestKubeletWithImageList( + t, inputImageList, false /* controllerAttachDetachEnabled */) + defer testKubelet.Cleanup() + kubelet := testKubelet.kubelet + kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages + kubelet.kubeClient = nil // ensure only the heartbeat client is used + kubelet.containerManager = &localCM{ + ContainerManager: cm.NewStubContainerManager(), + allocatableReservation: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI), + }, + capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), + }, + } + kubeClient := testKubelet.fakeKubeClient + existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}} + kubeClient.ReactionChain = 
fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain + machineInfo := &cadvisorapi.MachineInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + NumCores: 2, + MemoryCapacity: 10E9, // 10G + } + mockCadvisor := testKubelet.fakeCadvisor + mockCadvisor.On("Start").Return(nil) + mockCadvisor.On("MachineInfo").Return(machineInfo, nil) + versionInfo := &cadvisorapi.VersionInfo{ + KernelVersion: "3.16.0-0.bpo.4-amd64", + ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", + } + mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{ + Usage: 400, + Capacity: 5000, + Available: 600, + }, nil) + mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{ + Usage: 400, + Capacity: 5000, + Available: 600, + }, nil) + mockCadvisor.On("VersionInfo").Return(versionInfo, nil) + maxAge := 0 * time.Second + options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge} + mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil) + kubelet.machineInfo = machineInfo - updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch()) - assert.NoError(t, err) - for i, cond := range updatedNode.Status.Conditions { - assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type) - assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type) - updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{} - updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{} + expectedNode := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, + Spec: v1.NodeSpec{}, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeOutOfDisk, + Status: v1.ConditionFalse, + Reason: "KubeletHasSufficientDisk", + Message: fmt.Sprintf("kubelet has sufficient disk space available"), + LastHeartbeatTime: metav1.Time{}, + LastTransitionTime: metav1.Time{}, + }, + { + Type: v1.NodeMemoryPressure, + Status: v1.ConditionFalse, + Reason: "KubeletHasSufficientMemory", + Message: fmt.Sprintf("kubelet has sufficient memory available"), + LastHeartbeatTime: metav1.Time{}, + LastTransitionTime: metav1.Time{}, + }, + { + Type: v1.NodeDiskPressure, + Status: v1.ConditionFalse, + Reason: "KubeletHasNoDiskPressure", + Message: fmt.Sprintf("kubelet has no disk pressure"), + LastHeartbeatTime: metav1.Time{}, + LastTransitionTime: metav1.Time{}, + }, + { + Type: v1.NodePIDPressure, + Status: v1.ConditionFalse, + Reason: "KubeletHasSufficientPID", + Message: fmt.Sprintf("kubelet has sufficient PID available"), + LastHeartbeatTime: metav1.Time{}, + LastTransitionTime: metav1.Time{}, + }, + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + Reason: "KubeletReady", + Message: fmt.Sprintf("kubelet is posting ready status"), + LastHeartbeatTime: metav1.Time{}, + LastTransitionTime: metav1.Time{}, + }, + }, + NodeInfo: v1.NodeSystemInfo{ + MachineID: "123", + SystemUUID: "abc", + BootID: "1b3", + KernelVersion: "3.16.0-0.bpo.4-amd64", + OSImage: "Debian GNU/Linux 7 (wheezy)", + OperatingSystem: goruntime.GOOS, + Architecture: goruntime.GOARCH, + ContainerRuntimeVersion: "test://1.5.0", + KubeletVersion: version.Get().String(), + KubeProxyVersion: version.Get().String(), + }, + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI), + 
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), + }, + Allocatable: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI), + }, + Addresses: []v1.NodeAddress{ + {Type: v1.NodeInternalIP, Address: "127.0.0.1"}, + {Type: v1.NodeHostName, Address: testKubeletHostname}, + }, + Images: expectedImageList, + }, + } + + kubelet.updateRuntimeUp() + assert.NoError(t, kubelet.updateNodeStatus()) + actions := kubeClient.Actions() + require.Len(t, actions, 2) + require.True(t, actions[1].Matches("patch", "nodes")) + require.Equal(t, actions[1].GetSubresource(), "status") + + updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch()) + assert.NoError(t, err) + for i, cond := range updatedNode.Status.Conditions { + assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type) + assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type) + updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{} + updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{} + } + + // Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961 + assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type, + "NotReady should be last") + assert.Len(t, updatedNode.Status.Images, len(expectedImageList)) + assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode)) + }) } - - // Version skew workaround. 
See: https://github.com/kubernetes/kubernetes/issues/16961 - assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type, - "NotReady should be last") - assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus) - assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode)) } func TestUpdateExistingNodeStatus(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet - kubelet.kubeClient = nil // ensure only the heartbeat client is used + kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test + kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), allocatableReservation: v1.ResourceList{ @@ -742,7 +775,8 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet - kubelet.kubeClient = nil // ensure only the heartbeat client is used + kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test + kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), allocatableReservation: v1.ResourceList{ @@ -1214,12 +1248,15 @@ func TestTryRegisterWithApiServer(t *testing.T) { } func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) { - // generate one more than maxImagesInNodeStatus in inputImageList - inputImageList, _ := generateTestingImageList(maxImagesInNodeStatus + 1) + const nodeStatusMaxImages = 5 + + // generate one more in inputImageList than we configure the Kubelet to report + inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages) testKubelet := newTestKubeletWithImageList( t, inputImageList, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet + kubelet.nodeStatusMaxImages = nodeStatusMaxImages kubelet.kubeClient = nil // ensure only the heartbeat client is used kubelet.containerManager = &localCM{ ContainerManager: cm.NewStubContainerManager(), From cd89f9473faa60c15b8e9d223e5c4f9dab53627a Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 30 May 2018 14:03:32 -0700 Subject: [PATCH 289/307] Add TLS support to exec authenticator plugin https://github.com/kubernetes/community/blob/master/contributors/design-proposals/auth/kubectl-exec-plugins.md#tls-client-certificate-support Allows exec plugin to return raw TLS key/cert data. This data populates transport.Config.TLS fields. transport.Config.TLS propagates custom credentials using tls.Config.GetClientCertificate callback. 
On key/cert rotation, all connections using old credentials are closed --- .../Godeps/Godeps.json | 4 + .../src/k8s.io/apiserver/Godeps/Godeps.json | 4 + .../pkg/apis/clientauthentication/types.go | 7 + .../clientauthentication/v1alpha1/types.go | 8 + .../v1alpha1/zz_generated.conversion.go | 4 + .../plugin/pkg/client/auth/exec/BUILD | 5 + .../plugin/pkg/client/auth/exec/exec.go | 154 +++++++--- .../plugin/pkg/client/auth/exec/exec_test.go | 286 +++++++++++++++++- .../client/auth/exec/testdata/test-plugin.sh | 2 +- .../src/k8s.io/client-go/rest/transport.go | 59 ++-- .../src/k8s.io/client-go/transport/cache.go | 4 +- .../k8s.io/client-go/transport/cache_test.go | 20 ++ .../src/k8s.io/client-go/transport/config.go | 10 +- .../k8s.io/client-go/transport/transport.go | 34 ++- .../client-go/transport/transport_test.go | 129 ++++++-- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 4 + staging/src/k8s.io/metrics/Godeps/Godeps.json | 4 + .../sample-apiserver/Godeps/Godeps.json | 4 + .../sample-controller/Godeps/Godeps.json | 4 + 19 files changed, 634 insertions(+), 112 deletions(-) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index fba71ace596..9fd2162d973 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -2010,6 +2010,10 @@ "ImportPath": "k8s.io/client-go/util/cert", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/util/connrotation", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 0bdf0001024..8e3d9436a4b 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -1738,6 +1738,10 @@ "ImportPath": "k8s.io/client-go/util/cert", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/util/connrotation", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/types.go index 5c05825f67b..6fb53cecf94 100644 --- a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/types.go +++ b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -57,7 +57,14 @@ type ExecCredentialStatus struct { // +optional ExpirationTimestamp *metav1.Time // Token is a bearer token used by the client for request authentication. + // +optional Token string + // PEM-encoded client TLS certificate. + // +optional + ClientCertificateData string + // PEM-encoded client TLS private key. 
+ // +optional + ClientKeyData string } // Response defines metadata about a failed request, including HTTP status code and diff --git a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go index 8920d31876e..921f3a2b94d 100644 --- a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -52,12 +52,20 @@ type ExecCredentialSpec struct { } // ExecCredentialStatus holds credentials for the transport to use. +// +// Token and ClientKeyData are sensitive fields. This data should only be +// transmitted in-memory between client and exec plugin process. Exec plugin +// itself should at least be protected via file permissions. type ExecCredentialStatus struct { // ExpirationTimestamp indicates a time when the provided credentials expire. // +optional ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` // Token is a bearer token used by the client for request authentication. Token string `json:"token,omitempty"` + // PEM-encoded client TLS certificates (including intermediates, if any). + ClientCertificateData string `json:"clientCertificateData,omitempty"` + // PEM-encoded private key for the above certificate. + ClientKeyData string `json:"clientKeyData,omitempty"` } // Response defines metadata about a failed request, including HTTP status code and diff --git a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go index 032eaf38f27..9921c7ee580 100644 --- a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go +++ b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -99,6 +99,8 @@ func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialS func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData return nil } @@ -110,6 +112,8 @@ func Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentia func autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData return nil } diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD index 38cf9bdc299..a111f67d79f 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD @@ -15,6 +15,8 @@ go_library( "//vendor/k8s.io/client-go/pkg/apis/clientauthentication:go_default_library", "//vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", + 
"//vendor/k8s.io/client-go/transport:go_default_library", + "//vendor/k8s.io/client-go/util/connrotation:go_default_library", ], ) @@ -24,8 +26,11 @@ go_test( data = glob(["testdata/**"]), embed = [":go_default_library"], deps = [ + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/client-go/pkg/apis/clientauthentication:go_default_library", "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", + "//vendor/k8s.io/client-go/transport:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index dfd434d0c2f..30f6156240b 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -18,11 +18,15 @@ package exec import ( "bytes" + "context" + "crypto/tls" "fmt" "io" + "net" "net/http" "os" "os/exec" + "reflect" "sync" "time" @@ -35,6 +39,8 @@ import ( "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/transport" + "k8s.io/client-go/util/connrotation" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" @@ -147,14 +153,55 @@ type Authenticator struct { // The mutex also guards calling the plugin. Since the plugin could be // interactive we want to make sure it's only called once. mu sync.Mutex - cachedToken string + cachedCreds *credentials exp time.Time + + onRotate func() } -// WrapTransport instruments an existing http.RoundTripper with credentials returned -// by the plugin. -func (a *Authenticator) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &roundTripper{a, rt} +type credentials struct { + token string + cert *tls.Certificate +} + +// UpdateTransportConfig updates the transport.Config to use credentials +// returned by the plugin. +func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { + wt := c.WrapTransport + c.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } + return &roundTripper{a, rt} + } + + getCert := c.TLS.GetCert + c.TLS.GetCert = func() (*tls.Certificate, error) { + // If previous GetCert is present and returns a valid non-nil + // certificate, use that. Otherwise use cert from exec plugin. 
+ if getCert != nil { + cert, err := getCert() + if err != nil { + return nil, err + } + if cert != nil { + return cert, nil + } + } + return a.cert() + } + + var dial func(ctx context.Context, network, addr string) (net.Conn, error) + if c.Dial != nil { + dial = c.Dial + } else { + dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext + } + d := connrotation.NewDialer(dial) + a.onRotate = d.CloseAll + c.Dial = d.DialContext + + return nil } type roundTripper struct { @@ -169,11 +216,13 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return r.base.RoundTrip(req) } - token, err := r.a.token() + creds, err := r.a.getCreds() if err != nil { - return nil, fmt.Errorf("getting token: %v", err) + return nil, fmt.Errorf("getting credentials: %v", err) + } + if creds.token != "" { + req.Header.Set("Authorization", "Bearer "+creds.token) } - req.Header.Set("Authorization", "Bearer "+token) res, err := r.base.RoundTrip(req) if err != nil { @@ -184,47 +233,60 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { Header: res.Header, Code: int32(res.StatusCode), } - if err := r.a.refresh(token, resp); err != nil { - glog.Errorf("refreshing token: %v", err) + if err := r.a.maybeRefreshCreds(creds, resp); err != nil { + glog.Errorf("refreshing credentials: %v", err) } } return res, nil } -func (a *Authenticator) tokenExpired() bool { +func (a *Authenticator) credsExpired() bool { if a.exp.IsZero() { return false } return a.now().After(a.exp) } -func (a *Authenticator) token() (string, error) { - a.mu.Lock() - defer a.mu.Unlock() - if a.cachedToken != "" && !a.tokenExpired() { - return a.cachedToken, nil +func (a *Authenticator) cert() (*tls.Certificate, error) { + creds, err := a.getCreds() + if err != nil { + return nil, err } - - return a.getToken(nil) + return creds.cert, nil } -// refresh executes the plugin to force a rotation of the token. -func (a *Authenticator) refresh(token string, r *clientauthentication.Response) error { +func (a *Authenticator) getCreds() (*credentials, error) { + a.mu.Lock() + defer a.mu.Unlock() + if a.cachedCreds != nil && !a.credsExpired() { + return a.cachedCreds, nil + } + + if err := a.refreshCredsLocked(nil); err != nil { + return nil, err + } + return a.cachedCreds, nil +} + +// maybeRefreshCreds executes the plugin to force a rotation of the +// credentials, unless they were rotated already. +func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentication.Response) error { a.mu.Lock() defer a.mu.Unlock() - if token != a.cachedToken { - // Token already rotated. + // Since we're not making a new pointer to a.cachedCreds in getCreds, no + // need to do deep comparison. + if creds != a.cachedCreds { + // Credentials already rotated. return nil } - _, err := a.getToken(r) - return err + return a.refreshCredsLocked(r) } -// getToken executes the plugin and reads the credentials from stdout. It must be -// called while holding the Authenticator's mutex. -func (a *Authenticator) getToken(r *clientauthentication.Response) (string, error) { +// refreshCredsLocked executes the plugin and reads the credentials from +// stdout. It must be called while holding the Authenticator's mutex. 
+func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error { cred := &clientauthentication.ExecCredential{ Spec: clientauthentication.ExecCredentialSpec{ Response: r, @@ -234,7 +296,7 @@ func (a *Authenticator) getToken(r *clientauthentication.Response) (string, erro data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) if err != nil { - return "", fmt.Errorf("encode ExecCredentials: %v", err) + return fmt.Errorf("encode ExecCredentials: %v", err) } env := append(a.environ(), a.env...) @@ -250,23 +312,26 @@ func (a *Authenticator) getToken(r *clientauthentication.Response) (string, erro } if err := cmd.Run(); err != nil { - return "", fmt.Errorf("exec: %v", err) + return fmt.Errorf("exec: %v", err) } _, gvk, err := codecs.UniversalDecoder(a.group).Decode(stdout.Bytes(), nil, cred) if err != nil { - return "", fmt.Errorf("decode stdout: %v", err) + return fmt.Errorf("decoding stdout: %v", err) } if gvk.Group != a.group.Group || gvk.Version != a.group.Version { - return "", fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s", + return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s", a.group, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version}) } if cred.Status == nil { - return "", fmt.Errorf("exec plugin didn't return a status field") + return fmt.Errorf("exec plugin didn't return a status field") } - if cred.Status.Token == "" { - return "", fmt.Errorf("exec plugin didn't return a token") + if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" { + return fmt.Errorf("exec plugin didn't return a token or cert/key pair") + } + if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") { + return fmt.Errorf("exec plugin returned only certificate or key, not both") } if cred.Status.ExpirationTimestamp != nil { @@ -274,7 +339,24 @@ func (a *Authenticator) getToken(r *clientauthentication.Response) (string, erro } else { a.exp = time.Time{} } - a.cachedToken = cred.Status.Token - return a.cachedToken, nil + newCreds := &credentials{ + token: cred.Status.Token, + } + if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" { + cert, err := tls.X509KeyPair([]byte(cred.Status.ClientCertificateData), []byte(cred.Status.ClientKeyData)) + if err != nil { + return fmt.Errorf("failed parsing client key/certificate: %v", err) + } + newCreds.cert = &cert + } + + oldCreds := a.cachedCreds + a.cachedCreds = newCreds + // Only close all connections when TLS cert rotates. Token rotation doesn't + // need the extra noise. 
+ if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { + a.onRotate() + } + return nil } diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec_test.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec_test.go index 28137eb8477..c37ee4c67a0 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec_test.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec_test.go @@ -18,19 +18,88 @@ package exec import ( "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" "encoding/json" + "encoding/pem" "fmt" "io/ioutil" + "math/big" "net/http" "net/http/httptest" + "reflect" "strings" "testing" "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/pkg/apis/clientauthentication" "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/transport" ) +var ( + certData = []byte(`-----BEGIN CERTIFICATE----- +MIIC6jCCAdSgAwIBAgIBCzALBgkqhkiG9w0BAQswIzEhMB8GA1UEAwwYMTAuMTMu +MTI5LjEwNkAxNDIxMzU5MDU4MB4XDTE1MDExNTIyMDEzMVoXDTE2MDExNTIyMDEz +MlowGzEZMBcGA1UEAxMQb3BlbnNoaWZ0LWNsaWVudDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKtdhz0+uCLXw5cSYns9rU/XifFSpb/x24WDdrm72S/v +b9BPYsAStiP148buylr1SOuNi8sTAZmlVDDIpIVwMLff+o2rKYDicn9fjbrTxTOj +lI4pHJBH+JU3AJ0tbajupioh70jwFS0oYpwtneg2zcnE2Z4l6mhrj2okrc5Q1/X2 +I2HChtIU4JYTisObtin10QKJX01CLfYXJLa8upWzKZ4/GOcHG+eAV3jXWoXidtjb +1Usw70amoTZ6mIVCkiu1QwCoa8+ycojGfZhvqMsAp1536ZcCul+Na+AbCv4zKS7F +kQQaImVrXdUiFansIoofGlw/JNuoKK6ssVpS5Ic3pgcCAwEAAaM1MDMwDgYDVR0P +AQH/BAQDAgCgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJ +KoZIhvcNAQELA4IBAQCKLREH7bXtXtZ+8vI6cjD7W3QikiArGqbl36bAhhWsJLp/ +p/ndKz39iFNaiZ3GlwIURWOOKx3y3GA0x9m8FR+Llthf0EQ8sUjnwaknWs0Y6DQ3 +jjPFZOpV3KPCFrdMJ3++E3MgwFC/Ih/N2ebFX9EcV9Vcc6oVWMdwT0fsrhu683rq +6GSR/3iVX1G/pmOiuaR0fNUaCyCfYrnI4zHBDgSfnlm3vIvN2lrsR/DQBakNL8DJ +HBgKxMGeUPoneBv+c8DMXIL0EhaFXRlBv9QW45/GiAIOuyFJ0i6hCtGZpJjq4OpQ +BRjCI+izPzFTjsxD4aORE+WOkyWFCGPWKfNejfw0 +-----END CERTIFICATE-----`) + keyData = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAq12HPT64ItfDlxJiez2tT9eJ8VKlv/HbhYN2ubvZL+9v0E9i +wBK2I/Xjxu7KWvVI642LyxMBmaVUMMikhXAwt9/6jaspgOJyf1+NutPFM6OUjikc +kEf4lTcAnS1tqO6mKiHvSPAVLShinC2d6DbNycTZniXqaGuPaiStzlDX9fYjYcKG +0hTglhOKw5u2KfXRAolfTUIt9hcktry6lbMpnj8Y5wcb54BXeNdaheJ22NvVSzDv +RqahNnqYhUKSK7VDAKhrz7JyiMZ9mG+oywCnXnfplwK6X41r4BsK/jMpLsWRBBoi +ZWtd1SIVqewiih8aXD8k26gorqyxWlLkhzemBwIDAQABAoIBAD2XYRs3JrGHQUpU +FkdbVKZkvrSY0vAZOqBTLuH0zUv4UATb8487anGkWBjRDLQCgxH+jucPTrztekQK +aW94clo0S3aNtV4YhbSYIHWs1a0It0UdK6ID7CmdWkAj6s0T8W8lQT7C46mWYVLm +5mFnCTHi6aB42jZrqmEpC7sivWwuU0xqj3Ml8kkxQCGmyc9JjmCB4OrFFC8NNt6M +ObvQkUI6Z3nO4phTbpxkE1/9dT0MmPIF7GhHVzJMS+EyyRYUDllZ0wvVSOM3qZT0 +JMUaBerkNwm9foKJ1+dv2nMKZZbJajv7suUDCfU44mVeaEO+4kmTKSGCGjjTBGkr +7L1ySDECgYEA5ElIMhpdBzIivCuBIH8LlUeuzd93pqssO1G2Xg0jHtfM4tz7fyeI +cr90dc8gpli24dkSxzLeg3Tn3wIj/Bu64m2TpZPZEIlukYvgdgArmRIPQVxerYey +OkrfTNkxU1HXsYjLCdGcGXs5lmb+K/kuTcFxaMOs7jZi7La+jEONwf8CgYEAwCs/ +rUOOA0klDsWWisbivOiNPII79c9McZCNBqncCBfMUoiGe8uWDEO4TFHN60vFuVk9 +8PkwpCfvaBUX+ajvbafIfHxsnfk1M04WLGCeqQ/ym5Q4sQoQOcC1b1y9qc/xEWfg +nIUuia0ukYRpl7qQa3tNg+BNFyjypW8zukUAC/kCgYB1/Kojuxx5q5/oQVPrx73k +2bevD+B3c+DYh9MJqSCNwFtUpYIWpggPxoQan4LwdsmO0PKzocb/ilyNFj4i/vII +NToqSc/WjDFpaDIKyuu9oWfhECye45NqLWhb/6VOuu4QA/Nsj7luMhIBehnEAHW+ +GkzTKM8oD1PxpEG3nPKXYQKBgQC6AuMPRt3XBl1NkCrpSBy/uObFlFaP2Enpf39S +3OZ0Gv0XQrnSaL1kP8TMcz68rMrGX8DaWYsgytstR4W+jyy7WvZwsUu+GjTJ5aMG 
+77uEcEBpIi9CBzivfn7hPccE8ZgqPf+n4i6q66yxBJflW5xhvafJqDtW2LcPNbW/ +bvzdmQKBgExALRUXpq+5dbmkdXBHtvXdRDZ6rVmrnjy4nI5bPw+1GqQqk6uAR6B/ +F6NmLCQOO4PDG/cuatNHIr2FrwTmGdEL6ObLUGWn9Oer9gJhHVqqsY5I4sEPo4XX +stR0Yiw0buV6DL/moUO0HIM9Bjh96HJp+LxiIS6UCdIhMPp5HoQa +-----END RSA PRIVATE KEY-----`) + validCert *tls.Certificate +) + +func init() { + cert, err := tls.X509KeyPair(certData, keyData) + if err != nil { + panic(err) + } + validCert = &cert +} + func TestCacheKey(t *testing.T) { c1 := &api.ExecConfig{ Command: "foo-bar", @@ -93,7 +162,7 @@ func compJSON(t *testing.T, got, want []byte) { } } -func TestGetToken(t *testing.T) { +func TestRefreshCreds(t *testing.T) { tests := []struct { name string config api.ExecConfig @@ -101,7 +170,7 @@ func TestGetToken(t *testing.T) { interactive bool response *clientauthentication.Response wantInput string - wantToken string + wantCreds credentials wantExpiry time.Time wantErr bool }{ @@ -122,7 +191,7 @@ func TestGetToken(t *testing.T) { "token": "foo-bar" } }`, - wantToken: "foo-bar", + wantCreds: credentials{token: "foo-bar"}, }, { name: "interactive", @@ -144,7 +213,7 @@ func TestGetToken(t *testing.T) { "token": "foo-bar" } }`, - wantToken: "foo-bar", + wantCreds: credentials{token: "foo-bar"}, }, { name: "response", @@ -178,7 +247,7 @@ func TestGetToken(t *testing.T) { "token": "foo-bar" } }`, - wantToken: "foo-bar", + wantCreds: credentials{token: "foo-bar"}, }, { name: "expiry", @@ -199,7 +268,7 @@ func TestGetToken(t *testing.T) { } }`, wantExpiry: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC), - wantToken: "foo-bar", + wantCreds: credentials{token: "foo-bar"}, }, { name: "no-group-version", @@ -236,7 +305,7 @@ func TestGetToken(t *testing.T) { wantErr: true, }, { - name: "no-token", + name: "no-creds", config: api.ExecConfig{ APIVersion: "client.authentication.k8s.io/v1alpha1", }, @@ -252,6 +321,65 @@ func TestGetToken(t *testing.T) { }`, wantErr: true, }, + { + name: "TLS credentials", + config: api.ExecConfig{ + APIVersion: "client.authentication.k8s.io/v1alpha1", + }, + wantInput: `{ + "kind":"ExecCredential", + "apiVersion":"client.authentication.k8s.io/v1alpha1", + "spec": {} + }`, + output: fmt.Sprintf(`{ + "kind": "ExecCredential", + "apiVersion": "client.authentication.k8s.io/v1alpha1", + "status": { + "clientKeyData": %q, + "clientCertificateData": %q + } + }`, keyData, certData), + wantCreds: credentials{cert: validCert}, + }, + { + name: "bad TLS credentials", + config: api.ExecConfig{ + APIVersion: "client.authentication.k8s.io/v1alpha1", + }, + wantInput: `{ + "kind":"ExecCredential", + "apiVersion":"client.authentication.k8s.io/v1alpha1", + "spec": {} + }`, + output: `{ + "kind": "ExecCredential", + "apiVersion": "client.authentication.k8s.io/v1alpha1", + "status": { + "clientKeyData": "foo", + "clientCertificateData": "bar" + } + }`, + wantErr: true, + }, + { + name: "cert but no key", + config: api.ExecConfig{ + APIVersion: "client.authentication.k8s.io/v1alpha1", + }, + wantInput: `{ + "kind":"ExecCredential", + "apiVersion":"client.authentication.k8s.io/v1alpha1", + "spec": {} + }`, + output: fmt.Sprintf(`{ + "kind": "ExecCredential", + "apiVersion": "client.authentication.k8s.io/v1alpha1", + "status": { + "clientCertificateData": %q + } + }`, certData), + wantErr: true, + }, } for _, test := range tests { @@ -274,8 +402,7 @@ func TestGetToken(t *testing.T) { a.interactive = test.interactive a.environ = func() []string { return nil } - token, err := a.getToken(test.response) - if err != nil { + if err := 
a.refreshCredsLocked(test.response); err != nil { if !test.wantErr { t.Errorf("get token %v", err) } @@ -285,8 +412,8 @@ func TestGetToken(t *testing.T) { t.Fatal("expected error getting token") } - if token != test.wantToken { - t.Errorf("expected token %q got %q", test.wantToken, token) + if !reflect.DeepEqual(a.cachedCreds, &test.wantCreds) { + t.Errorf("expected credentials %+v got %+v", &test.wantCreds, a.cachedCreds) } if !a.exp.Equal(test.wantExpiry) { @@ -342,8 +469,12 @@ func TestRoundTripper(t *testing.T) { a.now = now a.stderr = ioutil.Discard + tc := &transport.Config{} + if err := a.UpdateTransportConfig(tc); err != nil { + t.Fatal(err) + } client := http.Client{ - Transport: a.WrapTransport(http.DefaultTransport), + Transport: tc.WrapTransport(http.DefaultTransport), } get := func(t *testing.T, statusCode int) { @@ -411,3 +542,134 @@ func TestRoundTripper(t *testing.T) { // Old token is expired, should refresh automatically without hitting a 401. get(t, http.StatusOK) } + +func TestTLSCredentials(t *testing.T) { + now := time.Now() + + certPool := x509.NewCertPool() + cert, key := genClientCert(t) + if !certPool.AppendCertsFromPEM(cert) { + t.Fatal("failed to add client cert to CertPool") + } + + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "ok") + })) + server.TLS = &tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: certPool, + } + server.StartTLS() + defer server.Close() + + a, err := newAuthenticator(newCache(), &api.ExecConfig{ + Command: "./testdata/test-plugin.sh", + APIVersion: "client.authentication.k8s.io/v1alpha1", + }) + if err != nil { + t.Fatal(err) + } + var output *clientauthentication.ExecCredential + a.environ = func() []string { + data, err := runtime.Encode(codecs.LegacyCodec(a.group), output) + if err != nil { + t.Fatal(err) + } + return []string{"TEST_OUTPUT=" + string(data)} + } + a.now = func() time.Time { return now } + a.stderr = ioutil.Discard + + // We're not interested in server's cert, this test is about client cert. + tc := &transport.Config{TLS: transport.TLSConfig{Insecure: true}} + if err := a.UpdateTransportConfig(tc); err != nil { + t.Fatal(err) + } + + get := func(t *testing.T, desc string, wantErr bool) { + t.Run(desc, func(t *testing.T) { + tlsCfg, err := transport.TLSConfigFor(tc) + if err != nil { + t.Fatal("TLSConfigFor:", err) + } + client := http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsCfg}, + } + resp, err := client.Get(server.URL) + switch { + case err != nil && !wantErr: + t.Errorf("got client.Get error: %q, want nil", err) + case err == nil && wantErr: + t.Error("got nil client.Get error, want non-nil") + } + if err == nil { + resp.Body.Close() + } + }) + } + + output = &clientauthentication.ExecCredential{ + Status: &clientauthentication.ExecCredentialStatus{ + ClientCertificateData: string(cert), + ClientKeyData: string(key), + ExpirationTimestamp: &v1.Time{now.Add(time.Hour)}, + }, + } + get(t, "valid TLS cert", false) + + // Advance time to force re-exec. 
+ nCert, nKey := genClientCert(t) + now = now.Add(time.Hour * 2) + output = &clientauthentication.ExecCredential{ + Status: &clientauthentication.ExecCredentialStatus{ + ClientCertificateData: string(nCert), + ClientKeyData: string(nKey), + ExpirationTimestamp: &v1.Time{now.Add(time.Hour)}, + }, + } + get(t, "untrusted TLS cert", true) + + now = now.Add(time.Hour * 2) + output = &clientauthentication.ExecCredential{ + Status: &clientauthentication.ExecCredentialStatus{ + ClientCertificateData: string(cert), + ClientKeyData: string(key), + ExpirationTimestamp: &v1.Time{now.Add(time.Hour)}, + }, + } + get(t, "valid TLS cert again", false) +} + +// genClientCert generates an x509 certificate for testing. Certificate and key +// are returned in PEM encoding. +func genClientCert(t *testing.T) ([]byte, []byte) { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + keyRaw, err := x509.MarshalECPrivateKey(key) + if err != nil { + t.Fatal(err) + } + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + t.Fatal(err) + } + cert := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{Organization: []string{"Acme Co"}}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } + certRaw, err := x509.CreateCertificate(rand.Reader, cert, cert, key.Public(), key) + if err != nil { + t.Fatal(err) + } + return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certRaw}), + pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyRaw}) +} diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/testdata/test-plugin.sh b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/testdata/test-plugin.sh index 6b9bb100c7b..aa7daad5fd4 100755 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/testdata/test-plugin.sh +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/testdata/test-plugin.sh @@ -1,4 +1,4 @@ -#!/bin/sh -e +#!/bin/bash -e # Copyright 2018 The Kubernetes Authors. # diff --git a/staging/src/k8s.io/client-go/rest/transport.go b/staging/src/k8s.io/client-go/rest/transport.go index b6a0676326e..7f01823d16d 100644 --- a/staging/src/k8s.io/client-go/rest/transport.go +++ b/staging/src/k8s.io/client-go/rest/transport.go @@ -59,39 +59,10 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // TransportConfig converts a client config to an appropriate transport config. 
func (c *Config) TransportConfig() (*transport.Config, error) { - wt := c.WrapTransport - if c.ExecProvider != nil { - provider, err := exec.GetAuthenticator(c.ExecProvider) - if err != nil { - return nil, err - } - if wt != nil { - previousWT := wt - wt = func(rt http.RoundTripper) http.RoundTripper { - return provider.WrapTransport(previousWT(rt)) - } - } else { - wt = provider.WrapTransport - } - } - if c.AuthProvider != nil { - provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) - if err != nil { - return nil, err - } - if wt != nil { - previousWT := wt - wt = func(rt http.RoundTripper) http.RoundTripper { - return provider.WrapTransport(previousWT(rt)) - } - } else { - wt = provider.WrapTransport - } - } - return &transport.Config{ + conf := &transport.Config{ UserAgent: c.UserAgent, Transport: c.Transport, - WrapTransport: wt, + WrapTransport: c.WrapTransport, TLS: transport.TLSConfig{ Insecure: c.Insecure, ServerName: c.ServerName, @@ -111,5 +82,29 @@ func (c *Config) TransportConfig() (*transport.Config, error) { Extra: c.Impersonate.Extra, }, Dial: c.Dial, - }, nil + } + if c.ExecProvider != nil { + provider, err := exec.GetAuthenticator(c.ExecProvider) + if err != nil { + return nil, err + } + if err := provider.UpdateTransportConfig(conf); err != nil { + return nil, err + } + } + if c.AuthProvider != nil { + provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) + if err != nil { + return nil, err + } + wt := conf.WrapTransport + if wt != nil { + conf.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(wt(rt)) + } + } else { + conf.WrapTransport = provider.WrapTransport + } + } + return conf, nil } diff --git a/staging/src/k8s.io/client-go/transport/cache.go b/staging/src/k8s.io/client-go/transport/cache.go index 540af849460..7cffe2a5faf 100644 --- a/staging/src/k8s.io/client-go/transport/cache.go +++ b/staging/src/k8s.io/client-go/transport/cache.go @@ -43,6 +43,7 @@ type tlsCacheKey struct { caData string certData string keyData string + getCert string serverName string dial string } @@ -52,7 +53,7 @@ func (t tlsCacheKey) String() string { if len(t.keyData) > 0 { keyText = "" } - return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.serverName, t.dial) + return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial) } func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) { @@ -109,6 +110,7 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) { caData: string(c.TLS.CAData), certData: string(c.TLS.CertData), keyData: string(c.TLS.KeyData), + getCert: fmt.Sprintf("%p", c.TLS.GetCert), serverName: c.TLS.ServerName, dial: fmt.Sprintf("%p", c.Dial), }, nil diff --git a/staging/src/k8s.io/client-go/transport/cache_test.go b/staging/src/k8s.io/client-go/transport/cache_test.go index 61f3affc62c..9b740cdeca8 100644 --- a/staging/src/k8s.io/client-go/transport/cache_test.go +++ b/staging/src/k8s.io/client-go/transport/cache_test.go @@ -18,6 +18,7 @@ package transport import ( "context" + "crypto/tls" "net" "net/http" "testing" @@ -54,6 +55,7 @@ func TestTLSConfigKey(t *testing.T) { // Make sure config fields that affect the tls config affect the cache key dialer := net.Dialer{} + getCert := func() (*tls.Certificate, error) { return nil, nil } uniqueConfigurations := 
map[string]*Config{ "no tls": {}, "dialer": {Dial: dialer.DialContext}, @@ -106,6 +108,24 @@ func TestTLSConfigKey(t *testing.T) { KeyData: []byte{1}, }, }, + "getCert1": { + TLS: TLSConfig{ + KeyData: []byte{1}, + GetCert: getCert, + }, + }, + "getCert2": { + TLS: TLSConfig{ + KeyData: []byte{1}, + GetCert: func() (*tls.Certificate, error) { return nil, nil }, + }, + }, + "getCert1, key 2": { + TLS: TLSConfig{ + KeyData: []byte{2}, + GetCert: getCert, + }, + }, } for nameA, valueA := range uniqueConfigurations { for nameB, valueB := range uniqueConfigurations { diff --git a/staging/src/k8s.io/client-go/transport/config.go b/staging/src/k8s.io/client-go/transport/config.go index 90f705d25f5..4081c23e7ff 100644 --- a/staging/src/k8s.io/client-go/transport/config.go +++ b/staging/src/k8s.io/client-go/transport/config.go @@ -18,6 +18,7 @@ package transport import ( "context" + "crypto/tls" "net" "net/http" ) @@ -84,7 +85,12 @@ func (c *Config) HasTokenAuth() bool { // HasCertAuth returns whether the configuration has certificate authentication or not. func (c *Config) HasCertAuth() bool { - return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0 + return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0) +} + +// HasCertCallbacks returns whether the configuration has certificate callback or not. +func (c *Config) HasCertCallback() bool { + return c.TLS.GetCert != nil } // TLSConfig holds the information needed to set up a TLS transport. @@ -99,4 +105,6 @@ type TLSConfig struct { CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. + + GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. } diff --git a/staging/src/k8s.io/client-go/transport/transport.go b/staging/src/k8s.io/client-go/transport/transport.go index c2bb7ae5e44..c19739fdfe8 100644 --- a/staging/src/k8s.io/client-go/transport/transport.go +++ b/staging/src/k8s.io/client-go/transport/transport.go @@ -28,7 +28,7 @@ import ( // or transport level security defined by the provided Config. func New(config *Config) (http.RoundTripper, error) { // Set transport level security - if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) { + if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.HasCertCallback() || config.TLS.Insecure) { return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") } @@ -52,7 +52,7 @@ func New(config *Config) (http.RoundTripper, error) { // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. 
func TLSConfigFor(c *Config) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) { + if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) { return nil, nil } if c.HasCA() && c.TLS.Insecure { @@ -75,12 +75,40 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) } + var staticCert *tls.Certificate if c.HasCertAuth() { + // If key/cert were provided, verify them before setting up + // tlsConfig.GetClientCertificate. cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) if err != nil { return nil, err } - tlsConfig.Certificates = []tls.Certificate{cert} + staticCert = &cert + } + + if c.HasCertAuth() || c.HasCertCallback() { + tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + // Note: static key/cert data always take precedence over cert + // callback. + if staticCert != nil { + return staticCert, nil + } + if c.HasCertCallback() { + cert, err := c.TLS.GetCert() + if err != nil { + return nil, err + } + // GetCert may return empty value, meaning no cert. + if cert != nil { + return cert, nil + } + } + + // Both c.TLS.CertData/KeyData were unset and GetCert didn't return + // anything. Return an empty tls.Certificate, no client cert will + // be sent to the server. + return &tls.Certificate{}, nil + } } return tlsConfig, nil diff --git a/staging/src/k8s.io/client-go/transport/transport_test.go b/staging/src/k8s.io/client-go/transport/transport_test.go index 8de751562d6..2e9896a08b1 100644 --- a/staging/src/k8s.io/client-go/transport/transport_test.go +++ b/staging/src/k8s.io/client-go/transport/transport_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package transport import ( + "crypto/tls" + "errors" "net/http" "testing" ) @@ -94,6 +96,8 @@ func TestNew(t *testing.T) { Config *Config Err bool TLS bool + TLSCert bool + TLSErr bool Default bool }{ "default transport": { @@ -135,7 +139,8 @@ func TestNew(t *testing.T) { }, "cert transport": { - TLS: true, + TLS: true, + TLSCert: true, Config: &Config{ TLS: TLSConfig{ CAData: []byte(rootCACert), @@ -165,7 +170,8 @@ func TestNew(t *testing.T) { }, }, "key data overriding bad file cert transport": { - TLS: true, + TLS: true, + TLSCert: true, Config: &Config{ TLS: TLSConfig{ CAData: []byte(rootCACert), @@ -175,37 +181,108 @@ func TestNew(t *testing.T) { }, }, }, + "callback cert and key": { + TLS: true, + TLSCert: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + GetCert: func() (*tls.Certificate, error) { + crt, err := tls.X509KeyPair([]byte(certData), []byte(keyData)) + return &crt, err + }, + }, + }, + }, + "cert callback error": { + TLS: true, + TLSCert: true, + TLSErr: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + GetCert: func() (*tls.Certificate, error) { + return nil, errors.New("GetCert failure") + }, + }, + }, + }, + "cert data overrides empty callback result": { + TLS: true, + TLSCert: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + GetCert: func() (*tls.Certificate, error) { + return nil, nil + }, + CertData: []byte(certData), + KeyData: []byte(keyData), + }, + }, + }, + "callback returns nothing": { + TLS: true, + TLSCert: true, + Config: &Config{ + TLS: TLSConfig{ + CAData: []byte(rootCACert), + GetCert: func() (*tls.Certificate, error) { + return nil, nil + }, + }, + }, + }, } for k, testCase := range testCases { - transport, err := New(testCase.Config) - switch { - case testCase.Err && err == nil: - t.Errorf("%s: unexpected non-error", k) - continue - case !testCase.Err && err != nil: - t.Errorf("%s: unexpected error: %v", k, err) - continue - } + t.Run(k, func(t *testing.T) { + rt, err := New(testCase.Config) + switch { + case testCase.Err && err == nil: + t.Fatal("unexpected non-error") + case !testCase.Err && err != nil: + t.Fatalf("unexpected error: %v", err) + } + if testCase.Err { + return + } - switch { - case testCase.Default && transport != http.DefaultTransport: - t.Errorf("%s: expected the default transport, got %#v", k, transport) - continue - case !testCase.Default && transport == http.DefaultTransport: - t.Errorf("%s: expected non-default transport, got %#v", k, transport) - continue - } + switch { + case testCase.Default && rt != http.DefaultTransport: + t.Fatalf("got %#v, expected the default transport", rt) + case !testCase.Default && rt == http.DefaultTransport: + t.Fatalf("got %#v, expected non-default transport", rt) + } - // We only know how to check TLSConfig on http.Transports - if transport, ok := transport.(*http.Transport); ok { + // We only know how to check TLSConfig on http.Transports + transport := rt.(*http.Transport) switch { case testCase.TLS && transport.TLSClientConfig == nil: - t.Errorf("%s: expected TLSClientConfig, got %#v", k, transport) - continue + t.Fatalf("got %#v, expected TLSClientConfig", transport) case !testCase.TLS && transport.TLSClientConfig != nil: - t.Errorf("%s: expected no TLSClientConfig, got %#v", k, transport) - continue + t.Fatalf("got %#v, expected no TLSClientConfig", transport) } - } + if !testCase.TLS { + return + } + + switch { + case testCase.TLSCert && transport.TLSClientConfig.GetClientCertificate == nil: + t.Fatalf("got %#v, 
expected TLSClientConfig.GetClientCertificate", transport.TLSClientConfig) + case !testCase.TLSCert && transport.TLSClientConfig.GetClientCertificate != nil: + t.Fatalf("got %#v, expected no TLSClientConfig.GetClientCertificate", transport.TLSClientConfig) + } + if !testCase.TLSCert { + return + } + + _, err = transport.TLSClientConfig.GetClientCertificate(nil) + switch { + case testCase.TLSErr && err == nil: + t.Error("got nil error from GetClientCertificate, expected non-nil") + case !testCase.TLSErr && err != nil: + t.Errorf("got error from GetClientCertificate: %q, expected nil", err) + } + }) } } diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index 9a0970403aa..4e684a16123 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -1654,6 +1654,10 @@ "ImportPath": "k8s.io/client-go/util/cert", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/util/connrotation", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index 9415da5b540..656f0776ac2 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -458,6 +458,10 @@ "ImportPath": "k8s.io/client-go/util/cert", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/util/connrotation", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index f0635220789..008ec6a99d5 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -1622,6 +1622,10 @@ "ImportPath": "k8s.io/client-go/util/cert", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/util/connrotation", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index 9d2e0acd719..7fbae5e9bd2 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -1046,6 +1046,10 @@ "ImportPath": "k8s.io/client-go/util/cert", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/client-go/util/connrotation", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/client-go/util/flowcontrol", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" From 759e74cca05d1231879a01c41301864deacfe657 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Fri, 4 May 2018 10:47:09 -0700 Subject: [PATCH 290/307] core v1: deprecate the gitRepo volume type --- pkg/apis/core/types.go | 7 +++++++ staging/src/k8s.io/api/core/v1/types.go | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index 54c23ab7194..94bec6cb864 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -75,6 +75,9 
@@ type VolumeSource struct { // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. // +optional GitRepo *GitRepoVolumeSource // Secret represents a secret that should populate this volume. @@ -790,6 +793,10 @@ type AWSElasticBlockStoreVolumeSource struct { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. +// +// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +// into the Pod's container. type GitRepoVolumeSource struct { // Repository URL Repository string diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index e46e9db0a7f..7c58d39d13e 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -70,6 +70,9 @@ type VolumeSource struct { // +optional AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. // +optional GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` // Secret represents a secret that should populate this volume. @@ -972,6 +975,10 @@ type AWSElasticBlockStoreVolumeSource struct { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. +// +// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +// into the Pod's container. 
type GitRepoVolumeSource struct { // Repository URL Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` From f8f5f045a72622eb9784cc29aa97082a41adc56b Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Fri, 4 May 2018 11:19:34 -0700 Subject: [PATCH 291/307] generated: update generated API files Commands run: ./hack/update-api-reference-docs.sh ./hack/update-generated-protobuf.sh ./hack/update-generated-swagger-docs.sh ./hack/update-openapi-spec.sh ./hack/update-swagger-spec.sh --- api/openapi-spec/swagger.json | 4 ++-- api/swagger-spec/apps_v1.json | 4 ++-- api/swagger-spec/apps_v1beta1.json | 4 ++-- api/swagger-spec/apps_v1beta2.json | 4 ++-- api/swagger-spec/batch_v1.json | 4 ++-- api/swagger-spec/batch_v1beta1.json | 4 ++-- api/swagger-spec/batch_v2alpha1.json | 4 ++-- api/swagger-spec/extensions_v1beta1.json | 4 ++-- api/swagger-spec/settings.k8s.io_v1alpha1.json | 4 ++-- api/swagger-spec/v1.json | 4 ++-- docs/api-reference/apps/v1/definitions.html | 5 ++++- docs/api-reference/apps/v1beta1/definitions.html | 5 ++++- docs/api-reference/apps/v1beta2/definitions.html | 5 ++++- docs/api-reference/batch/v1/definitions.html | 5 ++++- docs/api-reference/batch/v1beta1/definitions.html | 5 ++++- docs/api-reference/batch/v2alpha1/definitions.html | 5 ++++- docs/api-reference/extensions/v1beta1/definitions.html | 5 ++++- .../settings.k8s.io/v1alpha1/definitions.html | 5 ++++- docs/api-reference/v1/definitions.html | 5 ++++- staging/src/k8s.io/api/core/v1/generated.proto | 7 +++++++ .../src/k8s.io/api/core/v1/types_swagger_doc_generated.go | 4 ++-- 21 files changed, 65 insertions(+), 31 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index a62df308c16..1e49bbbd1fd 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -77675,7 +77675,7 @@ } }, "io.k8s.api.core.v1.GitRepoVolumeSource": { - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], @@ -80895,7 +80895,7 @@ "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" }, "gitRepo": { - "description": "GitRepo represents a git repository at a particular revision.", + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource" }, "glusterfs": { diff --git a/api/swagger-spec/apps_v1.json b/api/swagger-spec/apps_v1.json index f951a05981d..dfc0297150f 100644 --- a/api/swagger-spec/apps_v1.json +++ b/api/swagger-spec/apps_v1.json @@ -6775,7 +6775,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -6956,7 +6956,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/api/swagger-spec/apps_v1beta1.json b/api/swagger-spec/apps_v1beta1.json index 3811b8905da..ed2a7db2fba 100644 --- a/api/swagger-spec/apps_v1beta1.json +++ b/api/swagger-spec/apps_v1beta1.json @@ -4409,7 +4409,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -4590,7 +4590,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/api/swagger-spec/apps_v1beta2.json b/api/swagger-spec/apps_v1beta2.json index 91c674f3c1a..4c81ca9c50a 100644 --- a/api/swagger-spec/apps_v1beta2.json +++ b/api/swagger-spec/apps_v1beta2.json @@ -6775,7 +6775,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -6956,7 +6956,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/api/swagger-spec/batch_v1.json b/api/swagger-spec/batch_v1.json index bb316ff8fe9..1fe7d2add63 100644 --- a/api/swagger-spec/batch_v1.json +++ b/api/swagger-spec/batch_v1.json @@ -1749,7 +1749,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -1930,7 +1930,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/api/swagger-spec/batch_v1beta1.json b/api/swagger-spec/batch_v1beta1.json index ccd0c6c5d8a..9bd8e5501a9 100644 --- a/api/swagger-spec/batch_v1beta1.json +++ b/api/swagger-spec/batch_v1beta1.json @@ -1804,7 +1804,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -1985,7 +1985,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/api/swagger-spec/batch_v2alpha1.json b/api/swagger-spec/batch_v2alpha1.json index 3c9f4227fcf..f83e193481c 100644 --- a/api/swagger-spec/batch_v2alpha1.json +++ b/api/swagger-spec/batch_v2alpha1.json @@ -1804,7 +1804,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -1985,7 +1985,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/api/swagger-spec/extensions_v1beta1.json b/api/swagger-spec/extensions_v1beta1.json index 4ab3384c071..e6337f0c443 100644 --- a/api/swagger-spec/extensions_v1beta1.json +++ b/api/swagger-spec/extensions_v1beta1.json @@ -7417,7 +7417,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -7598,7 +7598,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/api/swagger-spec/settings.k8s.io_v1alpha1.json b/api/swagger-spec/settings.k8s.io_v1alpha1.json index d4427baab0f..f6989fb5cd4 100644 --- a/api/swagger-spec/settings.k8s.io_v1alpha1.json +++ b/api/swagger-spec/settings.k8s.io_v1alpha1.json @@ -1587,7 +1587,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -1768,7 +1768,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. 
Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 7ac54ed33b1..a7c2114513c 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -20388,7 +20388,7 @@ }, "gitRepo": { "$ref": "v1.GitRepoVolumeSource", - "description": "GitRepo represents a git repository at a particular revision." + "description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." }, "secret": { "$ref": "v1.SecretVolumeSource", @@ -20496,7 +20496,7 @@ }, "v1.GitRepoVolumeSource": { "id": "v1.GitRepoVolumeSource", - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "required": [ "repository" ], diff --git a/docs/api-reference/apps/v1/definitions.html b/docs/api-reference/apps/v1/definitions.html index 68d17e7a575..283e2b8044f 100755 --- a/docs/api-reference/apps/v1/definitions.html +++ b/docs/api-reference/apps/v1/definitions.html @@ -1341,6 +1341,9 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+
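For readers of this patch, the replacement pattern that the new deprecation text describes can be sketched with the same core/v1 API types used throughout this series. This is an illustrative example only; the pod name, image names, repository URL, and mount path are placeholders, not values mandated by the API.

// Illustrative sketch of the EmptyDir + init container pattern that replaces
// gitRepo volumes. Image names, the repository URL, and paths are placeholders.
package example

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GitClonePod builds a Pod whose init container clones a repository into an
// emptyDir volume that the main container then mounts read-only.
func GitClonePod() *corev1.Pod {
    return &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "git-clone-example"},
        Spec: corev1.PodSpec{
            Volumes: []corev1.Volume{{
                Name:         "repo",
                VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
            }},
            InitContainers: []corev1.Container{{
                Name:         "clone",
                Image:        "alpine/git", // placeholder image
                Args:         []string{"clone", "https://example.com/repo.git", "/repo"}, // placeholder URL
                VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/repo"}},
            }},
            Containers: []corev1.Container{{
                Name:         "app",
                Image:        "busybox", // placeholder image
                VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/repo", ReadOnly: true}},
            }},
        },
    }
}

The init container performs the clone that the deprecated gitRepo volume plugin used to do, so the main container sees the checked-out tree at the shared mount path.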
@@ -3220,7 +3223,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/apps/v1beta1/definitions.html b/docs/api-reference/apps/v1beta1/definitions.html index 04d3a39f6d6..1785b9f517c 100755 --- a/docs/api-reference/apps/v1beta1/definitions.html +++ b/docs/api-reference/apps/v1beta1/definitions.html @@ -1273,6 +1273,9 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+

gitRepo

GitRepo represents a git repository at a particular revision.

GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

false

v1.GitRepoVolumeSource

@@ -3217,7 +3220,7 @@ The StatefulSet guarantees that a given network identity will always map to the - + diff --git a/docs/api-reference/apps/v1beta2/definitions.html b/docs/api-reference/apps/v1beta2/definitions.html index 2221a7e1464..9ecb9b68c99 100755 --- a/docs/api-reference/apps/v1beta2/definitions.html +++ b/docs/api-reference/apps/v1beta2/definitions.html @@ -1454,6 +1454,9 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+

gitRepo

GitRepo represents a git repository at a particular revision.

GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

false

v1.GitRepoVolumeSource

@@ -3923,7 +3926,7 @@ The StatefulSet guarantees that a given network identity will always map to the - + diff --git a/docs/api-reference/batch/v1/definitions.html b/docs/api-reference/batch/v1/definitions.html index 4e96f025f6a..84f6054f57f 100755 --- a/docs/api-reference/batch/v1/definitions.html +++ b/docs/api-reference/batch/v1/definitions.html @@ -1028,6 +1028,9 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+

gitRepo

GitRepo represents a git repository at a particular revision.

GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

false

v1.GitRepoVolumeSource

@@ -2587,7 +2590,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/batch/v1beta1/definitions.html b/docs/api-reference/batch/v1beta1/definitions.html index c8ce2f1f2f7..90f77d57af7 100755 --- a/docs/api-reference/batch/v1beta1/definitions.html +++ b/docs/api-reference/batch/v1beta1/definitions.html @@ -1069,6 +1069,9 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+

gitRepo

GitRepo represents a git repository at a particular revision.

GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

false

v1.GitRepoVolumeSource

@@ -2621,7 +2624,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/batch/v2alpha1/definitions.html b/docs/api-reference/batch/v2alpha1/definitions.html index 5926d49603a..1e278a4ff73 100755 --- a/docs/api-reference/batch/v2alpha1/definitions.html +++ b/docs/api-reference/batch/v2alpha1/definitions.html @@ -1028,6 +1028,9 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+

gitRepo

GitRepo represents a git repository at a particular revision.

GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

false

v1.GitRepoVolumeSource

@@ -2594,7 +2597,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/extensions/v1beta1/definitions.html b/docs/api-reference/extensions/v1beta1/definitions.html index d2969deece6..0ebbaa26929 100755 --- a/docs/api-reference/extensions/v1beta1/definitions.html +++ b/docs/api-reference/extensions/v1beta1/definitions.html @@ -1746,6 +1746,9 @@ Examples: /foo would allow /foo, /foo/ an

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+

gitRepo

GitRepo represents a git repository at a particular revision.

GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

false

v1.GitRepoVolumeSource

@@ -3801,7 +3804,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/settings.k8s.io/v1alpha1/definitions.html b/docs/api-reference/settings.k8s.io/v1alpha1/definitions.html index e048b45f835..a8610242a6c 100755 --- a/docs/api-reference/settings.k8s.io/v1alpha1/definitions.html +++ b/docs/api-reference/settings.k8s.io/v1alpha1/definitions.html @@ -1370,6 +1370,9 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+

gitRepo

GitRepo represents a git repository at a particular revision.

GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

false

v1.GitRepoVolumeSource

@@ -3365,7 +3368,7 @@ When an object is created, the system will populate this list with the current s - + diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index ac244a75f50..69609a2a628 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -967,6 +967,9 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.

+
+

DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

+

gitRepo

GitRepo represents a git repository at a particular revision.

GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container.

false

v1.GitRepoVolumeSource

@@ -8570,7 +8573,7 @@ Examples:
- + diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index a92d7392c6e..ffb9bd53ad1 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -1318,6 +1318,10 @@ message GCEPersistentDiskVolumeSource { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. +// +// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +// into the Pod's container. message GitRepoVolumeSource { // Repository URL optional string repository = 1; @@ -4413,6 +4417,9 @@ message VolumeSource { optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. // +optional optional GitRepoVolumeSource gitRepo = 5; diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index 567f37dd22d..751835290ca 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -672,7 +672,7 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { } var map_GitRepoVolumeSource = map[string]string{ - "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "repository": "Repository URL", "revision": "Commit hash for the specified revision.", "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", @@ -2188,7 +2188,7 @@ var map_VolumeSource = map[string]string{ "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "gitRepo": "GitRepo represents a git repository at a particular revision.", + "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", From dc9e3f1b3e9e557e2676393b460c1eb43a097282 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Thu, 17 May 2018 12:47:27 -0700 Subject: [PATCH 292/307] svcacct: validate min and max expiration seconds on TokenRequest --- pkg/apis/authentication/BUILD | 1 + pkg/apis/authentication/validation/BUILD | 26 +++++++++++ .../authentication/validation/validation.go | 41 +++++++++++++++++ .../core/serviceaccount/storage/BUILD | 1 + .../core/serviceaccount/storage/token.go | 19 +++++--- test/integration/auth/BUILD | 1 + test/integration/auth/svcaccttoken_test.go | 46 ++++++++++++------- 7 files changed, 113 insertions(+), 22 deletions(-) create mode 100644 pkg/apis/authentication/validation/BUILD create mode 100644 pkg/apis/authentication/validation/validation.go diff --git a/pkg/apis/authentication/BUILD b/pkg/apis/authentication/BUILD index 1d09c7766db..1781f634232 100644 --- a/pkg/apis/authentication/BUILD +++ b/pkg/apis/authentication/BUILD @@ -37,6 +37,7 @@ filegroup( "//pkg/apis/authentication/install:all-srcs", "//pkg/apis/authentication/v1:all-srcs", "//pkg/apis/authentication/v1beta1:all-srcs", + "//pkg/apis/authentication/validation:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/apis/authentication/validation/BUILD b/pkg/apis/authentication/validation/BUILD new file mode 100644 index 00000000000..3192543f9c7 --- /dev/null +++ b/pkg/apis/authentication/validation/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["validation.go"], + importpath = "k8s.io/kubernetes/pkg/apis/authentication/validation", + visibility = ["//visibility:public"], + deps = [ + "//pkg/apis/authentication:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/apis/authentication/validation/validation.go b/pkg/apis/authentication/validation/validation.go new file mode 100644 index 00000000000..b174ddfc1e9 --- /dev/null +++ b/pkg/apis/authentication/validation/validation.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package validation contains methods to validate kinds in the +// authentication.k8s.io API group. +package validation + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/authentication" +) + +// ValidateTokenRequest validates a TokenRequest. +func ValidateTokenRequest(tr *authentication.TokenRequest) field.ErrorList { + allErrs := field.ErrorList{} + specPath := field.NewPath("spec") + + const min = 10 * time.Minute + if tr.Spec.ExpirationSeconds < int64(min.Seconds()) { + allErrs = append(allErrs, field.Invalid(specPath.Child("expirationSeconds"), tr.Spec.ExpirationSeconds, "may not specify a duration less than 10 minutes")) + } + if tr.Spec.ExpirationSeconds > 1<<32 { + allErrs = append(allErrs, field.Invalid(specPath.Child("expirationSeconds"), tr.Spec.ExpirationSeconds, "may not specify a duration larger than 2^32 seconds")) + } + return allErrs +} diff --git a/pkg/registry/core/serviceaccount/storage/BUILD b/pkg/registry/core/serviceaccount/storage/BUILD index 1469f663888..d74dcd4db52 100644 --- a/pkg/registry/core/serviceaccount/storage/BUILD +++ b/pkg/registry/core/serviceaccount/storage/BUILD @@ -32,6 +32,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage", deps = [ "//pkg/apis/authentication:go_default_library", + "//pkg/apis/authentication/validation:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", diff --git a/pkg/registry/core/serviceaccount/storage/token.go b/pkg/registry/core/serviceaccount/storage/token.go index 32df9503f10..e1a94f6e50f 100644 --- a/pkg/registry/core/serviceaccount/storage/token.go +++ b/pkg/registry/core/serviceaccount/storage/token.go @@ -29,6 +29,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" authenticationapi "k8s.io/kubernetes/pkg/apis/authentication" + authenticationvalidation "k8s.io/kubernetes/pkg/apis/authentication/validation" api "k8s.io/kubernetes/pkg/apis/core" token "k8s.io/kubernetes/pkg/serviceaccount" ) @@ -48,6 +49,12 @@ type TokenREST struct { var _ = rest.NamedCreater(&TokenREST{}) var _ = rest.GroupVersionKindProvider(&TokenREST{}) +var gvk = schema.GroupVersionKind{ + Group: authenticationapiv1.SchemeGroupVersion.Group, + Version: authenticationapiv1.SchemeGroupVersion.Version, + Kind: "TokenRequest", +} + func (r *TokenREST) Create(ctx context.Context, name string, obj runtime.Object, createValidation rest.ValidateObjectFunc, includeUninitialized bool) (runtime.Object, error) { if err := createValidation(obj); err != nil { return nil, err @@ -55,6 +62,10 @@ func (r *TokenREST) Create(ctx context.Context, name string, obj runtime.Object, out := obj.(*authenticationapi.TokenRequest) + if errs := authenticationvalidation.ValidateTokenRequest(out); len(errs) != 0 { + return nil, errors.NewInvalid(gvk.GroupKind(), "", errs) + } + svcacctObj, err := r.svcaccts.Get(ctx, name, &metav1.GetOptions{}) if err != nil { return nil, err @@ -113,12 +124,8 @@ func (r *TokenREST) Create(ctx context.Context, name string, obj runtime.Object, return out, nil } -func (r *TokenREST) GroupVersionKind(containingGV schema.GroupVersion) schema.GroupVersionKind { - return schema.GroupVersionKind{ - Group: authenticationapiv1.SchemeGroupVersion.Group, - Version: authenticationapiv1.SchemeGroupVersion.Version, - Kind: "TokenRequest", - } +func (r *TokenREST) GroupVersionKind(schema.GroupVersion) 
schema.GroupVersionKind { + return gvk } type getter interface { diff --git a/test/integration/auth/BUILD b/test/integration/auth/BUILD index 619c3df4a09..e5ed620905f 100644 --- a/test/integration/auth/BUILD +++ b/test/integration/auth/BUILD @@ -51,6 +51,7 @@ go_test( "//test/integration:go_default_library", "//test/integration/framework:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library", "//vendor/k8s.io/api/authentication/v1:go_default_library", "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/test/integration/auth/svcaccttoken_test.go b/test/integration/auth/svcaccttoken_test.go index 65edcc44dc5..ac7ba22edae 100644 --- a/test/integration/auth/svcaccttoken_test.go +++ b/test/integration/auth/svcaccttoken_test.go @@ -24,17 +24,20 @@ import ( "testing" "time" + "gopkg.in/square/go-jose.v2/jwt" authenticationv1 "k8s.io/api/authentication/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/authentication/request/bearertoken" + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authorization/authorizerfactory" utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" clientset "k8s.io/client-go/kubernetes" externalclientset "k8s.io/client-go/kubernetes" certutil "k8s.io/client-go/util/cert" + "k8s.io/kubernetes/pkg/apis/core" serviceaccountgetter "k8s.io/kubernetes/pkg/controller/serviceaccount" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/serviceaccount" @@ -118,7 +121,6 @@ func TestServiceAccountTokenCreate(t *testing.T) { }, } - one = int64(1) wrongUID = types.UID("wrong") noUID = types.UID("") ) @@ -126,8 +128,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Run("bound to service account", func(t *testing.T) { treq := &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - Audiences: []string{"api"}, - ExpirationSeconds: &one, + Audiences: []string{"api"}, }, } @@ -157,8 +158,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Run("bound to service account and pod", func(t *testing.T) { treq := &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - Audiences: []string{"api"}, - ExpirationSeconds: &one, + Audiences: []string{"api"}, BoundObjectRef: &authenticationv1.BoundObjectReference{ Kind: "Pod", APIVersion: "v1", @@ -211,8 +211,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Run("bound to service account and secret", func(t *testing.T) { treq := &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - Audiences: []string{"api"}, - ExpirationSeconds: &one, + Audiences: []string{"api"}, BoundObjectRef: &authenticationv1.BoundObjectReference{ Kind: "Secret", APIVersion: "v1", @@ -266,8 +265,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Run("bound to service account and pod running as different service account", func(t *testing.T) { treq := &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - Audiences: []string{"api"}, - ExpirationSeconds: &one, + Audiences: []string{"api"}, BoundObjectRef: &authenticationv1.BoundObjectReference{ Kind: "Pod", APIVersion: "v1", @@ -289,8 +287,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Run("expired token", func(t *testing.T) { treq := &authenticationv1.TokenRequest{ Spec: 
authenticationv1.TokenRequestSpec{ - Audiences: []string{"api"}, - ExpirationSeconds: &one, + Audiences: []string{"api"}, }, } @@ -303,7 +300,26 @@ func TestServiceAccountTokenCreate(t *testing.T) { } doTokenReview(t, cs, treq, false) - time.Sleep(63 * time.Second) + + // backdate the token + then := time.Now().Add(-2 * time.Hour) + sc := &jwt.Claims{ + Subject: apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name), + Audience: jwt.Audience([]string{"api"}), + IssuedAt: jwt.NewNumericDate(then), + NotBefore: jwt.NewNumericDate(then), + Expiry: jwt.NewNumericDate(then.Add(time.Duration(60*60) * time.Second)), + } + coresa := core.ServiceAccount{ + ObjectMeta: sa.ObjectMeta, + } + _, pc := serviceaccount.Claims(coresa, nil, nil, 0, nil) + tok, err := masterConfig.ExtraConfig.ServiceAccountIssuer.GenerateToken(sc, pc) + if err != nil { + t.Fatalf("err signing expired token: %v", err) + } + + treq.Status.Token = tok doTokenReview(t, cs, treq, true) }) @@ -346,8 +362,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Run("a token should be invalid after recreating same name pod", func(t *testing.T) { treq := &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - Audiences: []string{"api"}, - ExpirationSeconds: &one, + Audiences: []string{"api"}, BoundObjectRef: &authenticationv1.BoundObjectReference{ Kind: "Pod", APIVersion: "v1", @@ -386,8 +401,7 @@ func TestServiceAccountTokenCreate(t *testing.T) { t.Run("a token should be invalid after recreating same name secret", func(t *testing.T) { treq := &authenticationv1.TokenRequest{ Spec: authenticationv1.TokenRequestSpec{ - Audiences: []string{"api"}, - ExpirationSeconds: &one, + Audiences: []string{"api"}, BoundObjectRef: &authenticationv1.BoundObjectReference{ Kind: "Secret", APIVersion: "v1", From 73a22b2e611647de04aa8d7fe910fd4657e6a9d8 Mon Sep 17 00:00:00 2001 From: xuzhonghu Date: Mon, 14 May 2018 16:19:38 +0800 Subject: [PATCH 293/307] Support dynamicly set logging verbosity --- .../src/k8s.io/apiserver/pkg/server/config.go | 12 ++ .../pkg/server/genericapiserver_test.go | 1 + .../k8s.io/apiserver/pkg/server/routes/BUILD | 1 + .../apiserver/pkg/server/routes/flags.go | 126 ++++++++++++++++++ 4 files changed, 140 insertions(+) create mode 100644 staging/src/k8s.io/apiserver/pkg/server/routes/flags.go diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index eb1c27d141f..b4724481344 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -65,6 +65,7 @@ import ( openapicommon "k8s.io/kube-openapi/pkg/common" // install apis + "github.com/golang/glog" _ "k8s.io/apiserver/pkg/apis/apiserver/install" ) @@ -575,6 +576,16 @@ func installAPI(s *GenericAPIServer, c *Config) { if c.EnableContentionProfiling { goruntime.SetBlockProfileRate(1) } + // so far, only logging related endpoints are considered valid to add for these debug flags. 
+ routes.DebugFlags{}.Install(s.Handler.NonGoRestfulMux, "v", routes.StringFlagPutHandler( + routes.StringFlagSetterFunc(func(val string) (string, error) { + var level glog.Level + if err := level.Set(val); err != nil { + return "", fmt.Errorf("failed set glog.logging.verbosity %s: %v", val, err) + } + return "successfully set glog.logging.verbosity to " + val, nil + }), + )) } if c.EnableMetrics { if c.EnableProfiling { @@ -583,6 +594,7 @@ func installAPI(s *GenericAPIServer, c *Config) { routes.DefaultMetrics{}.Install(s.Handler.NonGoRestfulMux) } } + routes.Version{Version: c.Version}.Install(s.Handler.GoRestfulContainer) if c.EnableDiscovery { diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go index b68b30982e7..650374434c8 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver_test.go @@ -385,6 +385,7 @@ func TestNotRestRoutesHaveAuth(t *testing.T) { {"/"}, {"/swagger-ui/"}, {"/debug/pprof/"}, + {"/debug/flags/"}, {"/version"}, } { resp := httptest.NewRecorder() diff --git a/staging/src/k8s.io/apiserver/pkg/server/routes/BUILD b/staging/src/k8s.io/apiserver/pkg/server/routes/BUILD index e7ffd916bfd..9dd38395648 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/routes/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/routes/BUILD @@ -9,6 +9,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "flags.go", "index.go", "metrics.go", "openapi.go", diff --git a/staging/src/k8s.io/apiserver/pkg/server/routes/flags.go b/staging/src/k8s.io/apiserver/pkg/server/routes/flags.go new file mode 100644 index 00000000000..d40f11499b3 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/server/routes/flags.go @@ -0,0 +1,126 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package routes + +import ( + "fmt" + "html/template" + "io/ioutil" + "net/http" + "path" + "sync" + + "github.com/golang/glog" + + "k8s.io/apiserver/pkg/server/mux" +) + +var ( + lock = &sync.RWMutex{} + registeredFlags = map[string]debugFlag{} +) + +// DebugFlags adds handlers for flags under /debug/flags. +type DebugFlags struct { +} + +// Install registers the APIServer's flags handler. +func (f DebugFlags) Install(c *mux.PathRecorderMux, flag string, handler func(http.ResponseWriter, *http.Request)) { + c.UnlistedHandle("/debug/flags", http.HandlerFunc(f.Index)) + c.UnlistedHandlePrefix("/debug/flags/", http.HandlerFunc(f.Index)) + + url := path.Join("/debug/flags", flag) + c.UnlistedHandleFunc(url, handler) + + f.addFlag(flag) +} + +// Index responds with the `/debug/flags` request. +// For example, "/debug/flags/v" serves the "--v" flag. +// Index responds to a request for "/debug/flags/" with an HTML page +// listing the available flags. 
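As a usage sketch that is not part of the patch itself: the wiring in installAPI above registers a PUT-only handler at /debug/flags/v whose request body is the new verbosity value, so a client could exercise it roughly as follows. The helper name, base URL handling, TLS, and credentials are placeholders left to the caller.

package example

import (
    "net/http"
    "strings"
)

// SetVerbosity is a hypothetical helper: it PUTs the desired glog verbosity
// (e.g. "4") to the /debug/flags/v endpoint registered by installAPI above.
// Authentication and TLS setup are omitted and left to the caller.
func SetVerbosity(client *http.Client, baseURL, level string) error {
    req, err := http.NewRequest(http.MethodPut, baseURL+"/debug/flags/v", strings.NewReader(level))
    if err != nil {
        return err
    }
    resp, err := client.Do(req)
    if err != nil {
        return err
    }
    // StringFlagPutHandler answers with a plain-text confirmation on success.
    return resp.Body.Close()
}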
+func (f DebugFlags) Index(w http.ResponseWriter, r *http.Request) { + lock.RLock() + defer lock.RUnlock() + if err := indexTmpl.Execute(w, registeredFlags); err != nil { + glog.Error(err) + } +} + +var indexTmpl = template.Must(template.New("index").Parse(` + +/debug/flags/ + + +/debug/flags/
+
+flags:
+

+{{range .}} +{{.Flag}}
+{{end}} +
+
+full flags configurable
+ + +`)) + +type debugFlag struct { + Flag string +} + +func (f DebugFlags) addFlag(flag string) { + lock.Lock() + defer lock.Unlock() + registeredFlags[flag] = debugFlag{flag} +} + +// StringFlagSetterFunc is a func used for setting string type flag. +type StringFlagSetterFunc func(string) (string, error) + +// StringFlagPutHandler wraps an http Handler to set string type flag. +func StringFlagPutHandler(setter StringFlagSetterFunc) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + switch { + case req.Method == "PUT": + body, err := ioutil.ReadAll(req.Body) + if err != nil { + writePlainText(http.StatusBadRequest, "error reading request body: "+err.Error(), w) + return + } + defer req.Body.Close() + response, err := setter(string(body)) + if err != nil { + writePlainText(http.StatusBadRequest, err.Error(), w) + return + } + writePlainText(http.StatusOK, response, w) + return + default: + writePlainText(http.StatusNotAcceptable, "unsupported http method", w) + return + } + }) +} + +// writePlainText renders a simple string response. +func writePlainText(statusCode int, text string, w http.ResponseWriter) { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(statusCode) + fmt.Fprintln(w, text) +} From f15a0fc0dae7cb73f8342ea5b46705a1964ced6b Mon Sep 17 00:00:00 2001 From: Guoliang Wang Date: Mon, 14 May 2018 14:58:18 +0800 Subject: [PATCH 294/307] use subtest for table units (pkg/master) --- pkg/master/client_ca_hook_test.go | 22 ++--- .../crdregistration_controller_test.go | 36 +++---- pkg/master/reconcilers/lease_test.go | 94 ++++++++++--------- pkg/master/tunneler/ssh_test.go | 89 ++++++++++++------ 4 files changed, 137 insertions(+), 104 deletions(-) diff --git a/pkg/master/client_ca_hook_test.go b/pkg/master/client_ca_hook_test.go index 64aa61712c1..011078bddb9 100644 --- a/pkg/master/client_ca_hook_test.go +++ b/pkg/master/client_ca_hook_test.go @@ -189,18 +189,18 @@ func TestWriteClientCAs(t *testing.T) { } for _, test := range tests { - client := fake.NewSimpleClientset(test.preexistingObjs...) - test.hook.tryToWriteClientCAs(client.Core()) + t.Run(test.name, func(t *testing.T) { + client := fake.NewSimpleClientset(test.preexistingObjs...) 
+ test.hook.tryToWriteClientCAs(client.Core()) - actualConfigMaps, updated := getFinalConfiMaps(client) - if !reflect.DeepEqual(test.expectedConfigMaps, actualConfigMaps) { - t.Errorf("%s: %v", test.name, diff.ObjectReflectDiff(test.expectedConfigMaps, actualConfigMaps)) - continue - } - if test.expectUpdate != updated { - t.Errorf("%s: expected %v, got %v", test.name, test.expectUpdate, updated) - continue - } + actualConfigMaps, updated := getFinalConfiMaps(client) + if !reflect.DeepEqual(test.expectedConfigMaps, actualConfigMaps) { + t.Fatalf("%s: %v", test.name, diff.ObjectReflectDiff(test.expectedConfigMaps, actualConfigMaps)) + } + if test.expectUpdate != updated { + t.Fatalf("%s: expected %v, got %v", test.name, test.expectUpdate, updated) + } + }) } } diff --git a/pkg/master/controller/crdregistration/crdregistration_controller_test.go b/pkg/master/controller/crdregistration/crdregistration_controller_test.go index 1e2d8df5879..070620e792f 100644 --- a/pkg/master/controller/crdregistration/crdregistration_controller_test.go +++ b/pkg/master/controller/crdregistration/crdregistration_controller_test.go @@ -78,25 +78,27 @@ func TestHandleVersionUpdate(t *testing.T) { } for _, test := range tests { - registration := &fakeAPIServiceRegistration{} - crdCache := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - crdLister := crdlisters.NewCustomResourceDefinitionLister(crdCache) - c := crdRegistrationController{ - crdLister: crdLister, - apiServiceRegistration: registration, - } - for i := range test.startingCRDs { - crdCache.Add(test.startingCRDs[i]) - } + t.Run(test.name, func(t *testing.T) { + registration := &fakeAPIServiceRegistration{} + crdCache := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + crdLister := crdlisters.NewCustomResourceDefinitionLister(crdCache) + c := crdRegistrationController{ + crdLister: crdLister, + apiServiceRegistration: registration, + } + for i := range test.startingCRDs { + crdCache.Add(test.startingCRDs[i]) + } - c.handleVersionUpdate(test.version) + c.handleVersionUpdate(test.version) - if !reflect.DeepEqual(test.expectedAdded, registration.added) { - t.Errorf("%s expected %v, got %v", test.name, test.expectedAdded, registration.added) - } - if !reflect.DeepEqual(test.expectedRemoved, registration.removed) { - t.Errorf("%s expected %v, got %v", test.name, test.expectedRemoved, registration.removed) - } + if !reflect.DeepEqual(test.expectedAdded, registration.added) { + t.Errorf("%s expected %v, got %v", test.name, test.expectedAdded, registration.added) + } + if !reflect.DeepEqual(test.expectedRemoved, registration.removed) { + t.Errorf("%s expected %v, got %v", test.name, test.expectedRemoved, registration.removed) + } + }) } } diff --git a/pkg/master/reconcilers/lease_test.go b/pkg/master/reconcilers/lease_test.go index e16612e255e..97000d39fba 100644 --- a/pkg/master/reconcilers/lease_test.go +++ b/pkg/master/reconcilers/lease_test.go @@ -509,29 +509,31 @@ func TestLeaseEndpointReconciler(t *testing.T) { }, } for _, test := range nonReconcileTests { - fakeLeases := newFakeLeases() - fakeLeases.SetKeys(test.endpointKeys) - registry := ®istrytest.EndpointRegistry{ - Endpoints: test.endpoints, - } - r := NewLeaseEndpointReconciler(registry, fakeLeases) - err := r.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false) - if err != nil { - t.Errorf("case %q: 
unexpected error: %v", test.testName, err) - } - if test.expectUpdate != nil { - if len(registry.Updates) != 1 { - t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates) - } else if e, a := test.expectUpdate, ®istry.Updates[0]; !reflect.DeepEqual(e, a) { - t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a) + t.Run(test.testName, func(t *testing.T) { + fakeLeases := newFakeLeases() + fakeLeases.SetKeys(test.endpointKeys) + registry := ®istrytest.EndpointRegistry{ + Endpoints: test.endpoints, } - } - if test.expectUpdate == nil && len(registry.Updates) > 0 { - t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates) - } - if updatedKeys := fakeLeases.GetUpdatedKeys(); len(updatedKeys) != 1 || updatedKeys[0] != test.ip { - t.Errorf("case %q: expected the master's IP to be refreshed, but the following IPs were refreshed instead: %v", test.testName, updatedKeys) - } + r := NewLeaseEndpointReconciler(registry, fakeLeases) + err := r.ReconcileEndpoints(test.serviceName, net.ParseIP(test.ip), test.endpointPorts, false) + if err != nil { + t.Errorf("case %q: unexpected error: %v", test.testName, err) + } + if test.expectUpdate != nil { + if len(registry.Updates) != 1 { + t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates) + } else if e, a := test.expectUpdate, ®istry.Updates[0]; !reflect.DeepEqual(e, a) { + t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a) + } + } + if test.expectUpdate == nil && len(registry.Updates) > 0 { + t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates) + } + if updatedKeys := fakeLeases.GetUpdatedKeys(); len(updatedKeys) != 1 || updatedKeys[0] != test.ip { + t.Errorf("case %q: expected the master's IP to be refreshed, but the following IPs were refreshed instead: %v", test.testName, updatedKeys) + } + }) } } @@ -604,30 +606,32 @@ func TestLeaseStopReconciling(t *testing.T) { }, } for _, test := range stopTests { - fakeLeases := newFakeLeases() - fakeLeases.SetKeys(test.endpointKeys) - registry := ®istrytest.EndpointRegistry{ - Endpoints: test.endpoints, - } - r := NewLeaseEndpointReconciler(registry, fakeLeases) - err := r.StopReconciling(test.serviceName, net.ParseIP(test.ip), test.endpointPorts) - if err != nil { - t.Errorf("case %q: unexpected error: %v", test.testName, err) - } - if test.expectUpdate != nil { - if len(registry.Updates) != 1 { - t.Errorf("case %q: unexpected updates: %v", test.testName, registry.Updates) - } else if e, a := test.expectUpdate, ®istry.Updates[0]; !reflect.DeepEqual(e, a) { - t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a) + t.Run(test.testName, func(t *testing.T) { + fakeLeases := newFakeLeases() + fakeLeases.SetKeys(test.endpointKeys) + registry := ®istrytest.EndpointRegistry{ + Endpoints: test.endpoints, } - } - if test.expectUpdate == nil && len(registry.Updates) > 0 { - t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates) - } - for _, key := range fakeLeases.GetUpdatedKeys() { - if key == test.ip { - t.Errorf("case %q: Found ip %s in leases but shouldn't be there", test.testName, key) + r := NewLeaseEndpointReconciler(registry, fakeLeases) + err := r.StopReconciling(test.serviceName, net.ParseIP(test.ip), test.endpointPorts) + if err != nil { + t.Errorf("case %q: unexpected error: %v", test.testName, err) } - } + if test.expectUpdate != nil { + if len(registry.Updates) != 1 { + t.Errorf("case %q: unexpected 
updates: %v", test.testName, registry.Updates) + } else if e, a := test.expectUpdate, ®istry.Updates[0]; !reflect.DeepEqual(e, a) { + t.Errorf("case %q: expected update:\n%#v\ngot:\n%#v\n", test.testName, e, a) + } + } + if test.expectUpdate == nil && len(registry.Updates) > 0 { + t.Errorf("case %q: no update expected, yet saw: %v", test.testName, registry.Updates) + } + for _, key := range fakeLeases.GetUpdatedKeys() { + if key == test.ip { + t.Errorf("case %q: Found ip %s in leases but shouldn't be there", test.testName, key) + } + } + }) } } diff --git a/pkg/master/tunneler/ssh_test.go b/pkg/master/tunneler/ssh_test.go index 1b1f4005809..d69fb94ba80 100644 --- a/pkg/master/tunneler/ssh_test.go +++ b/pkg/master/tunneler/ssh_test.go @@ -32,39 +32,66 @@ import ( // TestSecondsSinceSync verifies that proper results are returned // when checking the time between syncs func TestSecondsSinceSync(t *testing.T) { - tunneler := &SSHTunneler{} - assert := assert.New(t) + tests := []struct { + name string + lastSync int64 + clock *clock.FakeClock + want int64 + }{ + { + name: "Nano Second. No difference", + lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(), + clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)), + want: int64(0), + }, + { + name: "Second", + lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(), + clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC)), + want: int64(1), + }, + { + name: "Minute", + lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(), + clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC)), + want: int64(60), + }, + { + name: "Hour", + lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(), + clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC)), + want: int64(3600), + }, + { + name: "Day", + lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(), + clock: clock.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC)), + want: int64(86400), + }, + { + name: "Month", + lastSync: time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix(), + clock: clock.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC)), + want: int64(2678400), + }, + { + name: "Future Month. Should be -Month", + lastSync: time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC).Unix(), + clock: clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)), + want: int64(-2678400), + }, + } - tunneler.lastSync = time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC).Unix() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tunneler := &SSHTunneler{} + assert := assert.New(t) + tunneler.lastSync = tt.lastSync + tunneler.clock = tt.clock + assert.Equal(int64(tt.want), tunneler.SecondsSinceSync()) + }) + } - // Nano Second. No difference. 
- tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 2, time.UTC)) - assert.Equal(int64(0), tunneler.SecondsSinceSync()) - - // Second - tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 2, 1, time.UTC)) - assert.Equal(int64(1), tunneler.SecondsSinceSync()) - - // Minute - tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 2, 1, 1, time.UTC)) - assert.Equal(int64(60), tunneler.SecondsSinceSync()) - - // Hour - tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 2, 1, 1, 1, time.UTC)) - assert.Equal(int64(3600), tunneler.SecondsSinceSync()) - - // Day - tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 2, 1, 1, 1, 1, time.UTC)) - assert.Equal(int64(86400), tunneler.SecondsSinceSync()) - - // Month - tunneler.clock = clock.NewFakeClock(time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC)) - assert.Equal(int64(2678400), tunneler.SecondsSinceSync()) - - // Future Month. Should be -Month. - tunneler.lastSync = time.Date(2015, time.February, 1, 1, 1, 1, 1, time.UTC).Unix() - tunneler.clock = clock.NewFakeClock(time.Date(2015, time.January, 1, 1, 1, 1, 1, time.UTC)) - assert.Equal(int64(-2678400), tunneler.SecondsSinceSync()) } // generateTempFile creates a temporary file path From 89dd2b2807f287f79cdb1a25115f94b21d3f3211 Mon Sep 17 00:00:00 2001 From: hangaoshuai Date: Thu, 31 May 2018 13:43:04 +0800 Subject: [PATCH 295/307] remove unused code in kubeadm error.go --- cmd/kubeadm/app/util/error.go | 10 +++------- cmd/kubeadm/app/util/error_test.go | 2 +- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/cmd/kubeadm/app/util/error.go b/cmd/kubeadm/app/util/error.go index 58a892dd191..4eaf984fd4e 100644 --- a/cmd/kubeadm/app/util/error.go +++ b/cmd/kubeadm/app/util/error.go @@ -33,10 +33,6 @@ const ( ValidationExitCode = 3 ) -type debugError interface { - DebugError() (msg string, args []interface{}) -} - // fatal prints the message if set and then exits. func fatal(msg string, code int) { if len(msg) > 0 { @@ -56,7 +52,7 @@ func fatal(msg string, code int) { // This method is generic to the command in use and may be used by non-Kubectl // commands. func CheckErr(err error) { - checkErr("", err, fatal) + checkErr(err, fatal) } // preflightError allows us to know if the error is a preflight error or not @@ -66,8 +62,8 @@ type preflightError interface { } // checkErr formats a given error as a string and calls the passed handleErr -// func with that string and an kubectl exit code. -func checkErr(prefix string, err error, handleErr func(string, int)) { +// func with that string and an exit code. 
+func checkErr(err error, handleErr func(string, int)) { switch err.(type) { case nil: return diff --git a/cmd/kubeadm/app/util/error_test.go b/cmd/kubeadm/app/util/error_test.go index 94f131babae..20c4fdb8fb3 100644 --- a/cmd/kubeadm/app/util/error_test.go +++ b/cmd/kubeadm/app/util/error_test.go @@ -42,7 +42,7 @@ func TestCheckErr(t *testing.T) { for _, rt := range tokenTest { codeReturned = 0 - checkErr("", rt.e, errHandle) + checkErr(rt.e, errHandle) if codeReturned != rt.expected { t.Errorf( "failed checkErr:\n\texpected: %d\n\t actual: %d", From df3f1ff39093e168a6b3b9b8ee18bc60e641bb91 Mon Sep 17 00:00:00 2001 From: hangaoshuai Date: Thu, 31 May 2018 16:35:26 +0800 Subject: [PATCH 296/307] kubeadm uses its own scheme instead of kubectl scheme --- cmd/kubeadm/app/util/audit/BUILD | 7 +++++-- cmd/kubeadm/app/util/audit/utils.go | 16 +++++++++++----- cmd/kubeadm/app/util/audit/utils_test.go | 8 ++++++-- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/cmd/kubeadm/app/util/audit/BUILD b/cmd/kubeadm/app/util/audit/BUILD index 7f5453fab3a..cd3098c00f2 100644 --- a/cmd/kubeadm/app/util/audit/BUILD +++ b/cmd/kubeadm/app/util/audit/BUILD @@ -8,8 +8,10 @@ go_library( deps = [ "//cmd/kubeadm/app/util:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/audit/install:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", ], ) @@ -18,8 +20,9 @@ go_test( srcs = ["utils_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/kubectl/scheme:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/apis/audit/install:go_default_library", "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", ], ) diff --git a/cmd/kubeadm/app/util/audit/utils.go b/cmd/kubeadm/app/util/audit/utils.go index 688d8f0f614..8155d096b18 100644 --- a/cmd/kubeadm/app/util/audit/utils.go +++ b/cmd/kubeadm/app/util/audit/utils.go @@ -23,8 +23,10 @@ import ( "path/filepath" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apiserver/pkg/apis/audit/install" auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/kubernetes/cmd/kubeadm/app/util" ) @@ -32,7 +34,7 @@ import ( func CreateDefaultAuditLogPolicy(policyFile string) error { policy := auditv1beta1.Policy{ TypeMeta: metav1.TypeMeta{ - APIVersion: "audit.k8s.io/v1beta1", + APIVersion: auditv1beta1.SchemeGroupVersion.String(), Kind: "Policy", }, Rules: []auditv1beta1.PolicyRule{ @@ -50,11 +52,15 @@ func writePolicyToDisk(policyFile string, policy *auditv1beta1.Policy) error { return fmt.Errorf("failed to create directory %q: %v", filepath.Dir(policyFile), err) } - // Registers auditv1beta1 with the runtime Scheme - auditv1beta1.AddToScheme(scheme.Scheme) + scheme := runtime.NewScheme() + // Registers the API group with the scheme and adds types to a scheme + install.Install(scheme) + + codecs := serializer.NewCodecFactory(scheme) // writes the policy to disk - serialized, err := util.MarshalToYaml(policy, auditv1beta1.SchemeGroupVersion) + serialized, err := 
util.MarshalToYamlForCodecs(policy, auditv1beta1.SchemeGroupVersion, codecs) + if err != nil { return fmt.Errorf("failed to marshal audit policy to YAML: %v", err) } diff --git a/cmd/kubeadm/app/util/audit/utils_test.go b/cmd/kubeadm/app/util/audit/utils_test.go index 113488a752f..01fde9eb8de 100644 --- a/cmd/kubeadm/app/util/audit/utils_test.go +++ b/cmd/kubeadm/app/util/audit/utils_test.go @@ -23,8 +23,9 @@ import ( "testing" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apiserver/pkg/apis/audit/install" auditv1beta1 "k8s.io/apiserver/pkg/apis/audit/v1beta1" - "k8s.io/kubernetes/pkg/kubectl/scheme" ) func cleanup(t *testing.T, path string) { @@ -50,8 +51,11 @@ func TestCreateDefaultAuditLogPolicy(t *testing.T) { if err != nil { t.Fatalf("failed to read %v: %v", auditPolicyFile, err) } + scheme := runtime.NewScheme() + install.Install(scheme) + codecs := serializer.NewCodecFactory(scheme) policy := auditv1beta1.Policy{} - err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), policyBytes, &policy) + err = runtime.DecodeInto(codecs.UniversalDecoder(), policyBytes, &policy) if err != nil { t.Fatalf("failed to decode written policy: %v", err) } From ca12c733239e8de8a5d20c0588d87b420075c614 Mon Sep 17 00:00:00 2001 From: mlmhl Date: Sat, 28 Apr 2018 14:59:55 +0800 Subject: [PATCH 297/307] implement kubelet side online file system resize for volume --- pkg/features/kube_features.go | 50 +++--- pkg/kubelet/volumemanager/cache/BUILD | 2 + .../cache/actual_state_of_world.go | 118 ++++++++++++- pkg/kubelet/volumemanager/populator/BUILD | 3 + .../desired_state_of_world_populator.go | 158 +++++++++++++++-- .../desired_state_of_world_populator_test.go | 161 +++++++++++++++++- pkg/kubelet/volumemanager/reconciler/BUILD | 1 + .../volumemanager/reconciler/reconciler.go | 16 ++ .../reconciler/reconciler_test.go | 142 +++++++++++++++ pkg/volume/testing/testing.go | 9 + .../operationexecutor/operation_executor.go | 13 ++ .../operation_executor_test.go | 10 ++ .../operationexecutor/operation_generator.go | 59 +++++++ 13 files changed, 697 insertions(+), 45 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 63965511ef9..7e0b0f8f94b 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -99,6 +99,11 @@ const ( // Ability to Expand persistent volumes ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes" + // owner: @mlmhl + // alpha: v1.11 + // Ability to expand persistent volumes' file system without unmounting volumes. 
+ ExpandPersistentVolumesFSWithoutUnmounting utilfeature.Feature = "ExpandPersistentVolumesFSWithoutUnmounting" + // owner: @verb // alpha: v1.10 // @@ -328,28 +333,29 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS MountPropagation: {Default: true, PreRelease: utilfeature.Beta}, QOSReserved: {Default: false, PreRelease: utilfeature.Alpha}, ExpandPersistentVolumes: {Default: true, PreRelease: utilfeature.Beta}, - CPUManager: {Default: true, PreRelease: utilfeature.Beta}, - ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, - MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, - VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta}, - CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta}, - CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta}, - BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, - StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA}, - ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, - SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA}, - SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha}, - HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha}, - ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha}, - TokenRequest: {Default: false, PreRelease: utilfeature.Alpha}, - TokenRequestProjection: {Default: false, PreRelease: utilfeature.Alpha}, - CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, - GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta}, - RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, - VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, - BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, - DynamicProvisioningScheduling: {Default: false, PreRelease: utilfeature.Alpha}, - VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, + ExpandPersistentVolumesFSWithoutUnmounting: {Default: false, PreRelease: utilfeature.Alpha}, + CPUManager: {Default: true, PreRelease: utilfeature.Beta}, + ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, + MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta}, + CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta}, + CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta}, + BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA}, + ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, + SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA}, + SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha}, + HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha}, + ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha}, + TokenRequest: {Default: false, PreRelease: utilfeature.Alpha}, + TokenRequestProjection: {Default: false, PreRelease: utilfeature.Alpha}, + CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, + GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta}, + RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, + BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, + DynamicProvisioningScheduling: {Default: 
false, PreRelease: utilfeature.Alpha}, + VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/pkg/kubelet/volumemanager/cache/BUILD b/pkg/kubelet/volumemanager/cache/BUILD index bac59a12bd8..c79fad51379 100644 --- a/pkg/kubelet/volumemanager/cache/BUILD +++ b/pkg/kubelet/volumemanager/cache/BUILD @@ -14,6 +14,7 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", deps = [ + "//pkg/features:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", @@ -21,6 +22,7 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 6ba3cd0dfda..5b375e780c7 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -28,6 +28,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" @@ -148,6 +150,11 @@ type ActualStateOfWorld interface { // with pod's unique name. This map can be used to determine which pod is currently // in actual state of world. GetPods() map[volumetypes.UniquePodName]bool + + // MarkFSResizeRequired marks each volume that is successfully attached and + // mounted for the specified pod as requiring file system resize (if the plugin for the + // volume indicates it requires file system resize). + MarkFSResizeRequired(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) } // MountedVolume represents a volume that has successfully been mounted to a pod. @@ -291,6 +298,10 @@ type mountedPod struct { // volumeGidValue contains the value of the GID annotation, if present. volumeGidValue string + + // fsResizeRequired indicates the underlying volume has been successfully + // mounted to this pod but its size has been expanded after that. 
+ fsResizeRequired bool } func (asw *actualStateOfWorld) MarkVolumeAsAttached( @@ -444,6 +455,34 @@ func (asw *actualStateOfWorld) AddPodToVolume( return nil } +func (asw *actualStateOfWorld) MarkVolumeAsResized( + podName volumetypes.UniquePodName, + volumeName v1.UniqueVolumeName) error { + asw.Lock() + defer asw.Unlock() + + volumeObj, volumeExists := asw.attachedVolumes[volumeName] + if !volumeExists { + return fmt.Errorf( + "no volume with the name %q exists in the list of attached volumes", + volumeName) + } + + podObj, podExists := volumeObj.mountedPods[podName] + if !podExists { + return fmt.Errorf( + "no pod with the name %q exists in the mounted pods list of volume %s", + podName, + volumeName) + } + + glog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized", + volumeName, podObj.outerVolumeSpecName, podName) + podObj.fsResizeRequired = false + asw.attachedVolumes[volumeName].mountedPods[podName] = podObj + return nil +} + func (asw *actualStateOfWorld) MarkRemountRequired( podName volumetypes.UniquePodName) { asw.Lock() @@ -475,6 +514,46 @@ func (asw *actualStateOfWorld) MarkRemountRequired( } } +func (asw *actualStateOfWorld) MarkFSResizeRequired( + volumeName v1.UniqueVolumeName, + podName volumetypes.UniquePodName) { + asw.Lock() + defer asw.Unlock() + volumeObj, exist := asw.attachedVolumes[volumeName] + if !exist { + glog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName) + return + } + + podObj, exist := volumeObj.mountedPods[podName] + if !exist { + glog.Warningf("MarkFSResizeRequired for volume %s failed "+ + "as pod(%s) not exist", volumeName, podName) + return + } + + volumePlugin, err := + asw.volumePluginMgr.FindExpandablePluginBySpec(podObj.volumeSpec) + if err != nil || volumePlugin == nil { + // Log and continue processing + glog.Errorf( + "MarkFSResizeRequired failed to find expandable plugin for pod %q volume: %q (volSpecName: %q)", + podObj.podName, + volumeObj.volumeName, + podObj.volumeSpec.Name()) + return + } + + if volumePlugin.RequiresFSResize() { + if !podObj.fsResizeRequired { + glog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize", + volumeName, podObj.outerVolumeSpecName, podName) + podObj.fsResizeRequired = true + } + asw.attachedVolumes[volumeName].mountedPods[podName] = podObj + } +} + func (asw *actualStateOfWorld) SetVolumeGloballyMounted( volumeName v1.UniqueVolumeName, globallyMounted bool, devicePath, deviceMountPath string) error { asw.Lock() @@ -546,8 +625,14 @@ func (asw *actualStateOfWorld) PodExistsInVolume( } podObj, podExists := volumeObj.mountedPods[podName] - if podExists && podObj.remountRequired { - return true, volumeObj.devicePath, newRemountRequiredError(volumeObj.volumeName, podObj.podName) + if podExists { + if podObj.remountRequired { + return true, volumeObj.devicePath, newRemountRequiredError(volumeObj.volumeName, podObj.podName) + } + if podObj.fsResizeRequired && + utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumesFSWithoutUnmounting) { + return true, volumeObj.devicePath, newFsResizeRequiredError(volumeObj.volumeName, podObj.podName) + } } return podExists, volumeObj.devicePath, nil @@ -716,6 +801,35 @@ func newRemountRequiredError( } } +// fsResizeRequiredError is an error returned when PodExistsInVolume() found +// volume/pod attached/mounted but fsResizeRequired was true, indicating the +// given volume receives an resize request after attached/mounted. 
+type fsResizeRequiredError struct { + volumeName v1.UniqueVolumeName + podName volumetypes.UniquePodName +} + +func (err fsResizeRequiredError) Error() string { + return fmt.Sprintf( + "volumeName %q mounted to %q needs to resize file system", + err.volumeName, err.podName) +} + +func newFsResizeRequiredError( + volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) error { + return fsResizeRequiredError{ + volumeName: volumeName, + podName: podName, + } +} + +// IsFSResizeRequiredError returns true if the specified error is a +// fsResizeRequiredError. +func IsFSResizeRequiredError(err error) bool { + _, ok := err.(fsResizeRequiredError) + return ok +} + // getMountedVolume constructs and returns a MountedVolume object from the given // mountedPod and attachedVolume objects. func getMountedVolume( diff --git a/pkg/kubelet/volumemanager/populator/BUILD b/pkg/kubelet/volumemanager/populator/BUILD index c44312cb7f0..fe4d2da5a45 100644 --- a/pkg/kubelet/volumemanager/populator/BUILD +++ b/pkg/kubelet/volumemanager/populator/BUILD @@ -25,6 +25,7 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", @@ -49,6 +50,7 @@ go_test( srcs = ["desired_state_of_world_populator_test.go"], embed = [":go_default_library"], deps = [ + "//pkg/features:go_default_library", "//pkg/kubelet/configmap:go_default_library", "//pkg/kubelet/container/testing:go_default_library", "//pkg/kubelet/pod:go_default_library", @@ -61,6 +63,7 @@ go_test( "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index cc642511a72..f4894d8e92d 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -30,6 +30,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" @@ -182,12 +183,26 @@ func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool { // Iterate through all pods and add to desired state of world if they don't // exist but should func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() { + // Map unique pod name to outer volume name to MountedVolume. 
+ mountedVolumesForPod := make(map[volumetypes.UniquePodName]map[string]cache.MountedVolume) + if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumesFSWithoutUnmounting) { + for _, mountedVolume := range dswp.actualStateOfWorld.GetMountedVolumes() { + mountedVolumes, exist := mountedVolumesForPod[mountedVolume.PodName] + if !exist { + mountedVolumes = make(map[string]cache.MountedVolume) + mountedVolumesForPod[mountedVolume.PodName] = mountedVolumes + } + mountedVolumes[mountedVolume.OuterVolumeSpecName] = mountedVolume + } + } + + processedVolumesForFSResize := sets.NewString() for _, pod := range dswp.podManager.GetPods() { if dswp.isPodTerminated(pod) { // Do not (re)add volumes for terminated pods continue } - dswp.processPodVolumes(pod) + dswp.processPodVolumes(pod, mountedVolumesForPod, processedVolumesForFSResize) } } @@ -259,7 +274,10 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { // processPodVolumes processes the volumes in the given pod and adds them to the // desired state of the world. -func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { +func (dswp *desiredStateOfWorldPopulator) processPodVolumes( + pod *v1.Pod, + mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume, + processedVolumesForFSResize sets.String) { if pod == nil { return } @@ -274,7 +292,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { // Process volume spec for each volume defined in pod for _, podVolume := range pod.Spec.Volumes { - volumeSpec, volumeGidValue, err := + pvc, volumeSpec, volumeGidValue, err := dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mountsMap, devicesMap) if err != nil { glog.Errorf( @@ -304,6 +322,11 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { podVolume.Name, volumeSpec.Name(), uniquePodName) + + if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumesFSWithoutUnmounting) { + dswp.checkVolumeFSResize(pod, podVolume, pvc, volumeSpec, + uniquePodName, mountedVolumesForPod, processedVolumesForFSResize) + } } // some of the volume additions may have failed, should not mark this pod as fully processed @@ -316,6 +339,106 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { } +// checkVolumeFSResize checks whether a PVC mounted by the pod requires file +// system resize or not. If so, marks this volume as fsResizeRequired in ASW. +// - mountedVolumesForPod stores all mounted volumes in ASW, because online +// volume resize only considers mounted volumes. +// - processedVolumesForFSResize stores all volumes we have checked in current loop, +// because file system resize operation is a global operation for volume, so +// we only need to check it once if more than one pod use it. +func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize( + pod *v1.Pod, + podVolume v1.Volume, + pvc *v1.PersistentVolumeClaim, + volumeSpec *volume.Spec, + uniquePodName volumetypes.UniquePodName, + mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume, + processedVolumesForFSResize sets.String) { + if podVolume.PersistentVolumeClaim == nil { + // Only PVC supports resize operation. + return + } + uniqueVolumeName, exist := getUniqueVolumeName(uniquePodName, podVolume.Name, mountedVolumesForPod) + if !exist { + // Volume not exist in ASW, we assume it hasn't been mounted yet. 
If it needs resize, + // it will be handled as offline resize(if it indeed hasn't been mounted yet), + // or online resize in subsequent loop(after we confirm it has been mounted). + return + } + fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec) + if err != nil { + glog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v", + uniqueVolumeName, podVolume.Name, err) + return + } + if !fsVolume { + glog.V(5).Infof("Block mode volume needn't to check file system resize request") + return + } + if processedVolumesForFSResize.Has(string(uniqueVolumeName)) { + // File system resize operation is a global operation for volume, + // so we only need to check it once if more than one pod use it. + return + } + if mountedReadOnlyByPod(podVolume, pod) { + // This volume is used as read only by this pod, we don't perform resize for read only volumes. + glog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+ + "as the volume is mounted as readonly", podVolume.Name, pod.Namespace, pod.Name) + return + } + if volumeRequiresFSResize(pvc, volumeSpec.PersistentVolume) { + dswp.actualStateOfWorld.MarkFSResizeRequired(uniqueVolumeName, uniquePodName) + } + processedVolumesForFSResize.Insert(string(uniqueVolumeName)) +} + +func mountedReadOnlyByPod(podVolume v1.Volume, pod *v1.Pod) bool { + if podVolume.PersistentVolumeClaim.ReadOnly { + return true + } + for _, container := range pod.Spec.InitContainers { + if !mountedReadOnlyByContainer(podVolume.Name, &container) { + return false + } + } + for _, container := range pod.Spec.Containers { + if !mountedReadOnlyByContainer(podVolume.Name, &container) { + return false + } + } + return true +} + +func mountedReadOnlyByContainer(volumeName string, container *v1.Container) bool { + for _, volumeMount := range container.VolumeMounts { + if volumeMount.Name == volumeName && !volumeMount.ReadOnly { + return false + } + } + return true +} + +func getUniqueVolumeName( + podName volumetypes.UniquePodName, + outerVolumeSpecName string, + mountedVolumesForPod map[volumetypes.UniquePodName]map[string]cache.MountedVolume) (v1.UniqueVolumeName, bool) { + mountedVolumes, exist := mountedVolumesForPod[podName] + if !exist { + return "", false + } + mountedVolume, exist := mountedVolumes[outerVolumeSpecName] + if !exist { + return "", false + } + return mountedVolume.VolumeName, true +} + +func volumeRequiresFSResize(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) bool { + capacity := pvc.Status.Capacity[v1.ResourceStorage] + requested := pv.Spec.Capacity[v1.ResourceStorage] + return requested.Cmp(capacity) > 0 +} + // podPreviouslyProcessed returns true if the volumes for this pod have already // been processed by the populator func (dswp *desiredStateOfWorldPopulator) podPreviouslyProcessed( @@ -350,7 +473,7 @@ func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod( // specified volume. It dereference any PVC to get PV objects, if needed. // Returns an error if unable to obtain the volume at this time. 
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( - podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*volume.Spec, string, error) { + podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) { if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil { glog.V(5).Infof( @@ -359,15 +482,16 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( pvcSource.ClaimName) // If podVolume is a PVC, fetch the real PV behind the claim - pvName, pvcUID, err := dswp.getPVCExtractPV( + pvc, err := dswp.getPVCExtractPV( podNamespace, pvcSource.ClaimName) if err != nil { - return nil, "", fmt.Errorf( + return nil, nil, "", fmt.Errorf( "error processing PVC %q/%q: %v", podNamespace, pvcSource.ClaimName, err) } + pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID glog.V(5).Infof( "Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q", @@ -380,7 +504,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( volumeSpec, volumeGidValue, err := dswp.getPVSpec(pvName, pvcSource.ReadOnly, pvcUID) if err != nil { - return nil, "", fmt.Errorf( + return nil, nil, "", fmt.Errorf( "error processing PVC %q/%q: %v", podNamespace, pvcSource.ClaimName, @@ -399,11 +523,11 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { volumeMode, err := util.GetVolumeMode(volumeSpec) if err != nil { - return nil, "", err + return nil, nil, "", err } // Error if a container has volumeMounts but the volumeMode of PVC isn't Filesystem if mountsMap[podVolume.Name] && volumeMode != v1.PersistentVolumeFilesystem { - return nil, "", fmt.Errorf( + return nil, nil, "", fmt.Errorf( "Volume %q has volumeMode %q, but is specified in volumeMounts for pod %q/%q", podVolume.Name, volumeMode, @@ -412,7 +536,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( } // Error if a container has volumeDevices but the volumeMode of PVC isn't Block if devicesMap[podVolume.Name] && volumeMode != v1.PersistentVolumeBlock { - return nil, "", fmt.Errorf( + return nil, nil, "", fmt.Errorf( "Volume %q has volumeMode %q, but is specified in volumeDevices for pod %q/%q", podVolume.Name, volumeMode, @@ -420,13 +544,13 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( podName) } } - return volumeSpec, volumeGidValue, nil + return pvc, volumeSpec, volumeGidValue, nil } // Do not return the original volume object, since the source could mutate it clonedPodVolume := podVolume.DeepCopy() - return volume.NewSpecFromVolume(clonedPodVolume), "", nil + return nil, volume.NewSpecFromVolume(clonedPodVolume), "", nil } // getPVCExtractPV fetches the PVC object with the given namespace and name from @@ -434,11 +558,11 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( // it is pointing to and returns it. // An error is returned if the PVC object's phase is not "Bound". func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( - namespace string, claimName string) (string, types.UID, error) { + namespace string, claimName string) (*v1.PersistentVolumeClaim, error) { pvc, err := dswp.kubeClient.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) if err != nil || pvc == nil { - return "", "", fmt.Errorf( + return nil, fmt.Errorf( "failed to fetch PVC %s/%s from API server. 
err=%v", namespace, claimName, @@ -455,7 +579,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( // It should happen only in very rare case when scheduler schedules // a pod and user deletes a PVC that's used by it at the same time. if pvc.ObjectMeta.DeletionTimestamp != nil { - return "", "", fmt.Errorf( + return nil, fmt.Errorf( "can't start pod because PVC %s/%s is being deleted", namespace, claimName) @@ -464,7 +588,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" { - return "", "", fmt.Errorf( + return nil, fmt.Errorf( "PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)", namespace, claimName, @@ -472,7 +596,7 @@ func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV( pvc.Spec.VolumeName) } - return pvc.Spec.VolumeName, pvc.UID, nil + return pvc, nil } // getPVSpec fetches the PV object with the given name from the API server diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go index 1ecf9674dd8..5acd7df04d9 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go @@ -20,12 +20,16 @@ import ( "testing" "time" + "fmt" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/configmap" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" @@ -294,7 +298,7 @@ func TestCreateVolumeSpec_Valid_File_VolumeMounts(t *testing.T) { fakePodManager.AddPod(pod) mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers) - volumeSpec, _, err := + _, volumeSpec, _, err := dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap) // Assert @@ -343,7 +347,7 @@ func TestCreateVolumeSpec_Valid_Block_VolumeDevices(t *testing.T) { fakePodManager.AddPod(pod) mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers) - volumeSpec, _, err := + _, volumeSpec, _, err := dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap) // Assert @@ -395,7 +399,7 @@ func TestCreateVolumeSpec_Invalid_File_VolumeDevices(t *testing.T) { fakePodManager.AddPod(pod) mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers) - volumeSpec, _, err := + _, volumeSpec, _, err := dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap) // Assert @@ -447,7 +451,7 @@ func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) { fakePodManager.AddPod(pod) mountsMap, devicesMap := dswp.makeVolumeMap(pod.Spec.Containers) - volumeSpec, _, err := + _, volumeSpec, _, err := dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap) // Assert @@ -459,6 +463,155 @@ func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) { utilfeature.DefaultFeatureGate.Set("BlockVolume=false") } +func TestCheckVolumeFSResize(t *testing.T) { + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dswp-test-volume-name", + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{RBD: 
&v1.RBDPersistentVolumeSource{}}, + Capacity: volumeCapacity(1), + ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "file-bound"}, + }, + } + pvc := &v1.PersistentVolumeClaim{ + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "dswp-test-volume-name", + Resources: v1.ResourceRequirements{ + Requests: volumeCapacity(1), + }, + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + Capacity: volumeCapacity(1), + }, + } + dswp, fakePodManager, fakeDSW := createDswpWithVolume(t, pv, pvc) + fakeASW := dswp.actualStateOfWorld + + // create pod + containers := []v1.Container{ + { + VolumeMounts: []v1.VolumeMount{ + { + Name: "dswp-test-volume-name", + MountPath: "/mnt", + }, + }, + }, + } + pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "file-bound", containers) + uniquePodName := types.UniquePodName(pod.UID) + uniqueVolumeName := v1.UniqueVolumeName("fake-plugin/" + pod.Spec.Volumes[0].Name) + + fakePodManager.AddPod(pod) + // Fill the dsw to contains volumes and pods. + dswp.findAndAddNewPods() + reconcileASW(fakeASW, fakeDSW, t) + + // No resize request for volume, volumes in ASW shouldn't be marked as fsResizeRequired. + setExpandOnlinePersistentVolumesFeatureGate("true", t) + resizeRequiredVolumes := reprocess(dswp, uniquePodName, fakeDSW, fakeASW) + if len(resizeRequiredVolumes) > 0 { + t.Fatalf("No resize request for any volumes, but found resize required volumes in ASW: %v", resizeRequiredVolumes) + } + + // Add a resize request to volume. + pv.Spec.Capacity = volumeCapacity(2) + pvc.Spec.Resources.Requests = volumeCapacity(2) + + // Disable the feature gate, so volume shouldn't be marked as fsResizeRequired. + setExpandOnlinePersistentVolumesFeatureGate("false", t) + resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW) + if len(resizeRequiredVolumes) > 0 { + t.Fatalf("Feature gate disabled, but found resize required volumes in ASW: %v", resizeRequiredVolumes) + } + + // Make volume used as ReadOnly, so volume shouldn't be marked as fsResizeRequired. + setExpandOnlinePersistentVolumesFeatureGate("true", t) + pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true + resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW) + if len(resizeRequiredVolumes) > 0 { + t.Fatalf("volume mounted as ReadOnly, but found resize required volumes in ASW: %v", resizeRequiredVolumes) + } + + // Clear ASW, so volume shouldn't be marked as fsResizeRequired because they are not mounted. + pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = false + clearASW(fakeASW, fakeDSW, t) + resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW) + if len(resizeRequiredVolumes) > 0 { + t.Fatalf("volume hasn't been mounted, but found resize required volumes in ASW: %v", resizeRequiredVolumes) + } + + // volume in ASW should be marked as fsResizeRequired. 
+ reconcileASW(fakeASW, fakeDSW, t) + resizeRequiredVolumes = reprocess(dswp, uniquePodName, fakeDSW, fakeASW) + if len(resizeRequiredVolumes) == 0 { + t.Fatalf("Request resize for volume, but volume in ASW hasn't been marked as fsResizeRequired") + } + if len(resizeRequiredVolumes) != 1 { + t.Fatalf("Some unexpected volumes are marked as fsResizeRequired: %v", resizeRequiredVolumes) + } + if resizeRequiredVolumes[0] != uniqueVolumeName { + t.Fatalf("Mark wrong volume as fsResizeRequired: %s", resizeRequiredVolumes[0]) + } +} + +func volumeCapacity(size int) v1.ResourceList { + return v1.ResourceList{v1.ResourceStorage: resource.MustParse(fmt.Sprintf("%dGi", size))} +} + +func setExpandOnlinePersistentVolumesFeatureGate(value string, t *testing.T) { + err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%s", features.ExpandPersistentVolumesFSWithoutUnmounting, value)) + if err != nil { + t.Fatalf("Set ExpandPersistentVolumesFSWithoutUnmounting feature gate to %s failed: %v", value, err) + } +} + +func reconcileASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) { + for _, volumeToMount := range dsw.GetVolumesToMount() { + err := asw.MarkVolumeAsAttached(volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "") + if err != nil { + t.Fatalf("Unexpected error when MarkVolumeAsAttached: %v", err) + } + err = asw.MarkVolumeAsMounted(volumeToMount.PodName, volumeToMount.Pod.UID, + volumeToMount.VolumeName, nil, nil, volumeToMount.OuterVolumeSpecName, volumeToMount.VolumeGidValue, volumeToMount.VolumeSpec) + if err != nil { + t.Fatalf("Unexpected error when MarkVolumeAsMounted: %v", err) + } + } +} + +func clearASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) { + for _, volumeToMount := range dsw.GetVolumesToMount() { + err := asw.MarkVolumeAsUnmounted(volumeToMount.PodName, volumeToMount.VolumeName) + if err != nil { + t.Fatalf("Unexpected error when MarkVolumeAsUnmounted: %v", err) + } + } + for _, volumeToMount := range dsw.GetVolumesToMount() { + asw.MarkVolumeAsDetached(volumeToMount.VolumeName, "") + } +} + +func reprocess(dswp *desiredStateOfWorldPopulator, uniquePodName types.UniquePodName, + dsw cache.DesiredStateOfWorld, asw cache.ActualStateOfWorld) []v1.UniqueVolumeName { + dswp.ReprocessPod(uniquePodName) + dswp.findAndAddNewPods() + return getResizeRequiredVolumes(dsw, asw) +} + +func getResizeRequiredVolumes(dsw cache.DesiredStateOfWorld, asw cache.ActualStateOfWorld) []v1.UniqueVolumeName { + resizeRequiredVolumes := []v1.UniqueVolumeName{} + for _, volumeToMount := range dsw.GetVolumesToMount() { + _, _, err := asw.PodExistsInVolume(volumeToMount.PodName, volumeToMount.VolumeName) + if cache.IsFSResizeRequiredError(err) { + resizeRequiredVolumes = append(resizeRequiredVolumes, volumeToMount.VolumeName) + } + } + return resizeRequiredVolumes +} + func verifyVolumeExistsInVolumesToMount(t *testing.T, expectedVolumeName v1.UniqueVolumeName, expectReportedInUse bool, dsw cache.DesiredStateOfWorld) { volumesToMount := dsw.GetVolumesToMount() for _, volume := range volumesToMount { diff --git a/pkg/kubelet/volumemanager/reconciler/BUILD b/pkg/kubelet/volumemanager/reconciler/BUILD index b2a41d00065..5877ceced44 100644 --- a/pkg/kubelet/volumemanager/reconciler/BUILD +++ b/pkg/kubelet/volumemanager/reconciler/BUILD @@ -38,6 +38,7 @@ go_test( srcs = ["reconciler_test.go"], embed = [":go_default_library"], deps = [ + "//pkg/features:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", 
"//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 5d6619d5b32..21a58394e4a 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -254,6 +254,22 @@ func (rc *reconciler) reconcile() { glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) } } + } else if cache.IsFSResizeRequiredError(err) && + utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumesFSWithoutUnmounting) { + glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", "")) + err := rc.operationExecutor.ExpandVolumeFSWithoutUnmounting( + volumeToMount.VolumeToMount, + rc.actualStateOfWorld) + if err != nil && + !nestedpendingoperations.IsAlreadyExists(err) && + !exponentialbackoff.IsExponentialBackoff(err) { + // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. + // Log all other errors. + glog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error()) + } + if err == nil { + glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", "")) + } } } diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go index bc615ddcaa2..c67ea7cc3fd 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" @@ -965,6 +966,120 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) { utilfeature.DefaultFeatureGate.Set("BlockVolume=false") } +// Populates desiredStateOfWorld cache with one volume/pod. +// Enables controllerAttachDetachEnabled. +// Calls Run() +// Wait for volume mounted. +// Mark volume as fsResizeRequired in ASW. +// Verifies volume's fsResizeRequired flag is cleared later. 
+func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) { + utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExpandPersistentVolumesFSWithoutUnmounting)) + pv := &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv", + UID: "pvuid", + }, + Spec: v1.PersistentVolumeSpec{ + ClaimRef: &v1.ObjectReference{Name: "pvc"}, + }, + } + pvc := &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pvc", + UID: "pvcuid", + }, + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "pv", + }, + } + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + UID: "pod1uid", + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "volume-name", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }, + }, + }, + } + + volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t) + dsw := cache.NewDesiredStateOfWorld(volumePluginMgr) + asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr) + kubeClient := createtestClientWithPVPVC(pv, pvc) + fakeRecorder := &record.FakeRecorder{} + fakeHandler := volumetesting.NewBlockVolumePathHandler() + oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator( + kubeClient, + volumePluginMgr, + fakeRecorder, + false, /* checkNodeCapabilitiesBeforeMount */ + fakeHandler)) + + reconciler := NewReconciler( + kubeClient, + true, /* controllerAttachDetachEnabled */ + reconcilerLoopSleepDuration, + reconcilerSyncStatesSleepPeriod, + waitForAttachTimeout, + nodeName, + dsw, + asw, + hasAddedPods, + oex, + &mount.FakeMounter{}, + volumePluginMgr, + kubeletPodsDir) + + volumeSpec := &volume.Spec{PersistentVolume: pv} + podName := util.GetUniquePodName(pod) + volumeName, err := dsw.AddPodToVolume( + podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) + // Assert + if err != nil { + t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err) + } + dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName}) + + // Start the reconciler to fill ASW. + stopChan, stoppedChan := make(chan struct{}), make(chan struct{}) + go func() { + reconciler.Run(stopChan) + close(stoppedChan) + }() + waitForMount(t, fakePlugin, volumeName, asw) + // Stop the reconciler. + close(stopChan) + <-stoppedChan + + // Mark volume as fsResizeRequired. + asw.MarkFSResizeRequired(volumeName, podName) + _, _, podExistErr := asw.PodExistsInVolume(podName, volumeName) + if !cache.IsFSResizeRequiredError(podExistErr) { + t.Fatalf("Volume should be marked as fsResizeRequired, but receive unexpected error: %v", podExistErr) + } + + // Start the reconciler again, we hope reconciler will perform the + // resize operation and clear the fsResizeRequired flag for volume. 
+ go reconciler.Run(wait.NeverStop) + + waitErr := retryWithExponentialBackOff(500*time.Millisecond, func() (done bool, err error) { + mounted, _, err := asw.PodExistsInVolume(podName, volumeName) + return mounted && err == nil, nil + }) + if waitErr != nil { + t.Fatal("Volume resize should succeeded") + } +} + func waitForMount( t *testing.T, fakePlugin *volumetesting.FakeVolumePlugin, @@ -1044,3 +1159,30 @@ func createTestClient() *fake.Clientset { func runReconciler(reconciler Reconciler) { go reconciler.Run(wait.NeverStop) } + +func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) *fake.Clientset { + fakeClient := &fake.Clientset{} + fakeClient.AddReactor("get", "nodes", + func(action core.Action) (bool, runtime.Object, error) { + return true, &v1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: string(nodeName)}, + Status: v1.NodeStatus{ + VolumesAttached: []v1.AttachedVolume{ + { + Name: "fake-plugin/pv", + DevicePath: "fake/path", + }, + }}, + }, nil + }) + fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) { + return true, pvc, nil + }) + fakeClient.AddReactor("get", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) { + return true, pv, nil + }) + fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { + return true, nil, fmt.Errorf("no reaction implemented for %s", action) + }) + return fakeClient +} diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index 1cfe6dddc1a..fb805812d6e 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -439,6 +439,15 @@ func (plugin *FakeVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]st return []string{}, nil } +// Expandable volume support +func (plugin *FakeVolumePlugin) ExpandVolumeDevice(spec *Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { + return resource.Quantity{}, nil +} + +func (plugin *FakeVolumePlugin) RequiresFSResize() bool { + return true +} + type FakeFileVolumePlugin struct { } diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index c065ace25e1..283a322259f 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -142,6 +142,8 @@ type OperationExecutor interface { IsOperationPending(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName) bool // Expand Volume will grow size available to PVC ExpandVolume(*expandcache.PVCWithResizeRequest, expandcache.VolumeResizeMap) error + // ExpandVolumeFSWithoutUnmounting will resize volume's file system to expected size without unmounting the volume. + ExpandVolumeFSWithoutUnmounting(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error // ReconstructVolumeOperation construct a new volumeSpec and returns it created by plugin ReconstructVolumeOperation(volumeMode v1.PersistentVolumeMode, plugin volume.VolumePlugin, mapperPlugin volume.BlockVolumePlugin, uid types.UID, podName volumetypes.UniquePodName, volumeSpecName string, mountPath string, pluginName string) (*volume.Spec, error) // CheckVolumeExistenceOperation checks volume existence @@ -173,6 +175,9 @@ type ActualStateOfWorldMounterUpdater interface { // Marks the specified volume as having its global mount unmounted. 
MarkDeviceAsUnmounted(volumeName v1.UniqueVolumeName) error + + // Marks the specified volume's file system resize request is finished. + MarkVolumeAsResized(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error } // ActualStateOfWorldAttacherUpdater defines a set of operations updating the @@ -817,6 +822,14 @@ func (oe *operationExecutor) ExpandVolume(pvcWithResizeRequest *expandcache.PVCW return oe.pendingOperations.Run(uniqueVolumeKey, "", generatedOperations) } +func (oe *operationExecutor) ExpandVolumeFSWithoutUnmounting(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { + generatedOperations, err := oe.operationGenerator.GenerateExpandVolumeFSWithoutUnmountingFunc(volumeToMount, actualStateOfWorld) + if err != nil { + return err + } + return oe.pendingOperations.Run(volumeToMount.VolumeName, "", generatedOperations) +} + func (oe *operationExecutor) VerifyControllerAttachedVolume( volumeToMount VolumeToMount, nodeName types.NodeName, diff --git a/pkg/volume/util/operationexecutor/operation_executor_test.go b/pkg/volume/util/operationexecutor/operation_executor_test.go index 0551c4733fb..7d427e48f0f 100644 --- a/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -438,6 +438,16 @@ func (fopg *fakeOperationGenerator) GenerateExpandVolumeFunc(pvcWithResizeReques }, nil } +func (fopg *fakeOperationGenerator) GenerateExpandVolumeFSWithoutUnmountingFunc(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { + opFunc := func() (error, error) { + startOperationAndBlock(fopg.ch, fopg.quit) + return nil, nil + } + return volumetypes.GeneratedOperations{ + OperationFunc: opFunc, + }, nil +} + func (fopg *fakeOperationGenerator) GenerateBulkVolumeVerifyFunc( pluginNodeVolumes map[types.NodeName][]*volume.Spec, pluginNane string, diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index a9312fe7fcf..582d74b6859 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -121,6 +121,9 @@ type OperationGenerator interface { map[*volume.Spec]v1.UniqueVolumeName, ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) GenerateExpandVolumeFunc(*expandcache.PVCWithResizeRequest, expandcache.VolumeResizeMap) (volumetypes.GeneratedOperations, error) + + // Generates the volume file system resize function, which can resize volume's file system to expected size without unmounting the volume. 
+ GenerateExpandVolumeFSWithoutUnmountingFunc(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) } func (og *operationGenerator) GenerateVolumesAreAttachedFunc( @@ -1306,6 +1309,62 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( }, nil } +func (og *operationGenerator) GenerateExpandVolumeFSWithoutUnmountingFunc( + volumeToMount VolumeToMount, + actualStateOfWorld ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) { + volumePlugin, err := + og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) + if err != nil || volumePlugin == nil { + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("VolumeFSResize.FindPluginBySpec failed", err) + } + + attachableVolumePlugin, err := + og.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec) + if err != nil || attachableVolumePlugin == nil { + if attachableVolumePlugin == nil { + err = fmt.Errorf("AttachableVolumePlugin is nil") + } + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("VolumeFSResize.FindAttachablePluginBySpec failed", err) + } + + volumeAttacher, err := attachableVolumePlugin.NewAttacher() + if err != nil || volumeAttacher == nil { + if volumeAttacher == nil { + err = fmt.Errorf("VolumeAttacher is nil") + } + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("VolumeFSResize.NewAttacher failed", err) + } + + deviceMountPath, err := volumeAttacher.GetDeviceMountPath(volumeToMount.VolumeSpec) + if err != nil { + return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("VolumeFSResize.GetDeviceMountPath failed", err) + } + + fsResizeFunc := func() (error, error) { + resizeSimpleError, resizeDetailedError := og.resizeFileSystem(volumeToMount, volumeToMount.DevicePath, deviceMountPath, volumePlugin.GetPluginName()) + if resizeSimpleError != nil || resizeDetailedError != nil { + return resizeSimpleError, resizeDetailedError + } + markFSResizedErr := actualStateOfWorld.MarkVolumeAsResized(volumeToMount.PodName, volumeToMount.VolumeName) + if markFSResizedErr != nil { + // On failure, return error. Caller will log and retry. 
+ return volumeToMount.GenerateError("VolumeFSResize.MarkVolumeAsResized failed", markFSResizedErr) + } + return nil, nil + } + eventRecorderFunc := func(err *error) { + if *err != nil { + og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.VolumeResizeFailed, (*err).Error()) + } + } + + return volumetypes.GeneratedOperations{ + OperationFunc: fsResizeFunc, + EventRecorderFunc: eventRecorderFunc, + CompleteFunc: util.OperationCompleteHook(volumePlugin.GetPluginName(), "volume_fs_resize"), + }, nil +} + func checkMountOptionSupport(og *operationGenerator, volumeToMount VolumeToMount, plugin volume.VolumePlugin) error { mountOptions := util.MountOptionFromSpec(volumeToMount.VolumeSpec) From cc87e73dd881e50ad04e39e2cea599a03c517935 Mon Sep 17 00:00:00 2001 From: Rohit Ramkumar Date: Thu, 15 Feb 2018 11:06:13 -0800 Subject: [PATCH 298/307] Add ipvs module loading logic to gce scripts --- cluster/gce/gci/configure-helper.sh | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index ebf735955cf..07bbfdc613e 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1199,7 +1199,15 @@ function prepare-kube-proxy-manifest-variables { params+=" --feature-gates=${FEATURE_GATES}" fi if [[ "${KUBE_PROXY_MODE:-}" == "ipvs" ]];then - params+=" --proxy-mode=ipvs --feature-gates=SupportIPVSProxyMode=true" + sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4 + if [[ $? -eq 0 ]]; + then + params+=" --proxy-mode=ipvs" + else + # If IPVS modules are not present, make sure the node does not come up as + # healthy. + exit 1 + fi fi params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s" if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then @@ -2586,4 +2594,4 @@ if [[ "$#" -eq 1 && "${1}" == "--source-only" ]]; then : else main "${@}" -fi \ No newline at end of file +fi From 6ed91fc07ca75591691d3aacc30dc8d56f6cbf62 Mon Sep 17 00:00:00 2001 From: liz Date: Tue, 29 May 2018 17:04:39 -0400 Subject: [PATCH 299/307] Save kubeadm manifest backup directories When kubeadm upgrades a static pod cluster, the old manifests were previously deleted. This patch alters this behaviour so they are now stored in a timestamped temporary directory. 
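For illustration only (not part of the patch): the short Go sketch below shows how a timestamped backup directory name of the kind this commit introduces can be built, mirroring the CreateTimestampDirForKubeadm helper added in the diff that follows. The /etc/kubernetes/tmp base path and the kubeadm-backup-manifests prefix are taken from the diff itself; the standalone program structure and output are assumptions made for the example.

package main

import (
	"fmt"
	"os"
	"path"
	"time"
)

func main() {
	// Base directory kubeadm uses for temporary data (KubernetesDir joined with TempDirForKubeadm).
	base := path.Join("/etc/kubernetes", "tmp")
	if err := os.MkdirAll(base, 0700); err != nil {
		fmt.Fprintf(os.Stderr, "failed to create directory %q: %v\n", base, err)
		return
	}
	// Timestamped directory name, e.g. kubeadm-backup-manifests-2018-05-29-17-04-39.
	name := fmt.Sprintf("%s-%s", "kubeadm-backup-manifests", time.Now().Format("2006-01-02-15-04-05"))
	backupDir := path.Join(base, name)
	if err := os.Mkdir(backupDir, 0700); err != nil {
		fmt.Fprintf(os.Stderr, "could not create timestamp directory: %v\n", err)
		return
	}
	fmt.Println("manifest backups would be kept in:", backupDir)
}

The timestamp suffix is what lets backups from successive upgrades coexist under /etc/kubernetes/tmp instead of overwriting one another, which is what makes it safe for the path manager to keep them around after the upgrade finishes.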
--- cmd/kubeadm/app/cmd/upgrade/BUILD | 1 + cmd/kubeadm/app/cmd/upgrade/apply.go | 10 +- cmd/kubeadm/app/cmd/upgrade/apply_test.go | 92 +++++++++++++++++++ cmd/kubeadm/app/constants/constants.go | 28 +++++- cmd/kubeadm/app/phases/upgrade/staticpods.go | 43 +++++++-- .../app/phases/upgrade/staticpods_test.go | 91 ++++++++++++++++++ cmd/kubeadm/app/util/etcd/BUILD | 6 +- cmd/kubeadm/app/util/etcd/etcd.go | 6 ++ cmd/kubeadm/app/util/etcd/etcd_test.go | 48 ++++++++++ 9 files changed, 309 insertions(+), 16 deletions(-) diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index 9a74d83aa07..4018c1c876b 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -53,6 +53,7 @@ go_test( embed = [":go_default_library"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/upgrade:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 1acb2f895b1..94f00b1771d 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun" + etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" "k8s.io/kubernetes/pkg/util/version" ) @@ -266,12 +267,19 @@ func PerformControlPlaneUpgrade(flags *applyFlags, client clientset.Interface, w return DryRunStaticPodUpgrade(internalcfg) } + // Don't save etcd backup directory if etcd is HA, as this could cause corruption return PerformStaticPodUpgrade(client, waiter, internalcfg, flags.etcdUpgrade) } +// GetPathManagerForUpgrade returns a path manager properly configured for the given MasterConfiguration. +func GetPathManagerForUpgrade(internalcfg *kubeadmapi.MasterConfiguration, etcdUpgrade bool) (upgrade.StaticPodPathManager, error) { + isHAEtcd := etcdutil.CheckConfigurationIsHA(&internalcfg.Etcd) + return upgrade.NewKubeStaticPodPathManagerUsingTempDirs(constants.GetStaticPodDirectory(), true, etcdUpgrade && !isHAEtcd) +} + // PerformStaticPodUpgrade performs the upgrade of the control plane components for a static pod hosted cluster func PerformStaticPodUpgrade(client clientset.Interface, waiter apiclient.Waiter, internalcfg *kubeadmapi.MasterConfiguration, etcdUpgrade bool) error { - pathManager, err := upgrade.NewKubeStaticPodPathManagerUsingTempDirs(constants.GetStaticPodDirectory()) + pathManager, err := GetPathManagerForUpgrade(internalcfg, etcdUpgrade) if err != nil { return err } diff --git a/cmd/kubeadm/app/cmd/upgrade/apply_test.go b/cmd/kubeadm/app/cmd/upgrade/apply_test.go index 24d1778f8be..a7cd2394b5f 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply_test.go @@ -17,8 +17,13 @@ limitations under the License. 
package upgrade import ( + "io/ioutil" + "os" "reflect" "testing" + + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" + "k8s.io/kubernetes/cmd/kubeadm/app/constants" ) func TestSetImplicitFlags(t *testing.T) { @@ -145,3 +150,90 @@ func TestSetImplicitFlags(t *testing.T) { } } } + +func TestGetPathManagerForUpgrade(t *testing.T) { + + haEtcd := &kubeadmapi.MasterConfiguration{ + Etcd: kubeadmapi.Etcd{ + External: &kubeadmapi.ExternalEtcd{ + Endpoints: []string{"10.100.0.1:2379", "10.100.0.2:2379", "10.100.0.3:2379"}, + }, + }, + } + + noHAEtcd := &kubeadmapi.MasterConfiguration{} + + tests := []struct { + name string + cfg *kubeadmapi.MasterConfiguration + etcdUpgrade bool + shouldDeleteEtcd bool + }{ + { + name: "ha etcd but no etcd upgrade", + cfg: haEtcd, + etcdUpgrade: false, + shouldDeleteEtcd: true, + }, + { + name: "non-ha etcd with etcd upgrade", + cfg: noHAEtcd, + etcdUpgrade: true, + shouldDeleteEtcd: false, + }, + { + name: "ha etcd and etcd upgrade", + cfg: haEtcd, + etcdUpgrade: true, + shouldDeleteEtcd: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Use a temporary directory + tmpdir, err := ioutil.TempDir("", "TestGetPathManagerForUpgrade") + if err != nil { + t.Fatalf("unexpected error making temporary directory: %v", err) + } + oldK8sDir := constants.KubernetesDir + constants.KubernetesDir = tmpdir + defer func() { + constants.KubernetesDir = oldK8sDir + os.RemoveAll(tmpdir) + }() + + pathmgr, err := GetPathManagerForUpgrade(test.cfg, test.etcdUpgrade) + if err != nil { + t.Fatalf("unexpected error creating path manager: %v", err) + } + + if _, err := os.Stat(pathmgr.BackupManifestDir()); os.IsNotExist(err) { + t.Errorf("expected manifest dir %s to exist, but it did not (%v)", pathmgr.BackupManifestDir(), err) + } + + if _, err := os.Stat(pathmgr.BackupEtcdDir()); os.IsNotExist(err) { + t.Errorf("expected etcd dir %s to exist, but it did not (%v)", pathmgr.BackupEtcdDir(), err) + } + + if err := pathmgr.CleanupDirs(); err != nil { + t.Fatalf("unexpected error cleaning up directories: %v", err) + } + + if _, err := os.Stat(pathmgr.BackupManifestDir()); os.IsNotExist(err) { + t.Errorf("expected manifest dir %s to exist, but it did not (%v)", pathmgr.BackupManifestDir(), err) + } + + if test.shouldDeleteEtcd { + if _, err := os.Stat(pathmgr.BackupEtcdDir()); !os.IsNotExist(err) { + t.Errorf("expected etcd dir %s not to exist, but it did (%v)", pathmgr.BackupEtcdDir(), err) + } + } else { + if _, err := os.Stat(pathmgr.BackupEtcdDir()); os.IsNotExist(err) { + t.Errorf("expected etcd dir %s to exist, but it did not", pathmgr.BackupEtcdDir()) + } + } + }) + } + +} diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 3a6f4233f4f..544f3a3f7cd 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -21,6 +21,7 @@ import ( "io/ioutil" "net" "os" + "path" "path/filepath" "time" @@ -38,7 +39,8 @@ const ( // ManifestsSubDirName defines directory name to store manifests ManifestsSubDirName = "manifests" // TempDirForKubeadm defines temporary directory for kubeadm - TempDirForKubeadm = "/etc/kubernetes/tmp" + // should be joined with KubernetesDir. 
+ TempDirForKubeadm = "tmp" // CACertAndKeyBaseName defines certificate authority base name CACertAndKeyBaseName = "ca" @@ -343,18 +345,36 @@ func AddSelfHostedPrefix(componentName string) string { // CreateTempDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp (not using /tmp as that would potentially be dangerous) func CreateTempDirForKubeadm(dirName string) (string, error) { + tempDir := path.Join(KubernetesDir, TempDirForKubeadm) // creates target folder if not already exists - if err := os.MkdirAll(TempDirForKubeadm, 0700); err != nil { - return "", fmt.Errorf("failed to create directory %q: %v", TempDirForKubeadm, err) + if err := os.MkdirAll(tempDir, 0700); err != nil { + return "", fmt.Errorf("failed to create directory %q: %v", tempDir, err) } - tempDir, err := ioutil.TempDir(TempDirForKubeadm, dirName) + tempDir, err := ioutil.TempDir(tempDir, dirName) if err != nil { return "", fmt.Errorf("couldn't create a temporary directory: %v", err) } return tempDir, nil } +// CreateTimestampDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp formatted with the current date +func CreateTimestampDirForKubeadm(dirName string) (string, error) { + tempDir := path.Join(KubernetesDir, TempDirForKubeadm) + // creates target folder if not already exists + if err := os.MkdirAll(tempDir, 0700); err != nil { + return "", fmt.Errorf("failed to create directory %q: %v", tempDir, err) + } + + timestampDirName := fmt.Sprintf("%s-%s", dirName, time.Now().Format("2006-01-02-15-04-05")) + timestampDir := path.Join(tempDir, timestampDirName) + if err := os.Mkdir(timestampDir, 0700); err != nil { + return "", fmt.Errorf("could not create timestamp directory: %v", err) + } + + return timestampDir, nil +} + // GetDNSIP returns a dnsIP, which is 10th IP in svcSubnet CIDR range func GetDNSIP(svcSubnet string) (net.IP, error) { // Get the service subnet CIDR diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index 77056dcadda..33b1a0dae8a 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -51,6 +51,8 @@ type StaticPodPathManager interface { BackupManifestDir() string // BackupEtcdDir should point to the backup directory used for backuping manifests during the transition BackupEtcdDir() string + // CleanupDirs cleans up all temporary directories + CleanupDirs() error } // KubeStaticPodPathManager is a real implementation of StaticPodPathManager that is used when upgrading a static pod cluster @@ -59,34 +61,39 @@ type KubeStaticPodPathManager struct { tempManifestDir string backupManifestDir string backupEtcdDir string + + keepManifestDir bool + keepEtcdDir bool } // NewKubeStaticPodPathManager creates a new instance of KubeStaticPodPathManager -func NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir string) StaticPodPathManager { +func NewKubeStaticPodPathManager(realDir, tempDir, backupDir, backupEtcdDir string, keepManifestDir, keepEtcdDir bool) StaticPodPathManager { return &KubeStaticPodPathManager{ realManifestDir: realDir, tempManifestDir: tempDir, backupManifestDir: backupDir, backupEtcdDir: backupEtcdDir, + keepManifestDir: keepManifestDir, + keepEtcdDir: keepEtcdDir, } } // NewKubeStaticPodPathManagerUsingTempDirs creates a new instance of KubeStaticPodPathManager with temporary directories backing it -func NewKubeStaticPodPathManagerUsingTempDirs(realManifestDir string) (StaticPodPathManager, error) 
{ +func NewKubeStaticPodPathManagerUsingTempDirs(realManifestDir string, saveManifestsDir, saveEtcdDir bool) (StaticPodPathManager, error) { upgradedManifestsDir, err := constants.CreateTempDirForKubeadm("kubeadm-upgraded-manifests") if err != nil { return nil, err } - backupManifestsDir, err := constants.CreateTempDirForKubeadm("kubeadm-backup-manifests") + backupManifestsDir, err := constants.CreateTimestampDirForKubeadm("kubeadm-backup-manifests") if err != nil { return nil, err } - backupEtcdDir, err := constants.CreateTempDirForKubeadm("kubeadm-backup-etcd") + backupEtcdDir, err := constants.CreateTimestampDirForKubeadm("kubeadm-backup-etcd") if err != nil { return nil, err } - return NewKubeStaticPodPathManager(realManifestDir, upgradedManifestsDir, backupManifestsDir, backupEtcdDir), nil + return NewKubeStaticPodPathManager(realManifestDir, upgradedManifestsDir, backupManifestsDir, backupEtcdDir, saveManifestsDir, saveEtcdDir), nil } // MoveFile should move a file from oldPath to newPath @@ -129,6 +136,26 @@ func (spm *KubeStaticPodPathManager) BackupEtcdDir() string { return spm.backupEtcdDir } +// CleanupDirs cleans up all temporary directories except those the user has requested to keep around +func (spm *KubeStaticPodPathManager) CleanupDirs() error { + if err := os.RemoveAll(spm.TempManifestDir()); err != nil { + return err + } + if !spm.keepManifestDir { + if err := os.RemoveAll(spm.BackupManifestDir()); err != nil { + return err + } + } + + if !spm.keepEtcdDir { + if err := os.RemoveAll(spm.BackupEtcdDir()); err != nil { + return err + } + } + + return nil +} + func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticPodPathManager, cfg *kubeadmapi.MasterConfiguration, beforePodHash string, recoverManifests map[string]string, isTLSUpgrade bool) error { // Special treatment is required for etcd case, when rollbackOldManifests should roll back etcd // manifests only for the case when component is Etcd @@ -449,11 +476,7 @@ func StaticPodControlPlane(waiter apiclient.Waiter, pathMgr StaticPodPathManager // Remove the temporary directories used on a best-effort (don't fail if the calls error out) // The calls are set here by design; we should _not_ use "defer" above as that would remove the directories // even in the "fail and rollback" case, where we want the directories preserved for the user. - os.RemoveAll(pathMgr.TempManifestDir()) - os.RemoveAll(pathMgr.BackupManifestDir()) - os.RemoveAll(pathMgr.BackupEtcdDir()) - - return nil + return pathMgr.CleanupDirs() } // rollbackOldManifests rolls back the backed-up manifests if something went wrong. 
diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 1ba9042b77b..f8b85d89c59 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -203,6 +203,19 @@ func (spm *fakeStaticPodPathManager) BackupEtcdDir() string { return spm.backupEtcdDir } +func (spm *fakeStaticPodPathManager) CleanupDirs() error { + if err := os.RemoveAll(spm.TempManifestDir()); err != nil { + return err + } + if err := os.RemoveAll(spm.BackupManifestDir()); err != nil { + return err + } + if err := os.RemoveAll(spm.BackupEtcdDir()); err != nil { + return err + } + return nil +} + type fakeTLSEtcdClient struct{ TLS bool } func (c fakeTLSEtcdClient) HasTLS() bool { @@ -513,3 +526,81 @@ func getConfig(version, certsDir, etcdDataDir string) (*kubeadmapi.MasterConfigu kubeadmscheme.Scheme.Convert(externalcfg, internalcfg, nil) return internalcfg, nil } + +func getTempDir(t *testing.T, name string) (string, func()) { + dir, err := ioutil.TempDir(os.TempDir(), name) + if err != nil { + t.Fatalf("couldn't make temporary directory: %v", err) + } + + return dir, func() { + os.RemoveAll(dir) + } +} + +func TestCleanupDirs(t *testing.T) { + tests := []struct { + name string + keepManifest, keepEtcd bool + }{ + { + name: "save manifest backup", + keepManifest: true, + }, + { + name: "save both etcd and manifest", + keepManifest: true, + keepEtcd: true, + }, + { + name: "save nothing", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + realManifestDir, cleanup := getTempDir(t, "realManifestDir") + defer cleanup() + + tempManifestDir, cleanup := getTempDir(t, "tempManifestDir") + defer cleanup() + + backupManifestDir, cleanup := getTempDir(t, "backupManifestDir") + defer cleanup() + + backupEtcdDir, cleanup := getTempDir(t, "backupEtcdDir") + defer cleanup() + + mgr := NewKubeStaticPodPathManager(realManifestDir, tempManifestDir, backupManifestDir, backupEtcdDir, test.keepManifest, test.keepEtcd) + err := mgr.CleanupDirs() + if err != nil { + t.Errorf("unexpected error cleaning up: %v", err) + } + + if _, err := os.Stat(tempManifestDir); !os.IsNotExist(err) { + t.Errorf("%q should not have existed", tempManifestDir) + } + _, err = os.Stat(backupManifestDir) + if test.keepManifest { + if err != nil { + t.Errorf("unexpected error getting backup manifest dir") + } + } else { + if !os.IsNotExist(err) { + t.Error("expected backup manifest to not exist") + } + } + + _, err = os.Stat(backupEtcdDir) + if test.keepEtcd { + if err != nil { + t.Errorf("unexpected error getting backup etcd dir") + } + } else { + if !os.IsNotExist(err) { + t.Error("expected backup etcd dir to not exist") + } + } + }) + } +} diff --git a/cmd/kubeadm/app/util/etcd/BUILD b/cmd/kubeadm/app/util/etcd/BUILD index 8a439c27352..852e914f87f 100644 --- a/cmd/kubeadm/app/util/etcd/BUILD +++ b/cmd/kubeadm/app/util/etcd/BUILD @@ -6,6 +6,7 @@ go_library( importpath = "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd", visibility = ["//visibility:public"], deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util/staticpod:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", @@ -17,7 +18,10 @@ go_test( name = "go_default_test", srcs = ["etcd_test.go"], embed = [":go_default_library"], - deps = ["//cmd/kubeadm/test:go_default_library"], + deps = [ + "//cmd/kubeadm/app/apis/kubeadm:go_default_library", + 
"//cmd/kubeadm/test:go_default_library", + ], ) filegroup( diff --git a/cmd/kubeadm/app/util/etcd/etcd.go b/cmd/kubeadm/app/util/etcd/etcd.go index a390d670640..3470da6b5c9 100644 --- a/cmd/kubeadm/app/util/etcd/etcd.go +++ b/cmd/kubeadm/app/util/etcd/etcd.go @@ -26,6 +26,7 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/pkg/transport" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod" ) @@ -216,3 +217,8 @@ func (c Client) WaitForClusterAvailable(delay time.Duration, retries int, retryI } return false, fmt.Errorf("timeout waiting for etcd cluster to be available") } + +// CheckConfigurationIsHA returns true if the given MasterConfiguration etcd block appears to be an HA configuration. +func CheckConfigurationIsHA(cfg *kubeadmapi.Etcd) bool { + return cfg.External != nil && len(cfg.External.Endpoints) > 1 +} diff --git a/cmd/kubeadm/app/util/etcd/etcd_test.go b/cmd/kubeadm/app/util/etcd/etcd_test.go index 1003f6cf262..3150183bc47 100644 --- a/cmd/kubeadm/app/util/etcd/etcd_test.go +++ b/cmd/kubeadm/app/util/etcd/etcd_test.go @@ -22,6 +22,7 @@ import ( "path/filepath" "testing" + kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" testutil "k8s.io/kubernetes/cmd/kubeadm/test" ) @@ -195,3 +196,50 @@ func TestPodManifestHasTLS(t *testing.T) { } } } + +func TestCheckConfigurationIsHA(t *testing.T) { + var tests = []struct { + name string + cfg *kubeadmapi.Etcd + expected bool + }{ + { + name: "HA etcd", + cfg: &kubeadmapi.Etcd{ + External: &kubeadmapi.ExternalEtcd{ + Endpoints: []string{"10.100.0.1:2379", "10.100.0.2:2379", "10.100.0.3:2379"}, + }, + }, + expected: true, + }, + { + name: "single External etcd", + cfg: &kubeadmapi.Etcd{ + External: &kubeadmapi.ExternalEtcd{ + Endpoints: []string{"10.100.0.1:2379"}, + }, + }, + expected: false, + }, + { + name: "local etcd", + cfg: &kubeadmapi.Etcd{ + Local: &kubeadmapi.LocalEtcd{}, + }, + expected: false, + }, + { + name: "empty etcd struct", + cfg: &kubeadmapi.Etcd{}, + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if isHA := CheckConfigurationIsHA(test.cfg); isHA != test.expected { + t.Errorf("expected isHA to be %v, got %v", test.expected, isHA) + } + }) + } +} From 179e5d7006ffd57d31c8f9828c9661cefe2c515e Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 31 May 2018 17:28:12 -0400 Subject: [PATCH 300/307] Rename online resizine feature gate --- pkg/features/kube_features.go | 48 +++++++++---------- .../cache/actual_state_of_world.go | 2 +- .../desired_state_of_world_populator.go | 4 +- .../desired_state_of_world_populator_test.go | 4 +- .../volumemanager/reconciler/reconciler.go | 2 +- .../reconciler/reconciler_test.go | 2 +- 6 files changed, 31 insertions(+), 31 deletions(-) diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 7e0b0f8f94b..65250c499d4 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -102,7 +102,7 @@ const ( // owner: @mlmhl // alpha: v1.11 // Ability to expand persistent volumes' file system without unmounting volumes. 
- ExpandPersistentVolumesFSWithoutUnmounting utilfeature.Feature = "ExpandPersistentVolumesFSWithoutUnmounting" + ExpandInUsePersistentVolumes utilfeature.Feature = "ExpandInUsePersistentVolumes" // owner: @verb // alpha: v1.10 @@ -333,29 +333,29 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS MountPropagation: {Default: true, PreRelease: utilfeature.Beta}, QOSReserved: {Default: false, PreRelease: utilfeature.Alpha}, ExpandPersistentVolumes: {Default: true, PreRelease: utilfeature.Beta}, - ExpandPersistentVolumesFSWithoutUnmounting: {Default: false, PreRelease: utilfeature.Alpha}, - CPUManager: {Default: true, PreRelease: utilfeature.Beta}, - ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, - MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, - VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta}, - CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta}, - CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta}, - BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, - StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA}, - ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, - SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA}, - SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha}, - HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha}, - ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha}, - TokenRequest: {Default: false, PreRelease: utilfeature.Alpha}, - TokenRequestProjection: {Default: false, PreRelease: utilfeature.Alpha}, - CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, - GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta}, - RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, - VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, - BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, - DynamicProvisioningScheduling: {Default: false, PreRelease: utilfeature.Alpha}, - VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, + ExpandInUsePersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, + CPUManager: {Default: true, PreRelease: utilfeature.Beta}, + ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, + MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeScheduling: {Default: true, PreRelease: utilfeature.Beta}, + CSIPersistentVolume: {Default: true, PreRelease: utilfeature.Beta}, + CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta}, + BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA}, + ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, + SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA}, + SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha}, + HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha}, + ScheduleDaemonSetPods: {Default: false, PreRelease: utilfeature.Alpha}, + TokenRequest: {Default: false, PreRelease: utilfeature.Alpha}, + TokenRequestProjection: {Default: false, PreRelease: utilfeature.Alpha}, + CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, + GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.Beta}, + RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeSubpath: 
{Default: true, PreRelease: utilfeature.GA}, + BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, + DynamicProvisioningScheduling: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 5b375e780c7..82aef86c720 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -630,7 +630,7 @@ func (asw *actualStateOfWorld) PodExistsInVolume( return true, volumeObj.devicePath, newRemountRequiredError(volumeObj.volumeName, podObj.podName) } if podObj.fsResizeRequired && - utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumesFSWithoutUnmounting) { + utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) { return true, volumeObj.devicePath, newFsResizeRequiredError(volumeObj.volumeName, podObj.podName) } } diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index f4894d8e92d..e65fdc1b2f8 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -185,7 +185,7 @@ func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool { func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() { // Map unique pod name to outer volume name to MountedVolume. mountedVolumesForPod := make(map[volumetypes.UniquePodName]map[string]cache.MountedVolume) - if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumesFSWithoutUnmounting) { + if utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) { for _, mountedVolume := range dswp.actualStateOfWorld.GetMountedVolumes() { mountedVolumes, exist := mountedVolumesForPod[mountedVolume.PodName] if !exist { @@ -323,7 +323,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( volumeSpec.Name(), uniquePodName) - if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumesFSWithoutUnmounting) { + if utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) { dswp.checkVolumeFSResize(pod, podVolume, pvc, volumeSpec, uniquePodName, mountedVolumesForPod, processedVolumesForFSResize) } diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go index 5acd7df04d9..969ac66911f 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go @@ -562,9 +562,9 @@ func volumeCapacity(size int) v1.ResourceList { } func setExpandOnlinePersistentVolumesFeatureGate(value string, t *testing.T) { - err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%s", features.ExpandPersistentVolumesFSWithoutUnmounting, value)) + err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%s", features.ExpandInUsePersistentVolumes, value)) if err != nil { - t.Fatalf("Set ExpandPersistentVolumesFSWithoutUnmounting feature gate to %s failed: %v", value, err) + t.Fatalf("Set ExpandInUsePersistentVolumes feature 
gate to %s failed: %v", value, err) } } diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 21a58394e4a..2ffb5c99fa7 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -255,7 +255,7 @@ func (rc *reconciler) reconcile() { } } } else if cache.IsFSResizeRequiredError(err) && - utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumesFSWithoutUnmounting) { + utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) { glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", "")) err := rc.operationExecutor.ExpandVolumeFSWithoutUnmounting( volumeToMount.VolumeToMount, diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go index c67ea7cc3fd..31a4875ecdd 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go @@ -973,7 +973,7 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) { // Mark volume as fsResizeRequired in ASW. // Verifies volume's fsResizeRequired flag is cleared later. func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) { - utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExpandPersistentVolumesFSWithoutUnmounting)) + utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.ExpandInUsePersistentVolumes)) pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "pv", From c844b9afc4d1531f2c80296a7d4e2a01a8c14691 Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Thu, 31 May 2018 15:25:58 -0700 Subject: [PATCH 301/307] disable memcg for testing prior to 1.11 release --- cluster/gce/config-test.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index cfefaf937f6..c23586e1bcb 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -191,12 +191,6 @@ TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}" KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}" -if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then - NODE_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true" -fi -if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then - MASTER_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true" -fi APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --vmodule=httplog=3 --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1,settings.k8s.io/v1alpha1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}" CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}" SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}" From 174b6d0e2fc99d9964a7d5a7484aa0b7d50b4be1 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Thu, 17 May 2018 18:10:12 -0700 Subject: [PATCH 302/307] Proxy container streaming in kubelet. 
--- cmd/kubelet/app/options/container_runtime.go | 15 +-- cmd/kubelet/app/server.go | 23 +--- pkg/kubelet/config/flags.go | 10 ++ pkg/kubelet/dockershim/docker_service.go | 33 ++++-- .../dockershim/remote/docker_server.go | 2 +- pkg/kubelet/kubelet.go | 71 ++++++------ pkg/kubelet/kubelet_pods.go | 23 ---- pkg/kubelet/server/server.go | 108 +++++++----------- pkg/kubelet/server/server_test.go | 24 +--- pkg/kubelet/server/streaming/server.go | 12 +- 10 files changed, 138 insertions(+), 183 deletions(-) diff --git a/cmd/kubelet/app/options/container_runtime.go b/cmd/kubelet/app/options/container_runtime.go index 2f04036bffb..d07cfb699f4 100644 --- a/cmd/kubelet/app/options/container_runtime.go +++ b/cmd/kubelet/app/options/container_runtime.go @@ -45,12 +45,13 @@ func NewContainerRuntimeOptions() *config.ContainerRuntimeOptions { } return &config.ContainerRuntimeOptions{ - ContainerRuntime: kubetypes.DockerContainerRuntime, - DockerEndpoint: dockerEndpoint, - DockershimRootDirectory: "/var/lib/dockershim", - DockerDisableSharedPID: true, - PodSandboxImage: defaultPodSandboxImage, - ImagePullProgressDeadline: metav1.Duration{Duration: 1 * time.Minute}, - ExperimentalDockershim: false, + ContainerRuntime: kubetypes.DockerContainerRuntime, + RedirectContainerStreaming: false, + DockerEndpoint: dockerEndpoint, + DockershimRootDirectory: "/var/lib/dockershim", + DockerDisableSharedPID: true, + PodSandboxImage: defaultPodSandboxImage, + ImagePullProgressDeadline: metav1.Duration{Duration: 1 * time.Minute}, + ExperimentalDockershim: false, } } diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index ca13dc51d30..2d354375ba2 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -1170,30 +1170,13 @@ func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConf SupportedPortForwardProtocols: streaming.DefaultConfig.SupportedPortForwardProtocols, } + // Standalone dockershim will always start the local streaming server. ds, err := dockershim.NewDockerService(dockerClientConfig, r.PodSandboxImage, streamingConfig, &pluginSettings, - f.RuntimeCgroups, c.CgroupDriver, r.DockershimRootDirectory, r.DockerDisableSharedPID) + f.RuntimeCgroups, c.CgroupDriver, r.DockershimRootDirectory, r.DockerDisableSharedPID, true /*startLocalStreamingServer*/) if err != nil { return err } glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") server := dockerremote.NewDockerServer(f.RemoteRuntimeEndpoint, ds) - if err := server.Start(stopCh); err != nil { - return err - } - - streamingServer := &http.Server{ - Addr: net.JoinHostPort(c.Address, strconv.Itoa(int(c.Port))), - Handler: ds, - } - - go func() { - <-stopCh - streamingServer.Shutdown(context.Background()) - }() - - // Start the streaming server - if err := streamingServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - return err - } - return nil + return server.Start(stopCh) } diff --git a/pkg/kubelet/config/flags.go b/pkg/kubelet/config/flags.go index c51b4de8444..377d64e4574 100644 --- a/pkg/kubelet/config/flags.go +++ b/pkg/kubelet/config/flags.go @@ -31,6 +31,15 @@ type ContainerRuntimeOptions struct { ContainerRuntime string // RuntimeCgroups that container runtime is expected to be isolated in. RuntimeCgroups string + // RedirectContainerStreaming enables container streaming redirect. + // When RedirectContainerStreaming is false, kubelet will proxy container streaming data + // between apiserver and container runtime. 
This approach is more secure, but the proxy + // introduces some overhead. + // When RedirectContainerStreaming is true, kubelet will return an http redirect to apiserver, + // and apiserver will access container runtime directly. This approach is more performant, + // but less secure because the connection between apiserver and container runtime is not + // authenticated. + RedirectContainerStreaming bool // Docker-specific options. @@ -77,6 +86,7 @@ func (s *ContainerRuntimeOptions) AddFlags(fs *pflag.FlagSet) { // General settings. fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'remote', 'rkt (deprecated)'.") fs.StringVar(&s.RuntimeCgroups, "runtime-cgroups", s.RuntimeCgroups, "Optional absolute name of cgroups to create and run the runtime in.") + fs.BoolVar(&s.RedirectContainerStreaming, "redirect-container-streaming", s.RedirectContainerStreaming, "Enables container streaming redirect. If false, kubelet will proxy container streaming data between apiserver and container runtime; if true, kubelet will return an http redirect to apiserver, and apiserver will access container runtime directly. The proxy approach is more secure, but introduces some overhead. The redirect approach is more performant, but less secure because the connection between apiserver and container runtime is not authenticated.") // Docker-specific settings. fs.BoolVar(&s.ExperimentalDockershim, "experimental-dockershim", s.ExperimentalDockershim, "Enable dockershim only mode. In this mode, kubelet will only start dockershim without any other functionalities. This flag only serves test purpose, please do not use it unless you are conscious of what you are doing. [default=false]") diff --git a/pkg/kubelet/dockershim/docker_service.go b/pkg/kubelet/dockershim/docker_service.go index 5399094cc3c..2a7cce57f0d 100644 --- a/pkg/kubelet/dockershim/docker_service.go +++ b/pkg/kubelet/dockershim/docker_service.go @@ -85,7 +85,7 @@ const ( type CRIService interface { runtimeapi.RuntimeServiceServer runtimeapi.ImageServiceServer - Start() error + Start(<-chan struct{}) error } // DockerService is an interface that embeds the new RuntimeService and @@ -188,7 +188,8 @@ func NewDockerClientFromConfig(config *ClientConfig) libdocker.Interface { // NOTE: Anything passed to DockerService should be eventually handled in another way when we switch to running the shim as a different process. 
func NewDockerService(config *ClientConfig, podSandboxImage string, streamingConfig *streaming.Config, - pluginSettings *NetworkPluginSettings, cgroupsName string, kubeCgroupDriver string, dockershimRootDir string, disableSharedPID bool) (DockerService, error) { + pluginSettings *NetworkPluginSettings, cgroupsName string, kubeCgroupDriver string, dockershimRootDir string, + disableSharedPID, startLocalStreamingServer bool) (DockerService, error) { client := NewDockerClientFromConfig(config) @@ -207,10 +208,11 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon client: client, execHandler: &NativeExecHandler{}, }, - containerManager: cm.NewContainerManager(cgroupsName, client), - checkpointManager: checkpointManager, - disableSharedPID: disableSharedPID, - networkReady: make(map[string]bool), + containerManager: cm.NewContainerManager(cgroupsName, client), + checkpointManager: checkpointManager, + disableSharedPID: disableSharedPID, + startLocalStreamingServer: startLocalStreamingServer, + networkReady: make(map[string]bool), } // check docker version compatibility. @@ -307,6 +309,9 @@ type dockerService struct { // See proposals/pod-pid-namespace.md for details. // TODO: Remove once the escape hatch is no longer used (https://issues.k8s.io/41938) disableSharedPID bool + // startLocalStreamingServer indicates whether dockershim should start a + // streaming server on localhost. + startLocalStreamingServer bool } // TODO: handle context. @@ -395,13 +400,25 @@ func (ds *dockerService) GetPodPortMappings(podSandboxID string) ([]*hostport.Po } // Start initializes and starts components in dockerService. -func (ds *dockerService) Start() error { +func (ds *dockerService) Start(stopCh <-chan struct{}) error { // Initialize the legacy cleanup flag. + if ds.startLocalStreamingServer { + go func() { + <-stopCh + if err := ds.streamingServer.Stop(); err != nil { + glog.Errorf("Failed to stop streaming server: %v", err) + } + }() + go func() { + if err := ds.streamingServer.Start(true); err != nil && err != http.ErrServerClosed { + glog.Fatalf("Failed to start streaming server: %v", err) + } + }() + } return ds.containerManager.Start() } // Status returns the status of the runtime. -// TODO(random-liu): Set network condition accordingly here. func (ds *dockerService) Status(_ context.Context, r *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) { runtimeReady := &runtimeapi.RuntimeCondition{ Type: runtimeapi.RuntimeReady, diff --git a/pkg/kubelet/dockershim/remote/docker_server.go b/pkg/kubelet/dockershim/remote/docker_server.go index 1ac7560d41b..5e8967a8d5f 100644 --- a/pkg/kubelet/dockershim/remote/docker_server.go +++ b/pkg/kubelet/dockershim/remote/docker_server.go @@ -51,7 +51,7 @@ func NewDockerServer(endpoint string, s dockershim.CRIService) *DockerServer { // Start starts the dockershim grpc server. func (s *DockerServer) Start(stopCh <-chan struct{}) error { // Start the internal service. 
- if err := s.service.Start(); err != nil { + if err := s.service.Start(stopCh); err != nil { glog.Errorf("Unable to start docker service") return err } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 7b31fdeffbb..2d84aac0ae3 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -512,21 +512,22 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, nodeRef: nodeRef, nodeLabels: nodeLabels, nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration, - os: kubeDeps.OSInterface, - oomWatcher: oomWatcher, - cgroupsPerQOS: kubeCfg.CgroupsPerQOS, - cgroupRoot: kubeCfg.CgroupRoot, - mounter: kubeDeps.Mounter, - writer: kubeDeps.Writer, - maxPods: int(kubeCfg.MaxPods), - podsPerCore: int(kubeCfg.PodsPerCore), - syncLoopMonitor: atomic.Value{}, - daemonEndpoints: daemonEndpoints, - containerManager: kubeDeps.ContainerManager, - containerRuntimeName: containerRuntime, - nodeIP: parsedNodeIP, - nodeIPValidator: validateNodeIP, - clock: clock.RealClock{}, + os: kubeDeps.OSInterface, + oomWatcher: oomWatcher, + cgroupsPerQOS: kubeCfg.CgroupsPerQOS, + cgroupRoot: kubeCfg.CgroupRoot, + mounter: kubeDeps.Mounter, + writer: kubeDeps.Writer, + maxPods: int(kubeCfg.MaxPods), + podsPerCore: int(kubeCfg.PodsPerCore), + syncLoopMonitor: atomic.Value{}, + daemonEndpoints: daemonEndpoints, + containerManager: kubeDeps.ContainerManager, + containerRuntimeName: containerRuntime, + redirectContainerStreaming: crOptions.RedirectContainerStreaming, + nodeIP: parsedNodeIP, + nodeIPValidator: validateNodeIP, + clock: clock.RealClock{}, enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach, iptClient: utilipt.New(utilexec.New(), utildbus.New(), utilipt.ProtocolIpv4), makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains, @@ -605,16 +606,16 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, switch containerRuntime { case kubetypes.DockerContainerRuntime: // Create and start the CRI shim running as a grpc server. - streamingConfig := getStreamingConfig(kubeCfg, kubeDeps) + streamingConfig := getStreamingConfig(kubeCfg, kubeDeps, crOptions) ds, err := dockershim.NewDockerService(kubeDeps.DockerClientConfig, crOptions.PodSandboxImage, streamingConfig, &pluginSettings, runtimeCgroups, kubeCfg.CgroupDriver, crOptions.DockershimRootDirectory, - crOptions.DockerDisableSharedPID) + crOptions.DockerDisableSharedPID, !crOptions.RedirectContainerStreaming) if err != nil { return nil, err } - // For now, the CRI shim redirects the streaming requests to the - // kubelet, which handles the requests using DockerService.. - klet.criHandler = ds + if crOptions.RedirectContainerStreaming { + klet.criHandler = ds + } // The unix socket for kubelet <-> dockershim communication. glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q", @@ -1000,6 +1001,9 @@ type Kubelet struct { // The name of the container runtime containerRuntimeName string + // redirectContainerStreaming enables container streaming redirect. + redirectContainerStreaming bool + // Container runtime. containerRuntime kubecontainer.Runtime @@ -2097,11 +2101,6 @@ func (kl *Kubelet) BirthCry() { kl.recorder.Eventf(kl.nodeRef, v1.EventTypeNormal, events.StartingKubelet, "Starting kubelet.") } -// StreamingConnectionIdleTimeout returns the timeout for streaming connections to the HTTP server. 
-func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration { - return kl.streamingConnectionIdleTimeout -} - // ResyncInterval returns the interval used for periodic syncs. func (kl *Kubelet) ResyncInterval() time.Duration { return kl.resyncInterval @@ -2109,12 +2108,12 @@ func (kl *Kubelet) ResyncInterval() time.Duration { // ListenAndServe runs the kubelet HTTP server. func (kl *Kubelet) ListenAndServe(address net.IP, port uint, tlsOptions *server.TLSOptions, auth server.AuthInterface, enableDebuggingHandlers, enableContentionProfiling bool) { - server.ListenAndServeKubeletServer(kl, kl.resourceAnalyzer, address, port, tlsOptions, auth, enableDebuggingHandlers, enableContentionProfiling, kl.containerRuntime, kl.criHandler) + server.ListenAndServeKubeletServer(kl, kl.resourceAnalyzer, address, port, tlsOptions, auth, enableDebuggingHandlers, enableContentionProfiling, kl.redirectContainerStreaming, kl.criHandler) } // ListenAndServeReadOnly runs the kubelet HTTP server in read-only mode. func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) { - server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port, kl.containerRuntime) + server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port) } // Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around. @@ -2138,19 +2137,23 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool { } // Gets the streaming server configuration to use with in-process CRI shims. -func getStreamingConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies) *streaming.Config { +func getStreamingConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, crOptions *config.ContainerRuntimeOptions) *streaming.Config { config := &streaming.Config{ - // Use a relative redirect (no scheme or host). - BaseURL: &url.URL{ - Path: "/cri/", - }, StreamIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration, StreamCreationTimeout: streaming.DefaultConfig.StreamCreationTimeout, SupportedRemoteCommandProtocols: streaming.DefaultConfig.SupportedRemoteCommandProtocols, SupportedPortForwardProtocols: streaming.DefaultConfig.SupportedPortForwardProtocols, } - if kubeDeps.TLSOptions != nil { - config.TLSConfig = kubeDeps.TLSOptions.Config + if !crOptions.RedirectContainerStreaming { + config.Addr = net.JoinHostPort("localhost", "0") + } else { + // Use a relative redirect (no scheme or host). 
+ config.BaseURL = &url.URL{ + Path: "/cri/", + } + if kubeDeps.TLSOptions != nil { + config.TLSConfig = kubeDeps.TLSOptions.Config + } } return config } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 99694d0b5fc..38cc6d5559e 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -30,7 +30,6 @@ import ( "sort" "strings" "sync" - "time" "github.com/golang/glog" "k8s.io/api/core/v1" @@ -41,7 +40,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilvalidation "k8s.io/apimachinery/pkg/util/validation" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/tools/remotecommand" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/api/v1/resource" podshelper "k8s.io/kubernetes/pkg/apis/core/pods" @@ -1589,27 +1587,6 @@ func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containe return kl.runner.RunInContainer(container.ID, cmd, 0) } -// ExecInContainer executes a command in a container, connecting the supplied -// stdin/stdout/stderr to the command's IO streams. -func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - // TODO(random-liu): Remove this. - return fmt.Errorf("unimplemented") -} - -// AttachContainer uses the container runtime to attach the given streams to -// the given container. -func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { - // TODO(random-liu): Remove this. - return fmt.Errorf("unimplemented") -} - -// PortForward connects to the pod's port and copies data between the port -// and the stream. -func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error { - // TODO(random-liu): Remove this. - return fmt.Errorf("unimplemented") -} - // GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it. func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { container, err := kl.findContainer(podFullName, podUID, containerName) diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index fdde1fee4e8..2e5bbde211f 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -42,14 +42,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/apimachinery/pkg/util/proxy" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/httplog" "k8s.io/apiserver/pkg/util/flushwriter" - "k8s.io/client-go/tools/remotecommand" "k8s.io/kubernetes/pkg/api/legacyscheme" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/v1/validation" @@ -74,11 +73,11 @@ const ( // Server is a http.Handler which exposes kubelet functionality over HTTP. 
type Server struct { - auth AuthInterface - host HostInterface - restfulCont containerInterface - resourceAnalyzer stats.ResourceAnalyzer - runtime kubecontainer.Runtime + auth AuthInterface + host HostInterface + restfulCont containerInterface + resourceAnalyzer stats.ResourceAnalyzer + redirectContainerStreaming bool } type TLSOptions struct { @@ -124,11 +123,11 @@ func ListenAndServeKubeletServer( tlsOptions *TLSOptions, auth AuthInterface, enableDebuggingHandlers, - enableContentionProfiling bool, - runtime kubecontainer.Runtime, + enableContentionProfiling, + redirectContainerStreaming bool, criHandler http.Handler) { glog.Infof("Starting to listen on %s:%d", address, port) - handler := NewServer(host, resourceAnalyzer, auth, enableDebuggingHandlers, enableContentionProfiling, runtime, criHandler) + handler := NewServer(host, resourceAnalyzer, auth, enableDebuggingHandlers, enableContentionProfiling, redirectContainerStreaming, criHandler) s := &http.Server{ Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)), Handler: &handler, @@ -146,9 +145,9 @@ func ListenAndServeKubeletServer( } // ListenAndServeKubeletReadOnlyServer initializes a server to respond to HTTP network requests on the Kubelet. -func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer stats.ResourceAnalyzer, address net.IP, port uint, runtime kubecontainer.Runtime) { +func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer stats.ResourceAnalyzer, address net.IP, port uint) { glog.V(1).Infof("Starting to listen read-only on %s:%d", address, port) - s := NewServer(host, resourceAnalyzer, nil, false, false, runtime, nil) + s := NewServer(host, resourceAnalyzer, nil, false, false, false, nil) server := &http.Server{ Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)), @@ -173,12 +172,8 @@ type HostInterface interface { GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) GetRunningPods() ([]*v1.Pod, error) RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error) - ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error - AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error GetKubeletContainerLogs(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error ServeLogs(w http.ResponseWriter, req *http.Request) - PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error - StreamingConnectionIdleTimeout() time.Duration ResyncInterval() time.Duration GetHostname() string LatestLoopEntryTime() time.Time @@ -193,15 +188,15 @@ func NewServer( resourceAnalyzer stats.ResourceAnalyzer, auth AuthInterface, enableDebuggingHandlers, - enableContentionProfiling bool, - runtime kubecontainer.Runtime, + enableContentionProfiling, + redirectContainerStreaming bool, criHandler http.Handler) Server { server := Server{ - host: host, - resourceAnalyzer: resourceAnalyzer, - auth: auth, - restfulCont: &filteringContainer{Container: restful.NewContainer()}, - runtime: runtime, + host: host, + resourceAnalyzer: resourceAnalyzer, + auth: auth, + restfulCont: &filteringContainer{Container: restful.NewContainer()}, + redirectContainerStreaming: redirectContainerStreaming, } if auth != nil { server.InstallAuthFilter() 
@@ -627,6 +622,15 @@ func getPortForwardRequestParams(req *restful.Request) portForwardRequestParams } } +type responder struct { + errorMessage string +} + +func (r *responder) Error(w http.ResponseWriter, req *http.Request, err error) { + glog.Errorf("Error while proxying request: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) +} + // getAttach handles requests to attach to a container. func (s *Server) getAttach(request *restful.Request, response *restful.Response) { params := getExecRequestParams(request) @@ -643,26 +647,18 @@ func (s *Server) getAttach(request *restful.Request, response *restful.Response) } podFullName := kubecontainer.GetPodFullName(pod) - redirect, err := s.host.GetAttach(podFullName, params.podUID, params.containerName, *streamOpts) + url, err := s.host.GetAttach(podFullName, params.podUID, params.containerName, *streamOpts) if err != nil { streaming.WriteError(err, response.ResponseWriter) return } - if redirect != nil { - http.Redirect(response.ResponseWriter, request.Request, redirect.String(), http.StatusFound) + + if s.redirectContainerStreaming { + http.Redirect(response.ResponseWriter, request.Request, url.String(), http.StatusFound) return } - - remotecommandserver.ServeAttach(response.ResponseWriter, - request.Request, - s.host, - podFullName, - params.podUID, - params.containerName, - streamOpts, - s.host.StreamingConnectionIdleTimeout(), - remotecommandconsts.DefaultStreamCreationTimeout, - remotecommandconsts.SupportedStreamingProtocols) + handler := proxy.NewUpgradeAwareHandler(url, nil /*transport*/, false /*wrapTransport*/, false /*upgradeRequired*/, &responder{}) + handler.ServeHTTP(response.ResponseWriter, request.Request) } // getExec handles requests to run a command inside a container. @@ -681,27 +677,17 @@ func (s *Server) getExec(request *restful.Request, response *restful.Response) { } podFullName := kubecontainer.GetPodFullName(pod) - redirect, err := s.host.GetExec(podFullName, params.podUID, params.containerName, params.cmd, *streamOpts) + url, err := s.host.GetExec(podFullName, params.podUID, params.containerName, params.cmd, *streamOpts) if err != nil { streaming.WriteError(err, response.ResponseWriter) return } - if redirect != nil { - http.Redirect(response.ResponseWriter, request.Request, redirect.String(), http.StatusFound) + if s.redirectContainerStreaming { + http.Redirect(response.ResponseWriter, request.Request, url.String(), http.StatusFound) return } - - remotecommandserver.ServeExec(response.ResponseWriter, - request.Request, - s.host, - podFullName, - params.podUID, - params.containerName, - params.cmd, - streamOpts, - s.host.StreamingConnectionIdleTimeout(), - remotecommandconsts.DefaultStreamCreationTimeout, - remotecommandconsts.SupportedStreamingProtocols) + handler := proxy.NewUpgradeAwareHandler(url, nil /*transport*/, false /*wrapTransport*/, false /*upgradeRequired*/, &responder{}) + handler.ServeHTTP(response.ResponseWriter, request.Request) } // getRun handles requests to run a command inside a container. 
@@ -758,25 +744,17 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp return } - redirect, err := s.host.GetPortForward(pod.Name, pod.Namespace, pod.UID, *portForwardOptions) + url, err := s.host.GetPortForward(pod.Name, pod.Namespace, pod.UID, *portForwardOptions) if err != nil { streaming.WriteError(err, response.ResponseWriter) return } - if redirect != nil { - http.Redirect(response.ResponseWriter, request.Request, redirect.String(), http.StatusFound) + if s.redirectContainerStreaming { + http.Redirect(response.ResponseWriter, request.Request, url.String(), http.StatusFound) return } - - portforward.ServePortForward(response.ResponseWriter, - request.Request, - s.host, - kubecontainer.GetPodFullName(pod), - params.podUID, - portForwardOptions, - s.host.StreamingConnectionIdleTimeout(), - remotecommandconsts.DefaultStreamCreationTimeout, - portforward.SupportedProtocols) + handler := proxy.NewUpgradeAwareHandler(url, nil /*transport*/, false /*wrapTransport*/, false /*upgradeRequired*/, &responder{}) + handler.ServeHTTP(response.ResponseWriter, request.Request) } // ServeHTTP responds to HTTP requests on the Kubelet. diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index cdb978078cf..25776273a1b 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -46,7 +46,6 @@ import ( "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/tools/remotecommand" - utiltesting "k8s.io/client-go/util/testing" api "k8s.io/kubernetes/pkg/apis/core" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" // Do some initialization to decode the query parameters correctly. @@ -203,7 +202,6 @@ type serverTestFramework struct { fakeKubelet *fakeKubelet fakeAuth *fakeAuth testHTTPServer *httptest.Server - criHandler *utiltesting.FakeHandler } func newServerTest() *serverTestFramework { @@ -238,17 +236,13 @@ func newServerTestWithDebug(enableDebugging bool) *serverTestFramework { return authorizer.DecisionAllow, "", nil }, } - fw.criHandler = &utiltesting.FakeHandler{ - StatusCode: http.StatusOK, - } server := NewServer( fw.fakeKubelet, stats.NewResourceAnalyzer(fw.fakeKubelet, time.Minute), fw.fakeAuth, enableDebugging, false, - &kubecontainertesting.Mock{}, - fw.criHandler) + &kubecontainertesting.Mock{}) fw.serverUnderTest = &server fw.testHTTPServer = httptest.NewServer(fw.serverUnderTest) return fw @@ -1599,22 +1593,6 @@ func TestServePortForward(t *testing.T) { } } -func TestCRIHandler(t *testing.T) { - fw := newServerTest() - defer fw.testHTTPServer.Close() - - const ( - path = "/cri/exec/123456abcdef" - query = "cmd=echo+foo" - ) - resp, err := http.Get(fw.testHTTPServer.URL + path + "?" 
+ query) - require.NoError(t, err) - assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.Equal(t, "GET", fw.criHandler.RequestReceived.Method) - assert.Equal(t, path, fw.criHandler.RequestReceived.URL.Path) - assert.Equal(t, query, fw.criHandler.RequestReceived.URL.RawQuery) -} - func TestDebuggingDisabledHandlers(t *testing.T) { fw := newServerTestWithDebug(false) defer fw.testHTTPServer.Close() diff --git a/pkg/kubelet/server/streaming/server.go b/pkg/kubelet/server/streaming/server.go index ae1c046b025..7cbc424c41e 100644 --- a/pkg/kubelet/server/streaming/server.go +++ b/pkg/kubelet/server/streaming/server.go @@ -20,6 +20,7 @@ import ( "crypto/tls" "errors" "io" + "net" "net/http" "net/url" "path" @@ -71,6 +72,7 @@ type Config struct { Addr string // The optional base URL for constructing streaming URLs. If empty, the baseURL will be // constructed from the serve address. + // Note that for port "0", the URL port will be set to actual port in use. BaseURL *url.URL // How long to leave idle connections open for. @@ -233,10 +235,16 @@ func (s *server) Start(stayUp bool) error { return errors.New("stayUp=false is not yet implemented") } + listener, err := net.Listen("tcp", s.config.Addr) + if err != nil { + return err + } + // Use the actual address as baseURL host. This handles the "0" port case. + s.config.BaseURL.Host = listener.Addr().String() if s.config.TLSConfig != nil { - return s.server.ListenAndServeTLS("", "") // Use certs from TLSConfig. + return s.server.ServeTLS(listener, "", "") // Use certs from TLSConfig. } else { - return s.server.ListenAndServe() + return s.server.Serve(listener) } } From 1eb721248b8e70112cd2b118b435570eef1f1172 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Fri, 18 May 2018 16:08:44 -0700 Subject: [PATCH 303/307] Update unit test. --- pkg/kubelet/kubelet_pods_test.go | 18 +- pkg/kubelet/server/server_test.go | 899 ++++++++++---------- pkg/kubelet/server/server_websocket_test.go | 300 +++---- 3 files changed, 571 insertions(+), 646 deletions(-) diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 146ef3056ef..7fd9d984701 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package kubelet import ( - "bytes" "errors" "fmt" "io/ioutil" @@ -2095,7 +2094,7 @@ func (f *fakeReadWriteCloser) Close() error { return nil } -func TestExec(t *testing.T) { +func TestGetExec(t *testing.T) { const ( podName = "podFoo" podNamespace = "nsFoo" @@ -2106,9 +2105,6 @@ func TestExec(t *testing.T) { var ( podFullName = kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, podName, podNamespace)) command = []string{"ls"} - stdin = &bytes.Buffer{} - stdout = &fakeReadWriteCloser{} - stderr = &fakeReadWriteCloser{} ) testcases := []struct { @@ -2161,22 +2157,16 @@ func TestExec(t *testing.T) { assert.NoError(t, err, description) assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect") } - - err = kubelet.ExecInContainer(tc.podFullName, podUID, tc.container, command, stdin, stdout, stderr, tty, nil, 0) - assert.Error(t, err, description) } } -func TestPortForward(t *testing.T) { +func TestGetPortForward(t *testing.T) { const ( podName = "podFoo" podNamespace = "nsFoo" podUID types.UID = "12345678" port int32 = 5000 ) - var ( - stream = &fakeReadWriteCloser{} - ) testcases := []struct { description string @@ -2208,7 +2198,6 @@ func TestPortForward(t *testing.T) { }}, } - podFullName := kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, tc.podName, podNamespace)) description := "streaming - " + tc.description fakeRuntime := &containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime} kubelet.containerRuntime = fakeRuntime @@ -2221,9 +2210,6 @@ func TestPortForward(t *testing.T) { assert.NoError(t, err, description) assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect") } - - err = kubelet.PortForward(podFullName, podUID, port, stream) - assert.Error(t, err, description) } } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index 25776273a1b..e84bec4d649 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -46,42 +46,46 @@ import ( "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/client-go/tools/remotecommand" + utiltesting "k8s.io/client-go/util/testing" api "k8s.io/kubernetes/pkg/apis/core" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" // Do some initialization to decode the query parameters correctly. 
_ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/kubelet/cm" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - kubecontainertesting "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/kubelet/server/portforward" remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" "k8s.io/kubernetes/pkg/kubelet/server/stats" + "k8s.io/kubernetes/pkg/kubelet/server/streaming" "k8s.io/kubernetes/pkg/volume" ) const ( - testUID = "9b01b80f-8fb4-11e4-95ab-4200af06647" + testUID = "9b01b80f-8fb4-11e4-95ab-4200af06647" + testContainerID = "container789" + testPodSandboxID = "pod0987" ) type fakeKubelet struct { - podByNameFunc func(namespace, name string) (*v1.Pod, bool) - containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) - rawInfoFunc func(query *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) - machineInfoFunc func() (*cadvisorapi.MachineInfo, error) - podsFunc func() []*v1.Pod - runningPodsFunc func() ([]*v1.Pod, error) - logFunc func(w http.ResponseWriter, req *http.Request) - runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) - execFunc func(pod string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error - attachFunc func(pod string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool) error - portForwardFunc func(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error - containerLogsFunc func(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error - streamingConnectionIdleTimeoutFunc func() time.Duration - hostnameFunc func() string - resyncInterval time.Duration - loopEntryTime time.Time - plegHealth bool - redirectURL *url.URL + podByNameFunc func(namespace, name string) (*v1.Pod, bool) + containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) + rawInfoFunc func(query *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) + machineInfoFunc func() (*cadvisorapi.MachineInfo, error) + podsFunc func() []*v1.Pod + runningPodsFunc func() ([]*v1.Pod, error) + logFunc func(w http.ResponseWriter, req *http.Request) + runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) + getExecCheck func(string, types.UID, string, []string, remotecommandserver.Options) + getAttachCheck func(string, types.UID, string, remotecommandserver.Options) + getPortForwardCheck func(string, string, types.UID, portforward.V4Options) + + containerLogsFunc func(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error + hostnameFunc func() string + resyncInterval time.Duration + loopEntryTime time.Time + plegHealth bool + streamingRuntime streaming.Server } func (fk *fakeKubelet) ResyncInterval() time.Duration { @@ -136,32 +140,109 @@ func (fk *fakeKubelet) RunInContainer(podFullName string, uid types.UID, contain return fk.runFunc(podFullName, uid, containerName, cmd) } -func (fk *fakeKubelet) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - return fk.execFunc(name, uid, container, cmd, in, out, err, 
tty) +type fakeRuntime struct { + execFunc func(string, []string, io.Reader, io.WriteCloser, io.WriteCloser, bool, <-chan remotecommand.TerminalSize) error + attachFunc func(string, io.Reader, io.WriteCloser, io.WriteCloser, bool, <-chan remotecommand.TerminalSize) error + portForwardFunc func(string, int32, io.ReadWriteCloser) error } -func (fk *fakeKubelet) AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { - return fk.attachFunc(name, uid, container, in, out, err, tty) +func (f *fakeRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { + return f.execFunc(containerID, cmd, stdin, stdout, stderr, tty, resize) } -func (fk *fakeKubelet) PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error { - return fk.portForwardFunc(name, uid, port, stream) +func (f *fakeRuntime) Attach(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { + return f.attachFunc(containerID, stdin, stdout, stderr, tty, resize) +} + +func (f *fakeRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { + return f.portForwardFunc(podSandboxID, port, stream) +} + +type testStreamingServer struct { + streaming.Server + fakeRuntime *fakeRuntime + testHTTPServer *httptest.Server +} + +func newTestStreamingServer(streamIdleTimeout time.Duration) (s *testStreamingServer, err error) { + s = &testStreamingServer{} + s.testHTTPServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s.ServeHTTP(w, r) + })) + defer func() { + if err != nil { + s.testHTTPServer.Close() + } + }() + + testURL, err := url.Parse(s.testHTTPServer.URL) + if err != nil { + return nil, err + } + + s.fakeRuntime = &fakeRuntime{} + config := streaming.DefaultConfig + config.BaseURL = testURL + if streamIdleTimeout != 0 { + config.StreamIdleTimeout = streamIdleTimeout + } + s.Server, err = streaming.NewServer(config, s.fakeRuntime) + if err != nil { + return nil, err + } + return s, nil } func (fk *fakeKubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { - return fk.redirectURL, nil + if fk.getExecCheck != nil { + fk.getExecCheck(podFullName, podUID, containerName, cmd, streamOpts) + } + // Always use testContainerID + resp, err := fk.streamingRuntime.GetExec(&runtimeapi.ExecRequest{ + ContainerId: testContainerID, + Cmd: cmd, + Tty: streamOpts.TTY, + Stdin: streamOpts.Stdin, + Stdout: streamOpts.Stdout, + Stderr: streamOpts.Stderr, + }) + if err != nil { + return nil, err + } + return url.Parse(resp.GetUrl()) } func (fk *fakeKubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) { - return fk.redirectURL, nil + if fk.getAttachCheck != nil { + fk.getAttachCheck(podFullName, podUID, containerName, streamOpts) + } + // Always use testContainerID + resp, err := fk.streamingRuntime.GetAttach(&runtimeapi.AttachRequest{ + ContainerId: testContainerID, + Tty: streamOpts.TTY, + Stdin: streamOpts.Stdin, + Stdout: streamOpts.Stdout, + Stderr: streamOpts.Stderr, + }) + if err != nil { + return nil, err + } + return url.Parse(resp.GetUrl()) } func (fk *fakeKubelet) GetPortForward(podName, podNamespace string, podUID 
types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) { - return fk.redirectURL, nil -} - -func (fk *fakeKubelet) StreamingConnectionIdleTimeout() time.Duration { - return fk.streamingConnectionIdleTimeoutFunc() + if fk.getPortForwardCheck != nil { + fk.getPortForwardCheck(podName, podNamespace, podUID, portForwardOpts) + } + // Always use testPodSandboxID + resp, err := fk.streamingRuntime.GetPortForward(&runtimeapi.PortForwardRequest{ + PodSandboxId: testPodSandboxID, + Port: portForwardOpts.Ports, + }) + if err != nil { + return nil, err + } + return url.Parse(resp.GetUrl()) } // Unused functions @@ -198,17 +279,20 @@ func (f *fakeAuth) Authorize(a authorizer.Attributes) (authorized authorizer.Dec } type serverTestFramework struct { - serverUnderTest *Server - fakeKubelet *fakeKubelet - fakeAuth *fakeAuth - testHTTPServer *httptest.Server + serverUnderTest *Server + fakeKubelet *fakeKubelet + fakeAuth *fakeAuth + testHTTPServer *httptest.Server + fakeRuntime *fakeRuntime + testStreamingHTTPServer *httptest.Server + criHandler *utiltesting.FakeHandler } func newServerTest() *serverTestFramework { - return newServerTestWithDebug(true) + return newServerTestWithDebug(true, false, nil) } -func newServerTestWithDebug(enableDebugging bool) *serverTestFramework { +func newServerTestWithDebug(enableDebugging, redirectContainerStreaming bool, streamingServer streaming.Server) *serverTestFramework { fw := &serverTestFramework{} fw.fakeKubelet = &fakeKubelet{ hostnameFunc: func() string { @@ -223,7 +307,8 @@ func newServerTestWithDebug(enableDebugging bool) *serverTestFramework { }, }, true }, - plegHealth: true, + plegHealth: true, + streamingRuntime: streamingServer, } fw.fakeAuth = &fakeAuth{ authenticateFunc: func(req *http.Request) (user.Info, bool, error) { @@ -236,13 +321,17 @@ func newServerTestWithDebug(enableDebugging bool) *serverTestFramework { return authorizer.DecisionAllow, "", nil }, } + fw.criHandler = &utiltesting.FakeHandler{ + StatusCode: http.StatusOK, + } server := NewServer( fw.fakeKubelet, stats.NewResourceAnalyzer(fw.fakeKubelet, time.Minute), fw.fakeAuth, enableDebugging, false, - &kubecontainertesting.Mock{}) + redirectContainerStreaming, + fw.criHandler) fw.serverUnderTest = &server fw.testHTTPServer = httptest.NewServer(fw.serverUnderTest) return fw @@ -1064,13 +1153,12 @@ func TestContainerLogsWithFollow(t *testing.T) { } func TestServeExecInContainerIdleTimeout(t *testing.T) { - fw := newServerTest() + ss, err := newTestStreamingServer(100 * time.Millisecond) + require.NoError(t, err) + defer ss.testHTTPServer.Close() + fw := newServerTestWithDebug(true, false, ss) defer fw.testHTTPServer.Close() - fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { - return 100 * time.Millisecond - } - podNamespace := "other" podName := "foo" expectedContainerName := "baz" @@ -1102,280 +1190,221 @@ func TestServeExecInContainerIdleTimeout(t *testing.T) { } func testExecAttach(t *testing.T, verb string) { - tests := []struct { + tests := map[string]struct { stdin bool stdout bool stderr bool tty bool responseStatusCode int uid bool - responseLocation string + redirect bool }{ - {responseStatusCode: http.StatusBadRequest}, - {stdin: true, responseStatusCode: http.StatusSwitchingProtocols}, - {stdout: true, responseStatusCode: http.StatusSwitchingProtocols}, - {stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, - {stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, - {stdout: true, stderr: true, tty: true, 
responseStatusCode: http.StatusSwitchingProtocols}, - {stdin: true, stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, - {stdout: true, responseStatusCode: http.StatusFound, responseLocation: "http://localhost:12345/" + verb}, + "no input or output": {responseStatusCode: http.StatusBadRequest}, + "stdin": {stdin: true, responseStatusCode: http.StatusSwitchingProtocols}, + "stdout": {stdout: true, responseStatusCode: http.StatusSwitchingProtocols}, + "stderr": {stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + "stdout and stderr": {stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + "stdout stderr and tty": {stdout: true, stderr: true, tty: true, responseStatusCode: http.StatusSwitchingProtocols}, + "stdin stdout and stderr": {stdin: true, stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols}, + "stdin stdout stderr with uid": {stdin: true, stdout: true, stderr: true, responseStatusCode: http.StatusSwitchingProtocols, uid: true}, + "stdout with redirect": {stdout: true, responseStatusCode: http.StatusFound, redirect: true}, } - for i, test := range tests { - fw := newServerTest() - defer fw.testHTTPServer.Close() - - fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { - return 0 - } - - if test.responseLocation != "" { - var err error - fw.fakeKubelet.redirectURL, err = url.Parse(test.responseLocation) + for desc, test := range tests { + test := test + t.Run(desc, func(t *testing.T) { + ss, err := newTestStreamingServer(0) require.NoError(t, err) - } + defer ss.testHTTPServer.Close() + fw := newServerTestWithDebug(true, test.redirect, ss) + defer fw.testHTTPServer.Close() + fmt.Println(desc) - podNamespace := "other" - podName := "foo" - expectedPodName := getPodName(podName, podNamespace) - expectedContainerName := "baz" - expectedCommand := "ls -a" - expectedStdin := "stdin" - expectedStdout := "stdout" - expectedStderr := "stderr" - done := make(chan struct{}) - clientStdoutReadDone := make(chan struct{}) - clientStderrReadDone := make(chan struct{}) - execInvoked := false - attachInvoked := false + podNamespace := "other" + podName := "foo" + expectedPodName := getPodName(podName, podNamespace) + expectedContainerName := "baz" + expectedCommand := "ls -a" + expectedStdin := "stdin" + expectedStdout := "stdout" + expectedStderr := "stderr" + done := make(chan struct{}) + clientStdoutReadDone := make(chan struct{}) + clientStderrReadDone := make(chan struct{}) + execInvoked := false + attachInvoked := false - testStreamFunc := func(podFullName string, uid types.UID, containerName string, cmd []string, in io.Reader, out, stderr io.WriteCloser, tty bool, done chan struct{}) error { - defer close(done) + checkStream := func(podFullName string, uid types.UID, containerName string, streamOpts remotecommandserver.Options) { + assert.Equal(t, expectedPodName, podFullName, "podFullName") + if test.uid { + assert.Equal(t, testUID, string(uid), "uid") + } + assert.Equal(t, expectedContainerName, containerName, "containerName") + assert.Equal(t, test.stdin, streamOpts.Stdin, "stdin") + assert.Equal(t, test.stdout, streamOpts.Stdout, "stdout") + assert.Equal(t, test.tty, streamOpts.TTY, "tty") + assert.Equal(t, !test.tty && test.stderr, streamOpts.Stderr, "stderr") + } - if podFullName != expectedPodName { - t.Fatalf("%d: podFullName: expected %s, got %s", i, expectedPodName, podFullName) + fw.fakeKubelet.getExecCheck = func(podFullName string, uid types.UID, containerName 
string, cmd []string, streamOpts remotecommandserver.Options) { + execInvoked = true + assert.Equal(t, expectedCommand, strings.Join(cmd, " "), "cmd") + checkStream(podFullName, uid, containerName, streamOpts) } - if test.uid && string(uid) != testUID { - t.Fatalf("%d: uid: expected %v, got %v", i, testUID, uid) + + fw.fakeKubelet.getAttachCheck = func(podFullName string, uid types.UID, containerName string, streamOpts remotecommandserver.Options) { + attachInvoked = true + checkStream(podFullName, uid, containerName, streamOpts) } - if containerName != expectedContainerName { - t.Fatalf("%d: containerName: expected %s, got %s", i, expectedContainerName, containerName) + + testStream := func(containerID string, in io.Reader, out, stderr io.WriteCloser, tty bool, done chan struct{}) error { + close(done) + assert.Equal(t, testContainerID, containerID, "containerID") + assert.Equal(t, test.tty, tty, "tty") + require.Equal(t, test.stdin, in != nil, "in") + require.Equal(t, test.stdout, out != nil, "out") + require.Equal(t, !test.tty && test.stderr, stderr != nil, "err") + + if test.stdin { + b := make([]byte, 10) + n, err := in.Read(b) + assert.NoError(t, err, "reading from stdin") + assert.Equal(t, expectedStdin, string(b[0:n]), "content from stdin") + } + + if test.stdout { + _, err := out.Write([]byte(expectedStdout)) + assert.NoError(t, err, "writing to stdout") + out.Close() + <-clientStdoutReadDone + } + + if !test.tty && test.stderr { + _, err := stderr.Write([]byte(expectedStderr)) + assert.NoError(t, err, "writing to stderr") + stderr.Close() + <-clientStderrReadDone + } + return nil } + ss.fakeRuntime.execFunc = func(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { + assert.Equal(t, expectedCommand, strings.Join(cmd, " "), "cmd") + return testStream(containerID, stdin, stdout, stderr, tty, done) + } + + ss.fakeRuntime.attachFunc = func(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { + return testStream(containerID, stdin, stdout, stderr, tty, done) + } + + var url string + if test.uid { + url = fw.testHTTPServer.URL + "/" + verb + "/" + podNamespace + "/" + podName + "/" + testUID + "/" + expectedContainerName + "?ignore=1" + } else { + url = fw.testHTTPServer.URL + "/" + verb + "/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?ignore=1" + } + if verb == "exec" { + url += "&command=ls&command=-a" + } + if test.stdin { + url += "&" + api.ExecStdinParam + "=1" + } + if test.stdout { + url += "&" + api.ExecStdoutParam + "=1" + } + if test.stderr && !test.tty { + url += "&" + api.ExecStderrParam + "=1" + } + if test.tty { + url += "&" + api.ExecTTYParam + "=1" + } + + var ( + resp *http.Response + upgradeRoundTripper httpstream.UpgradeRoundTripper + c *http.Client + ) + if test.redirect { + c = &http.Client{} + // Don't follow redirects, since we want to inspect the redirect response. 
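+				// Returning http.ErrUseLastResponse from CheckRedirect makes the
+				// http.Client stop at the redirect and hand back that response
+				// unmodified (the 302 from the kubelet), so the test can assert
+				// on the StatusFound response code instead of following Location.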
+ c.CheckRedirect = func(*http.Request, []*http.Request) error { + return http.ErrUseLastResponse + } + } else { + upgradeRoundTripper = spdy.NewRoundTripper(nil, true) + c = &http.Client{Transport: upgradeRoundTripper} + } + + resp, err = c.Post(url, "", nil) + require.NoError(t, err, "POSTing") + defer resp.Body.Close() + + _, err = ioutil.ReadAll(resp.Body) + assert.NoError(t, err, "reading response body") + + require.Equal(t, test.responseStatusCode, resp.StatusCode, "response status") + if test.responseStatusCode != http.StatusSwitchingProtocols { + return + } + + conn, err := upgradeRoundTripper.NewConnection(resp) + require.NoError(t, err, "creating streaming connection") + defer conn.Close() + + h := http.Header{} + h.Set(api.StreamType, api.StreamTypeError) + _, err = conn.CreateStream(h) + require.NoError(t, err, "creating error stream") + if test.stdin { - if in == nil { - t.Fatalf("%d: stdin: expected non-nil", i) - } - b := make([]byte, 10) - n, err := in.Read(b) - if err != nil { - t.Fatalf("%d: error reading from stdin: %v", i, err) - } - if e, a := expectedStdin, string(b[0:n]); e != a { - t.Fatalf("%d: stdin: expected to read %v, got %v", i, e, a) - } - } else if in != nil { - t.Fatalf("%d: stdin: expected nil: %#v", i, in) + h.Set(api.StreamType, api.StreamTypeStdin) + stream, err := conn.CreateStream(h) + require.NoError(t, err, "creating stdin stream") + _, err = stream.Write([]byte(expectedStdin)) + require.NoError(t, err, "writing to stdin stream") + } + + var stdoutStream httpstream.Stream + if test.stdout { + h.Set(api.StreamType, api.StreamTypeStdout) + stdoutStream, err = conn.CreateStream(h) + require.NoError(t, err, "creating stdout stream") + } + + var stderrStream httpstream.Stream + if test.stderr && !test.tty { + h.Set(api.StreamType, api.StreamTypeStderr) + stderrStream, err = conn.CreateStream(h) + require.NoError(t, err, "creating stderr stream") } if test.stdout { - if out == nil { - t.Fatalf("%d: stdout: expected non-nil", i) - } - _, err := out.Write([]byte(expectedStdout)) - if err != nil { - t.Fatalf("%d:, error writing to stdout: %v", i, err) - } - out.Close() - <-clientStdoutReadDone - } else if out != nil { - t.Fatalf("%d: stdout: expected nil: %#v", i, out) + output := make([]byte, 10) + n, err := stdoutStream.Read(output) + close(clientStdoutReadDone) + assert.NoError(t, err, "reading from stdout stream") + assert.Equal(t, expectedStdout, string(output[0:n]), "stdout") } - if tty { - if stderr != nil { - t.Fatalf("%d: tty set but received non-nil stderr: %v", i, stderr) - } - } else if test.stderr { - if stderr == nil { - t.Fatalf("%d: stderr: expected non-nil", i) - } - _, err := stderr.Write([]byte(expectedStderr)) - if err != nil { - t.Fatalf("%d:, error writing to stderr: %v", i, err) - } - stderr.Close() - <-clientStderrReadDone - } else if stderr != nil { - t.Fatalf("%d: stderr: expected nil: %#v", i, stderr) + if test.stderr && !test.tty { + output := make([]byte, 10) + n, err := stderrStream.Read(output) + close(clientStderrReadDone) + assert.NoError(t, err, "reading from stderr stream") + assert.Equal(t, expectedStderr, string(output[0:n]), "stderr") } - return nil - } + // wait for the server to finish before checking if the attach/exec funcs were invoked + <-done - fw.fakeKubelet.execFunc = func(podFullName string, uid types.UID, containerName string, cmd []string, in io.Reader, out, stderr io.WriteCloser, tty bool) error { - execInvoked = true - if strings.Join(cmd, " ") != expectedCommand { - t.Fatalf("%d: cmd: expected: %s, got 
%v", i, expectedCommand, cmd) + if verb == "exec" { + assert.True(t, execInvoked, "exec should be invoked") + assert.False(t, attachInvoked, "attach should not be invoked") + } else { + assert.True(t, attachInvoked, "attach should be invoked") + assert.False(t, execInvoked, "exec should not be invoked") } - return testStreamFunc(podFullName, uid, containerName, cmd, in, out, stderr, tty, done) - } - - fw.fakeKubelet.attachFunc = func(podFullName string, uid types.UID, containerName string, in io.Reader, out, stderr io.WriteCloser, tty bool) error { - attachInvoked = true - return testStreamFunc(podFullName, uid, containerName, nil, in, out, stderr, tty, done) - } - - var url string - if test.uid { - url = fw.testHTTPServer.URL + "/" + verb + "/" + podNamespace + "/" + podName + "/" + testUID + "/" + expectedContainerName + "?ignore=1" - } else { - url = fw.testHTTPServer.URL + "/" + verb + "/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?ignore=1" - } - if verb == "exec" { - url += "&command=ls&command=-a" - } - if test.stdin { - url += "&" + api.ExecStdinParam + "=1" - } - if test.stdout { - url += "&" + api.ExecStdoutParam + "=1" - } - if test.stderr && !test.tty { - url += "&" + api.ExecStderrParam + "=1" - } - if test.tty { - url += "&" + api.ExecTTYParam + "=1" - } - - var ( - resp *http.Response - err error - upgradeRoundTripper httpstream.UpgradeRoundTripper - c *http.Client - ) - - if test.responseStatusCode != http.StatusSwitchingProtocols { - c = &http.Client{} - // Don't follow redirects, since we want to inspect the redirect response. - c.CheckRedirect = func(*http.Request, []*http.Request) error { - return http.ErrUseLastResponse - } - } else { - upgradeRoundTripper = spdy.NewRoundTripper(nil, true) - c = &http.Client{Transport: upgradeRoundTripper} - } - - resp, err = c.Post(url, "", nil) - if err != nil { - t.Fatalf("%d: Got error POSTing: %v", i, err) - } - defer resp.Body.Close() - - _, err = ioutil.ReadAll(resp.Body) - if err != nil { - t.Errorf("%d: Error reading response body: %v", i, err) - } - - if e, a := test.responseStatusCode, resp.StatusCode; e != a { - t.Fatalf("%d: response status: expected %v, got %v", i, e, a) - } - - if e, a := test.responseLocation, resp.Header.Get("Location"); e != a { - t.Errorf("%d: response location: expected %v, got %v", i, e, a) - } - - if test.responseStatusCode != http.StatusSwitchingProtocols { - continue - } - - conn, err := upgradeRoundTripper.NewConnection(resp) - if err != nil { - t.Fatalf("Unexpected error creating streaming connection: %s", err) - } - if conn == nil { - t.Fatalf("%d: unexpected nil conn", i) - } - defer conn.Close() - - h := http.Header{} - h.Set(api.StreamType, api.StreamTypeError) - if _, err := conn.CreateStream(h); err != nil { - t.Fatalf("%d: error creating error stream: %v", i, err) - } - - if test.stdin { - h.Set(api.StreamType, api.StreamTypeStdin) - stream, err := conn.CreateStream(h) - if err != nil { - t.Fatalf("%d: error creating stdin stream: %v", i, err) - } - _, err = stream.Write([]byte(expectedStdin)) - if err != nil { - t.Fatalf("%d: error writing to stdin stream: %v", i, err) - } - } - - var stdoutStream httpstream.Stream - if test.stdout { - h.Set(api.StreamType, api.StreamTypeStdout) - stdoutStream, err = conn.CreateStream(h) - if err != nil { - t.Fatalf("%d: error creating stdout stream: %v", i, err) - } - } - - var stderrStream httpstream.Stream - if test.stderr && !test.tty { - h.Set(api.StreamType, api.StreamTypeStderr) - stderrStream, err = conn.CreateStream(h) - 
if err != nil { - t.Fatalf("%d: error creating stderr stream: %v", i, err) - } - } - - if test.stdout { - output := make([]byte, 10) - n, err := stdoutStream.Read(output) - close(clientStdoutReadDone) - if err != nil { - t.Fatalf("%d: error reading from stdout stream: %v", i, err) - } - if e, a := expectedStdout, string(output[0:n]); e != a { - t.Fatalf("%d: stdout: expected '%v', got '%v'", i, e, a) - } - } - - if test.stderr && !test.tty { - output := make([]byte, 10) - n, err := stderrStream.Read(output) - close(clientStderrReadDone) - if err != nil { - t.Fatalf("%d: error reading from stderr stream: %v", i, err) - } - if e, a := expectedStderr, string(output[0:n]); e != a { - t.Fatalf("%d: stderr: expected '%v', got '%v'", i, e, a) - } - } - - // wait for the server to finish before checking if the attach/exec funcs were invoked - <-done - - if verb == "exec" { - if !execInvoked { - t.Errorf("%d: exec was not invoked", i) - } - if attachInvoked { - t.Errorf("%d: attach should not have been invoked", i) - } - } else { - if !attachInvoked { - t.Errorf("%d: attach was not invoked", i) - } - if execInvoked { - t.Errorf("%d: exec should not have been invoked", i) - } - } + }) } } @@ -1388,13 +1417,12 @@ func TestServeAttachContainer(t *testing.T) { } func TestServePortForwardIdleTimeout(t *testing.T) { - fw := newServerTest() + ss, err := newTestStreamingServer(100 * time.Millisecond) + require.NoError(t, err) + defer ss.testHTTPServer.Close() + fw := newServerTestWithDebug(true, false, ss) defer fw.testHTTPServer.Close() - fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { - return 100 * time.Millisecond - } - podNamespace := "other" podName := "foo" @@ -1422,179 +1450,160 @@ func TestServePortForwardIdleTimeout(t *testing.T) { } func TestServePortForward(t *testing.T) { - tests := []struct { - port string - uid bool - clientData string - containerData string - shouldError bool - responseLocation string + tests := map[string]struct { + port string + uid bool + clientData string + containerData string + redirect bool + shouldError bool }{ - {port: "", shouldError: true}, - {port: "abc", shouldError: true}, - {port: "-1", shouldError: true}, - {port: "65536", shouldError: true}, - {port: "0", shouldError: true}, - {port: "1", shouldError: false}, - {port: "8000", shouldError: false}, - {port: "8000", clientData: "client data", containerData: "container data", shouldError: false}, - {port: "65535", shouldError: false}, - {port: "65535", uid: true, shouldError: false}, - {port: "65535", responseLocation: "http://localhost:12345/portforward", shouldError: false}, + "no port": {port: "", shouldError: true}, + "none number port": {port: "abc", shouldError: true}, + "negative port": {port: "-1", shouldError: true}, + "too large port": {port: "65536", shouldError: true}, + "0 port": {port: "0", shouldError: true}, + "min port": {port: "1", shouldError: false}, + "normal port": {port: "8000", shouldError: false}, + "normal port with data forward": {port: "8000", clientData: "client data", containerData: "container data", shouldError: false}, + "max port": {port: "65535", shouldError: false}, + "normal port with uid": {port: "8000", uid: true, shouldError: false}, + "normal port with redirect": {port: "8000", redirect: true, shouldError: false}, } podNamespace := "other" podName := "foo" - expectedPodName := getPodName(podName, podNamespace) - for i, test := range tests { - fw := newServerTest() - defer fw.testHTTPServer.Close() - - 
fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { - return 0 - } - - if test.responseLocation != "" { - var err error - fw.fakeKubelet.redirectURL, err = url.Parse(test.responseLocation) + for desc, test := range tests { + test := test + t.Run(desc, func(t *testing.T) { + ss, err := newTestStreamingServer(0) require.NoError(t, err) - } + defer ss.testHTTPServer.Close() + fw := newServerTestWithDebug(true, test.redirect, ss) + defer fw.testHTTPServer.Close() - portForwardFuncDone := make(chan struct{}) + portForwardFuncDone := make(chan struct{}) - fw.fakeKubelet.portForwardFunc = func(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error { - defer close(portForwardFuncDone) - - if e, a := expectedPodName, name; e != a { - t.Fatalf("%d: pod name: expected '%v', got '%v'", i, e, a) + fw.fakeKubelet.getPortForwardCheck = func(name, namespace string, uid types.UID, opts portforward.V4Options) { + assert.Equal(t, podName, name, "pod name") + assert.Equal(t, podNamespace, namespace, "pod namespace") + if test.uid { + assert.Equal(t, testUID, string(uid), "uid") + } } - if e, a := testUID, uid; test.uid && e != string(a) { - t.Fatalf("%d: uid: expected '%v', got '%v'", i, e, a) + ss.fakeRuntime.portForwardFunc = func(podSandboxID string, port int32, stream io.ReadWriteCloser) error { + defer close(portForwardFuncDone) + assert.Equal(t, testPodSandboxID, podSandboxID, "pod sandbox id") + // The port should be valid if it reaches here. + testPort, err := strconv.ParseInt(test.port, 10, 32) + require.NoError(t, err, "parse port") + assert.Equal(t, int32(testPort), port, "port") + + if test.clientData != "" { + fromClient := make([]byte, 32) + n, err := stream.Read(fromClient) + assert.NoError(t, err, "reading client data") + assert.Equal(t, test.clientData, string(fromClient[0:n]), "client data") + } + + if test.containerData != "" { + _, err := stream.Write([]byte(test.containerData)) + assert.NoError(t, err, "writing container data") + } + + return nil } - p, err := strconv.ParseInt(test.port, 10, 32) - if err != nil { - t.Fatalf("%d: error parsing port string '%s': %v", i, test.port, err) + var url string + if test.uid { + url = fmt.Sprintf("%s/portForward/%s/%s/%s", fw.testHTTPServer.URL, podNamespace, podName, testUID) + } else { + url = fmt.Sprintf("%s/portForward/%s/%s", fw.testHTTPServer.URL, podNamespace, podName) } - if e, a := int32(p), port; e != a { - t.Fatalf("%d: port: expected '%v', got '%v'", i, e, a) + + var ( + upgradeRoundTripper httpstream.UpgradeRoundTripper + c *http.Client + ) + + if test.redirect { + c = &http.Client{} + // Don't follow redirects, since we want to inspect the redirect response. 
+ c.CheckRedirect = func(*http.Request, []*http.Request) error { + return http.ErrUseLastResponse + } + } else { + upgradeRoundTripper = spdy.NewRoundTripper(nil, true) + c = &http.Client{Transport: upgradeRoundTripper} } + resp, err := c.Post(url, "", nil) + require.NoError(t, err, "POSTing") + defer resp.Body.Close() + + if test.redirect { + assert.Equal(t, http.StatusFound, resp.StatusCode, "status code") + return + } else { + assert.Equal(t, http.StatusSwitchingProtocols, resp.StatusCode, "status code") + } + + conn, err := upgradeRoundTripper.NewConnection(resp) + require.NoError(t, err, "creating streaming connection") + defer conn.Close() + + headers := http.Header{} + headers.Set("streamType", "error") + headers.Set("port", test.port) + _, err = conn.CreateStream(headers) + assert.Equal(t, test.shouldError, err != nil, "expect error") + + if test.shouldError { + return + } + + headers.Set("streamType", "data") + headers.Set("port", test.port) + dataStream, err := conn.CreateStream(headers) + require.NoError(t, err, "create stream") + if test.clientData != "" { - fromClient := make([]byte, 32) - n, err := stream.Read(fromClient) - if err != nil { - t.Fatalf("%d: error reading client data: %v", i, err) - } - if e, a := test.clientData, string(fromClient[0:n]); e != a { - t.Fatalf("%d: client data: expected to receive '%v', got '%v'", i, e, a) - } + _, err := dataStream.Write([]byte(test.clientData)) + assert.NoError(t, err, "writing client data") } if test.containerData != "" { - _, err := stream.Write([]byte(test.containerData)) - if err != nil { - t.Fatalf("%d: error writing container data: %v", i, err) - } + fromContainer := make([]byte, 32) + n, err := dataStream.Read(fromContainer) + assert.NoError(t, err, "reading container data") + assert.Equal(t, test.containerData, string(fromContainer[0:n]), "container data") } - return nil - } - - var url string - if test.uid { - url = fmt.Sprintf("%s/portForward/%s/%s/%s", fw.testHTTPServer.URL, podNamespace, podName, testUID) - } else { - url = fmt.Sprintf("%s/portForward/%s/%s", fw.testHTTPServer.URL, podNamespace, podName) - } - - var ( - upgradeRoundTripper httpstream.UpgradeRoundTripper - c *http.Client - ) - - if len(test.responseLocation) > 0 { - c = &http.Client{} - // Don't follow redirects, since we want to inspect the redirect response. 
- c.CheckRedirect = func(*http.Request, []*http.Request) error { - return http.ErrUseLastResponse - } - } else { - upgradeRoundTripper = spdy.NewRoundTripper(nil, true) - c = &http.Client{Transport: upgradeRoundTripper} - } - - resp, err := c.Post(url, "", nil) - if err != nil { - t.Fatalf("%d: Got error POSTing: %v", i, err) - } - defer resp.Body.Close() - - if test.responseLocation != "" { - assert.Equal(t, http.StatusFound, resp.StatusCode, "%d: status code", i) - assert.Equal(t, test.responseLocation, resp.Header.Get("Location"), "%d: location", i) - continue - } else { - assert.Equal(t, http.StatusSwitchingProtocols, resp.StatusCode, "%d: status code", i) - } - - conn, err := upgradeRoundTripper.NewConnection(resp) - if err != nil { - t.Fatalf("Unexpected error creating streaming connection: %s", err) - } - if conn == nil { - t.Fatalf("%d: Unexpected nil connection", i) - } - defer conn.Close() - - headers := http.Header{} - headers.Set("streamType", "error") - headers.Set("port", test.port) - errorStream, err := conn.CreateStream(headers) - _ = errorStream - haveErr := err != nil - if e, a := test.shouldError, haveErr; e != a { - t.Fatalf("%d: create stream: expected err=%t, got %t: %v", i, e, a, err) - } - - if test.shouldError { - continue - } - - headers.Set("streamType", "data") - headers.Set("port", test.port) - dataStream, err := conn.CreateStream(headers) - haveErr = err != nil - if e, a := test.shouldError, haveErr; e != a { - t.Fatalf("%d: create stream: expected err=%t, got %t: %v", i, e, a, err) - } - - if test.clientData != "" { - _, err := dataStream.Write([]byte(test.clientData)) - if err != nil { - t.Fatalf("%d: unexpected error writing client data: %v", i, err) - } - } - - if test.containerData != "" { - fromContainer := make([]byte, 32) - n, err := dataStream.Read(fromContainer) - if err != nil { - t.Fatalf("%d: unexpected error reading container data: %v", i, err) - } - if e, a := test.containerData, string(fromContainer[0:n]); e != a { - t.Fatalf("%d: expected to receive '%v' from container, got '%v'", i, e, a) - } - } - - <-portForwardFuncDone + <-portForwardFuncDone + }) } } +func TestCRIHandler(t *testing.T) { + fw := newServerTest() + defer fw.testHTTPServer.Close() + + const ( + path = "/cri/exec/123456abcdef" + query = "cmd=echo+foo" + ) + resp, err := http.Get(fw.testHTTPServer.URL + path + "?" 
+ query) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "GET", fw.criHandler.RequestReceived.Method) + assert.Equal(t, path, fw.criHandler.RequestReceived.URL.Path) + assert.Equal(t, query, fw.criHandler.RequestReceived.URL.RawQuery) +} + func TestDebuggingDisabledHandlers(t *testing.T) { - fw := newServerTestWithDebug(false) + fw := newServerTestWithDebug(false, false, nil) defer fw.testHTTPServer.Close() paths := []string{ diff --git a/pkg/kubelet/server/server_websocket_test.go b/pkg/kubelet/server/server_websocket_test.go index 058b67d978a..daf6d356b63 100644 --- a/pkg/kubelet/server/server_websocket_test.go +++ b/pkg/kubelet/server/server_websocket_test.go @@ -23,11 +23,13 @@ import ( "strconv" "sync" "testing" - "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/net/websocket" "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/kubelet/server/portforward" ) const ( @@ -36,152 +38,114 @@ const ( ) func TestServeWSPortForward(t *testing.T) { - tests := []struct { + tests := map[string]struct { port string uid bool clientData string containerData string shouldError bool }{ - {port: "", shouldError: true}, - {port: "abc", shouldError: true}, - {port: "-1", shouldError: true}, - {port: "65536", shouldError: true}, - {port: "0", shouldError: true}, - {port: "1", shouldError: false}, - {port: "8000", shouldError: false}, - {port: "8000", clientData: "client data", containerData: "container data", shouldError: false}, - {port: "65535", shouldError: false}, - {port: "65535", uid: true, shouldError: false}, + "no port": {port: "", shouldError: true}, + "none number port": {port: "abc", shouldError: true}, + "negative port": {port: "-1", shouldError: true}, + "too large port": {port: "65536", shouldError: true}, + "0 port": {port: "0", shouldError: true}, + "min port": {port: "1", shouldError: false}, + "normal port": {port: "8000", shouldError: false}, + "normal port with data forward": {port: "8000", clientData: "client data", containerData: "container data", shouldError: false}, + "max port": {port: "65535", shouldError: false}, + "normal port with uid": {port: "8000", uid: true, shouldError: false}, } podNamespace := "other" podName := "foo" - expectedPodName := getPodName(podName, podNamespace) - expectedUid := "9b01b80f-8fb4-11e4-95ab-4200af06647" - for i, test := range tests { - fw := newServerTest() - defer fw.testHTTPServer.Close() + for desc, test := range tests { + test := test + t.Run(desc, func(t *testing.T) { + ss, err := newTestStreamingServer(0) + require.NoError(t, err) + defer ss.testHTTPServer.Close() + fw := newServerTestWithDebug(true, false, ss) + defer fw.testHTTPServer.Close() - fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { - return 0 - } + portForwardFuncDone := make(chan struct{}) - portForwardFuncDone := make(chan struct{}) - - fw.fakeKubelet.portForwardFunc = func(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error { - defer close(portForwardFuncDone) - - if e, a := expectedPodName, name; e != a { - t.Fatalf("%d: pod name: expected '%v', got '%v'", i, e, a) + fw.fakeKubelet.getPortForwardCheck = func(name, namespace string, uid types.UID, opts portforward.V4Options) { + assert.Equal(t, podName, name, "pod name") + assert.Equal(t, podNamespace, namespace, "pod namespace") + if test.uid { + assert.Equal(t, testUID, string(uid), "uid") + } } - if e, a := expectedUid, uid; test.uid && e != string(a) { - t.Fatalf("%d: uid: 
expected '%v', got '%v'", i, e, a) + ss.fakeRuntime.portForwardFunc = func(podSandboxID string, port int32, stream io.ReadWriteCloser) error { + defer close(portForwardFuncDone) + assert.Equal(t, testPodSandboxID, podSandboxID, "pod sandbox id") + // The port should be valid if it reaches here. + testPort, err := strconv.ParseInt(test.port, 10, 32) + require.NoError(t, err, "parse port") + assert.Equal(t, int32(testPort), port, "port") + + if test.clientData != "" { + fromClient := make([]byte, 32) + n, err := stream.Read(fromClient) + assert.NoError(t, err, "reading client data") + assert.Equal(t, test.clientData, string(fromClient[0:n]), "client data") + } + + if test.containerData != "" { + _, err := stream.Write([]byte(test.containerData)) + assert.NoError(t, err, "writing container data") + } + + return nil } - p, err := strconv.ParseInt(test.port, 10, 32) - if err != nil { - t.Fatalf("%d: error parsing port string '%s': %v", i, test.port, err) + var url string + if test.uid { + url = fmt.Sprintf("ws://%s/portForward/%s/%s/%s?port=%s", fw.testHTTPServer.Listener.Addr().String(), podNamespace, podName, testUID, test.port) + } else { + url = fmt.Sprintf("ws://%s/portForward/%s/%s?port=%s", fw.testHTTPServer.Listener.Addr().String(), podNamespace, podName, test.port) } - if e, a := int32(p), port; e != a { - t.Fatalf("%d: port: expected '%v', got '%v'", i, e, a) + + ws, err := websocket.Dial(url, "", "http://127.0.0.1/") + assert.Equal(t, test.shouldError, err != nil, "websocket dial") + if test.shouldError { + return } + defer ws.Close() + + p, err := strconv.ParseUint(test.port, 10, 16) + require.NoError(t, err, "parse port") + p16 := uint16(p) + + channel, data, err := wsRead(ws) + require.NoError(t, err, "read") + assert.Equal(t, dataChannel, int(channel), "channel") + assert.Len(t, data, binary.Size(p16), "data size") + assert.Equal(t, p16, binary.LittleEndian.Uint16(data), "data") + + channel, data, err = wsRead(ws) + assert.NoError(t, err, "read") + assert.Equal(t, errorChannel, int(channel), "channel") + assert.Len(t, data, binary.Size(p16), "data size") + assert.Equal(t, p16, binary.LittleEndian.Uint16(data), "data") if test.clientData != "" { - fromClient := make([]byte, 32) - n, err := stream.Read(fromClient) - if err != nil { - t.Fatalf("%d: error reading client data: %v", i, err) - } - if e, a := test.clientData, string(fromClient[0:n]); e != a { - t.Fatalf("%d: client data: expected to receive '%v', got '%v'", i, e, a) - } + println("writing the client data") + err := wsWrite(ws, dataChannel, []byte(test.clientData)) + assert.NoError(t, err, "writing client data") } if test.containerData != "" { - _, err := stream.Write([]byte(test.containerData)) - if err != nil { - t.Fatalf("%d: error writing container data: %v", i, err) - } + _, data, err = wsRead(ws) + assert.NoError(t, err, "reading container data") + assert.Equal(t, test.containerData, string(data), "container data") } - return nil - } - - var url string - if test.uid { - url = fmt.Sprintf("ws://%s/portForward/%s/%s/%s?port=%s", fw.testHTTPServer.Listener.Addr().String(), podNamespace, podName, expectedUid, test.port) - } else { - url = fmt.Sprintf("ws://%s/portForward/%s/%s?port=%s", fw.testHTTPServer.Listener.Addr().String(), podNamespace, podName, test.port) - } - - ws, err := websocket.Dial(url, "", "http://127.0.0.1/") - if test.shouldError { - if err == nil { - t.Fatalf("%d: websocket dial expected err", i) - } - continue - } else if err != nil { - t.Fatalf("%d: websocket dial unexpected err: %v", i, err) - } - 
- defer ws.Close() - - p, err := strconv.ParseUint(test.port, 10, 16) - if err != nil { - t.Fatalf("%d: error parsing port string '%s': %v", i, test.port, err) - } - p16 := uint16(p) - - channel, data, err := wsRead(ws) - if err != nil { - t.Fatalf("%d: read failed: expected no error: got %v", i, err) - } - if channel != dataChannel { - t.Fatalf("%d: wrong channel: got %q: expected %q", i, channel, dataChannel) - } - if len(data) != binary.Size(p16) { - t.Fatalf("%d: wrong data size: got %q: expected %d", i, data, binary.Size(p16)) - } - if e, a := p16, binary.LittleEndian.Uint16(data); e != a { - t.Fatalf("%d: wrong data: got %q: expected %s", i, data, test.port) - } - - channel, data, err = wsRead(ws) - if err != nil { - t.Fatalf("%d: read succeeded: expected no error: got %v", i, err) - } - if channel != errorChannel { - t.Fatalf("%d: wrong channel: got %q: expected %q", i, channel, errorChannel) - } - if len(data) != binary.Size(p16) { - t.Fatalf("%d: wrong data size: got %q: expected %d", i, data, binary.Size(p16)) - } - if e, a := p16, binary.LittleEndian.Uint16(data); e != a { - t.Fatalf("%d: wrong data: got %q: expected %s", i, data, test.port) - } - - if test.clientData != "" { - println("writing the client data") - err := wsWrite(ws, dataChannel, []byte(test.clientData)) - if err != nil { - t.Fatalf("%d: unexpected error writing client data: %v", i, err) - } - } - - if test.containerData != "" { - _, data, err = wsRead(ws) - if err != nil { - t.Fatalf("%d: unexpected error reading container data: %v", i, err) - } - if e, a := test.containerData, string(data); e != a { - t.Fatalf("%d: expected to receive '%v' from container, got '%v'", i, e, a) - } - } - - <-portForwardFuncDone + <-portForwardFuncDone + }) } } @@ -190,27 +154,27 @@ func TestServeWSMultiplePortForward(t *testing.T) { ports := []uint16{7000, 8000, 9000} podNamespace := "other" podName := "foo" - expectedPodName := getPodName(podName, podNamespace) - fw := newServerTest() + ss, err := newTestStreamingServer(0) + require.NoError(t, err) + defer ss.testHTTPServer.Close() + fw := newServerTestWithDebug(true, false, ss) defer fw.testHTTPServer.Close() - fw.fakeKubelet.streamingConnectionIdleTimeoutFunc = func() time.Duration { - return 0 - } - portForwardWG := sync.WaitGroup{} portForwardWG.Add(len(ports)) portsMutex := sync.Mutex{} portsForwarded := map[int32]struct{}{} - fw.fakeKubelet.portForwardFunc = func(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error { - defer portForwardWG.Done() + fw.fakeKubelet.getPortForwardCheck = func(name, namespace string, uid types.UID, opts portforward.V4Options) { + assert.Equal(t, podName, name, "pod name") + assert.Equal(t, podNamespace, namespace, "pod namespace") + } - if e, a := expectedPodName, name; e != a { - t.Fatalf("%d: pod name: expected '%v', got '%v'", port, e, a) - } + ss.fakeRuntime.portForwardFunc = func(podSandboxID string, port int32, stream io.ReadWriteCloser) error { + defer portForwardWG.Done() + assert.Equal(t, testPodSandboxID, podSandboxID, "pod sandbox id") portsMutex.Lock() portsForwarded[port] = struct{}{} @@ -218,17 +182,11 @@ func TestServeWSMultiplePortForward(t *testing.T) { fromClient := make([]byte, 32) n, err := stream.Read(fromClient) - if err != nil { - t.Fatalf("%d: error reading client data: %v", port, err) - } - if e, a := fmt.Sprintf("client data on port %d", port), string(fromClient[0:n]); e != a { - t.Fatalf("%d: client data: expected to receive '%v', got '%v'", port, e, a) - } + assert.NoError(t, err, "reading client 
data") + assert.Equal(t, fmt.Sprintf("client data on port %d", port), string(fromClient[0:n]), "client data") _, err = stream.Write([]byte(fmt.Sprintf("container data on port %d", port))) - if err != nil { - t.Fatalf("%d: error writing container data: %v", port, err) - } + assert.NoError(t, err, "writing container data") return nil } @@ -239,70 +197,42 @@ func TestServeWSMultiplePortForward(t *testing.T) { } ws, err := websocket.Dial(url, "", "http://127.0.0.1/") - if err != nil { - t.Fatalf("websocket dial unexpected err: %v", err) - } + require.NoError(t, err, "websocket dial") defer ws.Close() for i, port := range ports { channel, data, err := wsRead(ws) - if err != nil { - t.Fatalf("%d: read failed: expected no error: got %v", i, err) - } - if int(channel) != i*2+dataChannel { - t.Fatalf("%d: wrong channel: got %q: expected %q", i, channel, i*2+dataChannel) - } - if len(data) != binary.Size(port) { - t.Fatalf("%d: wrong data size: got %q: expected %d", i, data, binary.Size(port)) - } - if e, a := port, binary.LittleEndian.Uint16(data); e != a { - t.Fatalf("%d: wrong data: got %q: expected %d", i, data, port) - } + assert.NoError(t, err, "port %d read", port) + assert.Equal(t, i*2+dataChannel, int(channel), "port %d channel", port) + assert.Len(t, data, binary.Size(port), "port %d data size", port) + assert.Equal(t, binary.LittleEndian.Uint16(data), port, "port %d data", port) channel, data, err = wsRead(ws) - if err != nil { - t.Fatalf("%d: read succeeded: expected no error: got %v", i, err) - } - if int(channel) != i*2+errorChannel { - t.Fatalf("%d: wrong channel: got %q: expected %q", i, channel, i*2+errorChannel) - } - if len(data) != binary.Size(port) { - t.Fatalf("%d: wrong data size: got %q: expected %d", i, data, binary.Size(port)) - } - if e, a := port, binary.LittleEndian.Uint16(data); e != a { - t.Fatalf("%d: wrong data: got %q: expected %d", i, data, port) - } + assert.NoError(t, err, "port %d read", port) + assert.Equal(t, i*2+errorChannel, int(channel), "port %d channel", port) + assert.Len(t, data, binary.Size(port), "port %d data size", port) + assert.Equal(t, binary.LittleEndian.Uint16(data), port, "port %d data", port) } for i, port := range ports { - println("writing the client data", port) + t.Logf("port %d writing the client data", port) err := wsWrite(ws, byte(i*2+dataChannel), []byte(fmt.Sprintf("client data on port %d", port))) - if err != nil { - t.Fatalf("%d: unexpected error writing client data: %v", i, err) - } + assert.NoError(t, err, "port %d write client data", port) channel, data, err := wsRead(ws) - if err != nil { - t.Fatalf("%d: unexpected error reading container data: %v", i, err) - } - - if int(channel) != i*2+dataChannel { - t.Fatalf("%d: wrong channel: got %q: expected %q", port, channel, i*2+dataChannel) - } - if e, a := fmt.Sprintf("container data on port %d", port), string(data); e != a { - t.Fatalf("%d: expected to receive '%v' from container, got '%v'", i, e, a) - } + assert.NoError(t, err, "port %d read container data", port) + assert.Equal(t, i*2+dataChannel, int(channel), "port %d channel", port) + assert.Equal(t, fmt.Sprintf("container data on port %d", port), string(data), "port %d container data", port) } portForwardWG.Wait() portsMutex.Lock() defer portsMutex.Unlock() - if len(ports) != len(portsForwarded) { - t.Fatalf("expected to forward %d ports; got %v", len(ports), portsForwarded) - } + assert.Len(t, portsForwarded, len(ports), "all ports forwarded") } + func wsWrite(conn *websocket.Conn, channel byte, data []byte) error { frame := 
make([]byte, len(data)+1) frame[0] = channel From 746c32db4cf47a0245ef1c1aa053df49fcf51c13 Mon Sep 17 00:00:00 2001 From: Lantao Liu Date: Thu, 17 May 2018 18:18:36 -0700 Subject: [PATCH 304/307] Update bazel. --- pkg/kubelet/BUILD | 1 - pkg/kubelet/container/BUILD | 1 - pkg/kubelet/server/BUILD | 6 +++--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index b4d6d166fa9..a9249c879a1 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -131,7 +131,6 @@ go_library( "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/client-go/tools/record:go_default_library", - "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", "//vendor/k8s.io/client-go/util/certificate:go_default_library", "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/k8s.io/client-go/util/integer:go_default_library", diff --git a/pkg/kubelet/container/BUILD b/pkg/kubelet/container/BUILD index 0950b53b3da..287cd501367 100644 --- a/pkg/kubelet/container/BUILD +++ b/pkg/kubelet/container/BUILD @@ -21,7 +21,6 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//pkg/kubelet/util/format:go_default_library", - "//pkg/kubelet/util/ioutils:go_default_library", "//pkg/util/hash:go_default_library", "//pkg/volume:go_default_library", "//third_party/forked/golang/expansion:go_default_library", diff --git a/pkg/kubelet/server/BUILD b/pkg/kubelet/server/BUILD index c2e338a1ad8..69a71140f0f 100644 --- a/pkg/kubelet/server/BUILD +++ b/pkg/kubelet/server/BUILD @@ -37,7 +37,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/remotecommand:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", @@ -45,7 +45,6 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", "//vendor/k8s.io/apiserver/pkg/server/httplog:go_default_library", "//vendor/k8s.io/apiserver/pkg/util/flushwriter:go_default_library", - "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library", ], ) @@ -60,13 +59,14 @@ go_test( deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/core/install:go_default_library", + "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/container:go_default_library", - "//pkg/kubelet/container/testing:go_default_library", "//pkg/kubelet/server/portforward:go_default_library", "//pkg/kubelet/server/remotecommand:go_default_library", "//pkg/kubelet/server/stats:go_default_library", + "//pkg/kubelet/server/streaming:go_default_library", "//pkg/volume:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", From 7e444a453b18504adcaa106ff4ce358f9e8f7450 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 31 May 2018 16:04:19 -0700 Subject: [PATCH 305/307] Quote shell variable expansion --- 
cluster/gce/gci/configure.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index d8bd2baebd1..6ef58a87610 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -265,7 +265,7 @@ EOF } function install-exec-auth-plugin { - if [[ ! ${EXEC_AUTH_PLUGIN_URL:-} ]]; then + if [[ ! "${EXEC_AUTH_PLUGIN_URL:-}" ]]; then return fi local -r plugin_url="${EXEC_AUTH_PLUGIN_URL}" From d5bada212ec81b30a6a46f97a72d538b5d52c90e Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Fri, 1 Jun 2018 00:12:10 +0000 Subject: [PATCH 306/307] Update CHANGELOG-1.11.md for v1.11.0-beta.1. --- CHANGELOG-1.11.md | 243 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 236 insertions(+), 7 deletions(-) diff --git a/CHANGELOG-1.11.md b/CHANGELOG-1.11.md index 264f31666ca..7f0344496ca 100644 --- a/CHANGELOG-1.11.md +++ b/CHANGELOG-1.11.md @@ -1,24 +1,253 @@ -- [v1.11.0-alpha.2](#v1110-alpha2) - - [Downloads for v1.11.0-alpha.2](#downloads-for-v1110-alpha2) +- [v1.11.0-beta.1](#v1110-beta1) + - [Downloads for v1.11.0-beta.1](#downloads-for-v1110-beta1) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.11.0-alpha.1](#changelog-since-v1110-alpha1) + - [Changelog since v1.11.0-alpha.2](#changelog-since-v1110-alpha2) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes) -- [v1.11.0-alpha.1](#v1110-alpha1) - - [Downloads for v1.11.0-alpha.1](#downloads-for-v1110-alpha1) +- [v1.11.0-alpha.2](#v1110-alpha2) + - [Downloads for v1.11.0-alpha.2](#downloads-for-v1110-alpha2) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.10.0](#changelog-since-v1100) - - [Action Required](#action-required) + - [Changelog since v1.11.0-alpha.1](#changelog-since-v1110-alpha1) - [Other notable changes](#other-notable-changes-1) +- [v1.11.0-alpha.1](#v1110-alpha1) + - [Downloads for v1.11.0-alpha.1](#downloads-for-v1110-alpha1) + - [Client Binaries](#client-binaries-2) + - [Server Binaries](#server-binaries-2) + - [Node Binaries](#node-binaries-2) + - [Changelog since v1.10.0](#changelog-since-v1100) + - [Action Required](#action-required-1) + - [Other notable changes](#other-notable-changes-2) +# v1.11.0-beta.1 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) + +## Downloads for v1.11.0-beta.1 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes.tar.gz) | `3209303a10ca8dd311c500ee858b9151b43c1bb5c2b3a9fb9281722e021d6871` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-src.tar.gz) | `c2e4d3b1beb4cd0b2a775394a30da2c2949d380e57f729dc48c541069c103326` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-darwin-386.tar.gz) | `cbded4d58b3d2cbeb2e43c48c9dd359834c9c9aa376751a7f8960be45601fb40` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | `ceccd21fda90b96865801053f1784d4062d69b11e2e911483223860dfe6c3a17` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-linux-386.tar.gz) | `75c9794a7f43f891aa839b2571fa44ffced25197578adc31b4c3cb28d7fbf158` 
+[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | `184905f6b8b856306483d811d015cf0b28c0703ceb372594622732da2a07989f` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-linux-arm.tar.gz) | `2d985829499588d32483d7c6a36b3b0f2b6d4031eda31c65b066b77bc51bae66` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | `268556ede751058162a42d0156f27e42e37b23d60b2485e350cffe6e1b376fa4` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | `8859bd7a37bf5a659eb17e47d2c54d228950b2ef48243c93f11799c455789983` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | `90bbe2fc45ae722a05270820336b9178baaab198401bb6888e817afe6a1a304e` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-windows-386.tar.gz) | `948b01f555abfc30990345004d5ce679d4b9d0a32d699a50b6d8309040b2b2f2` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | `091e9d4e7fa611cf06d2907d159e0cc36ae8602403ad0819d62df4ddbaba6095` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | `727a5e8241035d631d90f3d119a27384abe93cde14c242c4d2d1cf948f84a650` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-server-linux-arm.tar.gz) | `6eb7479348e9480d9d1ee31dc991297b93e076dd21b567c595f82d45b66ef949` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | `9eab5ccdfba2803a743ed12b4323ad0e8e0215779edf5752224103b6667a35c1` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | `d86b07ee28ed3d2c0668a2737fff4b3d025d4cd7b6f1aadc85f8f13b4c12e578` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | `c2d19acb88684a52a74f469ab26874ab224023f29290865e08c86338d30dd598` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | `2957bf3e9dc9cd9570597434909e5ef03e996f8443c02f9d95fa6de2cd17126f` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-node-linux-arm.tar.gz) | `5995b8b9628fca9eaa92c283cfb4199ab353efa8953b980eec994f49ac3a0ebd` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | `996691b3b894ec9769be1ee45c5053ff1560e3ef161de8f8b9ac067c0d3559d3` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | `8bb7fe72ec704afa5ad96356787972144b0f7923fc68678894424f1f62da7041` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | `4c1f0314ad60537c8a7866b0cabdece21284ee91ae692d1999b3d5273ee7cbaf` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.11.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | `158832f41cd452f93482cc8a8f1dd69cc243eb63ce3581e7f2eab2de323f6202` + +## Changelog since v1.11.0-alpha.2 + +### Action Required + +* [action required] `.NodeName` and `.CRISocket` in the `MasterConfiguration` and `NodeConfiguration` v1alpha1 API objects 
are now `.NodeRegistration.Name` and `.NodeRegistration.CRISocket` respectively in the v1alpha2 API. The `.NoTaintMaster` field has been removed in the v1alpha2 API. ([#64210](https://github.com/kubernetes/kubernetes/pull/64210), [@luxas](https://github.com/luxas))
+* (ACTION REQUIRED) PersistentVolumeLabel admission controller is now disabled by default. If you depend on this feature (AWS/GCE) then ensure it is added to the `--enable-admission-plugins` flag on the kube-apiserver. ([#64326](https://github.com/kubernetes/kubernetes/pull/64326), [@andrewsykim](https://github.com/andrewsykim))
+* [action required] kubeadm: The `.Etcd` struct has been refactored in the v1alpha2 API. All the options now reside under either `.Etcd.Local` or `.Etcd.External`. Automatic conversions from the v1alpha1 API are supported. ([#64066](https://github.com/kubernetes/kubernetes/pull/64066), [@luxas](https://github.com/luxas))
+* [action required] kubeadm: kubelets in kubeadm clusters now disable the readonly port (10255). If you're relying on unauthenticated access to the readonly port, please switch to using the secure port (10250). Instead, you can now use ServiceAccount tokens when talking to the secure port, which will make it easier to get access to e.g. the `/metrics` endpoint of the kubelet securely. ([#64187](https://github.com/kubernetes/kubernetes/pull/64187), [@luxas](https://github.com/luxas))
+* [action required] kubeadm: Support for `.AuthorizationModes` in the kubeadm v1alpha2 API has been removed. Instead, you can use the `.APIServerExtraArgs` and `.APIServerExtraVolumes` fields to achieve the same effect. Files using the v1alpha1 API and setting this field will be automatically upgraded to this v1alpha2 API and the information will be preserved. ([#64068](https://github.com/kubernetes/kubernetes/pull/64068), [@luxas](https://github.com/luxas))
+* [action required] The formerly publicly-available cAdvisor web UI that the kubelet ran on port 4194 by default is now turned off by default. The flag configuring what port to run this UI on, `--cadvisor-port`, was deprecated in v1.10. Now the default is `--cadvisor-port=0`, in other words, to not run the web server. The recommended way to run cAdvisor, if you still need it, is via a DaemonSet. The `--cadvisor-port` flag will be removed in v1.12 ([#63881](https://github.com/kubernetes/kubernetes/pull/63881), [@luxas](https://github.com/luxas))
+* [action required] kubeadm: The `.ImagePullPolicy` field has been removed in the v1alpha2 API version. Instead it's set statically to `IfNotPresent` for all required images. If you want to always pull the latest images before cluster init (like what `Always` would do), run `kubeadm config images pull` before each `kubeadm init`. If you don't want the kubelet to pull any images at `kubeadm init` time, for instance because you don't have an internet connection, you can also run `kubeadm config images pull` before `kubeadm init` or side-load the images some other way (e.g. `docker load -i image.tar`). Having the images locally cached will result in no pull at runtime, which makes it possible to run without any internet connection. ([#64096](https://github.com/kubernetes/kubernetes/pull/64096), [@luxas](https://github.com/luxas))
+* [action required] In the new v1alpha2 kubeadm Configuration API, the `.CloudProvider` and `.PrivilegedPods` fields don't exist anymore. 
([#63866](https://github.com/kubernetes/kubernetes/pull/63866), [@luxas](https://github.com/luxas)) + * Instead, you should use the out-of-tree cloud provider implementations which are beta in v1.11. + * If you have to use the legacy in-tree cloud providers, you can rearrange your config like the example below. In case you need the `cloud-config` file (located in `{cloud-config-path}`), you can mount it into the API Server and controller-manager containers using ExtraVolumes like the example below. + * If you need to use the `.PrivilegedPods` functionality, you can still edit the manifests in + * `/etc/kubernetes/manifests/`, and set `.SecurityContext.Privileged=true` for the apiserver + * and controller manager. + * --- + * kind: MasterConfiguration + * apiVersion: kubeadm.k8s.io/v1alpha2 + * apiServerExtraArgs: + * cloud-provider: "{cloud}" + * cloud-config: "{cloud-config-path}" + * apiServerExtraVolumes: + * - name: cloud + * hostPath: "{cloud-config-path}" + * mountPath: "{cloud-config-path}" + * controllerManagerExtraArgs: + * cloud-provider: "{cloud}" + * cloud-config: "{cloud-config-path}" + * controllerManagerExtraVolumes: + * - name: cloud + * hostPath: "{cloud-config-path}" + * mountPath: "{cloud-config-path}" + * --- +* [action required] kubeadm now uses an upgraded API version for the configuration file, `kubeadm.k8s.io/v1alpha2`. kubeadm in v1.11 will still be able to read `v1alpha1` configuration, and will automatically convert the configuration to `v1alpha2` internally and when storing the configuration in the ConfigMap in the cluster. ([#63788](https://github.com/kubernetes/kubernetes/pull/63788), [@luxas](https://github.com/luxas)) +* The annotation `service.alpha.kubernetes.io/tolerate-unready-endpoints` is deprecated. Users should use Service.spec.publishNotReadyAddresses instead. ([#63742](https://github.com/kubernetes/kubernetes/pull/63742), [@thockin](https://github.com/thockin)) +* avoid duplicate status in audit events ([#62695](https://github.com/kubernetes/kubernetes/pull/62695), [@CaoShuFeng](https://github.com/CaoShuFeng)) + +### Other notable changes + +* Remove rescheduler from master. ([#64364](https://github.com/kubernetes/kubernetes/pull/64364), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) +* Declare IPVS-based kube-proxy GA ([#58442](https://github.com/kubernetes/kubernetes/pull/58442), [@m1093782566](https://github.com/m1093782566)) +* kubeadm: conditionally set the kubelet cgroup driver for Docker ([#64347](https://github.com/kubernetes/kubernetes/pull/64347), [@neolit123](https://github.com/neolit123)) +* kubectl built for darwin from darwin now enables cgo to use the system-native C libraries for DNS resolution. Cross-compiled kubectl (e.g. from an official kubernetes release) still uses the go-native netgo DNS implementation. ([#64219](https://github.com/kubernetes/kubernetes/pull/64219), [@ixdy](https://github.com/ixdy)) +* AWS EBS volumes can be now used as ReadOnly in pods. ([#64403](https://github.com/kubernetes/kubernetes/pull/64403), [@jsafrane](https://github.com/jsafrane)) +* Exec authenticator plugin supports TLS client certificates. ([#61803](https://github.com/kubernetes/kubernetes/pull/61803), [@awly](https://github.com/awly)) +* Use Patch instead of Put to sync pod status ([#62306](https://github.com/kubernetes/kubernetes/pull/62306), [@freehan](https://github.com/freehan)) +* kubectl apply --prune supports CronJob resource. 
([#62991](https://github.com/kubernetes/kubernetes/pull/62991), [@tomoe](https://github.com/tomoe)) +* Label ExternalEtcdClientCertificates can be used for ignoring all preflight check issues related to client certificate files for external etcd. ([#64269](https://github.com/kubernetes/kubernetes/pull/64269), [@kad](https://github.com/kad)) +* Provide a meaningful error message in openstack cloud provider when no valid IP address can be found for a node ([#64318](https://github.com/kubernetes/kubernetes/pull/64318), [@gonzolino](https://github.com/gonzolino)) +* kubeadm: Add a 'kubeadm config migrate' command to convert old API types to their newer counterparts in the new, supported API types. This is just a client-side tool, it just executes locally without requiring a cluster to be running. You can think about this as an Unix pipe that upgrades config files. ([#64232](https://github.com/kubernetes/kubernetes/pull/64232), [@luxas](https://github.com/luxas)) +* The --dry-run flag has been enabled for kubectl auth reconcile ([#64458](https://github.com/kubernetes/kubernetes/pull/64458), [@mrogers950](https://github.com/mrogers950)) +* Add probe based mechanism for kubelet plugin discovery ([#63328](https://github.com/kubernetes/kubernetes/pull/63328), [@vikaschoudhary16](https://github.com/vikaschoudhary16)) +* Add Establishing Controller on CRDs to avoid race between Established condition and CRs actually served. In HA setups, the Established condition is delayed by 5 seconds. ([#63068](https://github.com/kubernetes/kubernetes/pull/63068), [@xmudrii](https://github.com/xmudrii)) +* CoreDNS is now v1.1.3 ([#64258](https://github.com/kubernetes/kubernetes/pull/64258), [@rajansandeep](https://github.com/rajansandeep)) +* kubeadm will pull required images during preflight checks if it cannot find them on the system ([#64105](https://github.com/kubernetes/kubernetes/pull/64105), [@chuckha](https://github.com/chuckha)) +* kubeadm: rename the addon parameter `kube-dns` to `coredns` for `kubeadm alpha phases addons` as CoreDNS is now the default DNS server in 1.11. ([#64274](https://github.com/kubernetes/kubernetes/pull/64274), [@neolit123](https://github.com/neolit123)) +* kubeadm: when starting the API server use the arguments --enable-admission-plugins and --disable-admission-plugins instead of the deprecated --admission-control. ([#64165](https://github.com/kubernetes/kubernetes/pull/64165), [@neolit123](https://github.com/neolit123)) +* Add spec.additionalPrinterColumns to CRDs to define server side printing columns. 
([#60991](https://github.com/kubernetes/kubernetes/pull/60991), [@sttts](https://github.com/sttts)) +* fix azure file size grow issue ([#64383](https://github.com/kubernetes/kubernetes/pull/64383), [@andyzhangx](https://github.com/andyzhangx)) +* Fix issue of colliding nodePorts when the cluster has services with externalTrafficPolicy=Local ([#64349](https://github.com/kubernetes/kubernetes/pull/64349), [@nicksardo](https://github.com/nicksardo)) +* fixes a panic applying json patches containing out of bounds operations ([#64355](https://github.com/kubernetes/kubernetes/pull/64355), [@liggitt](https://github.com/liggitt)) +* Fail fast if cgroups-per-qos is set on Windows ([#62984](https://github.com/kubernetes/kubernetes/pull/62984), [@feiskyer](https://github.com/feiskyer)) +* Move Volume expansion to Beta ([#64288](https://github.com/kubernetes/kubernetes/pull/64288), [@gnufied](https://github.com/gnufied)) +* kubectl delete does not use reapers for removing objects anymore, but relies on server-side GC entirely ([#63979](https://github.com/kubernetes/kubernetes/pull/63979), [@soltysh](https://github.com/soltysh)) +* Basic plumbing for volume topology aware dynamic provisioning ([#63232](https://github.com/kubernetes/kubernetes/pull/63232), [@lichuqiang](https://github.com/lichuqiang)) +* API server properly parses propagationPolicy as a query parameter sent with a delete request ([#63414](https://github.com/kubernetes/kubernetes/pull/63414), [@roycaihw](https://github.com/roycaihw)) +* Property `serverAddressByClientCIDRs` in `metav1.APIGroup` (discovery API) now become optional instead of required ([#61963](https://github.com/kubernetes/kubernetes/pull/61963), [@roycaihw](https://github.com/roycaihw)) +* The dynamic Kubelet config feature is now beta, and the DynamicKubeletConfig feature gate is on by default. In order to use dynamic Kubelet config, ensure that the Kubelet's --dynamic-config-dir option is set. ([#64275](https://github.com/kubernetes/kubernetes/pull/64275), [@mtaufen](https://github.com/mtaufen)) +* Add reason message logs for non-exist Azure resources ([#64248](https://github.com/kubernetes/kubernetes/pull/64248), [@feiskyer](https://github.com/feiskyer)) +* Fix SessionAffinity not updated issue for Azure load balancer ([#64180](https://github.com/kubernetes/kubernetes/pull/64180), [@feiskyer](https://github.com/feiskyer)) +* The kube-apiserver openapi doc now includes extensions identifying APIService and CustomResourceDefinition kinds ([#64174](https://github.com/kubernetes/kubernetes/pull/64174), [@liggitt](https://github.com/liggitt)) +* apiservices/status and certificatesigningrequests/status now support GET and PATCH ([#64063](https://github.com/kubernetes/kubernetes/pull/64063), [@roycaihw](https://github.com/roycaihw)) +* kubectl: This client version requires the `apps/v1` APIs, so it will not work against a cluster version older than v1.9.0. Note that kubectl only guarantees compatibility with clusters that are +/-1 minor version away. 
([#61419](https://github.com/kubernetes/kubernetes/pull/61419), [@enisoc](https://github.com/enisoc)) +* Correct the way we reset containers and pods in kubeadm via crictl ([#63862](https://github.com/kubernetes/kubernetes/pull/63862), [@runcom](https://github.com/runcom)) +* Allow env from resource with keys & updated tests ([#60636](https://github.com/kubernetes/kubernetes/pull/60636), [@PhilipGough](https://github.com/PhilipGough)) +* The kubelet certificate rotation feature can now be enabled via the `.RotateCertificates` field in the kubelet's config file. The `--rotate-certificates` flag is now deprecated, and will be removed in a future release. ([#63912](https://github.com/kubernetes/kubernetes/pull/63912), [@luxas](https://github.com/luxas)) +* Use DeleteOptions.PropagationPolicy instead of OrphanDependents in kubectl ([#59851](https://github.com/kubernetes/kubernetes/pull/59851), [@nilebox](https://github.com/nilebox)) +* add block device support for Azure disk ([#63841](https://github.com/kubernetes/kubernetes/pull/63841), [@andyzhangx](https://github.com/andyzhangx)) +* Fix incorrectly propagated ResourceVersion in ListRequests returning 0 items. ([#64150](https://github.com/kubernetes/kubernetes/pull/64150), [@wojtek-t](https://github.com/wojtek-t)) +* Changes ext3/ext4 volume creation to not reserve any portion of the volume for the root user. ([#64102](https://github.com/kubernetes/kubernetes/pull/64102), [@atombender](https://github.com/atombender)) +* Add CRD Versioning with NOP converter ([#63830](https://github.com/kubernetes/kubernetes/pull/63830), [@mbohlool](https://github.com/mbohlool)) +* adds a `kubectl wait` command ([#64034](https://github.com/kubernetes/kubernetes/pull/64034), [@deads2k](https://github.com/deads2k)) +* "kubeadm init" now writes a structured and versioned kubelet ComponentConfiguration file to `/var/lib/kubelet/config.yaml` and an environment file with runtime flags (you can source this file in the systemd kubelet dropin) to `/var/lib/kubelet/kubeadm-flags.env`. ([#63887](https://github.com/kubernetes/kubernetes/pull/63887), [@luxas](https://github.com/luxas)) +* `kubectl auth reconcile` only works with rbac.v1 ([#63967](https://github.com/kubernetes/kubernetes/pull/63967), [@deads2k](https://github.com/deads2k)) +* The dynamic Kubelet config feature will now update config in the event of a ConfigMap mutation, which reduces the chance for silent config skew. Only name, namespace, and kubeletConfigKey may now be set in Node.Spec.ConfigSource.ConfigMap. The least disruptive pattern for config management is still to create a new ConfigMap and incrementally roll out a new Node.Spec.ConfigSource. ([#63221](https://github.com/kubernetes/kubernetes/pull/63221), [@mtaufen](https://github.com/mtaufen)) +* Graduate CRI container log rotation to beta, and enable it by default. ([#64046](https://github.com/kubernetes/kubernetes/pull/64046), [@yujuhong](https://github.com/yujuhong)) +* APIServices with kube-like versions (e.g. v1, v2beta1, etc.) will be sorted appropriately within each group. ([#64004](https://github.com/kubernetes/kubernetes/pull/64004), [@mbohlool](https://github.com/mbohlool)) +* kubectl and client-go now detect duplicated names for users, clusters and contexts when loading kubeconfig and report an error ([#60464](https://github.com/kubernetes/kubernetes/pull/60464), [@roycaihw](https://github.com/roycaihw)) +* event object references with an apiVersion will now report that apiVersion.
([#63913](https://github.com/kubernetes/kubernetes/pull/63913), [@deads2k](https://github.com/deads2k)) +* Subresources for custom resources are now beta and enabled by default. With this, updates to the `/status` subresource will disallow updates to all fields other than `.status` (not just `.spec` and `.metadata` as before). Also, `required` can be used at the root of the CRD OpenAPI validation schema when the `/status` subresource is enabled. ([#63598](https://github.com/kubernetes/kubernetes/pull/63598), [@nikhita](https://github.com/nikhita)) +* increase the gRPC client default response size ([#63977](https://github.com/kubernetes/kubernetes/pull/63977), [@runcom](https://github.com/runcom)) +* HTTP transport now uses `context.Context` to cancel dial operations. The k8s.io/client-go/transport/Config struct has been updated to accept a function with a `context.Context` parameter. This is a breaking change if you use this field in your code. ([#60012](https://github.com/kubernetes/kubernetes/pull/60012), [@ash2k](https://github.com/ash2k)) +* Adds a mechanism in the vSphere Cloud Provider to get credentials from Kubernetes secrets ([#63902](https://github.com/kubernetes/kubernetes/pull/63902), [@abrarshivani](https://github.com/abrarshivani)) +* kubeadm: A `kubeadm config print-default` command has now been added that you can use as a starting point when writing your own kubeadm configuration files ([#63969](https://github.com/kubernetes/kubernetes/pull/63969), [@luxas](https://github.com/luxas)) +* Update event-exporter to version v0.2.0, which supports both the old (gke_container/gce_instance) and new (k8s_container/k8s_node/k8s_pod) stackdriver resources. ([#63918](https://github.com/kubernetes/kubernetes/pull/63918), [@cezarygerard](https://github.com/cezarygerard)) +* Cluster Autoscaler 1.2.2 (release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.2.2) ([#63974](https://github.com/kubernetes/kubernetes/pull/63974), [@aleksandra-malinowska](https://github.com/aleksandra-malinowska)) +* Update kubeadm's minimum supported Kubernetes version in v1.11.x to 1.10 ([#63920](https://github.com/kubernetes/kubernetes/pull/63920), [@dixudx](https://github.com/dixudx)) +* Add 'UpdateStrategyType' and 'RollingUpdateStrategy' to 'kubectl describe sts' command output. ([#63844](https://github.com/kubernetes/kubernetes/pull/63844), [@tossmilestone](https://github.com/tossmilestone)) +* Remove UID mutation from request.context. ([#63957](https://github.com/kubernetes/kubernetes/pull/63957), [@hzxuzhonghu](https://github.com/hzxuzhonghu)) +* kubeadm has removed `.Etcd.SelfHosting` from its configuration API. It was never used in practice. ([#63871](https://github.com/kubernetes/kubernetes/pull/63871), [@luxas](https://github.com/luxas)) +* list/watch API requests with a fieldSelector that specifies `metadata.name` can now be authorized as requests for an individual named resource (see the client-go sketch below) ([#63469](https://github.com/kubernetes/kubernetes/pull/63469), [@wojtek-t](https://github.com/wojtek-t)) +* Add a way to pass extra arguments to etcd. ([#63961](https://github.com/kubernetes/kubernetes/pull/63961), [@mborsz](https://github.com/mborsz)) +* minor fix for the VolumeZoneChecker predicate: the storage class can be specified in either the annotation or the spec. ([#63749](https://github.com/kubernetes/kubernetes/pull/63749), [@wenlxie](https://github.com/wenlxie))
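The fieldSelector note above can be pictured with a small client-go sketch; the namespace, pod name, and kubeconfig path are assumptions for illustration, and the context-free `List` signature matches the client-go vintage used elsewhere in this patch series:

```go
// Sketch: a list request pinned to a single metadata.name, which the
// apiserver can now authorize like a get of that one named resource.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the default kubeconfig; error handling is deliberately terse.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// A list scoped to one name rather than a broad, unfiltered list.
	pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{
		FieldSelector: "metadata.name=my-pod",
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("matched %d pod(s)\n", len(pods.Items))
}
```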
+* vSphere Cloud Provider: add SAML token authentication support ([#63824](https://github.com/kubernetes/kubernetes/pull/63824), [@dougm](https://github.com/dougm)) +* adds the `kubeadm upgrade diff` command to show how static pod manifests will be changed by an upgrade. ([#63930](https://github.com/kubernetes/kubernetes/pull/63930), [@liztio](https://github.com/liztio)) +* Fix memory cgroup notifications, and reduce associated log spam. ([#63220](https://github.com/kubernetes/kubernetes/pull/63220), [@dashpole](https://github.com/dashpole)) +* Adds a `kubeadm config images pull` command to pull container images used by kubeadm. ([#63833](https://github.com/kubernetes/kubernetes/pull/63833), [@chuckha](https://github.com/chuckha)) +* Restores the pre-1.10 behavior of the openstack cloud provider, which uses the instance name as the Kubernetes Node name. This requires that instances be named with RFC-1123-compatible names. ([#63903](https://github.com/kubernetes/kubernetes/pull/63903), [@liggitt](https://github.com/liggitt)) +* Added support for NFS relations on the kubernetes-worker charm. ([#63817](https://github.com/kubernetes/kubernetes/pull/63817), [@hyperbolic2346](https://github.com/hyperbolic2346)) +* Stop using InfluxDB as the default cluster monitoring ([#62328](https://github.com/kubernetes/kubernetes/pull/62328), [@serathius](https://github.com/serathius)) + * InfluxDB cluster monitoring is deprecated and will be removed in v1.12 +* GCE: Fix to make the built-in `kubernetes` service properly point to the master's load balancer address in clusters that use multiple master VMs. ([#63696](https://github.com/kubernetes/kubernetes/pull/63696), [@grosskur](https://github.com/grosskur)) +* Kubernetes clusters on GCE now have crictl installed. Users can use it to help debug their nodes. The documentation for crictl can be found at https://github.com/kubernetes-incubator/cri-tools/blob/master/docs/crictl.md. ([#63357](https://github.com/kubernetes/kubernetes/pull/63357), [@Random-Liu](https://github.com/Random-Liu)) +* The NodeRestriction admission plugin now prevents kubelets from modifying/removing taints applied to their Node API object. ([#63167](https://github.com/kubernetes/kubernetes/pull/63167), [@liggitt](https://github.com/liggitt)) +* The status of dynamic Kubelet config is now reported via Node.Status.Config, rather than the KubeletConfigOk node condition. ([#63314](https://github.com/kubernetes/kubernetes/pull/63314), [@mtaufen](https://github.com/mtaufen)) +* kubeadm now checks that IPv4/IPv6 forwarding is enabled ([#63872](https://github.com/kubernetes/kubernetes/pull/63872), [@kad](https://github.com/kad)) +* kubeadm will now deploy CoreDNS by default instead of KubeDNS ([#63509](https://github.com/kubernetes/kubernetes/pull/63509), [@detiber](https://github.com/detiber)) +* This PR leverages subtests on the existing table tests for the scheduler units (see the sketch below). ([#63658](https://github.com/kubernetes/kubernetes/pull/63658), [@xchapter7x](https://github.com/xchapter7x)) + * Some refactoring of error/status messages and functions to align with the new approach.
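A generic sketch of the table-test-plus-subtests pattern that note refers to; the helper under test and its cases are invented for illustration, not lifted from the scheduler code:

```go
// Sketch: a table-driven test where each row becomes a named subtest.
package scheduler

import "testing"

// sumResources stands in for whatever scheduler helper is under test.
func sumResources(a, b int64) int64 { return a + b }

func TestSumResources(t *testing.T) {
	tests := []struct {
		name     string
		a, b     int64
		expected int64
	}{
		{name: "both zero", a: 0, b: 0, expected: 0},
		{name: "simple add", a: 2, b: 3, expected: 5},
	}

	for _, test := range tests {
		// t.Run gives each table row its own named subtest, so failures are
		// reported per case instead of once for the whole table.
		t.Run(test.name, func(t *testing.T) {
			if got := sumResources(test.a, test.b); got != test.expected {
				t.Errorf("expected %d, got %d", test.expected, got)
			}
		})
	}
}
```

Running `go test -run 'TestSumResources/simple_add'` then exercises just that row, which is the main payoff of the pattern.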
+* kubeadm upgrade now supports external etcd setups again ([#63495](https://github.com/kubernetes/kubernetes/pull/63495), [@detiber](https://github.com/detiber)) +* fix mount/unmount failures for a Windows pod ([#63272](https://github.com/kubernetes/kubernetes/pull/63272), [@andyzhangx](https://github.com/andyzhangx)) +* CRI: update documentation for the container log path. The container log path has been changed from containername_attempt#.log to containername/attempt#.log ([#62015](https://github.com/kubernetes/kubernetes/pull/62015), [@feiskyer](https://github.com/feiskyer)) +* Create a new `dryRun` query parameter for mutating endpoints. If the parameter is set, then the query will be rejected, as the feature is not implemented yet. This will allow forward compatibility with future clients; otherwise, future clients talking with older apiservers might end up modifying a resource even if they include the `dryRun` query parameter. ([#63557](https://github.com/kubernetes/kubernetes/pull/63557), [@apelisse](https://github.com/apelisse)) +* kubelet: fix hangs in updating Node status after network interruptions/changes between the kubelet and API server ([#63492](https://github.com/kubernetes/kubernetes/pull/63492), [@liggitt](https://github.com/liggitt)) +* The `PriorityClass` API is promoted to `scheduling.k8s.io/v1beta1` ([#63100](https://github.com/kubernetes/kubernetes/pull/63100), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) +* Services can listen on the same host ports on different interfaces when --nodeport-addresses is specified ([#62003](https://github.com/kubernetes/kubernetes/pull/62003), [@m1093782566](https://github.com/m1093782566)) +* kubeadm will no longer generate an unused etcd CA and certificates when configured to use an external etcd cluster. ([#63806](https://github.com/kubernetes/kubernetes/pull/63806), [@detiber](https://github.com/detiber)) +* corrects a race condition in bootstrapping aggregated cluster roles in new HA clusters ([#63761](https://github.com/kubernetes/kubernetes/pull/63761), [@liggitt](https://github.com/liggitt)) +* Adding initial Korean translation for kubectl ([#62040](https://github.com/kubernetes/kubernetes/pull/62040), [@ianychoi](https://github.com/ianychoi)) +* Report node DNS info with the --node-ip flag ([#63170](https://github.com/kubernetes/kubernetes/pull/63170), [@micahhausler](https://github.com/micahhausler)) +* The old dynamic client has been replaced by a new one. The previous dynamic client will exist for one release in `client-go/deprecated-dynamic`. Switch as soon as possible. ([#63446](https://github.com/kubernetes/kubernetes/pull/63446), [@deads2k](https://github.com/deads2k)) +* The CustomResourceDefinitions Status subresource now supports GET and PATCH ([#63619](https://github.com/kubernetes/kubernetes/pull/63619), [@roycaihw](https://github.com/roycaihw)) +* Re-enable the nodeipam controller for external clouds. ([#63049](https://github.com/kubernetes/kubernetes/pull/63049), [@andrewsykim](https://github.com/andrewsykim)) +* Removes a preflight check for kubeadm that validated custom kube-apiserver, kube-controller-manager and kube-scheduler arguments. ([#63673](https://github.com/kubernetes/kubernetes/pull/63673), [@chuckha](https://github.com/chuckha)) +* Adds a list-images subcommand to kubeadm that lists required images for a kubeadm install.
([#63450](https://github.com/kubernetes/kubernetes/pull/63450), [@chuckha](https://github.com/chuckha)) +* Apply pod name and namespace labels to the pod cgroup in cAdvisor metrics ([#63406](https://github.com/kubernetes/kubernetes/pull/63406), [@derekwaynecarr](https://github.com/derekwaynecarr)) +* try to read the openstack auth config from the client config and fall back to reading from the environment variables if it is not available ([#60200](https://github.com/kubernetes/kubernetes/pull/60200), [@dixudx](https://github.com/dixudx)) +* GC is now bound by QPS (it wasn't before), so if you need more QPS to avoid rate limiting GC, you'll have to set it. ([#63657](https://github.com/kubernetes/kubernetes/pull/63657), [@shyamjvs](https://github.com/shyamjvs)) +* The Kubelet's deprecated --allow-privileged flag now defaults to true. This enables users to stop setting --allow-privileged in order to transition to PodSecurityPolicy. Previously, users had to continue setting --allow-privileged, because the default was false. ([#63442](https://github.com/kubernetes/kubernetes/pull/63442), [@mtaufen](https://github.com/mtaufen)) +* You must now specify Node.Spec.ConfigSource.ConfigMap.KubeletConfigKey when using dynamic Kubelet config to tell the Kubelet which key of the ConfigMap identifies its config file (see the sketch after these notes). ([#59847](https://github.com/kubernetes/kubernetes/pull/59847), [@mtaufen](https://github.com/mtaufen)) +* The Kubernetes version command line parameter in kubeadm has been updated to drop an unnecessary redirection from ci/latest.txt to ci-cross/latest.txt. Users should know exactly where the builds are stored on Google Cloud storage buckets from now on. For example, for 1.9 and 1.10, users can specify ci/latest-1.9 and ci/latest-1.10, as the CI jobs that build images correctly update those locations. The CI jobs for master update the ci-cross/latest location, so if you are looking for the latest master builds, the correct parameter to use would be ci-cross/latest. ([#63504](https://github.com/kubernetes/kubernetes/pull/63504), [@dims](https://github.com/dims)) +* Search standard KubeConfig file locations when using `kubeadm token` without `--kubeconfig`. ([#62850](https://github.com/kubernetes/kubernetes/pull/62850), [@neolit123](https://github.com/neolit123)) +* Include the list of security groups in the error when failing because more than one is tagged ([#58874](https://github.com/kubernetes/kubernetes/pull/58874), [@sorenmat](https://github.com/sorenmat)) +* Allow "required" to be used at the root of the CRD OpenAPI validation schema when the /status subresource is enabled. ([#63533](https://github.com/kubernetes/kubernetes/pull/63533), [@sttts](https://github.com/sttts)) +* When updating the /status subresource of a custom resource, only the value at the `.status` subpath of the update is considered. ([#63385](https://github.com/kubernetes/kubernetes/pull/63385), [@CaoShuFeng](https://github.com/CaoShuFeng)) +* Support nodeSelector.matchFields (the node's `metadata.name`) in the scheduler. ([#62453](https://github.com/kubernetes/kubernetes/pull/62453), [@k82cn](https://github.com/k82cn)) +* Do not check vmSetName when getting an Azure node's IP ([#63541](https://github.com/kubernetes/kubernetes/pull/63541), [@feiskyer](https://github.com/feiskyer)) +* Fix stackdriver metrics for node memory that used the wrong metric type ([#63535](https://github.com/kubernetes/kubernetes/pull/63535), [@serathius](https://github.com/serathius)) +* [fluentd-gcp addon] Use the logging agent's node name as the metadata agent URL. ([#63353](https://github.com/kubernetes/kubernetes/pull/63353), [@bmoyles0117](https://github.com/bmoyles0117))
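The kubeletConfigKey requirement mentioned a few notes above can be sketched with the core/v1 Go types; the ConfigMap name and key below are placeholders, not values mandated by the linked PR:

```go
// Sketch: pointing a Node at a kubelet ConfigMap, naming the key that holds
// the kubelet's config file.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Only name, namespace and kubeletConfigKey may be set on the ConfigMap source.
	source := &v1.NodeConfigSource{
		ConfigMap: &v1.ConfigMapNodeConfigSource{
			Namespace:        "kube-system",
			Name:             "my-node-config",
			KubeletConfigKey: "kubelet",
		},
	}

	node := &v1.Node{}
	node.Spec.ConfigSource = source
	fmt.Println(node.Spec.ConfigSource.ConfigMap.KubeletConfigKey)
}
```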
+* `kubectl cp` supports completion. ([#60371](https://github.com/kubernetes/kubernetes/pull/60371), [@superbrothers](https://github.com/superbrothers)) +* Azure VMSS: support VM names that contain the `_` character ([#63526](https://github.com/kubernetes/kubernetes/pull/63526), [@djsly](https://github.com/djsly)) +* The OpenStack built-in cloud provider is now deprecated. Please use the external cloud provider for OpenStack. ([#63524](https://github.com/kubernetes/kubernetes/pull/63524), [@dims](https://github.com/dims)) +* the shortcuts that have been handled server-side since at least 1.9 are no longer hardcoded in kubectl ([#63507](https://github.com/kubernetes/kubernetes/pull/63507), [@deads2k](https://github.com/deads2k)) +* Fixes fake client generation for non-namespaced subresources ([#60445](https://github.com/kubernetes/kubernetes/pull/60445), [@jhorwit2](https://github.com/jhorwit2)) +* `kubectl delete` with selection criteria defaults to ignoring not found errors ([#63490](https://github.com/kubernetes/kubernetes/pull/63490), [@deads2k](https://github.com/deads2k)) +* Increase the scheduler cache generation number monotonically in order to avoid collisions and the use of stale information in the scheduler. ([#63264](https://github.com/kubernetes/kubernetes/pull/63264), [@bsalamat](https://github.com/bsalamat)) +* Fixes issue where subpath readOnly mounts failed ([#63045](https://github.com/kubernetes/kubernetes/pull/63045), [@msau42](https://github.com/msau42)) +* Update to use go1.10.2 ([#63412](https://github.com/kubernetes/kubernetes/pull/63412), [@praseodym](https://github.com/praseodym)) +* `kubectl create [secret | configmap] --from-file` now works on Windows with fully-qualified paths ([#63439](https://github.com/kubernetes/kubernetes/pull/63439), [@liggitt](https://github.com/liggitt)) +* kube-apiserver: the default `--endpoint-reconciler-type` is now `lease`. The `master-count` endpoint reconciler type is deprecated and will be removed in 1.13. ([#63383](https://github.com/kubernetes/kubernetes/pull/63383), [@liggitt](https://github.com/liggitt)) +* owner references can be set during creation without deletion power ([#63403](https://github.com/kubernetes/kubernetes/pull/63403), [@deads2k](https://github.com/deads2k)) +* Lays groundwork for OIDC distributed claims handling in the apiserver authentication token checker. ([#63213](https://github.com/kubernetes/kubernetes/pull/63213), [@filmil](https://github.com/filmil)) + * A distributed claim allows the OIDC provider to delegate a claim to a + * separate URL. Distributed claims are + * defined in OpenID Connect Core 1.0, section 5.6.2. + * For details, see: + * http://openid.net/specs/openid-connect-core-1_0.html#AggregatedDistributedClaims +* Use /usr/bin/env in all script shebangs to increase portability.
([#62657](https://github.com/kubernetes/kubernetes/pull/62657), [@matthyx](https://github.com/matthyx)) + + + # v1.11.0-alpha.2 [Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) From e680780424a3f9f4cbc15f5b0b6372199c411363 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Thu, 31 May 2018 20:13:00 -0400 Subject: [PATCH 307/307] add debugging for aggregator flake --- test/e2e/apimachinery/aggregator.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index a1139655e41..30e6cb22b52 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -319,7 +319,16 @@ func TestSampleAPIServer(f *framework.Framework, image string) { }) framework.ExpectNoError(err, "creating apiservice %s with namespace %s", "v1alpha1.wardle.k8s.io", namespace) + var ( + currentAPIService *apiregistrationv1beta1.APIService + currentPods *v1.PodList + ) + err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) { + + currentAPIService, _ = aggrclient.ApiregistrationV1beta1().APIServices().Get("v1alpha1.wardle.k8s.io", metav1.GetOptions{}) + currentPods, _ = client.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + request := restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders") request.SetHeader("Accept", "application/json") _, err := request.DoRaw() @@ -338,6 +347,22 @@ func TestSampleAPIServer(f *framework.Framework, image string) { } return true, nil }) + if err != nil { + currentAPIServiceJSON, _ := json.Marshal(currentAPIService) + framework.Logf("current APIService: %s", string(currentAPIServiceJSON)) + + currentPodsJSON, _ := json.Marshal(currentPods) + framework.Logf("current pods: %s", string(currentPodsJSON)) + + if currentPods != nil { + for _, pod := range currentPods.Items { + for _, container := range pod.Spec.Containers { + logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name) + framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs) + } + } + } + } framework.ExpectNoError(err, "gave up waiting for apiservice wardle to come up successfully") flunderName := generateFlunderName("rest-flunder")