Merge pull request #78780 from spiffxp/name-test-e2e-api-imports
rename test/e2e{,_node} api imports to groupversion
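The convention applied throughout this change is to alias every Kubernetes API import to its group name plus version, so that a reference such as appsv1.Deployment or admissionregistrationv1beta1.Rule identifies its group and version on its own. A minimal, illustrative sketch of the resulting style (not a file from this change; it only reuses identifiers that appear in the diff below):

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	// With groupversion aliases, the qualifier itself says which API group
	// and version a symbol comes from, instead of a bare "apps" or "v1beta1".
	strategy := appsv1.RollingUpdateDeploymentStrategyType
	policy := admissionregistrationv1beta1.Ignore
	fmt.Println(strategy, policy)
}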
@@ -1,3 +1,49 @@
{
"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1"
}
"k8s.io/api/admissionregistration/v1beta1": "admissionregistrationv1beta1",
"k8s.io/api/admission/v1beta1": "admissionv1beta1",
"k8s.io/api/apps/v1": "appsv1",
"k8s.io/api/apps/v1beta1": "appsv1beta1",
"k8s.io/api/apps/v1beta2": "appsv1beta2",
"k8s.io/api/auditregistration/v1alpha1": "auditregistrationv1alpha1",
"k8s.io/api/authentication/v1": "authenticationv1",
"k8s.io/api/authentication/v1beta1": "authenticationv1beta1",
"k8s.io/api/authorization/v1": "authorizationv1",
"k8s.io/api/authorization/v1beta1": "authorizationv1beta1",
"k8s.io/api/autoscaling/v1": "autoscalingv1",
"k8s.io/api/batch/v1": "batchv1",
"k8s.io/api/batch/v1beta1": "batchv1beta1",
"k8s.io/api/certificates/v1beta1": "certificatesv1beta1",
"k8s.io/api/coordination/v1": "coordinationv1",
"k8s.io/api/coordination/v1beta1": "coordinationv1beta1",
"k8s.io/api/core/v1": "v1",
"k8s.io/api/events/v1beta1": "eventsv1beta1",
"k8s.io/api/extensions/v1beta1": "extensionsv1beta1",
"k8s.io/api/imagepolicy/v1alpha1": "imagepolicyv1alpha1",
"k8s.io/api/networking/v1": "networkingv1",
"k8s.io/api/networking/v1beta1": "networkingv1beta1",
"k8s.io/api/node/v1alpha1": "nodev1alpha1",
"k8s.io/api/node/v1beta1": "nodev1beta1",
"k8s.io/api/policy/v1beta1": "policyv1beta1",
"k8s.io/api/rbac/v1": "rbacv1",
"k8s.io/api/rbac/v1alpha1": "rbacv1alpha1",
"k8s.io/api/rbac/v1beta1": "rbacv1beta1",
"k8s.io/api/scheduling/v1": "schedulingv1",
"k8s.io/api/scheduling/v1alpha1": "schedulingv1alpha1",
"k8s.io/api/scheduling/v1beta1": "schedulingv1beta1",
"k8s.io/api/settings/v1alpha1": "settingsv1alpha1",
"k8s.io/api/storage/v1": "storagev1",
"k8s.io/api/storage/v1alpha1": "storagev1alpha1",
"k8s.io/api/storage/v1beta1": "storagev1beta1",
"k8s.io/kubernetes/pkg/controller/apis/config/v1alpha1": "controllerconfigv1alpha1",
"k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1": "kubeletconfigv1beta1",
"k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha": "kubeletdevicepluginv1alpha",
"k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1": "kubeletdevicepluginv1beta1",
"k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1": "kubeletpluginregistrationv1",
"k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1": "kubeletpluginregistrationv1alpha1",
"k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1beta1": "kubeletpluginregistrationv1beta1",
"k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1": "kubeletpodresourcesv1alpha1",
"k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1": "kubeletresourcemetricsv1alpha1",
"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1",
"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1": "proxyconfigv1alpha1",
"k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1"
}
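The mapping above pairs an import path with the alias the tests are expected to use for it. The diff does not show what consumes this file, so the following is only a hedged sketch of how such a mapping could be enforced: parse a Go file, look up each import path in the JSON table, and flag imports whose alias differs. The file names and command-line shape here are assumptions, not part of the change.

package main

import (
	"encoding/json"
	"fmt"
	"go/parser"
	"go/token"
	"os"
	"strconv"
)

func main() {
	// Usage (hypothetical): aliascheck <aliases.json> <file.go>
	raw, err := os.ReadFile(os.Args[1])
	if err != nil {
		panic(err)
	}
	preferred := map[string]string{}
	if err := json.Unmarshal(raw, &preferred); err != nil {
		panic(err)
	}

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, os.Args[2], nil, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}
	for _, imp := range f.Imports {
		path, _ := strconv.Unquote(imp.Path.Value)
		want, ok := preferred[path]
		if !ok {
			continue // no preference recorded for this path
		}
		got := ""
		if imp.Name != nil {
			got = imp.Name.Name
		}
		if got != want {
			fmt.Printf("%s: import %q should be aliased %q (found %q)\n",
				fset.Position(imp.Pos()), path, want, got)
		}
	}
}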
@@ -24,7 +24,7 @@ import (
"strings"
"time"

apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -238,18 +238,18 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
},
},
}
d := &apps.Deployment{
d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@@ -22,7 +22,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"

apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -278,18 +278,18 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
Image: image,
},
}
d := &apps.Deployment{
d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentCRDName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@@ -21,7 +21,7 @@ import (
"sync/atomic"
"time"

apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
@@ -113,17 +113,17 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
}
}

func newOwnerDeployment(f *framework.Framework, deploymentName string, labels map[string]string) *apps.Deployment {
func newOwnerDeployment(f *framework.Framework, deploymentName string, labels map[string]string) *appsv1.Deployment {
replicas := int32(2)
return &apps.Deployment{
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: labels},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: getPodTemplateSpec(labels),
},
@@ -18,12 +18,8 @@ package apimachinery

import (
"fmt"
"reflect"
"strings"
"time"

"k8s.io/api/admissionregistration/v1beta1"
apps "k8s.io/api/apps/v1"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
@@ -44,6 +40,9 @@ import (
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
"reflect"
"strings"
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -234,8 +233,8 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
})

ginkgo.It("Should honor timeout", func() {
policyFail := v1beta1.Fail
policyIgnore := v1beta1.Ignore
policyFail := admissionregistrationv1beta1.Fail
policyIgnore := admissionregistrationv1beta1.Ignore

ginkgo.By("Setting timeout (1s) shorter than webhook latency (5s)")
slowWebhookCleanup := registerSlowWebhook(f, context, &policyFail, pointer.Int32Ptr(1))
@@ -351,18 +350,18 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
Image: image,
},
}
d := &apps.Deployment{
d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@@ -422,26 +421,26 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
configName := webhookConfigName
// A webhook that cannot talk to server, with fail-open policy
failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
policyIgnore := v1beta1.Ignore
policyIgnore := admissionregistrationv1beta1.Ignore
failOpenHook.FailurePolicy = &policyIgnore

_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.ValidatingWebhook{
Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
{
Name: "deny-unwanted-pod-container-name-and-label.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/pods"),
@@ -452,9 +451,9 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
},
{
Name: "deny-unwanted-configmap-data.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update, v1beta1.Delete},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create, admissionregistrationv1beta1.Update, admissionregistrationv1beta1.Delete},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"configmaps"},
@@ -470,8 +469,8 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
},
},
},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/configmaps"),
@@ -503,26 +502,26 @@ func registerWebhookForAttachingPod(f *framework.Framework, context *certContext
configName := attachingPodWebhookConfigName
// A webhook that cannot talk to server, with fail-open policy
failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
policyIgnore := v1beta1.Ignore
policyIgnore := admissionregistrationv1beta1.Ignore
failOpenHook.FailurePolicy = &policyIgnore

_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.ValidatingWebhook{
Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
{
Name: "deny-attaching-pod.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Connect},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Connect},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods/attach"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/pods/attach"),
@@ -550,23 +549,23 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certCo
namespace := f.Namespace.Name
configName := mutatingWebhookConfigName

_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionregistrationv1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.MutatingWebhook{
Webhooks: []admissionregistrationv1beta1.MutatingWebhook{
{
Name: "adding-configmap-data-stage-1.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"configmaps"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/mutating-configmaps"),
@@ -577,16 +576,16 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certCo
},
{
Name: "adding-configmap-data-stage-2.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"configmaps"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/mutating-configmaps"),
@@ -627,23 +626,23 @@ func registerMutatingWebhookForPod(f *framework.Framework, context *certContext)
namespace := f.Namespace.Name
configName := podMutatingWebhookConfigName

_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionregistrationv1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.MutatingWebhook{
Webhooks: []admissionregistrationv1beta1.MutatingWebhook{
{
Name: "adding-init-container.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/mutating-pods"),
@@ -834,19 +833,19 @@ func testAttachingPodWebhook(f *framework.Framework) {

// failingWebhook returns a webhook with rule of create configmaps,
// but with an invalid client config so that server cannot communicate with it
func failingWebhook(namespace, name string) v1beta1.ValidatingWebhook {
return v1beta1.ValidatingWebhook{
func failingWebhook(namespace, name string) admissionregistrationv1beta1.ValidatingWebhook {
return admissionregistrationv1beta1.ValidatingWebhook{
Name: name,
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"configmaps"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/configmaps"),
@@ -865,7 +864,7 @@ func registerFailClosedWebhook(f *framework.Framework, context *certContext) fun
namespace := f.Namespace.Name
configName := webhookFailClosedConfigName
// A webhook that cannot talk to server, with fail-closed policy
policyFail := v1beta1.Fail
policyFail := admissionregistrationv1beta1.Fail
hook := failingWebhook(namespace, "fail-closed.k8s.io")
hook.FailurePolicy = &policyFail
hook.NamespaceSelector = &metav1.LabelSelector{
@@ -878,11 +877,11 @@ func registerFailClosedWebhook(f *framework.Framework, context *certContext) fun
},
}

_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.ValidatingWebhook{
Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
// Server cannot talk to this webhook, so it always fails.
// Because this webhook is configured fail-closed, request should be rejected after the call fails.
hook,
@@ -929,21 +928,21 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c

namespace := f.Namespace.Name
configName := validatingWebhookForWebhooksConfigName
failurePolicy := v1beta1.Fail
failurePolicy := admissionregistrationv1beta1.Fail

// This webhook denies all requests to Delete validating webhook configuration and
// mutating webhook configuration objects. It should never be called, however, because
// dynamic admission webhooks should not be called on requests involving webhook configuration objects.
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.ValidatingWebhook{
Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
{
Name: "deny-webhook-configuration-deletions.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Delete},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Delete},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{"admissionregistration.k8s.io"},
APIVersions: []string{"*"},
Resources: []string{
@@ -952,8 +951,8 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c
},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/always-deny"),
@@ -982,21 +981,21 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con

namespace := f.Namespace.Name
configName := mutatingWebhookForWebhooksConfigName
failurePolicy := v1beta1.Fail
failurePolicy := admissionregistrationv1beta1.Fail

// This webhook adds a label to all requests create to validating webhook configuration and
// mutating webhook configuration objects. It should never be called, however, because
// dynamic admission webhooks should not be called on requests involving webhook configuration objects.
_, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
_, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionregistrationv1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.MutatingWebhook{
Webhooks: []admissionregistrationv1beta1.MutatingWebhook{
{
Name: "add-label-to-webhook-configurations.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{"admissionregistration.k8s.io"},
APIVersions: []string{"*"},
Resources: []string{
@@ -1005,8 +1004,8 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con
},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/add-label"),
@@ -1037,26 +1036,26 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework) {
ginkgo.By("Creating a dummy validating-webhook-configuration object")

namespace := f.Namespace.Name
failurePolicy := v1beta1.Ignore
failurePolicy := admissionregistrationv1beta1.Ignore

mutatedValidatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
mutatedValidatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: dummyValidatingWebhookConfigName,
},
Webhooks: []v1beta1.ValidatingWebhook{
Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
{
Name: "dummy-validating-webhook.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
// This will not match any real resources so this webhook should never be called.
Rule: v1beta1.Rule{
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"invalid"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
// This path not recognized by the webhook service,
@@ -1087,24 +1086,24 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework) {

ginkgo.By("Creating a dummy mutating-webhook-configuration object")

mutatedMutatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
mutatedMutatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionregistrationv1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: dummyMutatingWebhookConfigName,
},
Webhooks: []v1beta1.MutatingWebhook{
Webhooks: []admissionregistrationv1beta1.MutatingWebhook{
{
Name: "dummy-mutating-webhook.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
// This will not match any real resources so this webhook should never be called.
Rule: v1beta1.Rule{
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"invalid"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
// This path not recognized by the webhook service,
@@ -1295,23 +1294,23 @@ func registerWebhookForCustomResource(f *framework.Framework, context *certConte

namespace := f.Namespace.Name
configName := crWebhookConfigName
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.ValidatingWebhook{
Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
{
Name: "deny-unwanted-custom-resource-data.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update, v1beta1.Delete},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create, admissionregistrationv1beta1.Update, admissionregistrationv1beta1.Delete},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{testcrd.Crd.Spec.Group},
APIVersions: servedAPIVersions(testcrd.Crd),
Resources: []string{testcrd.Crd.Spec.Names.Plural},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/custom-resource"),
@@ -1337,23 +1336,23 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c

namespace := f.Namespace.Name
configName := f.UniqueName
_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&admissionregistrationv1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.MutatingWebhook{
Webhooks: []admissionregistrationv1beta1.MutatingWebhook{
{
Name: "mutate-custom-resource-data-stage-1.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create, admissionregistrationv1beta1.Update},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{testcrd.Crd.Spec.Group},
APIVersions: servedAPIVersions(testcrd.Crd),
Resources: []string{testcrd.Crd.Spec.Names.Plural},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/mutating-custom-resource"),
@@ -1364,16 +1363,16 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c
},
{
Name: "mutate-custom-resource-data-stage-2.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{testcrd.Crd.Spec.Group},
APIVersions: servedAPIVersions(testcrd.Crd),
Resources: []string{testcrd.Crd.Spec.Names.Plural},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/mutating-custom-resource"),
@@ -1532,23 +1531,23 @@ func registerValidatingWebhookForCRD(f *framework.Framework, context *certContex
// label "webhook-e2e-test":"webhook-disallow"
// NOTE: Because tests are run in parallel and in an unpredictable order, it is critical
// that no other test attempts to create CRD with that label.
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.ValidatingWebhook{
Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
{
Name: "deny-crd-with-unwanted-label.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{"apiextensions.k8s.io"},
APIVersions: []string{"*"},
Resources: []string{"customresourcedefinitions"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/crd"),
@@ -1621,7 +1620,7 @@ func testCRDDenyWebhook(f *framework.Framework) {
}
}

func registerSlowWebhook(f *framework.Framework, context *certContext, policy *v1beta1.FailurePolicyType, timeout *int32) func() {
func registerSlowWebhook(f *framework.Framework, context *certContext, policy *admissionregistrationv1beta1.FailurePolicyType, timeout *int32) func() {
client := f.ClientSet
ginkgo.By("Registering slow webhook via the AdmissionRegistration API")

@@ -1638,23 +1637,23 @@ func registerSlowWebhook(f *framework.Framework, context *certContext, policy *v
_, err = client.CoreV1().Namespaces().Update(ns)
framework.ExpectNoError(err, "error labeling namespace %s", namespace)

_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.ValidatingWebhook{
Webhooks: []admissionregistrationv1beta1.ValidatingWebhook{
{
Name: "allow-configmap-with-delay-webhook.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
Rules: []admissionregistrationv1beta1.RuleWithOperations{{
Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create},
Rule: admissionregistrationv1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"configmaps"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{
Service: &admissionregistrationv1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/always-allow-delay-5s"),
@@ -22,7 +22,7 @@ import (
"strings"
"time"

apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -280,7 +280,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

e2elog.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err)

@@ -293,7 +293,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds)
firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
firstHash := first.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
gomega.Expect(first.Revision).To(gomega.Equal(int64(1)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)

@@ -316,7 +316,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
waitForHistoryCreated(c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
gomega.Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(gomega.Equal(firstHash))
gomega.Expect(cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]).NotTo(gomega.Equal(firstHash))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
})

@@ -329,7 +329,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

e2elog.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err)

@@ -342,7 +342,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
hash := cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
gomega.Expect(cur.Revision).To(gomega.Equal(int64(1)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)

@@ -371,7 +371,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds)
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
hash = cur.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
})
@@ -387,7 +387,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
e2elog.Logf("Create a RollingUpdate DaemonSet")
label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
framework.ExpectNoError(err)

@@ -398,7 +398,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
e2elog.Logf("Update the DaemonSet to trigger a rollout")
// We use a nonexistent image here, so that we make sure it won't finish
newImage := "foo:non-existent"
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
framework.ExpectNoError(err)
@@ -430,7 +430,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
gomega.Expect(len(newPods)).NotTo(gomega.Equal(0))

e2elog.Logf("Roll back the DaemonSet before rollout is complete")
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = image
})
framework.ExpectNoError(err)
@@ -456,12 +456,12 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
}

func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
return &apps.DaemonSet{
func newDaemonSet(dsName, image string, label map[string]string) *appsv1.DaemonSet {
return &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: dsName,
},
Spec: apps.DaemonSetSpec{
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: label,
},
@@ -576,7 +576,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
return newNode, nil
}

func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames []string) func() (bool, error) {
func checkDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNames []string) func() (bool, error) {
return func() (bool, error) {
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
@@ -615,14 +615,14 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames
}
}

func checkRunningOnAllNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) {
return func() (bool, error) {
nodeNames := schedulableNodes(f.ClientSet, ds)
return checkDaemonPodOnNodes(f, ds, nodeNames)()
}
}

func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string {
func schedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string {
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
nodeNames := make([]string, 0)
@@ -649,7 +649,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
}

// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
newPod := daemon.NewPod(ds, node.Name)
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node)
@@ -661,7 +661,7 @@ func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
return fit
}

func checkRunningOnNoNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func() (bool, error) {
return checkDaemonPodOnNodes(f, ds, make([]string, 0))
}

@@ -677,7 +677,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
return nil
}

func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
return func() (bool, error) {
podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
if err != nil {
@@ -718,7 +718,7 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS

func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
for _, pod := range podList.Items {
podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
podHash := pod.Labels[appsv1.DefaultDaemonSetUniqueLabelKey]
gomega.Expect(len(podHash)).To(gomega.BeNumerically(">", 0))
if len(hash) > 0 {
gomega.Expect(podHash).To(gomega.Equal(hash))
@@ -744,7 +744,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
framework.ExpectNoError(err, "error waiting for controllerrevisions to be created")
}

func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
@@ -753,13 +753,13 @@ func listDaemonHistories(c clientset.Interface, ns string, label map[string]stri
return historyList
}

func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *apps.ControllerRevision {
var curHistory *apps.ControllerRevision
func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet) *appsv1.ControllerRevision {
var curHistory *appsv1.ControllerRevision
foundCurHistories := 0
for i := range historyList.Items {
history := &historyList.Items[i]
// Every history should have the hash label
gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
gomega.Expect(len(history.Labels[appsv1.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
match, err := daemon.Match(ds, history)
framework.ExpectNoError(err)
if match {
@@ -25,9 +25,9 @@ import (
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -53,7 +53,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
nilRs *apps.ReplicaSet
|
||||
nilRs *appsv1.ReplicaSet
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Deployment", func() {
|
||||
@@ -182,11 +182,11 @@ func intOrStrP(num int) *intstr.IntOrString {
|
||||
return &intstr
|
||||
}
|
||||
|
||||
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
|
||||
return &extensions.DeploymentRollback{
|
||||
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensionsv1beta1.DeploymentRollback {
|
||||
return &extensionsv1beta1.DeploymentRollback{
|
||||
Name: name,
|
||||
UpdatedAnnotations: annotations,
|
||||
RollbackTo: extensions.RollbackConfig{Revision: revision},
|
||||
RollbackTo: extensionsv1beta1.RollbackConfig{Revision: revision},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -234,7 +234,7 @@ func testDeleteDeployment(f *framework.Framework) {
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
e2elog.Logf("Creating simple deployment %s", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
framework.ExpectNoError(err)
|
||||
@@ -281,7 +281,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-rolling-update-deployment"
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@@ -310,7 +310,7 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
// Create a deployment that brings up redis pods.
|
||||
deploymentName := "test-recreate-deployment"
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
|
||||
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, appsv1.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@@ -325,7 +325,7 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
|
||||
// Update deployment to delete redis pods and bring up nginx pods.
|
||||
e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = NginxImage
|
||||
})
|
||||
@@ -396,7 +396,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
}
|
||||
}
|
||||
}()
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
|
||||
_, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
framework.ExpectNoError(err)
|
||||
@@ -436,10 +436,10 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
|
||||
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
|
||||
deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
|
||||
MaxUnavailable: intOrStrP(0),
|
||||
MaxSurge: intOrStrP(1),
|
||||
}
|
||||
@@ -469,7 +469,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
|
||||
e2elog.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
|
||||
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
})
|
||||
@@ -499,7 +499,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
ensureReplicas(newRS, int32(0))
|
||||
}
|
||||
|
||||
func ensureReplicas(rs *apps.ReplicaSet, replicas int32) {
|
||||
func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
|
||||
gomega.Expect(*rs.Spec.Replicas).Should(gomega.Equal(replicas))
|
||||
gomega.Expect(rs.Status.Replicas).Should(gomega.Equal(replicas))
|
||||
}
|
||||
@@ -519,7 +519,7 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := NginxImage
|
||||
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
|
||||
deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
|
||||
e2elog.Logf("Creating deployment %s", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
createAnnotation := map[string]string{"action": "create", "author": "node"}
|
||||
@@ -542,7 +542,7 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
updatedDeploymentImage := RedisImage
|
||||
updatedDeploymentImageName := RedisImageName
|
||||
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
|
||||
deployment, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
|
||||
deployment, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
update.Annotations = updateAnnotation
|
||||
@@ -645,7 +645,7 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func randomScale(d *apps.Deployment, i int) {
|
||||
func randomScale(d *appsv1.Deployment, i int) {
|
||||
switch r := rand.Float32(); {
|
||||
case r < 0.3:
|
||||
e2elog.Logf("%02d: scaling up", i)
|
||||
@@ -670,7 +670,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
// Create a nginx deployment.
|
||||
deploymentName := "nginx"
|
||||
thirty := int32(30)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.ProgressDeadlineSeconds = &thirty
|
||||
d.Spec.RevisionHistoryLimit = &two
|
||||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
|
||||
@@ -688,7 +688,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
case n < 0.2:
|
||||
// trigger a new deployment
|
||||
e2elog.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
|
||||
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
|
||||
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
|
||||
randomScale(update, i)
|
||||
@@ -698,18 +698,18 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
case n < 0.4:
|
||||
// rollback to the previous version
|
||||
e2elog.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
|
||||
if update.Annotations == nil {
|
||||
update.Annotations = make(map[string]string)
|
||||
}
|
||||
update.Annotations[apps.DeprecatedRollbackTo] = "0"
|
||||
update.Annotations[appsv1.DeprecatedRollbackTo] = "0"
|
||||
})
|
||||
framework.ExpectNoError(err)
case n < 0.6:
|
||||
// just scaling
|
||||
e2elog.Logf("%02d: scaling deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
|
||||
randomScale(update, i)
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -718,14 +718,14 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
// toggling the deployment
|
||||
if deployment.Spec.Paused {
|
||||
e2elog.Logf("%02d: pausing deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
|
||||
update.Spec.Paused = true
|
||||
randomScale(update, i)
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
} else {
|
||||
e2elog.Logf("%02d: resuming deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
randomScale(update, i)
|
||||
})
|
||||
@@ -762,7 +762,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
if deployment.Spec.Paused {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
})
|
||||
}
|
||||
@@ -776,7 +776,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
e2elog.Logf("Checking deployment %q for a complete condition", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)
|
||||
err = e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, appsv1.DeploymentProgressing)
|
||||
framework.ExpectNoError(err)
|
||||
}
@@ -788,7 +788,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
e2elog.Logf("Creating Deployment %q", deploymentName)
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
framework.ExpectNoError(err)
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
@@ -815,7 +815,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
|
||||
e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
|
||||
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
framework.ExpectNoError(err)
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
@@ -845,8 +845,8 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Create a nginx deployment.
|
||||
deploymentName := "nginx-deployment"
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.Strategy.RollingUpdate = new(apps.RollingUpdateDeployment)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.Strategy.RollingUpdate = new(appsv1.RollingUpdateDeployment)
|
||||
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
|
||||
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
@@ -873,7 +873,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
// Update the deployment with a non-existent image so that the new replica set
|
||||
// will be blocked to simulate a partial rollout.
|
||||
e2elog.Logf("Updating deployment %q with a non-existent image", deploymentName)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -938,7 +938,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
// Scale the deployment to 30 replicas.
|
||||
newReplicas = int32(30)
|
||||
e2elog.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
|
||||
update.Spec.Replicas = &newReplicas
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -986,7 +986,7 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
|
||||
}
|
||||
}
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *apps.ReplicaSetList {
|
||||
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
|
||||
@@ -995,7 +995,7 @@ func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[strin
|
||||
return rsList
|
||||
}
func orphanDeploymentReplicaSets(c clientset.Interface, d *apps.Deployment) error {
|
||||
func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) error {
|
||||
trueVar := true
|
||||
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
@@ -23,9 +23,9 @@ import (
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@@ -168,7 +168,7 @@ var _ = SIGDescribe("DisruptionController", func() {
|
||||
pod, err := locateRunningPod(cs, ns)
|
||||
framework.ExpectNoError(err)
e := &policy.Eviction{
|
||||
e := &policyv1beta1.Eviction{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Namespace: ns,
|
||||
@@ -208,7 +208,7 @@ var _ = SIGDescribe("DisruptionController", func() {
|
||||
framework.ExpectNoError(err)
waitForPodsOrDie(cs, ns, 3) // make sure that they are running and so would be evictable with a different pdb
|
||||
e := &policy.Eviction{
|
||||
e := &policyv1beta1.Eviction{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Namespace: ns,
|
||||
@@ -228,12 +228,12 @@ var _ = SIGDescribe("DisruptionController", func() {
|
||||
})
func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) {
|
||||
pdb := policy.PodDisruptionBudget{
|
||||
pdb := policyv1beta1.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: policy.PodDisruptionBudgetSpec{
|
||||
Spec: policyv1beta1.PodDisruptionBudgetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
MinAvailable: &minAvailable,
|
||||
},
|
||||
@@ -244,12 +244,12 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable
|
||||
}
func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) {
|
||||
pdb := policy.PodDisruptionBudget{
|
||||
pdb := policyv1beta1.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: policy.PodDisruptionBudgetSpec{
|
||||
Spec: policyv1beta1.PodDisruptionBudgetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
MaxUnavailable: &maxUnavailable,
|
||||
},
|
||||
@@ -340,12 +340,12 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu
|
||||
}
|
||||
}
rs := &apps.ReplicaSet{
|
||||
rs := &appsv1.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "rs",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: apps.ReplicaSetSpec{
|
||||
Spec: appsv1.ReplicaSetSpec{
|
||||
Replicas: &size,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"foo": "bar"},
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"time"
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
@@ -38,14 +38,14 @@ import (
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *apps.ReplicaSet {
|
||||
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *appsv1.ReplicaSet {
|
||||
zero := int64(0)
|
||||
return &apps.ReplicaSet{
|
||||
return &appsv1.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: rsName,
|
||||
Labels: rsPodLabels,
|
||||
},
|
||||
Spec: apps.ReplicaSetSpec{
|
||||
Spec: appsv1.ReplicaSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: rsPodLabels,
|
||||
},
|
||||
@@ -220,7 +220,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
||||
}
|
||||
conditions = rs.Status.Conditions
cond := replicaset.GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
|
||||
cond := replicaset.GetCondition(rs.Status, appsv1.ReplicaSetReplicaFailure)
|
||||
return cond != nil, nil
})
|
||||
@@ -230,7 +230,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
||||
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
|
||||
rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
|
||||
rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) {
|
||||
x := int32(2)
|
||||
update.Spec.Replicas = &x
|
||||
})
|
||||
@@ -250,7 +250,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
|
||||
}
|
||||
conditions = rs.Status.Conditions
cond := replicaset.GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
|
||||
cond := replicaset.GetCondition(rs.Status, appsv1.ReplicaSetReplicaFailure)
|
||||
return cond == nil, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
@@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
klabels "k8s.io/apimachinery/pkg/labels"
|
||||
@@ -71,7 +71,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
}
|
||||
headlessSvcName := "test"
|
||||
var statefulPodMounts, podMounts []v1.VolumeMount
|
||||
var ss *apps.StatefulSet
|
||||
var ss *appsv1.StatefulSet
ginkgo.BeforeEach(func() {
|
||||
statefulPodMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
|
||||
@@ -285,10 +285,10 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHTTPProbe(ss)
|
||||
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
|
||||
return &apps.RollingUpdateStatefulSetStrategy{
|
||||
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
|
||||
Type: appsv1.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
|
||||
return &appsv1.RollingUpdateStatefulSetStrategy{
|
||||
Partition: func() *int32 {
|
||||
i := int32(3)
|
||||
return &i
|
||||
@@ -305,11 +305,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss.Namespace, ss.Name, updateRevision, currentRevision))
|
||||
pods := sst.GetPodList(ss)
|
||||
for i := range pods.Items {
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to currentRevision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
}
|
||||
newImage := NewNginxImage
|
||||
@@ -317,7 +317,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
|
||||
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -336,30 +336,30 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
oldImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
}
ginkgo.By("Performing a canary update")
|
||||
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
|
||||
return &apps.RollingUpdateStatefulSetStrategy{
|
||||
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
|
||||
Type: appsv1.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
|
||||
return &appsv1.RollingUpdateStatefulSetStrategy{
|
||||
Partition: func() *int32 {
|
||||
i := int32(2)
|
||||
return &i
|
||||
}()}
|
||||
}(),
|
||||
}
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
update.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
|
||||
return &apps.RollingUpdateStatefulSetStrategy{
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
|
||||
update.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
|
||||
Type: appsv1.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
|
||||
return &appsv1.RollingUpdateStatefulSetStrategy{
|
||||
Partition: func() *int32 {
|
||||
i := int32(2)
|
||||
return &i
|
||||
@@ -377,11 +377,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
oldImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
} else {
|
||||
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
|
||||
@@ -390,11 +390,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
newImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
updateRevision))
|
||||
}
|
||||
}
|
||||
@@ -413,11 +413,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
oldImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
} else {
|
||||
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
|
||||
@@ -426,23 +426,23 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
newImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
updateRevision))
|
||||
}
|
||||
}
ginkgo.By("Performing a phased rolling update")
|
||||
for i := int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) - 1; i >= 0; i-- {
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
update.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
|
||||
update.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
|
||||
Type: appsv1.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *appsv1.RollingUpdateStatefulSetStrategy {
|
||||
j := int32(i)
|
||||
return &apps.RollingUpdateStatefulSetStrategy{
|
||||
return &appsv1.RollingUpdateStatefulSetStrategy{
|
||||
Partition: &j,
|
||||
}
|
||||
}(),
|
||||
@@ -458,11 +458,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
oldImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
} else {
|
||||
gomega.Expect(pods.Items[i].Spec.Containers[0].Image).To(gomega.Equal(newImage),
|
||||
@@ -471,11 +471,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
newImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
fmt.Sprintf("Pod %s/%s has revision %s not equal to new revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
updateRevision))
|
||||
}
|
||||
}
|
||||
@@ -496,8 +496,8 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHTTPProbe(ss)
|
||||
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||
Type: apps.OnDeleteStatefulSetStrategyType,
|
||||
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
|
||||
Type: appsv1.OnDeleteStatefulSetStrategyType,
|
||||
}
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
framework.ExpectNoError(err)
|
||||
@@ -509,11 +509,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss.Namespace, ss.Name, updateRevision, currentRevision))
|
||||
pods := sst.GetPodList(ss)
|
||||
for i := range pods.Items {
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
}
@@ -525,11 +525,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss = sst.GetStatefulSet(ss.Namespace, ss.Name)
|
||||
pods = sst.GetPodList(ss)
|
||||
for i := range pods.Items {
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
}
|
||||
newImage := NewNginxImage
|
||||
@@ -537,7 +537,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By(fmt.Sprintf("Updating stateful set template: update image from %s to %s", oldImage, newImage))
|
||||
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -562,11 +562,11 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
newImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
fmt.Sprintf("Pod %s/%s has revision %s not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
updateRevision))
|
||||
}
|
||||
})
|
||||
@@ -666,7 +666,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
|
||||
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
|
||||
ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
|
||||
ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHTTPProbe(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
@@ -885,7 +885,7 @@ func kubectlExecWithRetries(args ...string) (out string) {
|
||||
}
type statefulPodTester interface {
|
||||
deploy(ns string) *apps.StatefulSet
|
||||
deploy(ns string) *appsv1.StatefulSet
|
||||
write(statefulPodIndex int, kv map[string]string)
|
||||
read(statefulPodIndex int, key string) string
|
||||
name() string
|
||||
@@ -922,7 +922,7 @@ func (c *clusterAppTester) run() {
|
||||
}
type zookeeperTester struct {
|
||||
ss *apps.StatefulSet
|
||||
ss *appsv1.StatefulSet
|
||||
tester *framework.StatefulSetTester
|
||||
}
@@ -930,7 +930,7 @@ func (z *zookeeperTester) name() string {
|
||||
return "zookeeper"
|
||||
}
func (z *zookeeperTester) deploy(ns string) *apps.StatefulSet {
|
||||
func (z *zookeeperTester) deploy(ns string) *appsv1.StatefulSet {
|
||||
z.ss = z.tester.CreateStatefulSet(zookeeperManifestPath, ns)
|
||||
return z.ss
|
||||
}
|
||||
@@ -952,7 +952,7 @@ func (z *zookeeperTester) read(statefulPodIndex int, key string) string {
|
||||
}
type mysqlGaleraTester struct {
|
||||
ss *apps.StatefulSet
|
||||
ss *appsv1.StatefulSet
|
||||
tester *framework.StatefulSetTester
|
||||
}
@@ -968,7 +968,7 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
|
||||
return kubectlExecWithRetries(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
|
||||
}
func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet {
|
||||
func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet {
|
||||
m.ss = m.tester.CreateStatefulSet(mysqlGaleraManifestPath, ns)
e2elog.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
|
||||
@@ -995,7 +995,7 @@ func (m *mysqlGaleraTester) read(statefulPodIndex int, key string) string {
|
||||
}
type redisTester struct {
|
||||
ss *apps.StatefulSet
|
||||
ss *appsv1.StatefulSet
|
||||
tester *framework.StatefulSetTester
|
||||
}
@@ -1008,7 +1008,7 @@ func (m *redisTester) redisExec(cmd, ns, podName string) string {
|
||||
return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
|
||||
}
func (m *redisTester) deploy(ns string) *apps.StatefulSet {
|
||||
func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
|
||||
m.ss = m.tester.CreateStatefulSet(redisManifestPath, ns)
|
||||
return m.ss
|
||||
}
|
||||
@@ -1026,7 +1026,7 @@ func (m *redisTester) read(statefulPodIndex int, key string) string {
|
||||
}
type cockroachDBTester struct {
|
||||
ss *apps.StatefulSet
|
||||
ss *appsv1.StatefulSet
|
||||
tester *framework.StatefulSetTester
|
||||
}
@@ -1039,7 +1039,7 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
|
||||
return framework.RunKubectlOrDie(fmt.Sprintf("--namespace=%v", ns), "exec", podName, "--", "/bin/sh", "-c", cmd)
|
||||
}
func (c *cockroachDBTester) deploy(ns string) *apps.StatefulSet {
|
||||
func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
|
||||
c.ss = c.tester.CreateStatefulSet(cockroachDBManifestPath, ns)
|
||||
e2elog.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
|
||||
for _, cmd := range []string{
|
||||
@@ -1087,7 +1087,7 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
// This function is used by two tests to test StatefulSet rollbacks: one using
|
||||
// PVCs and one using no storage.
|
||||
func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
|
||||
func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHTTPProbe(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
@@ -1100,11 +1100,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
|
||||
ss.Namespace, ss.Name, updateRevision, currentRevision))
|
||||
pods := sst.GetPodList(ss)
|
||||
for i := range pods.Items {
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
}
|
||||
sst.SortStatefulPods(pods)
|
||||
@@ -1116,7 +1116,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
ginkgo.By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
|
||||
gomega.Expect(oldImage).NotTo(gomega.Equal(newImage), "Incorrect test setup: should update to a different image")
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -1147,11 +1147,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
newImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(updateRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
updateRevision))
|
||||
}
@@ -1161,7 +1161,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
|
||||
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
|
||||
priorRevision := currentRevision
|
||||
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *appsv1.StatefulSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = oldImage
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -1192,11 +1192,11 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
oldImage))
|
||||
gomega.Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(gomega.Equal(priorRevision),
|
||||
gomega.Expect(pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel]).To(gomega.Equal(priorRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel],
|
||||
priorRevision))
|
||||
}
|
||||
}
@@ -22,8 +22,8 @@ import (
|
||||
"strings"
|
||||
"time"
apps "k8s.io/api/apps/v1"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
apiextensionclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
|
||||
@@ -68,18 +68,18 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
|
||||
})
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch pods.", func() {
|
||||
pod := &apiv1.Pod{
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit-pod",
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
}},
|
||||
},
|
||||
}
|
||||
updatePod := func(pod *apiv1.Pod) {}
|
||||
updatePod := func(pod *v1.Pod) {}
f.PodClient().CreateSync(pod)
@@ -203,7 +203,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch deployments.", func() {
|
||||
podLabels := map[string]string{"name": "audit-deployment-pod"}
|
||||
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType)
|
||||
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), appsv1.RecreateDeploymentStrategyType)
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(d)
|
||||
framework.ExpectNoError(err, "failed to create audit-deployment")
|
||||
@@ -329,7 +329,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
|
||||
})
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch configmaps.", func() {
|
||||
configMap := &apiv1.ConfigMap{
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit-configmap",
|
||||
},
|
||||
@@ -462,7 +462,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
|
||||
})
ginkgo.It("should audit API calls to create, get, update, patch, delete, list, watch secrets.", func() {
|
||||
secret := &apiv1.Secret{
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit-secret",
|
||||
},
@@ -23,8 +23,8 @@ import (
"github.com/onsi/ginkgo"
auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -59,26 +59,26 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
|
||||
anonymousClient, err := clientset.NewForConfig(config)
|
||||
framework.ExpectNoError(err, "failed to create the anonymous client")
_, err = f.ClientSet.CoreV1().Namespaces().Create(&apiv1.Namespace{
|
||||
_, err = f.ClientSet.CoreV1().Namespaces().Create(&v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit",
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to create namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace).Create(&apiv1.Pod{
|
||||
_, err = f.ClientSet.CoreV1().Pods(namespace).Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit-proxy",
|
||||
Labels: map[string]string{
|
||||
"app": "audit",
|
||||
},
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "proxy",
|
||||
Image: imageutils.GetE2EImage(imageutils.AuditProxy),
|
||||
Ports: []apiv1.ContainerPort{
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
ContainerPort: 8080,
|
||||
},
|
||||
@@ -89,12 +89,12 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to create proxy pod")
_, err = f.ClientSet.CoreV1().Services(namespace).Create(&apiv1.Service{
|
||||
_, err = f.ClientSet.CoreV1().Services(namespace).Create(&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit",
|
||||
},
|
||||
Spec: apiv1.ServiceSpec{
|
||||
Ports: []apiv1.ServicePort{
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Port: 80,
|
||||
TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: 8080},
|
||||
@@ -131,22 +131,22 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
podURL := fmt.Sprintf("http://%s:8080", podIP)
|
||||
// create audit sink
|
||||
sink := auditregv1alpha1.AuditSink{
|
||||
sink := auditregistrationv1alpha1.AuditSink{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
},
|
||||
Spec: auditregv1alpha1.AuditSinkSpec{
|
||||
Policy: auditregv1alpha1.Policy{
|
||||
Level: auditregv1alpha1.LevelRequestResponse,
|
||||
Stages: []auditregv1alpha1.Stage{
|
||||
auditregv1alpha1.StageRequestReceived,
|
||||
auditregv1alpha1.StageResponseStarted,
|
||||
auditregv1alpha1.StageResponseComplete,
|
||||
auditregv1alpha1.StagePanic,
|
||||
Spec: auditregistrationv1alpha1.AuditSinkSpec{
|
||||
Policy: auditregistrationv1alpha1.Policy{
|
||||
Level: auditregistrationv1alpha1.LevelRequestResponse,
|
||||
Stages: []auditregistrationv1alpha1.Stage{
|
||||
auditregistrationv1alpha1.StageRequestReceived,
|
||||
auditregistrationv1alpha1.StageResponseStarted,
|
||||
auditregistrationv1alpha1.StageResponseComplete,
|
||||
auditregistrationv1alpha1.StagePanic,
|
||||
},
|
||||
},
|
||||
Webhook: auditregv1alpha1.Webhook{
|
||||
ClientConfig: auditregv1alpha1.WebhookClientConfig{
|
||||
Webhook: auditregistrationv1alpha1.Webhook{
|
||||
ClientConfig: auditregistrationv1alpha1.WebhookClientConfig{
|
||||
URL: &podURL,
|
||||
},
|
||||
},
|
||||
@@ -182,18 +182,18 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
|
||||
// https://github.com/kubernetes/kubernetes/issues/70818
|
||||
{
|
||||
func() {
|
||||
pod := &apiv1.Pod{
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit-pod",
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
}},
|
||||
},
|
||||
}
|
||||
updatePod := func(pod *apiv1.Pod) {}
|
||||
updatePod := func(pod *v1.Pod) {}
f.PodClient().CreateSync(pod)
@@ -20,9 +20,7 @@ import (
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"time"
"k8s.io/api/certificates/v1beta1"
|
||||
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
v1beta1client "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
|
||||
@@ -30,6 +28,7 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/utils"
|
||||
"time"
"github.com/onsi/ginkgo"
|
||||
)
|
||||
@@ -52,16 +51,16 @@ var _ = SIGDescribe("Certificates API", func() {
|
||||
csrb, err := cert.MakeCSR(pk, &pkix.Name{CommonName: commonName, Organization: []string{"system:masters"}}, nil, nil)
|
||||
framework.ExpectNoError(err)
csr := &v1beta1.CertificateSigningRequest{
|
||||
csr := &certificatesv1beta1.CertificateSigningRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: commonName + "-",
|
||||
},
|
||||
Spec: v1beta1.CertificateSigningRequestSpec{
|
||||
Spec: certificatesv1beta1.CertificateSigningRequestSpec{
|
||||
Request: csrb,
|
||||
Usages: []v1beta1.KeyUsage{
|
||||
v1beta1.UsageSigning,
|
||||
v1beta1.UsageKeyEncipherment,
|
||||
v1beta1.UsageClientAuth,
|
||||
Usages: []certificatesv1beta1.KeyUsage{
|
||||
certificatesv1beta1.UsageSigning,
|
||||
certificatesv1beta1.UsageKeyEncipherment,
|
||||
certificatesv1beta1.UsageClientAuth,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -75,9 +74,9 @@ var _ = SIGDescribe("Certificates API", func() {
e2elog.Logf("approving CSR")
|
||||
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
|
||||
csr.Status.Conditions = []v1beta1.CertificateSigningRequestCondition{
|
||||
csr.Status.Conditions = []certificatesv1beta1.CertificateSigningRequestCondition{
|
||||
{
|
||||
Type: v1beta1.CertificateApproved,
|
||||
Type: certificatesv1beta1.CertificateApproved,
|
||||
Reason: "E2E",
|
||||
Message: "Set from an e2e test",
|
||||
},
@@ -17,7 +17,7 @@ limitations under the License.
|
||||
package auth
import (
|
||||
batch "k8s.io/api/batch/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
@@ -33,11 +33,11 @@ var _ = SIGDescribe("Metadata Concealment", func() {
|
||||
ginkgo.It("should run a check-metadata-concealment job to completion", func() {
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
ginkgo.By("Creating a job")
|
||||
job := &batch.Job{
|
||||
job := &batchv1.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "check-metadata-concealment",
|
||||
},
|
||||
Spec: batch.JobSpec{
|
||||
Spec: batchv1.JobSpec{
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "check-metadata-concealment",
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -208,7 +208,7 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) {
|
||||
}
// createAndBindPSP creates a PSP in the policy API group.
|
||||
func createAndBindPSP(f *framework.Framework, pspTemplate *policy.PodSecurityPolicy) (psp *policy.PodSecurityPolicy, cleanup func()) {
|
||||
func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecurityPolicy) (psp *policyv1beta1.PodSecurityPolicy, cleanup func()) {
|
||||
// Create the PodSecurityPolicy object.
|
||||
psp = pspTemplate.DeepCopy()
|
||||
// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
|
||||
@@ -274,35 +274,35 @@ func restrictedPod(name string) *v1.Pod {
|
||||
}
// privilegedPSPInPolicy creates a PodSecurityPolicy (in the "policy" API Group) that allows everything.
|
||||
func privilegedPSP(name string) *policy.PodSecurityPolicy {
|
||||
return &policy.PodSecurityPolicy{
|
||||
func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
|
||||
return &policyv1beta1.PodSecurityPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
|
||||
},
|
||||
Spec: policy.PodSecurityPolicySpec{
|
||||
Spec: policyv1beta1.PodSecurityPolicySpec{
|
||||
Privileged: true,
|
||||
AllowPrivilegeEscalation: utilpointer.BoolPtr(true),
|
||||
AllowedCapabilities: []v1.Capability{"*"},
|
||||
Volumes: []policy.FSType{policy.All},
|
||||
Volumes: []policyv1beta1.FSType{policyv1beta1.All},
|
||||
HostNetwork: true,
|
||||
HostPorts: []policy.HostPortRange{{Min: 0, Max: 65535}},
|
||||
HostPorts: []policyv1beta1.HostPortRange{{Min: 0, Max: 65535}},
|
||||
HostIPC: true,
|
||||
HostPID: true,
|
||||
RunAsUser: policy.RunAsUserStrategyOptions{
|
||||
Rule: policy.RunAsUserStrategyRunAsAny,
|
||||
RunAsUser: policyv1beta1.RunAsUserStrategyOptions{
|
||||
Rule: policyv1beta1.RunAsUserStrategyRunAsAny,
|
||||
},
|
||||
RunAsGroup: &policy.RunAsGroupStrategyOptions{
|
||||
Rule: policy.RunAsGroupStrategyRunAsAny,
|
||||
RunAsGroup: &policyv1beta1.RunAsGroupStrategyOptions{
|
||||
Rule: policyv1beta1.RunAsGroupStrategyRunAsAny,
|
||||
},
|
||||
SELinux: policy.SELinuxStrategyOptions{
|
||||
Rule: policy.SELinuxStrategyRunAsAny,
|
||||
SELinux: policyv1beta1.SELinuxStrategyOptions{
|
||||
Rule: policyv1beta1.SELinuxStrategyRunAsAny,
|
||||
},
|
||||
SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
|
||||
Rule: policy.SupplementalGroupsStrategyRunAsAny,
|
||||
SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{
|
||||
Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny,
|
||||
},
|
||||
FSGroup: policy.FSGroupStrategyOptions{
|
||||
Rule: policy.FSGroupStrategyRunAsAny,
|
||||
FSGroup: policyv1beta1.FSGroupStrategyOptions{
|
||||
Rule: policyv1beta1.FSGroupStrategyRunAsAny,
|
||||
},
|
||||
ReadOnlyRootFilesystem: false,
|
||||
},
|
||||
@@ -310,8 +310,8 @@ func privilegedPSP(name string) *policy.PodSecurityPolicy {
|
||||
}
// restrictedPSPInPolicy creates a PodSecurityPolicy (in the "policy" API Group) that is most strict.
|
||||
func restrictedPSP(name string) *policy.PodSecurityPolicy {
|
||||
return &policy.PodSecurityPolicy{
|
||||
func restrictedPSP(name string) *policyv1beta1.PodSecurityPolicy {
|
||||
return &policyv1beta1.PodSecurityPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: map[string]string{
|
||||
@@ -321,7 +321,7 @@ func restrictedPSP(name string) *policy.PodSecurityPolicy {
|
||||
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
|
||||
},
|
||||
},
|
||||
Spec: policy.PodSecurityPolicySpec{
|
||||
Spec: policyv1beta1.PodSecurityPolicySpec{
|
||||
Privileged: false,
|
||||
AllowPrivilegeEscalation: utilpointer.BoolPtr(false),
|
||||
RequiredDropCapabilities: []v1.Capability{
|
||||
@@ -337,32 +337,32 @@ func restrictedPSP(name string) *policy.PodSecurityPolicy {
|
||||
"SETUID",
|
||||
"SYS_CHROOT",
|
||||
},
|
||||
Volumes: []policy.FSType{
|
||||
policy.ConfigMap,
|
||||
policy.EmptyDir,
|
||||
policy.PersistentVolumeClaim,
|
||||
Volumes: []policyv1beta1.FSType{
|
||||
policyv1beta1.ConfigMap,
|
||||
policyv1beta1.EmptyDir,
|
||||
policyv1beta1.PersistentVolumeClaim,
|
||||
"projected",
|
||||
policy.Secret,
|
||||
policyv1beta1.Secret,
|
||||
},
|
||||
HostNetwork: false,
|
||||
HostIPC: false,
|
||||
HostPID: false,
|
||||
RunAsUser: policy.RunAsUserStrategyOptions{
|
||||
Rule: policy.RunAsUserStrategyMustRunAsNonRoot,
|
||||
RunAsUser: policyv1beta1.RunAsUserStrategyOptions{
|
||||
Rule: policyv1beta1.RunAsUserStrategyMustRunAsNonRoot,
|
||||
},
|
||||
RunAsGroup: &policy.RunAsGroupStrategyOptions{
|
||||
Rule: policy.RunAsGroupStrategyMustRunAs,
|
||||
Ranges: []policy.IDRange{
|
||||
RunAsGroup: &policyv1beta1.RunAsGroupStrategyOptions{
|
||||
Rule: policyv1beta1.RunAsGroupStrategyMustRunAs,
|
||||
Ranges: []policyv1beta1.IDRange{
|
||||
{Min: nobodyUser, Max: nobodyUser}},
|
||||
},
|
||||
SELinux: policy.SELinuxStrategyOptions{
|
||||
Rule: policy.SELinuxStrategyRunAsAny,
|
||||
SELinux: policyv1beta1.SELinuxStrategyOptions{
|
||||
Rule: policyv1beta1.SELinuxStrategyRunAsAny,
|
||||
},
|
||||
SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
|
||||
Rule: policy.SupplementalGroupsStrategyRunAsAny,
|
||||
SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{
|
||||
Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny,
|
||||
},
|
||||
FSGroup: policy.FSGroupStrategyOptions{
|
||||
Rule: policy.FSGroupStrategyRunAsAny,
|
||||
FSGroup: policyv1beta1.FSGroupStrategyOptions{
|
||||
Rule: policyv1beta1.FSGroupStrategyRunAsAny,
|
||||
},
|
||||
ReadOnlyRootFilesystem: false,
|
||||
},
@@ -29,8 +29,8 @@ import (
|
||||
"time"
"k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
schedulerapi "k8s.io/api/scheduling/v1"
|
||||
policyv1beta1 "k8s.io/api/policy/v1beta1"
|
||||
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
@@ -1015,12 +1015,12 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
ginkgo.By("Create a PodDisruptionBudget")
|
||||
minAvailable := intstr.FromInt(numPods - pdbSize)
|
||||
pdb := &policy.PodDisruptionBudget{
|
||||
pdb := &policyv1beta1.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test_pdb",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: policy.PodDisruptionBudgetSpec{
|
||||
Spec: policyv1beta1.PodDisruptionBudgetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: labelMap},
|
||||
MinAvailable: &minAvailable,
|
||||
},
|
||||
@@ -1891,12 +1891,12 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
|
||||
labelMap := map[string]string{"k8s-app": pdbData.label}
|
||||
pdbName := fmt.Sprintf("test-pdb-for-%v", pdbData.label)
|
||||
minAvailable := intstr.FromInt(pdbData.minAvailable)
|
||||
pdb := &policy.PodDisruptionBudget{
|
||||
pdb := &policyv1beta1.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pdbName,
|
||||
Namespace: "kube-system",
|
||||
},
|
||||
Spec: policy.PodDisruptionBudgetSpec{
|
||||
Spec: policyv1beta1.PodDisruptionBudgetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: labelMap},
|
||||
MinAvailable: &minAvailable,
|
||||
},
|
||||
@@ -1917,7 +1917,7 @@ func createPriorityClasses(f *framework.Framework) func() {
|
||||
highPriorityClassName: 1000,
|
||||
}
|
||||
for className, priority := range priorityClasses {
|
||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
|
||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
|
||||
if err != nil {
|
||||
klog.Errorf("Error creating priority class: %v", err)
|
||||
}
@@ -24,7 +24,7 @@ import (
|
||||
gcm "google.golang.org/api/monitoring/v3"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
as "k8s.io/api/autoscaling/v2beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@@ -222,7 +222,7 @@ type CustomMetricTestCase struct {
|
||||
hpa *as.HorizontalPodAutoscaler
|
||||
kubeClient clientset.Interface
|
||||
deployment *appsv1.Deployment
|
||||
pod *corev1.Pod
|
||||
pod *v1.Pod
|
||||
initialReplicas int
|
||||
scaledReplicas int
|
||||
}
|
||||
@@ -285,7 +285,7 @@ func (tc *CustomMetricTestCase) Run() {
|
||||
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas)
|
||||
}
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *corev1.Pod) error {
|
||||
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error {
|
||||
if deployment != nil {
|
||||
_, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
|
||||
if err != nil {
|
||||
@@ -301,7 +301,7 @@ func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, dep
|
||||
return nil
|
||||
}
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *corev1.Pod) {
|
||||
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) {
|
||||
if deployment != nil {
|
||||
_ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
@@ -19,7 +19,7 @@ package common
|
||||
import (
|
||||
"fmt"
api "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
@@ -58,7 +58,7 @@ func LoadAppArmorProfiles(f *framework.Framework) {
|
||||
// CreateAppArmorTestPod creates a pod that tests apparmor profile enforcement. The pod exits with
|
||||
// an error code if the profile is incorrectly enforced. If runOnce is true the pod will exit after
|
||||
// a single test, otherwise it will repeat the test every 1 second until failure.
|
||||
func CreateAppArmorTestPod(f *framework.Framework, unconfined bool, runOnce bool) *api.Pod {
|
||||
func CreateAppArmorTestPod(f *framework.Framework, unconfined bool, runOnce bool) *v1.Pod {
|
||||
profile := "localhost/" + appArmorProfilePrefix + f.Namespace.Name
|
||||
testCmd := fmt.Sprintf(`
|
||||
if touch %[1]s; then
|
||||
@@ -92,9 +92,9 @@ sleep 1
|
||||
done`, testCmd)
|
||||
}
loaderAffinity := &api.Affinity{
|
||||
PodAffinity: &api.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{
|
||||
loaderAffinity := &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
|
||||
Namespaces: []string{f.Namespace.Name},
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{loaderLabelKey: loaderLabelValue},
|
||||
@@ -104,7 +104,7 @@ done`, testCmd)
|
||||
},
|
||||
}
pod := &api.Pod{
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "test-apparmor-",
|
||||
Annotations: map[string]string{
|
||||
@@ -114,14 +114,14 @@ done`, testCmd)
|
||||
"test": "apparmor",
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: loaderAffinity,
|
||||
Containers: []api.Container{{
|
||||
Containers: []v1.Container{{
|
||||
Name: "test",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", testCmd},
|
||||
}},
|
||||
RestartPolicy: api.RestartPolicyNever,
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
@@ -157,7 +157,7 @@ profile %s flags=(attach_disconnected) {
|
||||
}
|
||||
`, profileName, appArmorDeniedPath, appArmorAllowedPath)
cm := &api.ConfigMap{
|
||||
cm := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "apparmor-profiles",
|
||||
Namespace: f.Namespace.Name,
|
||||
@@ -173,26 +173,26 @@ profile %s flags=(attach_disconnected) {
|
||||
func createAppArmorProfileLoader(f *framework.Framework) {
|
||||
True := true
|
||||
One := int32(1)
|
||||
loader := &api.ReplicationController{
|
||||
loader := &v1.ReplicationController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "apparmor-loader",
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: api.ReplicationControllerSpec{
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: &One,
|
||||
Template: &api.PodTemplateSpec{
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{loaderLabelKey: loaderLabelValue},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "apparmor-loader",
|
||||
Image: imageutils.GetE2EImage(imageutils.AppArmorLoader),
|
||||
Args: []string{"-poll", "10s", "/profiles"},
|
||||
SecurityContext: &api.SecurityContext{
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &True,
|
||||
},
|
||||
VolumeMounts: []api.VolumeMount{{
|
||||
VolumeMounts: []v1.VolumeMount{{
|
||||
Name: "sys",
|
||||
MountPath: "/sys",
|
||||
ReadOnly: true,
|
||||
@@ -206,25 +206,25 @@ func createAppArmorProfileLoader(f *framework.Framework) {
|
||||
ReadOnly: true,
|
||||
}},
|
||||
}},
|
||||
Volumes: []api.Volume{{
|
||||
Volumes: []v1.Volume{{
|
||||
Name: "sys",
|
||||
VolumeSource: api.VolumeSource{
|
||||
HostPath: &api.HostPathVolumeSource{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{
|
||||
Path: "/sys",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "apparmor-includes",
|
||||
VolumeSource: api.VolumeSource{
|
||||
HostPath: &api.HostPathVolumeSource{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{
|
||||
Path: "/etc/apparmor.d",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "profiles",
|
||||
VolumeSource: api.VolumeSource{
|
||||
ConfigMap: &api.ConfigMapVolumeSource{
|
||||
LocalObjectReference: api.LocalObjectReference{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: "apparmor-profiles",
|
||||
},
|
||||
},
|
||||
@@ -241,7 +241,7 @@ func createAppArmorProfileLoader(f *framework.Framework) {
|
||||
getRunningLoaderPod(f)
|
||||
}
|
||||
|
||||
func getRunningLoaderPod(f *framework.Framework) *api.Pod {
|
||||
func getRunningLoaderPod(f *framework.Framework) *v1.Pod {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{loaderLabelKey: loaderLabelValue}))
|
||||
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
|
||||
framework.ExpectNoError(err, "Failed to schedule apparmor-loader Pod")
|
||||
|
@@ -20,8 +20,8 @@ import (
"fmt"
"time"

coordv1beta1 "k8s.io/api/coordination/v1beta1"
corev1 "k8s.io/api/core/v1"
coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
@@ -48,10 +48,10 @@ var _ = framework.KubeDescribe("NodeLease", func() {
|
||||
|
||||
ginkgo.Context("when the NodeLease feature is enabled", func() {
|
||||
ginkgo.It("the kubelet should create and update a lease in the kube-node-lease namespace", func() {
|
||||
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
|
||||
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(v1.NamespaceNodeLease)
|
||||
var (
|
||||
err error
|
||||
lease *coordv1beta1.Lease
|
||||
lease *coordinationv1beta1.Lease
|
||||
)
|
||||
ginkgo.By("check that lease for this Kubelet exists in the kube-node-lease namespace")
|
||||
gomega.Eventually(func() error {
|
||||
@@ -91,9 +91,9 @@ var _ = framework.KubeDescribe("NodeLease", func() {
|
||||
|
||||
ginkgo.By("wait until there is node lease")
|
||||
var err error
|
||||
var lease *coordv1beta1.Lease
|
||||
var lease *coordinationv1beta1.Lease
|
||||
gomega.Eventually(func() error {
|
||||
lease, err = f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
|
||||
lease, err = f.ClientSet.CoordinationV1beta1().Leases(v1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -154,23 +154,23 @@ var _ = framework.KubeDescribe("NodeLease", func() {
|
||||
// run controller manager, i.e., no node lifecycle controller.
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
gomega.Expect(err).To(gomega.BeNil())
|
||||
_, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady)
|
||||
gomega.Expect(readyCondition.Status).To(gomega.Equal(corev1.ConditionTrue))
|
||||
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
gomega.Expect(readyCondition.Status).To(gomega.Equal(v1.ConditionTrue))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, corev1.NodeStatus) {
|
||||
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) {
|
||||
node, err := clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
gomega.Expect(err).To(gomega.BeNil())
|
||||
_, readyCondition := testutils.GetNodeCondition(&node.Status, corev1.NodeReady)
|
||||
gomega.Expect(readyCondition.Status).To(gomega.Equal(corev1.ConditionTrue))
|
||||
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
|
||||
gomega.Expect(readyCondition.Status).To(gomega.Equal(v1.ConditionTrue))
|
||||
heartbeatTime := readyCondition.LastHeartbeatTime.Time
|
||||
readyCondition.LastHeartbeatTime = metav1.Time{}
|
||||
return heartbeatTime, node.Status
|
||||
}
|
||||
|
||||
func expectLease(lease *coordv1beta1.Lease, nodeName string) error {
|
||||
func expectLease(lease *coordinationv1beta1.Lease, nodeName string) error {
|
||||
// expect values for HolderIdentity, LeaseDurationSeconds, and RenewTime
|
||||
if lease.Spec.HolderIdentity == nil {
|
||||
return fmt.Errorf("Spec.HolderIdentity should not be nil")
|
||||
|
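As a reading aid for the lease hunks above, a brief sketch of a call site written with the renamed aliases; the clientset variable, node name, and error handling are placeholders rather than code from this change:

	// Node leases live in the kube-node-lease namespace, one Lease object per node.
	lease, err := cs.CoordinationV1beta1().Leases(v1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// The holder identity is expected to match the node that renews the lease;
	// lease here is a *coordinationv1beta1.Lease.
	if lease.Spec.HolderIdentity == nil || *lease.Spec.HolderIdentity != nodeName {
		return fmt.Errorf("unexpected holder for lease of node %q", nodeName)
	}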
@@ -23,10 +23,10 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
rbac "k8s.io/api/rbac/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -307,9 +307,9 @@ func (f *Framework) PatchNamespace(item *string) {
|
||||
|
||||
func (f *Framework) patchItemRecursively(item interface{}) error {
|
||||
switch item := item.(type) {
|
||||
case *rbac.Subject:
|
||||
case *rbacv1.Subject:
|
||||
f.PatchNamespace(&item.Namespace)
|
||||
case *rbac.RoleRef:
|
||||
case *rbacv1.RoleRef:
|
||||
// TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
|
||||
// which contains all role names that are defined cluster-wide before the test starts?
|
||||
// All those names are exempt from renaming. That list could be populated by querying
|
||||
@@ -317,21 +317,21 @@ func (f *Framework) patchItemRecursively(item interface{}) error {
|
||||
if item.Name != "e2e-test-privileged-psp" {
|
||||
f.PatchName(&item.Name)
|
||||
}
|
||||
case *rbac.ClusterRole:
|
||||
case *rbacv1.ClusterRole:
|
||||
f.PatchName(&item.Name)
|
||||
case *rbac.Role:
|
||||
case *rbacv1.Role:
|
||||
f.PatchNamespace(&item.Namespace)
|
||||
// Roles are namespaced, but because for RoleRef above we don't
|
||||
// know whether the referenced role is a ClusterRole or Role
|
||||
// and therefore always renames, we have to do the same here.
|
||||
f.PatchName(&item.Name)
|
||||
case *storage.StorageClass:
|
||||
case *storagev1.StorageClass:
|
||||
f.PatchName(&item.Name)
|
||||
case *v1.ServiceAccount:
|
||||
f.PatchNamespace(&item.ObjectMeta.Namespace)
|
||||
case *v1.Secret:
|
||||
f.PatchNamespace(&item.ObjectMeta.Namespace)
|
||||
case *rbac.ClusterRoleBinding:
|
||||
case *rbacv1.ClusterRoleBinding:
|
||||
f.PatchName(&item.Name)
|
||||
for i := range item.Subjects {
|
||||
if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
|
||||
@@ -341,7 +341,7 @@ func (f *Framework) patchItemRecursively(item interface{}) error {
|
||||
if err := f.patchItemRecursively(&item.RoleRef); err != nil {
|
||||
return errors.Wrapf(err, "%T", f)
|
||||
}
|
||||
case *rbac.RoleBinding:
|
||||
case *rbacv1.RoleBinding:
|
||||
f.PatchNamespace(&item.Namespace)
|
||||
for i := range item.Subjects {
|
||||
if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
|
||||
@@ -353,9 +353,9 @@ func (f *Framework) patchItemRecursively(item interface{}) error {
|
||||
}
|
||||
case *v1.Service:
|
||||
f.PatchNamespace(&item.ObjectMeta.Namespace)
|
||||
case *apps.StatefulSet:
|
||||
case *appsv1.StatefulSet:
|
||||
f.PatchNamespace(&item.ObjectMeta.Namespace)
|
||||
case *apps.DaemonSet:
|
||||
case *appsv1.DaemonSet:
|
||||
f.PatchNamespace(&item.ObjectMeta.Namespace)
|
||||
default:
|
||||
return errors.Errorf("missing support for patching item of type %T", item)
|
||||
@@ -391,11 +391,11 @@ func (*serviceAccountFactory) Create(f *Framework, i interface{}) (func() error,
|
||||
type clusterRoleFactory struct{}
|
||||
|
||||
func (f *clusterRoleFactory) New() runtime.Object {
|
||||
return &rbac.ClusterRole{}
|
||||
return &rbacv1.ClusterRole{}
|
||||
}
|
||||
|
||||
func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, error) {
|
||||
item, ok := i.(*rbac.ClusterRole)
|
||||
item, ok := i.(*rbacv1.ClusterRole)
|
||||
if !ok {
|
||||
return nil, errorItemNotSupported
|
||||
}
|
||||
@@ -413,11 +413,11 @@ func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, er
|
||||
type clusterRoleBindingFactory struct{}
|
||||
|
||||
func (f *clusterRoleBindingFactory) New() runtime.Object {
|
||||
return &rbac.ClusterRoleBinding{}
|
||||
return &rbacv1.ClusterRoleBinding{}
|
||||
}
|
||||
|
||||
func (*clusterRoleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
|
||||
item, ok := i.(*rbac.ClusterRoleBinding)
|
||||
item, ok := i.(*rbacv1.ClusterRoleBinding)
|
||||
if !ok {
|
||||
return nil, errorItemNotSupported
|
||||
}
|
||||
@@ -434,11 +434,11 @@ func (*clusterRoleBindingFactory) Create(f *Framework, i interface{}) (func() er
|
||||
type roleFactory struct{}
|
||||
|
||||
func (f *roleFactory) New() runtime.Object {
|
||||
return &rbac.Role{}
|
||||
return &rbacv1.Role{}
|
||||
}
|
||||
|
||||
func (*roleFactory) Create(f *Framework, i interface{}) (func() error, error) {
|
||||
item, ok := i.(*rbac.Role)
|
||||
item, ok := i.(*rbacv1.Role)
|
||||
if !ok {
|
||||
return nil, errorItemNotSupported
|
||||
}
|
||||
@@ -455,11 +455,11 @@ func (*roleFactory) Create(f *Framework, i interface{}) (func() error, error) {
|
||||
type roleBindingFactory struct{}
|
||||
|
||||
func (f *roleBindingFactory) New() runtime.Object {
|
||||
return &rbac.RoleBinding{}
|
||||
return &rbacv1.RoleBinding{}
|
||||
}
|
||||
|
||||
func (*roleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
|
||||
item, ok := i.(*rbac.RoleBinding)
|
||||
item, ok := i.(*rbacv1.RoleBinding)
|
||||
if !ok {
|
||||
return nil, errorItemNotSupported
|
||||
}
|
||||
@@ -497,11 +497,11 @@ func (*serviceFactory) Create(f *Framework, i interface{}) (func() error, error)
|
||||
type statefulSetFactory struct{}
|
||||
|
||||
func (f *statefulSetFactory) New() runtime.Object {
|
||||
return &apps.StatefulSet{}
|
||||
return &appsv1.StatefulSet{}
|
||||
}
|
||||
|
||||
func (*statefulSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
|
||||
item, ok := i.(*apps.StatefulSet)
|
||||
item, ok := i.(*appsv1.StatefulSet)
|
||||
if !ok {
|
||||
return nil, errorItemNotSupported
|
||||
}
|
||||
@@ -518,11 +518,11 @@ func (*statefulSetFactory) Create(f *Framework, i interface{}) (func() error, er
|
||||
type daemonSetFactory struct{}
|
||||
|
||||
func (f *daemonSetFactory) New() runtime.Object {
|
||||
return &apps.DaemonSet{}
|
||||
return &appsv1.DaemonSet{}
|
||||
}
|
||||
|
||||
func (*daemonSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
|
||||
item, ok := i.(*apps.DaemonSet)
|
||||
item, ok := i.(*appsv1.DaemonSet)
|
||||
if !ok {
|
||||
return nil, errorItemNotSupported
|
||||
}
|
||||
@@ -539,11 +539,11 @@ func (*daemonSetFactory) Create(f *Framework, i interface{}) (func() error, erro
|
||||
type storageClassFactory struct{}
|
||||
|
||||
func (f *storageClassFactory) New() runtime.Object {
|
||||
return &storage.StorageClass{}
|
||||
return &storagev1.StorageClass{}
|
||||
}
|
||||
|
||||
func (*storageClassFactory) Create(f *Framework, i interface{}) (func() error, error) {
|
||||
item, ok := i.(*storage.StorageClass)
|
||||
item, ok := i.(*storagev1.StorageClass)
|
||||
if !ok {
|
||||
return nil, errorItemNotSupported
|
||||
}
|
||||
|
@@ -23,7 +23,7 @@ import (
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
@@ -39,7 +39,7 @@ import (
|
||||
)
|
||||
|
||||
// UpdateDeploymentWithRetries updates the specified deployment with retries.
|
||||
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
|
||||
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*appsv1.Deployment, error) {
|
||||
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, poll, pollShortTimeout)
|
||||
}
|
||||
|
||||
@@ -50,8 +50,8 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
|
||||
|
||||
// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
|
||||
// old pods.
|
||||
func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
|
||||
if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType {
|
||||
func WatchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error {
|
||||
if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
|
||||
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
|
||||
status := d.Status
|
||||
|
||||
condition := func(event watch.Event) (bool, error) {
|
||||
d := event.Object.(*apps.Deployment)
|
||||
d := event.Object.(*appsv1.Deployment)
|
||||
status = d.Status
|
||||
|
||||
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
|
||||
@@ -92,17 +92,17 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
|
||||
}
|
||||
|
||||
// NewDeployment returns a deployment spec with the specified argument.
|
||||
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType apps.DeploymentStrategyType) *apps.Deployment {
|
||||
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment {
|
||||
zero := int64(0)
|
||||
return &apps.Deployment{
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: apps.DeploymentSpec{
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
|
||||
Strategy: apps.DeploymentStrategy{
|
||||
Strategy: appsv1.DeploymentStrategy{
|
||||
Type: strategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
@@ -125,7 +125,7 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
|
||||
}
|
||||
|
||||
// CreateDeployment creates a deployment.
|
||||
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*apps.Deployment, error) {
|
||||
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) {
|
||||
deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
|
||||
deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec)
|
||||
if err != nil {
|
||||
@@ -140,7 +140,7 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[
|
||||
}
|
||||
|
||||
// GetPodsForDeployment gets pods for the given deployment
|
||||
func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
|
||||
func GetPodsForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (*v1.PodList, error) {
|
||||
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
|
||||
@@ -151,7 +151,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deploymen
|
||||
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
|
||||
return client.CoreV1().Pods(namespace).List(options)
|
||||
}
|
||||
rsList := []*apps.ReplicaSet{replicaSet}
|
||||
rsList := []*appsv1.ReplicaSet{replicaSet}
|
||||
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
|
||||
@@ -169,18 +169,18 @@ func RunDeployment(config testutils.DeploymentConfig) error {
|
||||
|
||||
// testDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
|
||||
// name. A slice of BASH commands can be supplied as args to be run by the pod
|
||||
func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment {
|
||||
func testDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *appsv1.Deployment {
|
||||
if len(command) == 0 {
|
||||
command = "trap exit TERM; while true; do sleep 1; done"
|
||||
}
|
||||
zero := int64(0)
|
||||
deploymentName := "deployment-" + string(uuid.NewUUID())
|
||||
deploymentSpec := &apps.Deployment{
|
||||
deploymentSpec := &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: apps.DeploymentSpec{
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
|
@@ -17,16 +17,16 @@ limitations under the License.
package deployment

import (
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)

func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) {
|
||||
func logReplicaSetsOfDeployment(deployment *appsv1.Deployment, allOldRSs []*appsv1.ReplicaSet, newRS *appsv1.ReplicaSet) {
|
||||
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, e2elog.Logf)
|
||||
}
|
||||
|
||||
func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) {
|
||||
func logPodsOfDeployment(c clientset.Interface, deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) {
|
||||
testutils.LogPodsOfDeployment(c, deployment, rsList, e2elog.Logf)
|
||||
}
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
@@ -42,7 +42,7 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
|
||||
}
|
||||
|
||||
// WaitForDeploymentWithCondition waits for the specified deployment condition.
|
||||
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
|
||||
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType appsv1.DeploymentConditionType) error {
|
||||
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, e2elog.Logf, poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
@@ -56,13 +56,13 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
|
||||
// WaitForDeploymentComplete waits for the deployment to complete, and doesn't check whether the rolling update strategy is broken.
|
||||
// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
|
||||
// such as shortly after a scaling event or the deployment is just created.
|
||||
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
|
||||
func WaitForDeploymentComplete(c clientset.Interface, d *appsv1.Deployment) error {
|
||||
return testutils.WaitForDeploymentComplete(c, d, e2elog.Logf, poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
// WaitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and checks that the rolling update strategy isn't broken at any point.
|
||||
// Rolling update strategy should not be broken during a rolling update.
|
||||
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
|
||||
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *appsv1.Deployment) error {
|
||||
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, e2elog.Logf, poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
@@ -79,8 +79,8 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName
|
||||
|
||||
// WaitForDeploymentOldRSsNum waits for the deployment to clean up old replica sets.
|
||||
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
|
||||
var oldRSs []*apps.ReplicaSet
|
||||
var d *apps.Deployment
|
||||
var oldRSs []*appsv1.ReplicaSet
|
||||
var d *appsv1.Deployment
|
||||
|
||||
pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
@@ -103,7 +103,7 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
|
||||
}
|
||||
|
||||
// WaitForDeploymentRevision waits for a deployment to reach the target revision.
|
||||
func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
|
||||
func WaitForDeploymentRevision(c clientset.Interface, d *appsv1.Deployment, targetRevision string) error {
|
||||
err := wait.PollImmediate(poll, pollLongTimeout, func() (bool, error) {
|
||||
deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
|
@@ -37,7 +37,7 @@ import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/klog"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1beta1 "k8s.io/api/networking/v1beta1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -908,12 +908,12 @@ func generateBacksideHTTPSServiceSpec() *v1.Service {
|
||||
}
|
||||
}
|
||||
|
||||
func generateBacksideHTTPSDeploymentSpec() *apps.Deployment {
|
||||
return &apps.Deployment{
|
||||
func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment {
|
||||
return &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoheaders-https",
|
||||
},
|
||||
Spec: apps.DeploymentSpec{
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
|
||||
"app": "echoheaders-https",
|
||||
}},
|
||||
@@ -941,7 +941,7 @@ func generateBacksideHTTPSDeploymentSpec() *apps.Deployment {
|
||||
}
|
||||
|
||||
// SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured.
|
||||
func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*apps.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) {
|
||||
func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*appsv1.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) {
|
||||
deployCreated, err := cs.AppsV1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@@ -965,7 +965,7 @@ func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace st
|
||||
}
|
||||
|
||||
// DeleteTestResource deletes given deployment, service and ingress.
|
||||
func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *apps.Deployment, svc *v1.Service, ing *networkingv1beta1.Ingress) []error {
|
||||
func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *appsv1.Deployment, svc *v1.Service, ing *networkingv1beta1.Ingress) []error {
|
||||
var errs []error
|
||||
if ing != nil {
|
||||
if err := j.runDelete(ing); err != nil {
|
||||
|
@@ -19,7 +19,7 @@ package job
import (
"fmt"

batch "k8s.io/api/batch/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -30,7 +30,7 @@ import (
|
||||
)
|
||||
|
||||
// GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid.
|
||||
func GetJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
|
||||
func GetJob(c clientset.Interface, ns, name string) (*batchv1.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
@@ -43,18 +43,18 @@ func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error)
|
||||
|
||||
// CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has
|
||||
// been created.
|
||||
func CreateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
|
||||
func CreateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Create(job)
|
||||
}
|
||||
|
||||
// UpdateJob uses c to updated job in namespace ns. If the returned error is nil, the returned Job is valid and has
|
||||
// been updated.
|
||||
func UpdateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
|
||||
func UpdateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Update(job)
|
||||
}
|
||||
|
||||
// UpdateJobWithRetries updates job with retries.
|
||||
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batch.Job)) (job *batch.Job, err error) {
|
||||
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) {
|
||||
jobs := c.BatchV1().Jobs(namespace)
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
|
||||
|
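A short, hypothetical usage sketch for the batchv1-based job helpers defined above (the job name, image, namespace, and clientset variable are placeholders):

	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example-job"},
		Spec: batchv1.JobSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					// Jobs require a RestartPolicy of Never or OnFailure.
					RestartPolicy: v1.RestartPolicyNever,
					Containers:    []v1.Container{{Name: "c", Image: "example-image", Command: []string{"sh", "-c", "true"}}},
				},
			},
		},
	}
	created, err := CreateJob(c, ns, job) // CreateJob as defined in this package above
	framework.ExpectNoError(err, "failed to create job in namespace %s", ns)
	_ = created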
@@ -19,10 +19,10 @@ package pod
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
batch "k8s.io/api/batch/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -59,19 +59,19 @@ func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
|
||||
switch typed := obj.(type) {
|
||||
case *v1.ReplicationController:
|
||||
return labels.SelectorFromSet(typed.Spec.Selector), nil
|
||||
case *extensions.ReplicaSet:
|
||||
case *extensionsv1beta1.ReplicaSet:
|
||||
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
|
||||
case *apps.ReplicaSet:
|
||||
case *appsv1.ReplicaSet:
|
||||
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
|
||||
case *extensions.Deployment:
|
||||
case *extensionsv1beta1.Deployment:
|
||||
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
|
||||
case *apps.Deployment:
|
||||
case *appsv1.Deployment:
|
||||
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
|
||||
case *extensions.DaemonSet:
|
||||
case *extensionsv1beta1.DaemonSet:
|
||||
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
|
||||
case *apps.DaemonSet:
|
||||
case *appsv1.DaemonSet:
|
||||
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
|
||||
case *batch.Job:
|
||||
case *batchv1.Job:
|
||||
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
|
||||
@@ -87,31 +87,31 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
|
||||
return *typed.Spec.Replicas, nil
|
||||
}
|
||||
return 0, nil
|
||||
case *extensions.ReplicaSet:
|
||||
case *extensionsv1beta1.ReplicaSet:
|
||||
if typed.Spec.Replicas != nil {
|
||||
return *typed.Spec.Replicas, nil
|
||||
}
|
||||
return 0, nil
|
||||
case *apps.ReplicaSet:
|
||||
case *appsv1.ReplicaSet:
|
||||
if typed.Spec.Replicas != nil {
|
||||
return *typed.Spec.Replicas, nil
|
||||
}
|
||||
return 0, nil
|
||||
case *extensions.Deployment:
|
||||
case *extensionsv1beta1.Deployment:
|
||||
if typed.Spec.Replicas != nil {
|
||||
return *typed.Spec.Replicas, nil
|
||||
}
|
||||
return 0, nil
|
||||
case *apps.Deployment:
|
||||
case *appsv1.Deployment:
|
||||
if typed.Spec.Replicas != nil {
|
||||
return *typed.Spec.Replicas, nil
|
||||
}
|
||||
return 0, nil
|
||||
case *extensions.DaemonSet:
|
||||
case *extensionsv1beta1.DaemonSet:
|
||||
return 0, nil
|
||||
case *apps.DaemonSet:
|
||||
case *appsv1.DaemonSet:
|
||||
return 0, nil
|
||||
case *batch.Job:
|
||||
case *batchv1.Job:
|
||||
// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
|
||||
// that actually finish we need a better way to do this.
|
||||
if typed.Spec.Parallelism != nil {
|
||||
|
@@ -20,8 +20,8 @@ import (
"fmt"
"sync"

corev1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
v1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -44,33 +44,33 @@ var (
|
||||
)
|
||||
|
||||
// privilegedPSP creates a PodSecurityPolicy that allows everything.
|
||||
func privilegedPSP(name string) *policy.PodSecurityPolicy {
|
||||
func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
|
||||
allowPrivilegeEscalation := true
|
||||
return &policy.PodSecurityPolicy{
|
||||
return &policyv1beta1.PodSecurityPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
|
||||
},
|
||||
Spec: policy.PodSecurityPolicySpec{
|
||||
Spec: policyv1beta1.PodSecurityPolicySpec{
|
||||
Privileged: true,
|
||||
AllowPrivilegeEscalation: &allowPrivilegeEscalation,
|
||||
AllowedCapabilities: []corev1.Capability{"*"},
|
||||
Volumes: []policy.FSType{policy.All},
|
||||
AllowedCapabilities: []v1.Capability{"*"},
|
||||
Volumes: []policyv1beta1.FSType{policyv1beta1.All},
|
||||
HostNetwork: true,
|
||||
HostPorts: []policy.HostPortRange{{Min: 0, Max: 65535}},
|
||||
HostPorts: []policyv1beta1.HostPortRange{{Min: 0, Max: 65535}},
|
||||
HostIPC: true,
|
||||
HostPID: true,
|
||||
RunAsUser: policy.RunAsUserStrategyOptions{
|
||||
Rule: policy.RunAsUserStrategyRunAsAny,
|
||||
RunAsUser: policyv1beta1.RunAsUserStrategyOptions{
|
||||
Rule: policyv1beta1.RunAsUserStrategyRunAsAny,
|
||||
},
|
||||
SELinux: policy.SELinuxStrategyOptions{
|
||||
Rule: policy.SELinuxStrategyRunAsAny,
|
||||
SELinux: policyv1beta1.SELinuxStrategyOptions{
|
||||
Rule: policyv1beta1.SELinuxStrategyRunAsAny,
|
||||
},
|
||||
SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
|
||||
Rule: policy.SupplementalGroupsStrategyRunAsAny,
|
||||
SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{
|
||||
Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny,
|
||||
},
|
||||
FSGroup: policy.FSGroupStrategyOptions{
|
||||
Rule: policy.FSGroupStrategyRunAsAny,
|
||||
FSGroup: policyv1beta1.FSGroupStrategyOptions{
|
||||
Rule: policyv1beta1.FSGroupStrategyRunAsAny,
|
||||
},
|
||||
ReadOnlyRootFilesystem: false,
|
||||
AllowedUnsafeSysctls: []string{"*"},
|
||||
|
@@ -17,14 +17,14 @@ limitations under the License.
package replicaset

import (
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NewReplicaSet returns a new ReplicaSet.
|
||||
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *apps.ReplicaSet {
|
||||
return &apps.ReplicaSet{
|
||||
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *appsv1.ReplicaSet {
|
||||
return &appsv1.ReplicaSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicaSet",
|
||||
APIVersion: "apps/v1",
|
||||
@@ -33,7 +33,7 @@ func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Spec: apps.ReplicaSetSpec{
|
||||
Spec: appsv1.ReplicaSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
|
@@ -19,7 +19,7 @@ package replicaset
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
@@ -29,7 +29,7 @@ import (
|
||||
)
|
||||
|
||||
// UpdateReplicaSetWithRetries updates replicaset template with retries.
|
||||
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
|
||||
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*appsv1.ReplicaSet, error) {
|
||||
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, framework.Poll, framework.PollShortTimeout)
|
||||
}
|
||||
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
@@ -53,7 +53,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
|
||||
}
|
||||
|
||||
// WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
|
||||
func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *apps.ReplicaSet) error {
|
||||
func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
|
||||
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
@@ -69,7 +69,7 @@ func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, rep
|
||||
}
|
||||
|
||||
// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
|
||||
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
|
||||
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
@@ -85,7 +85,7 @@ func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps
|
||||
}
|
||||
|
||||
// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
|
||||
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
|
||||
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
|
@@ -26,8 +26,8 @@ import (
"strings"
"time"

apps "k8s.io/api/apps/v1"
appsV1beta2 "k8s.io/api/apps/v1beta2"
appsv1 "k8s.io/api/apps/v1"
appsv1beta2 "k8s.io/api/apps/v1beta2"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -82,7 +82,7 @@ func NewStatefulSetTester(c clientset.Interface) *StatefulSetTester {
|
||||
}
|
||||
|
||||
// GetStatefulSet gets the StatefulSet named name in namespace.
|
||||
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *apps.StatefulSet {
|
||||
func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *appsv1.StatefulSet {
|
||||
ss, err := s.c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
|
||||
@@ -91,7 +91,7 @@ func (s *StatefulSetTester) GetStatefulSet(namespace, name string) *apps.Statefu
|
||||
}
|
||||
|
||||
// CreateStatefulSet creates a StatefulSet from the manifest at manifestPath in the Namespace ns using kubectl create.
|
||||
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.StatefulSet {
|
||||
func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *appsv1.StatefulSet {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(manifestPath, file)
|
||||
}
|
||||
@@ -115,7 +115,7 @@ func (s *StatefulSetTester) CreateStatefulSet(manifestPath, ns string) *apps.Sta
|
||||
}
|
||||
|
||||
// CheckMount checks that the mount at mountPath is valid for all Pods in ss.
|
||||
func (s *StatefulSetTester) CheckMount(ss *apps.StatefulSet, mountPath string) error {
|
||||
func (s *StatefulSetTester) CheckMount(ss *appsv1.StatefulSet, mountPath string) error {
|
||||
for _, cmd := range []string{
|
||||
// Print inode, size etc
|
||||
fmt.Sprintf("ls -idlh %v", mountPath),
|
||||
@@ -132,7 +132,7 @@ func (s *StatefulSetTester) CheckMount(ss *apps.StatefulSet, mountPath string) e
|
||||
}
|
||||
|
||||
// ExecInStatefulPods executes cmd in all Pods in ss. If an error occurs it is returned and cmd is not executed in any subsequent Pods.
|
||||
func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string) error {
|
||||
func (s *StatefulSetTester) ExecInStatefulPods(ss *appsv1.StatefulSet, cmd string) error {
|
||||
podList := s.GetPodList(ss)
|
||||
for _, statefulPod := range podList.Items {
|
||||
stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout)
|
||||
@@ -145,7 +145,7 @@ func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string)
|
||||
}
|
||||
|
||||
// CheckHostname verifies that all Pods in ss have the correct Hostname. If the returned error is not nil then verification failed.
|
||||
func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error {
|
||||
func (s *StatefulSetTester) CheckHostname(ss *appsv1.StatefulSet) error {
|
||||
cmd := "printf $(hostname)"
|
||||
podList := s.GetPodList(ss)
|
||||
for _, statefulPod := range podList.Items {
|
||||
@@ -161,7 +161,7 @@ func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error {
|
||||
}
|
||||
|
||||
// Saturate waits for all Pods in ss to become Running and Ready.
|
||||
func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) {
|
||||
func (s *StatefulSetTester) Saturate(ss *appsv1.StatefulSet) {
|
||||
var i int32
|
||||
for i = 0; i < *(ss.Spec.Replicas); i++ {
|
||||
e2elog.Logf("Waiting for stateful pod at index %v to enter Running", i)
|
||||
@@ -172,7 +172,7 @@ func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) {
|
||||
}
|
||||
|
||||
// DeleteStatefulPodAtIndex deletes the Pod with ordinal index in ss.
|
||||
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *apps.StatefulSet) {
|
||||
func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *appsv1.StatefulSet) {
|
||||
name := getStatefulSetPodNameAtIndex(index, ss)
|
||||
noGrace := int64(0)
|
||||
if err := s.c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
|
||||
@@ -184,26 +184,26 @@ func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *apps.Statefu
|
||||
type VerifyStatefulPodFunc func(*v1.Pod)
|
||||
|
||||
// VerifyPodAtIndex applies a visitor pattern to the Pod at index in ss. verify is applied to the Pod to "visit" it.
|
||||
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *apps.StatefulSet, verify VerifyStatefulPodFunc) {
|
||||
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *appsv1.StatefulSet, verify VerifyStatefulPodFunc) {
|
||||
name := getStatefulSetPodNameAtIndex(index, ss)
|
||||
pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(name, metav1.GetOptions{})
|
||||
ExpectNoError(err, fmt.Sprintf("Failed to get stateful pod %s for StatefulSet %s/%s", name, ss.Namespace, ss.Name))
|
||||
verify(pod)
|
||||
}
|
||||
|
||||
func getStatefulSetPodNameAtIndex(index int, ss *apps.StatefulSet) string {
|
||||
func getStatefulSetPodNameAtIndex(index int, ss *appsv1.StatefulSet) string {
|
||||
// TODO: we won't use "-index" as the name strategy forever,
|
||||
// pull the name out from an identity mapper.
|
||||
return fmt.Sprintf("%v-%v", ss.Name, index)
|
||||
}
|
||||
|
||||
// Scale scales ss to count replicas.
|
||||
func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) (*apps.StatefulSet, error) {
|
||||
func (s *StatefulSetTester) Scale(ss *appsv1.StatefulSet, count int32) (*appsv1.StatefulSet, error) {
|
||||
name := ss.Name
|
||||
ns := ss.Namespace
|
||||
|
||||
e2elog.Logf("Scaling statefulset %s to %d", name, count)
|
||||
ss = s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
|
||||
ss = s.update(ns, name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
|
||||
|
||||
var statefulPodList *v1.PodList
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) {
|
||||
@@ -227,12 +227,12 @@ func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) (*apps.Stat
|
||||
}
|
||||
|
||||
// UpdateReplicas updates the replicas of ss to count.
|
||||
func (s *StatefulSetTester) UpdateReplicas(ss *apps.StatefulSet, count int32) {
|
||||
s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count })
|
||||
func (s *StatefulSetTester) UpdateReplicas(ss *appsv1.StatefulSet, count int32) {
|
||||
s.update(ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = count })
|
||||
}
|
||||
|
||||
// Restart scales ss to 0 and then back to its previous number of replicas.
|
||||
func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) {
|
||||
func (s *StatefulSetTester) Restart(ss *appsv1.StatefulSet) {
|
||||
oldReplicas := *(ss.Spec.Replicas)
|
||||
ss, err := s.Scale(ss, 0)
|
||||
ExpectNoError(err)
|
||||
@@ -240,10 +240,10 @@ func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) {
|
||||
// This way we know the controller has observed all Pod deletions
|
||||
// before we scale it back up.
|
||||
s.WaitForStatusReplicas(ss, 0)
|
||||
s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
|
||||
s.update(ss.Namespace, ss.Name, func(ss *appsv1.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas })
|
||||
}
|
||||
|
||||
func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.StatefulSet)) *apps.StatefulSet {
|
||||
func (s *StatefulSetTester) update(ns, name string, update func(ss *appsv1.StatefulSet)) *appsv1.StatefulSet {
|
||||
for i := 0; i < 3; i++ {
|
||||
ss, err := s.c.AppsV1().StatefulSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -263,7 +263,7 @@ func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.Statefu
|
||||
}
|
||||
|
||||
// GetPodList gets the current Pods in ss.
|
||||
func (s *StatefulSetTester) GetPodList(ss *apps.StatefulSet) *v1.PodList {
|
||||
func (s *StatefulSetTester) GetPodList(ss *appsv1.StatefulSet) *v1.PodList {
|
||||
selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
|
||||
ExpectNoError(err)
|
||||
podList, err := s.c.CoreV1().Pods(ss.Namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
|
||||
@@ -273,7 +273,7 @@ func (s *StatefulSetTester) GetPodList(ss *apps.StatefulSet) *v1.PodList {
|
||||
|
||||
// ConfirmStatefulPodCount asserts that the current number of Pods in ss is count, waiting up to timeout for ss
// to scale to count.
|
||||
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.StatefulSet, timeout time.Duration, hard bool) {
|
||||
func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *appsv1.StatefulSet, timeout time.Duration, hard bool) {
|
||||
start := time.Now()
|
||||
deadline := start.Add(timeout)
|
||||
for t := time.Now(); t.Before(deadline); t = time.Now() {
|
||||
@@ -296,7 +296,7 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.Stateful
|
||||
|
||||
// WaitForRunning waits for numPodsRunning in ss to be Running and for the first
|
||||
// numPodsReady ordinals to be Ready.
|
||||
func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *apps.StatefulSet) {
|
||||
func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *appsv1.StatefulSet) {
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
|
||||
func() (bool, error) {
|
||||
podList := s.GetPodList(ss)
|
||||
@@ -325,7 +325,7 @@ func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, s
|
||||
}
|
||||
|
||||
// WaitForState periodically polls for the ss and its pods until the until function returns either true or an error
|
||||
func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.StatefulSet, *v1.PodList) (bool, error)) {
|
||||
func (s *StatefulSetTester) WaitForState(ss *appsv1.StatefulSet, until func(*appsv1.StatefulSet, *v1.PodList) (bool, error)) {
|
||||
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
|
||||
func() (bool, error) {
|
||||
ssGet, err := s.c.AppsV1().StatefulSets(ss.Namespace).Get(ss.Name, metav1.GetOptions{})
|
||||
@@ -342,8 +342,8 @@ func (s *StatefulSetTester) WaitForState(ss *apps.StatefulSet, until func(*apps.
|
||||
|
||||
// WaitForStatus waits for the StatefulSetStatus's ObservedGeneration to be greater than or equal to set's Generation.
|
||||
// The returned StatefulSet contains such a StatefulSetStatus
|
||||
func (s *StatefulSetTester) WaitForStatus(set *apps.StatefulSet) *apps.StatefulSet {
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods *v1.PodList) (bool, error) {
|
||||
func (s *StatefulSetTester) WaitForStatus(set *appsv1.StatefulSet) *appsv1.StatefulSet {
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods *v1.PodList) (bool, error) {
|
||||
if set2.Status.ObservedGeneration >= set.Generation {
|
||||
set = set2
|
||||
return true, nil
|
||||
@@ -354,14 +354,14 @@ func (s *StatefulSetTester) WaitForStatus(set *apps.StatefulSet) *apps.StatefulS
|
||||
}
|
||||
|
||||
// WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready.
|
||||
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *apps.StatefulSet) {
|
||||
func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *appsv1.StatefulSet) {
|
||||
s.WaitForRunning(numStatefulPods, numStatefulPods, ss)
|
||||
}
|
||||
|
||||
// WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition.
|
||||
func (s *StatefulSetTester) WaitForPodReady(set *apps.StatefulSet, podName string) (*apps.StatefulSet, *v1.PodList) {
|
||||
func (s *StatefulSetTester) WaitForPodReady(set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
for i := range pods.Items {
|
||||
@@ -376,9 +376,9 @@ func (s *StatefulSetTester) WaitForPodReady(set *apps.StatefulSet, podName strin
|
||||
}
|
||||
|
||||
// WaitForPodNotReady waits for the Pod named podName in set to exist and to not have a Ready condition.
|
||||
func (s *StatefulSetTester) WaitForPodNotReady(set *apps.StatefulSet, podName string) (*apps.StatefulSet, *v1.PodList) {
|
||||
func (s *StatefulSetTester) WaitForPodNotReady(set *appsv1.StatefulSet, podName string) (*appsv1.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
for i := range pods.Items {
|
||||
@@ -394,15 +394,15 @@ func (s *StatefulSetTester) WaitForPodNotReady(set *apps.StatefulSet, podName st
|
||||
|
||||
// WaitForRollingUpdate waits for all Pods in set to exist and have the correct revision and for the RollingUpdate to
|
||||
// complete. set must have a RollingUpdateStatefulSetStrategyType.
|
||||
func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.StatefulSet, *v1.PodList) {
|
||||
func (s *StatefulSetTester) WaitForRollingUpdate(set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
|
||||
var pods *v1.PodList
|
||||
if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
|
||||
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
|
||||
Failf("StatefulSet %s/%s attempt to wait for rolling update with updateStrategy %s",
|
||||
set.Namespace,
|
||||
set.Name,
|
||||
set.Spec.UpdateStrategy.Type)
|
||||
}
|
||||
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
|
||||
set = set2
|
||||
pods = pods2
|
||||
if len(pods.Items) < int(*set.Spec.Replicas) {
|
||||
@@ -415,12 +415,12 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.S
|
||||
)
|
||||
s.SortStatefulPods(pods)
|
||||
for i := range pods.Items {
|
||||
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
|
||||
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
}
}
return false, nil
@@ -434,9 +434,9 @@ func (s *StatefulSetTester) WaitForRollingUpdate(set *apps.StatefulSet) (*apps.S
// a RollingUpdateStatefulSetStrategyType with a non-nil RollingUpdate and Partition. All Pods with ordinals less
// than or equal to the Partition are expected to be at set's current revision. All other Pods are expected to be
// at its update revision.
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSet) (*apps.StatefulSet, *v1.PodList) {
func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *appsv1.StatefulSet) (*appsv1.StatefulSet, *v1.PodList) {
var pods *v1.PodList
if set.Spec.UpdateStrategy.Type != apps.RollingUpdateStatefulSetStrategyType {
if set.Spec.UpdateStrategy.Type != appsv1.RollingUpdateStatefulSetStrategyType {
Failf("StatefulSet %s/%s attempt to wait for partitioned update with updateStrategy %s",
set.Namespace,
set.Name,
@@ -447,7 +447,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
set.Namespace,
set.Name)
}
s.WaitForState(set, func(set2 *apps.StatefulSet, pods2 *v1.PodList) (bool, error) {
s.WaitForState(set, func(set2 *appsv1.StatefulSet, pods2 *v1.PodList) (bool, error) {
set = set2
pods = pods2
partition := int(*set.Spec.UpdateStrategy.RollingUpdate.Partition)
@@ -461,23 +461,23 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
)
s.SortStatefulPods(pods)
for i := range pods.Items {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
}
}
return false, nil
}
for i := int(*set.Spec.Replicas) - 1; i >= partition; i-- {
if pods.Items[i].Labels[apps.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
if pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel] != set.Status.UpdateRevision {
e2elog.Logf("Waiting for Pod %s/%s to have revision %s update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
set.Status.UpdateRevision,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel])
pods.Items[i].Labels[appsv1.StatefulSetRevisionLabel])
return false, nil
}
}
@@ -487,7 +487,7 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe
}

// WaitForRunningAndNotReady waits for numStatefulPods in ss to be Running and not Ready.
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *apps.StatefulSet) {
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *appsv1.StatefulSet) {
s.WaitForRunning(numStatefulPods, 0, ss)
}

@@ -506,12 +506,12 @@ var httpProbe = &v1.Probe{
// SetHTTPProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers.
// This probe can then be controlled with BreakHTTPProbe() and RestoreHTTPProbe().
// Note that this cannot be used together with PauseNewPods().
func (s *StatefulSetTester) SetHTTPProbe(ss *apps.StatefulSet) {
func (s *StatefulSetTester) SetHTTPProbe(ss *appsv1.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe
}

// BreakHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) BreakHTTPProbe(ss *apps.StatefulSet) error {
func (s *StatefulSetTester) BreakHTTPProbe(ss *appsv1.StatefulSet) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -522,7 +522,7 @@ func (s *StatefulSetTester) BreakHTTPProbe(ss *apps.StatefulSet) error {
}

// BreakPodHTTPProbe breaks the readiness probe for Nginx StatefulSet containers in one pod.
func (s *StatefulSetTester) BreakPodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
func (s *StatefulSetTester) BreakPodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -535,7 +535,7 @@ func (s *StatefulSetTester) BreakPodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod)
}

// RestoreHTTPProbe restores the readiness probe for Nginx StatefulSet containers in ss.
func (s *StatefulSetTester) RestoreHTTPProbe(ss *apps.StatefulSet) error {
func (s *StatefulSetTester) RestoreHTTPProbe(ss *appsv1.StatefulSet) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -546,7 +546,7 @@ func (s *StatefulSetTester) RestoreHTTPProbe(ss *apps.StatefulSet) error {
}

// RestorePodHTTPProbe restores the readiness probe for Nginx StatefulSet containers in pod.
func (s *StatefulSetTester) RestorePodHTTPProbe(ss *apps.StatefulSet, pod *v1.Pod) error {
func (s *StatefulSetTester) RestorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
path := httpProbe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -576,7 +576,7 @@ func hasPauseProbe(pod *v1.Pod) bool {
// This causes all newly-created Pods to stay Unready until they are manually resumed
// with ResumeNextPod().
// Note that this cannot be used together with SetHTTPProbe().
func (s *StatefulSetTester) PauseNewPods(ss *apps.StatefulSet) {
func (s *StatefulSetTester) PauseNewPods(ss *appsv1.StatefulSet) {
ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe
}

@@ -585,7 +585,7 @@ func (s *StatefulSetTester) PauseNewPods(ss *apps.StatefulSet) {
// It fails the test if it finds any pods that are not in phase Running,
// or if it finds more than one paused Pod existing at the same time.
// This is a no-op if there are no paused pods.
func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) {
func (s *StatefulSetTester) ResumeNextPod(ss *appsv1.StatefulSet) {
podList := s.GetPodList(ss)
resumedPod := ""
for _, pod := range podList.Items {
@@ -606,7 +606,7 @@ func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) {
}

// WaitForStatusReadyReplicas waits for the ss.Status.ReadyReplicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *appsv1.StatefulSet, expectedReplicas int32) {
e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)

ns, name := ss.Namespace, ss.Name
@@ -631,7 +631,7 @@ func (s *StatefulSetTester) WaitForStatusReadyReplicas(ss *apps.StatefulSet, exp
}

// WaitForStatusReplicas waits for the ss.Status.Replicas to be equal to expectedReplicas
func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expectedReplicas int32) {
func (s *StatefulSetTester) WaitForStatusReplicas(ss *appsv1.StatefulSet, expectedReplicas int32) {
e2elog.Logf("Waiting for statefulset status.replicas updated to %d", expectedReplicas)

ns, name := ss.Namespace, ss.Name
@@ -656,7 +656,7 @@ func (s *StatefulSetTester) WaitForStatusReplicas(ss *apps.StatefulSet, expected
}

// CheckServiceName asserts that the ServiceName for ss is equivalent to expectedServiceName.
func (s *StatefulSetTester) CheckServiceName(ss *apps.StatefulSet, expectedServiceName string) error {
func (s *StatefulSetTester) CheckServiceName(ss *appsv1.StatefulSet, expectedServiceName string) error {
e2elog.Logf("Checking if statefulset spec.serviceName is %s", expectedServiceName)

if expectedServiceName != ss.Spec.ServiceName {
@@ -767,7 +767,7 @@ func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim {
// NewStatefulSet creates a new NGINX StatefulSet for testing. The StatefulSet is named name, is in namespace ns,
// statefulPodsMounts are the mounts that will be backed by PVs. podsMounts are the mounts that are mounted directly
// to the Pod. labels are the labels that will be usd for the StatefulSet selector.
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *apps.StatefulSet {
func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulPodMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *appsv1.StatefulSet {
mounts := append(statefulPodMounts, podMounts...)
claims := []v1.PersistentVolumeClaim{}
for _, m := range statefulPodMounts {
@@ -786,7 +786,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
})
}

return &apps.StatefulSet{
return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1",
@@ -795,7 +795,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Name: name,
Namespace: ns,
},
Spec: apps.StatefulSetSpec{
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
@@ -817,7 +817,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
Volumes: vols,
},
},
UpdateStrategy: apps.StatefulSetUpdateStrategy{Type: apps.RollingUpdateStatefulSetStrategyType},
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{Type: appsv1.RollingUpdateStatefulSetStrategyType},
VolumeClaimTemplates: claims,
ServiceName: governingSvcName,
},
@@ -825,17 +825,17 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
}

// NewStatefulSetScale creates a new StatefulSet scale subresource and returns it
func NewStatefulSetScale(ss *apps.StatefulSet) *appsV1beta2.Scale {
return &appsV1beta2.Scale{
func NewStatefulSetScale(ss *appsv1.StatefulSet) *appsv1beta2.Scale {
return &appsv1beta2.Scale{
// TODO: Create a variant of ObjectMeta type that only contains the fields below.
ObjectMeta: metav1.ObjectMeta{
Name: ss.Name,
Namespace: ss.Namespace,
},
Spec: appsV1beta2.ScaleSpec{
Spec: appsv1beta2.ScaleSpec{
Replicas: *(ss.Spec.Replicas),
},
Status: appsV1beta2.ScaleStatus{
Status: appsv1beta2.ScaleStatus{
Replicas: ss.Status.Replicas,
},
}
@@ -869,10 +869,10 @@ func (sp statefulPodsByOrdinal) Less(i, j int) bool {
return getStatefulPodOrdinal(&sp[i]) < getStatefulPodOrdinal(&sp[j])
}

type updateStatefulSetFunc func(*apps.StatefulSet)
type updateStatefulSetFunc func(*appsv1.StatefulSet)

// UpdateStatefulSetWithRetries updates statfulset template with retries.
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1.StatefulSet, err error) {
statefulSets := c.AppsV1().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
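The rename above is purely mechanical: every identifier previously qualified with apps now reads appsv1, and both aliases point at the same k8s.io/api/apps/v1 package. As a minimal illustrative sketch (not part of this commit; it assumes the retry helper stays exported from the e2e framework package), a caller that bumps a StatefulSet image through the helper would look like this:

    // Illustrative only: update a StatefulSet image via the retry helper,
    // written against the appsv1 alias introduced by this rename.
    ss, err := framework.UpdateStatefulSetWithRetries(c, ns, "web", func(update *appsv1.StatefulSet) {
        update.Spec.Template.Spec.Containers[0].Image = "nginx:1.15"
    })
    if err != nil {
        framework.Failf("failed to update StatefulSet: %v", err)
    }
    _ = ss // helper returns the updated object
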
@@ -47,10 +47,10 @@ import (
"github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"

apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -2247,19 +2247,19 @@ func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensions.ReplicaSet:
case *extensionsv1beta1.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.ReplicaSet:
case *appsv1.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.Deployment:
case *extensionsv1beta1.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.Deployment:
case *appsv1.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.DaemonSet:
case *extensionsv1beta1.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.DaemonSet:
case *appsv1.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batch.Job:
case *batchv1.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
default:
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
@@ -2273,31 +2273,31 @@ func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.ReplicaSet:
case *extensionsv1beta1.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *apps.ReplicaSet:
case *appsv1.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.Deployment:
case *extensionsv1beta1.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *apps.Deployment:
case *appsv1.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.DaemonSet:
case *extensionsv1beta1.DaemonSet:
return 0, nil
case *apps.DaemonSet:
case *appsv1.DaemonSet:
return 0, nil
case *batch.Job:
case *batchv1.Job:
// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
// that actually finish we need a better way to do this.
if typed.Spec.Parallelism != nil {
@@ -2379,11 +2379,11 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
return nil
}

type updateDSFunc func(*apps.DaemonSet)
type updateDSFunc func(*appsv1.DaemonSet)

// UpdateDaemonSetWithRetries updates daemonsets with the given applyUpdate func
// until it succeeds or a timeout expires.
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *apps.DaemonSet, err error) {
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) {
daemonsets := c.AppsV1().DaemonSets(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
@@ -3432,8 +3432,8 @@ func DumpDebugInfo(c clientset.Interface, ns string) {
}

// DsFromManifest reads a .json/yaml file and returns the daemonset in it.
func DsFromManifest(url string) (*apps.DaemonSet, error) {
var controller apps.DaemonSet
func DsFromManifest(url string) (*appsv1.DaemonSet, error) {
var controller appsv1.DaemonSet
e2elog.Logf("Parsing ds from %v", url)

var response *http.Response
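The type switches above are where the group of a type matters most; with the groupversion aliases it is obvious at a glance whether a case handles the apps/v1 or the extensions/v1beta1 variant. A condensed, hypothetical sketch of that pattern (for illustration only, not code from this commit):

    // Illustrative sketch of the aliased type-switch style used above.
    func replicasOf(obj runtime.Object) int32 {
        switch typed := obj.(type) {
        case *appsv1.Deployment: // k8s.io/api/apps/v1
            if typed.Spec.Replicas != nil {
                return *typed.Spec.Replicas
            }
        case *extensionsv1beta1.Deployment: // k8s.io/api/extensions/v1beta1
            if typed.Spec.Replicas != nil {
                return *typed.Spec.Replicas
            }
        }
        return 0
    }
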
@@ -19,7 +19,7 @@ package utils
import (
"fmt"

api_v1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
api "k8s.io/kubernetes/pkg/apis/core"
@@ -85,7 +85,7 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max
return nil
}

func getLoggingAgentPods(f *framework.Framework, appName string) (*api_v1.PodList, error) {
func getLoggingAgentPods(f *framework.Framework, appName string) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": appName}))
options := meta_v1.ListOptions{LabelSelector: label.String()}
return f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
@@ -22,7 +22,7 @@ import (

"fmt"

api_v1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
@@ -96,23 +96,23 @@ func (p *loadLoggingPod) Name() string {

func (p *loadLoggingPod) Start(f *framework.Framework) error {
e2elog.Logf("Starting load logging pod %s", p.name)
f.PodClient().Create(&api_v1.Pod{
f.PodClient().Create(&v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Name: p.name,
},
Spec: api_v1.PodSpec{
RestartPolicy: api_v1.RestartPolicyNever,
Containers: []api_v1.Container{
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Name: loggingContainerName,
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"logs-generator", "-log-lines-total", strconv.Itoa(p.expectedLinesCount), "-run-duration", p.runDuration.String()},
Resources: api_v1.ResourceRequirements{
Requests: api_v1.ResourceList{
api_v1.ResourceCPU: *resource.NewMilliQuantity(
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
loggingContainerCPURequest,
resource.DecimalSI),
api_v1.ResourceMemory: *resource.NewQuantity(
v1.ResourceMemory: *resource.NewQuantity(
loggingContainerMemoryRequest,
resource.BinarySI),
},
@@ -162,22 +162,22 @@ func (p *execLoggingPod) Name() string {

func (p *execLoggingPod) Start(f *framework.Framework) error {
e2elog.Logf("Starting repeating logging pod %s", p.name)
f.PodClient().Create(&api_v1.Pod{
f.PodClient().Create(&v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Name: p.name,
},
Spec: api_v1.PodSpec{
Containers: []api_v1.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: loggingContainerName,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: p.cmd,
Resources: api_v1.ResourceRequirements{
Requests: api_v1.ResourceList{
api_v1.ResourceCPU: *resource.NewMilliQuantity(
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
loggingContainerCPURequest,
resource.DecimalSI),
api_v1.ResourceMemory: *resource.NewQuantity(
v1.ResourceMemory: *resource.NewQuantity(
loggingContainerMemoryRequest,
resource.BinarySI),
},
@@ -23,8 +23,8 @@ import (

gcm "google.golang.org/api/monitoring/v3"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -44,16 +44,16 @@ var (
StackdriverExporter = "stackdriver-exporter"
// HPAPermissions is a ClusterRoleBinding that grants unauthenticated user permissions granted for
// HPA for testing purposes, i.e. it should grant permission to read custom metrics.
HPAPermissions = &rbac.ClusterRoleBinding{
HPAPermissions = &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "custom-metrics-reader",
},
RoleRef: rbac.RoleRef{
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:controller:horizontal-pod-autoscaler",
},
Subjects: []rbac.Subject{
Subjects: []rbacv1.Subject{
{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Group",
@@ -99,7 +99,7 @@ func SimpleStackdriverExporterDeployment(name, namespace string, replicas int32,
// is exposed by a different container in one pod.
// The metric names and values are configured via the containers parameter.
func StackdriverExporterDeployment(name, namespace string, replicas int32, containers []CustomMetricContainerSpec) *appsv1.Deployment {
podSpec := corev1.PodSpec{Containers: []corev1.Container{}}
podSpec := v1.PodSpec{Containers: []v1.Container{}}
for _, containerSpec := range containers {
podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, namespace, containerSpec.MetricName, containerSpec.MetricValue))
}
@@ -113,7 +113,7 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, conta
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": name},
},
Template: corev1.PodTemplateSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": name,
@@ -128,8 +128,8 @@ func StackdriverExporterDeployment(name, namespace string, replicas int32, conta

// StackdriverExporterPod is a Pod of simple application that exports a metric of fixed value to
// Stackdriver in a loop.
func StackdriverExporterPod(podName, namespace, podLabel, metricName string, metricValue int64) *corev1.Pod {
return &corev1.Pod{
func StackdriverExporterPod(podName, namespace, podLabel, metricName string, metricValue int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: namespace,
@@ -137,17 +137,17 @@ func StackdriverExporterPod(podName, namespace, podLabel, metricName string, met
"name": podLabel,
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{stackdriverExporterContainerSpec(StackdriverExporter, namespace, metricName, metricValue)},
Spec: v1.PodSpec{
Containers: []v1.Container{stackdriverExporterContainerSpec(StackdriverExporter, namespace, metricName, metricValue)},
},
}
}

func stackdriverExporterContainerSpec(name string, namespace string, metricName string, metricValue int64) corev1.Container {
return corev1.Container{
func stackdriverExporterContainerSpec(name string, namespace string, metricName string, metricValue int64) v1.Container {
return v1.Container{
Name: name,
Image: imageutils.GetE2EImage(imageutils.SdDummyExporter),
ImagePullPolicy: corev1.PullPolicy("Always"),
ImagePullPolicy: v1.PullPolicy("Always"),
Command: []string{
"/bin/sh",
"-c",
@@ -162,25 +162,25 @@ func stackdriverExporterContainerSpec(name string, namespace string, metricName
"--use-new-resource-model",
}, " "),
},
Env: []corev1.EnvVar{
Env: []v1.EnvVar{
{
Name: "POD_ID",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.uid",
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
Ports: []corev1.ContainerPort{{ContainerPort: 80}},
Ports: []v1.ContainerPort{{ContainerPort: 80}},
}
}

@@ -197,7 +197,7 @@ func PrometheusExporterDeployment(name, namespace string, replicas int32, metric
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": name},
},
Template: corev1.PodTemplateSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": name,
@@ -210,36 +210,36 @@ func PrometheusExporterDeployment(name, namespace string, replicas int32, metric
}
}

func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) corev1.PodSpec {
return corev1.PodSpec{
Containers: []corev1.Container{
func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{
{
Name: "prometheus-exporter",
Image: imageutils.GetE2EImage(imageutils.PrometheusDummyExporter),
ImagePullPolicy: corev1.PullPolicy("Always"),
ImagePullPolicy: v1.PullPolicy("Always"),
Command: []string{"/prometheus_dummy_exporter", "--metric-name=" + metricName,
fmt.Sprintf("--metric-value=%v", metricValue), fmt.Sprintf("=--port=%d", port)},
Ports: []corev1.ContainerPort{{ContainerPort: port}},
Ports: []v1.ContainerPort{{ContainerPort: port}},
},
{
Name: "prometheus-to-sd",
Image: imageutils.GetE2EImage(imageutils.PrometheusToSd),
ImagePullPolicy: corev1.PullPolicy("Always"),
ImagePullPolicy: v1.PullPolicy("Always"),
Command: []string{"/monitor", fmt.Sprintf("--source=:http://localhost:%d", port),
"--stackdriver-prefix=custom.googleapis.com", "--pod-id=$(POD_ID)", "--namespace-id=$(POD_NAMESPACE)"},
Env: []corev1.EnvVar{
Env: []v1.EnvVar{
{
Name: "POD_ID",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.uid",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
@@ -21,7 +21,7 @@ import (
"strings"
"time"

corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -99,7 +99,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
})

ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
leaseClient := c.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
leaseClient := c.CoordinationV1beta1().Leases(v1.NamespaceNodeLease)
err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
gomega.Expect(err).To(gomega.BeNil())

@@ -20,10 +20,10 @@ import (
"fmt"
"io/ioutil"

apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
rbac "k8s.io/api/rbac/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -119,8 +119,8 @@ func IngressToManifest(ing *networkingv1beta1.Ingress, path string) error {
}

// StatefulSetFromManifest returns a StatefulSet from a manifest stored in fileName in the Namespace indicated by ns.
func StatefulSetFromManifest(fileName, ns string) (*apps.StatefulSet, error) {
var ss apps.StatefulSet
func StatefulSetFromManifest(fileName, ns string) (*appsv1.StatefulSet, error) {
var ss appsv1.StatefulSet
data, err := testfiles.Read(fileName)
if err != nil {
return nil, err
@@ -143,8 +143,8 @@ func StatefulSetFromManifest(fileName, ns string) (*apps.StatefulSet, error) {
}

// DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns.
func DaemonSetFromManifest(fileName, ns string) (*apps.DaemonSet, error) {
var ds apps.DaemonSet
func DaemonSetFromManifest(fileName, ns string) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet
data, err := testfiles.Read(fileName)
if err != nil {
return nil, err
@@ -163,8 +163,8 @@ func DaemonSetFromManifest(fileName, ns string) (*apps.DaemonSet, error) {
}

// RoleFromManifest returns a Role from a manifest stored in fileName in the Namespace indicated by ns.
func RoleFromManifest(fileName, ns string) (*rbac.Role, error) {
var role rbac.Role
func RoleFromManifest(fileName, ns string) (*rbacv1.Role, error) {
var role rbacv1.Role
data, err := testfiles.Read(fileName)

json, err := utilyaml.ToJSON(data)
@@ -22,7 +22,7 @@ import (
"sync"
"time"

apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -73,7 +73,7 @@ type IngressScaleFramework struct {
NumIngressesTest []int
OutputFile string

ScaleTestDeploy *apps.Deployment
ScaleTestDeploy *appsv1.Deployment
ScaleTestSvcs []*v1.Service
ScaleTestIngs []*networkingv1beta1.Ingress

@@ -438,12 +438,12 @@ func generateScaleTestServiceSpec(suffix string) *v1.Service {
}
}

func generateScaleTestBackendDeploymentSpec(numReplicas int32) *apps.Deployment {
return &apps.Deployment{
func generateScaleTestBackendDeploymentSpec(numReplicas int32) *appsv1.Deployment {
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: scaleTestBackendName,
},
Spec: apps.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: &numReplicas,
Selector: &metav1.LabelSelector{MatchLabels: scaleTestLabels},
Template: v1.PodTemplateSpec{
@@ -19,7 +19,7 @@ package node
import (
"time"

batch "k8s.io/api/batch/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/slice"
@@ -41,12 +41,12 @@ var _ = framework.KubeDescribe("[Feature:TTLAfterFinished][NodeAlphaFeature:TTLA
})
})

func cleanupJob(f *framework.Framework, job *batch.Job) {
func cleanupJob(f *framework.Framework, job *batchv1.Job) {
ns := f.Namespace.Name
c := f.ClientSet

e2elog.Logf("Remove the Job's dummy finalizer; the Job should be deleted cascadingly")
removeFinalizerFunc := func(j *batch.Job) {
removeFinalizerFunc := func(j *batchv1.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
}
_, err := jobutil.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
@@ -24,8 +24,8 @@ import (
"k8s.io/client-go/tools/cache"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
schedulerapi "k8s.io/api/scheduling/v1"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -52,7 +52,7 @@ type priorityPair struct {

var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
var cs clientset.Interface
var nodeList *corev1.NodeList
var nodeList *v1.NodeList
var ns string
f := framework.NewDefaultFramework("sched-preemption")

@@ -75,9 +75,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
ginkgo.BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
nodeList = &corev1.NodeList{}
nodeList = &v1.NodeList{}
for _, pair := range priorityPairs {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: pair.name}, Value: pair.value})
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
}

@@ -92,10 +92,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// enough resources is found, scheduler preempts a lower priority pod to schedule
// the high priority pod.
ginkgo.It("validates basic preemption works", func() {
var podRes corev1.ResourceList
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
ginkgo.By("Create pods that use 60% of node resources.")
pods := make([]*corev1.Pod, len(nodeList.Items))
pods := make([]*v1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
gomega.Expect(found).To(gomega.Equal(true))
@@ -103,9 +103,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
memAllocatable, found := node.Status.Allocatable["memory"]
gomega.Expect(found).To(gomega.Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)

// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
@@ -115,7 +115,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
@@ -131,7 +131,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
runPausePod(f, pausePodConfig{
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
@@ -152,10 +152,10 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// enough resources is found, scheduler preempts a lower priority pod to schedule
// this critical pod.
ginkgo.It("validates lower priority pod preemption by critical pod", func() {
var podRes corev1.ResourceList
var podRes v1.ResourceList
// Create one pod per node that uses a lot of the node's resources.
ginkgo.By("Create pods that use 60% of node resources.")
pods := make([]*corev1.Pod, len(nodeList.Items))
pods := make([]*v1.Pod, len(nodeList.Items))
for i, node := range nodeList.Items {
cpuAllocatable, found := node.Status.Allocatable["cpu"]
gomega.Expect(found).To(gomega.Equal(true))
@@ -163,9 +163,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
memAllocatable, found := node.Status.Allocatable["memory"]
gomega.Expect(found).To(gomega.Equal(true))
memory := memAllocatable.Value() * 60 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)

// make the first pod low priority and the rest medium priority.
priorityName := mediumPriorityClassName
@@ -175,7 +175,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
@@ -192,7 +192,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Name: "critical-pod",
Namespace: metav1.NamespaceSystem,
PriorityClassName: scheduling.SystemClusterCritical,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
})
@@ -220,14 +220,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// It also verifies that existing low priority pods are not preempted as their
// preemption wouldn't help.
ginkgo.It("validates pod anti-affinity works in preemption", func() {
var podRes corev1.ResourceList
var podRes v1.ResourceList
// Create a few pods that uses a small amount of resources.
ginkgo.By("Create pods that use 10% of node resources.")
numPods := 4
if len(nodeList.Items) < numPods {
numPods = len(nodeList.Items)
}
pods := make([]*corev1.Pod, numPods)
pods := make([]*v1.Pod, numPods)
for i := 0; i < numPods; i++ {
node := nodeList.Items[i]
cpuAllocatable, found := node.Status.Allocatable["cpu"]
@@ -236,9 +236,9 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
memAllocatable, found := node.Status.Allocatable["memory"]
gomega.Expect(found).To(gomega.BeTrue())
memory := memAllocatable.Value() * 10 / 100
podRes = corev1.ResourceList{}
podRes[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)
podRes = v1.ResourceList{}
podRes[v1.ResourceCPU] = *resource.NewMilliQuantity(int64(milliCPU), resource.DecimalSI)
podRes[v1.ResourceMemory] = *resource.NewQuantity(int64(memory), resource.BinarySI)

// Apply node label to each node
framework.AddOrUpdateLabelOnNode(cs, node.Name, "node", node.Name)
@@ -252,12 +252,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
pods[i] = createPausePod(f, pausePodConfig{
Name: fmt.Sprintf("pod%d-%v", i, priorityName),
PriorityClassName: priorityName,
Resources: &corev1.ResourceRequirements{
Resources: &v1.ResourceRequirements{
Requests: podRes,
},
Affinity: &corev1.Affinity{
PodAntiAffinity: &corev1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
@@ -272,14 +272,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
},
},
},
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node",
Operator: corev1.NodeSelectorOpIn,
Operator: v1.NodeSelectorOpIn,
Values: []string{node.Name},
},
},
@@ -308,15 +308,15 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
Name: "preemptor-pod",
PriorityClassName: highPriorityClassName,
Labels: map[string]string{"service": "blah"},
Affinity: &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node",
Operator: corev1.NodeSelectorOpIn,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeList.Items[0].Name},
},
},
@@ -379,11 +379,11 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() {

// construct a fakecpu so as to set it to status of Node object
// otherwise if we update CPU/Memory/etc, those values will be corrected back by kubelet
var fakecpu corev1.ResourceName = "example.com/fakecpu"
var fakecpu v1.ResourceName = "example.com/fakecpu"

var _ = SIGDescribe("PreemptionExecutionPath", func() {
var cs clientset.Interface
var node *corev1.Node
var node *v1.Node
var ns, nodeHostNameLabel string
f := framework.NewDefaultFramework("sched-preemption-path")

@@ -451,7 +451,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
priorityName := fmt.Sprintf("p%d", i)
priorityVal := int32(i)
priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal})
_, err := cs.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal})
_, err := cs.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal})
if err != nil {
e2elog.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err)
e2elog.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err)
@@ -475,11 +475,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
return f.ClientSet.CoreV1().Pods(ns).Watch(options)
},
},
&corev1.Pod{},
&v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if pod, ok := obj.(*corev1.Pod); ok {
if pod, ok := obj.(*v1.Pod); ok {
podNamesSeen[pod.Name] = struct{}{}
}
},
@@ -498,9 +498,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
Labels: map[string]string{"name": "pod1"},
PriorityClassName: "p1",
NodeSelector: map[string]string{"kubernetes.io/hostname": nodeHostNameLabel},
Resources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{fakecpu: resource.MustParse("40")},
Limits: corev1.ResourceList{fakecpu: resource.MustParse("40")},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakecpu: resource.MustParse("40")},
Limits: v1.ResourceList{fakecpu: resource.MustParse("40")},
},
},
},
@@ -512,9 +512,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
Labels: map[string]string{"name": "pod2"},
PriorityClassName: "p2",
NodeSelector: map[string]string{"kubernetes.io/hostname": nodeHostNameLabel},
Resources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{fakecpu: resource.MustParse("50")},
Limits: corev1.ResourceList{fakecpu: resource.MustParse("50")},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakecpu: resource.MustParse("50")},
Limits: v1.ResourceList{fakecpu: resource.MustParse("50")},
},
},
},
@@ -526,9 +526,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
Labels: map[string]string{"name": "pod3"},
PriorityClassName: "p3",
NodeSelector: map[string]string{"kubernetes.io/hostname": nodeHostNameLabel},
Resources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{fakecpu: resource.MustParse("95")},
Limits: corev1.ResourceList{fakecpu: resource.MustParse("95")},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakecpu: resource.MustParse("95")},
Limits: v1.ResourceList{fakecpu: resource.MustParse("95")},
},
},
},
@@ -540,9 +540,9 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() {
Labels: map[string]string{"name": "pod4"},
PriorityClassName: "p4",
NodeSelector: map[string]string{"kubernetes.io/hostname": nodeHostNameLabel},
Resources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{fakecpu: resource.MustParse("400")},
Limits: corev1.ResourceList{fakecpu: resource.MustParse("400")},
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{fakecpu: resource.MustParse("400")},
Limits: v1.ResourceList{fakecpu: resource.MustParse("400")},
},
},
},
@@ -604,7 +604,7 @@ func initPauseRS(f *framework.Framework, conf pauseRSConfig) *appsv1.ReplicaSet
Selector: &metav1.LabelSelector{
MatchLabels: pausePod.Labels,
},
Template: corev1.PodTemplateSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{Labels: pausePod.ObjectMeta.Labels},
Spec: pausePod.Spec,
},
@@ -22,7 +22,7 @@ import (
"time"

"k8s.io/api/core/v1"
settings "k8s.io/api/settings/v1alpha1"
settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -50,12 +50,12 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
ginkgo.It("should create a pod preset", func() {
ginkgo.By("Creating a pod preset")

pip := &settings.PodPreset{
pip := &settingsv1alpha1.PodPreset{
ObjectMeta: metav1.ObjectMeta{
Name: "hello",
Namespace: f.Namespace.Name,
},
Spec: settings.PodPresetSpec{
Spec: settingsv1alpha1.PodPresetSpec{
Selector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
@@ -168,12 +168,12 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
ginkgo.It("should not modify the pod on conflict", func() {
ginkgo.By("Creating a pod preset")

pip := &settings.PodPreset{
pip := &settingsv1alpha1.PodPreset{
ObjectMeta: metav1.ObjectMeta{
Name: "hello",
Namespace: f.Namespace.Name,
},
Spec: settings.PodPresetSpec{
Spec: settingsv1alpha1.PodPresetSpec{
Selector: metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
@@ -287,6 +287,6 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
})
})

func createPodPreset(c clientset.Interface, ns string, job *settings.PodPreset) (*settings.PodPreset, error) {
func createPodPreset(c clientset.Interface, ns string, job *settingsv1alpha1.PodPreset) (*settingsv1alpha1.PodPreset, error) {
return c.SettingsV1alpha1().PodPresets(ns).Create(job)
}
@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -41,7 +41,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
resizableSc *storagev1.StorageClass
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
@@ -39,7 +39,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
resizableSc *storagev1.StorageClass
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
@@ -21,9 +21,9 @@ import (

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -43,7 +43,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
ns string
err error
pvc *v1.PersistentVolumeClaim
resizableSc *storage.StorageClass
resizableSc *storagev1.StorageClass
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
@@ -163,7 +163,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
})
})

func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *apps.Deployment) (v1.Pod, error) {
func waitForDeploymentToRecreatePod(client clientset.Interface, deployment *appsv1.Deployment) (v1.Pod, error) {
var runningPod v1.Pod
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
@@ -30,7 +30,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -411,7 +411,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
framework.ExpectNoError(podClient.Delete(host0Pod.Name, metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")

} else if disruptOp == evictPod {
evictTarget := &policy.Eviction{
evictTarget := &policyv1beta1.Eviction{
ObjectMeta: metav1.ObjectMeta{
Name: host0Pod.Name,
Namespace: ns,
@@ -28,7 +28,7 @@ import (

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
@@ -441,7 +441,7 @@ func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.P
return &podList.Items[0]
}

func addAllowedTopologiesToStorageClass(c clientset.Interface, sc *storage.StorageClass, zones []string) {
func addAllowedTopologiesToStorageClass(c clientset.Interface, sc *storagev1.StorageClass, zones []string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
@@ -24,7 +24,7 @@ import (
"github.com/onsi/gomega"

v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("Volume expand", func() {
ns string
err error
pvc *v1.PersistentVolumeClaim
storageClassVar *storage.StorageClass
storageClassVar *storagev1.StorageClass
)

f := framework.NewDefaultFramework("volume-expand")
@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("Volume expand", func() {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
})

setupFunc := func(allowExpansion bool, blockVolume bool) (*v1.PersistentVolumeClaim, *storage.StorageClass, error) {
setupFunc := func(allowExpansion bool, blockVolume bool) (*v1.PersistentVolumeClaim, *storagev1.StorageClass, error) {
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
@@ -241,7 +241,7 @@ var _ = utils.SIGDescribe("Volume expand", func() {
})
})

func createStorageClass(t testsuites.StorageClassTest, ns string, suffix string, c clientset.Interface) (*storage.StorageClass, error) {
func createStorageClass(t testsuites.StorageClassTest, ns string, suffix string, c clientset.Interface) (*storagev1.StorageClass, error) {
stKlass := newStorageClass(t, ns, suffix)

var err error
@@ -30,8 +30,8 @@ import (

v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storage "k8s.io/api/storage/v1"
storagebeta "k8s.io/api/storage/v1beta1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1059,7 +1059,7 @@ func getDefaultPluginName() string {
return ""
}

func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storage.StorageClass, zone string) {
func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storagev1.StorageClass, zone string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
@@ -1071,7 +1071,7 @@ func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *stora
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}

func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *storage.StorageClass {
func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *storagev1.StorageClass {
pluginName := t.Provisioner
if pluginName == "" {
pluginName = getDefaultPluginName()
@@ -1079,9 +1079,9 @@ func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *s
if suffix == "" {
suffix = "sc"
}
bindingMode := storage.VolumeBindingImmediate
bindingMode := storagev1.VolumeBindingImmediate
if t.DelayBinding {
bindingMode = storage.VolumeBindingWaitForFirstConsumer
bindingMode = storagev1.VolumeBindingWaitForFirstConsumer
}
sc := getStorageClass(pluginName, t.Parameters, &bindingMode, ns, suffix)
if t.AllowVolumeExpansion {
@@ -1093,15 +1093,15 @@ func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *s
func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storage.VolumeBindingMode,
bindingMode *storagev1.VolumeBindingMode,
ns string,
suffix string,
) *storage.StorageClass {
) *storagev1.StorageClass {
if bindingMode == nil {
defaultBindingMode := storage.VolumeBindingImmediate
defaultBindingMode := storagev1.VolumeBindingImmediate
bindingMode = &defaultBindingMode
}
return &storage.StorageClass{
return &storagev1.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
@@ -1116,7 +1116,7 @@ func getStorageClass(
}

// TODO: remove when storage.k8s.io/v1beta1 is removed.
func newBetaStorageClass(t testsuites.StorageClassTest, suffix string) *storagebeta.StorageClass {
func newBetaStorageClass(t testsuites.StorageClassTest, suffix string) *storagev1beta1.StorageClass {
pluginName := t.Provisioner

if pluginName == "" {
@@ -1126,7 +1126,7 @@ func newBetaStorageClass(t testsuites.StorageClassTest, suffix string) *storageb
suffix = "default"
}

return &storagebeta.StorageClass{
return &storagev1beta1.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
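The storage helpers follow the same convention: storagev1 always refers to k8s.io/api/storage/v1 and storagev1beta1 to the beta group. A hypothetical caller of the helper above could read as follows (illustrative only; the provisioner name and parameters are made up for the example):

    // Illustrative only: request a WaitForFirstConsumer class through getStorageClass,
    // written against the storagev1 alias used above.
    mode := storagev1.VolumeBindingWaitForFirstConsumer
    sc := getStorageClass("kubernetes.io/gce-pd", map[string]string{"type": "pd-ssd"}, &mode, ns, "sc")
    _ = sc
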
@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -112,11 +112,11 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
var pvcClaimList []string
nodeVolumeMap := make(map[string][]string)
// Volumes will be provisioned with each different types of Storage Class
scArrays := make([]*storageV1.StorageClass, len(scNames))
scArrays := make([]*storagev1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
ginkgo.By(fmt.Sprintf("Creating Storage Class : %q", scname))
var sc *storageV1.StorageClass
var sc *storagev1.StorageClass
scParams := make(map[string]string)
var err error
switch scname {
@@ -181,7 +181,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
}

// VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer ginkgo.GinkgoRecover()
nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0
@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -77,11 +77,11 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
})

ginkgo.It("vsphere stress tests", func() {
scArrays := make([]*storageV1.StorageClass, len(scNames))
scArrays := make([]*storagev1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname))
var sc *storageV1.StorageClass
var sc *storagev1.StorageClass
var err error
switch scname {
case storageclass1:
@@ -121,7 +121,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
})

// goroutine to perform volume lifecycle operations in parallel
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) {
defer wg.Done()
defer ginkgo.GinkgoRecover()

@@ -33,7 +33,7 @@ import (
"k8s.io/klog"

v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
@@ -237,10 +237,10 @@ func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolu
e2elog.Logf("Successfully verified content of the volume")
}

func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string) *storage.StorageClass {
var sc *storage.StorageClass
func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string) *storagev1.StorageClass {
var sc *storagev1.StorageClass

sc = &storage.StorageClass{
sc = &storagev1.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
@@ -266,7 +266,7 @@ func getVSphereStorageClassSpec(name string, scParameters map[string]string, zon
return sc
}

func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storagev1.StorageClass) *v1.PersistentVolumeClaim {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",

@@ -26,7 +26,7 @@ import (
"github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types"

apps "k8s.io/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@@ -144,7 +144,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
})

// Wait until the pod failed over to a different node, or time out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *apps.Deployment, oldNode string) (string, error) {
func waitForPodToFailover(client clientset.Interface, deployment *appsv1.Deployment, oldNode string) (string, error) {
var (
err error
newNode string
@@ -179,7 +179,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *apps.Deploymen
}

// getNodeForDeployment returns node name for the Deployment
func getNodeForDeployment(client clientset.Interface, deployment *apps.Deployment) (string, error) {
func getNodeForDeployment(client clientset.Interface, deployment *appsv1.Deployment) (string, error) {
podList, err := e2edeploy.GetPodsForDeployment(client, deployment)
if err != nil {
return "", err

@@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
var (
client clientset.Interface
namespace string
storageclass *storage.StorageClass
storageclass *storagev1.StorageClass
pvclaims []*v1.PersistentVolumeClaim
persistentvolumes []*v1.PersistentVolume
err error

@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@@ -89,7 +89,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {

ginkgo.It("vcp performance tests", func() {
scList := getTestStorageClasses(client, policyName, datastoreName)
defer func(scList []*storageV1.StorageClass) {
defer func(scList []*storagev1.StorageClass) {
for _, sc := range scList {
client.StorageV1().StorageClasses().Delete(sc.Name, nil)
}
@@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
})
})

func getTestStorageClasses(client clientset.Interface, policyName, datastoreName string) []*storageV1.StorageClass {
func getTestStorageClasses(client clientset.Interface, policyName, datastoreName string) []*storagev1.StorageClass {
const (
storageclass1 = "sc-default"
storageclass2 = "sc-vsan"
@@ -121,11 +121,11 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
storageclass4 = "sc-user-specified-ds"
)
scNames := []string{storageclass1, storageclass2, storageclass3, storageclass4}
scArrays := make([]*storageV1.StorageClass, len(scNames))
scArrays := make([]*storagev1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname))
var sc *storageV1.StorageClass
var sc *storagev1.StorageClass
var err error
switch scname {
case storageclass1:
@@ -155,7 +155,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
}

// invokeVolumeLifeCyclePerformance peforms full volume life cycle management and records latency for each operation
func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumesPerPod int, volumeCount int, nodeSelectorList []*NodeSelector) (latency map[string]float64) {
func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumesPerPod int, volumeCount int, nodeSelectorList []*NodeSelector) (latency map[string]float64) {
var (
totalpvclaims [][]*v1.PersistentVolumeClaim
totalpvs [][]*v1.PersistentVolume

@@ -17,7 +17,7 @@ limitations under the License.
package upgrades

import (
api "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
@@ -30,7 +30,7 @@ import (

// AppArmorUpgradeTest tests that AppArmor profiles are enforced & usable across upgrades.
type AppArmorUpgradeTest struct {
pod *api.Pod
pod *v1.Pod
}

// Name returns the tracking name of the test.
@@ -87,7 +87,7 @@ func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Should be able to get pod")
gomega.Expect(pod.Status.Phase).To(gomega.Equal(api.PodRunning), "Pod should stay running")
gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning), "Pod should stay running")
gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
}
@@ -111,5 +111,5 @@ func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework)
}

func conditionType(condition interface{}) string {
return string(condition.(api.NodeCondition).Type)
return string(condition.(v1.NodeCondition).Type)
}

@@ -19,7 +19,7 @@ package upgrades
|
||||
import (
|
||||
"github.com/onsi/ginkgo"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -33,7 +33,7 @@ import (
|
||||
// DaemonSetUpgradeTest tests that a DaemonSet is running before and after
|
||||
// a cluster upgrade.
|
||||
type DaemonSetUpgradeTest struct {
|
||||
daemonSet *apps.DaemonSet
|
||||
daemonSet *appsv1.DaemonSet
|
||||
}
|
||||
|
||||
// Name returns the tracking name of the test.
|
||||
@@ -47,12 +47,12 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
|
||||
|
||||
ns := f.Namespace
|
||||
|
||||
t.daemonSet = &apps.DaemonSet{
|
||||
t.daemonSet = &appsv1.DaemonSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ns.Name,
|
||||
Name: daemonSetName,
|
||||
},
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Spec: appsv1.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labelSet,
|
||||
},
|
||||
|
@@ -19,7 +19,7 @@ package upgrades
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
@@ -60,7 +60,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
|
||||
rsClient := c.AppsV1().ReplicaSets(ns)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
|
||||
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, appsv1.RollingUpdateDeploymentStrategyType)
|
||||
deployment, err := deploymentClient.Create(d)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@@ -83,7 +83,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
|
||||
|
||||
// Trigger a new rollout so that we have some history.
|
||||
ginkgo.By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
@@ -159,7 +159,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
|
||||
|
||||
// Verify the upgraded deployment is active by scaling up the deployment by 1
|
||||
ginkgo.By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
|
||||
deploymentWithUpdatedReplicas, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
|
||||
deploymentWithUpdatedReplicas, err := e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *appsv1.Deployment) {
|
||||
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
@@ -17,7 +17,7 @@ limitations under the License.
|
||||
package upgrades
|
||||
|
||||
import (
|
||||
batch "k8s.io/api/batch/v1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
|
||||
// JobUpgradeTest is a test harness for batch Jobs.
|
||||
type JobUpgradeTest struct {
|
||||
job *batch.Job
|
||||
job *batchv1.Job
|
||||
namespace string
|
||||
}
|
||||
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
@@ -89,7 +89,7 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
|
||||
|
||||
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
|
||||
ginkgo.By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
|
||||
_, err = replicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) {
|
||||
_, err = replicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *appsv1.ReplicaSet) {
|
||||
*rs.Spec.Replicas = scaleNum
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
@@ -19,7 +19,7 @@ package upgrades
|
||||
import (
|
||||
"github.com/onsi/ginkgo"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/version"
|
||||
|
||||
@@ -31,7 +31,7 @@ import (
|
||||
type StatefulSetUpgradeTest struct {
|
||||
tester *framework.StatefulSetTester
|
||||
service *v1.Service
|
||||
set *apps.StatefulSet
|
||||
set *appsv1.StatefulSet
|
||||
}
|
||||
|
||||
// Name returns the tracking name of the test.
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -221,7 +221,7 @@ func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) {
|
||||
return c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
|
||||
}
|
||||
|
||||
func getKubeProxyDaemonSet(c clientset.Interface) (*apps.DaemonSetList, error) {
|
||||
func getKubeProxyDaemonSet(c clientset.Interface) (*appsv1.DaemonSetList, error) {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{clusterAddonLabelKey: kubeProxyLabelName}))
|
||||
listOpts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return c.AppsV1().DaemonSets(metav1.NamespaceSystem).List(listOpts)
|
||||
|
@@ -21,7 +21,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
@@ -46,12 +46,12 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA [Slow]", func(
|
||||
container2Name := "container2"
|
||||
container2Domain := "contoso.org"
|
||||
|
||||
pod := &corev1.Pod{
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: container1Name,
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
@@ -59,15 +59,15 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA [Slow]", func(
|
||||
{
|
||||
Name: container2Name,
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
SecurityContext: &corev1.SecurityContext{
|
||||
WindowsOptions: &corev1.WindowsSecurityContextOptions{
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
WindowsOptions: &v1.WindowsSecurityContextOptions{
|
||||
GMSACredentialSpec: generateDummyCredSpecs(container2Domain),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
SecurityContext: &corev1.PodSecurityContext{
|
||||
WindowsOptions: &corev1.WindowsSecurityContextOptions{
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
WindowsOptions: &v1.WindowsSecurityContextOptions{
|
||||
GMSACredentialSpec: generateDummyCredSpecs(podDomain),
|
||||
},
|
||||
},
|
||||
|
@@ -34,7 +34,7 @@ import (
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
|
||||
pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
|
||||
kubeletdevicepluginv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1"
|
||||
dm "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -65,9 +65,9 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
||||
It("Verifies the Kubelet device plugin functionality.", func() {
|
||||
By("Start stub device plugin")
|
||||
// fake devices for e2e test
|
||||
devs := []*pluginapi.Device{
|
||||
{ID: "Dev-1", Health: pluginapi.Healthy},
|
||||
{ID: "Dev-2", Health: pluginapi.Healthy},
|
||||
devs := []*kubeletdevicepluginv1beta1.Device{
|
||||
{ID: "Dev-1", Health: kubeletdevicepluginv1beta1.Healthy},
|
||||
{ID: "Dev-2", Health: kubeletdevicepluginv1beta1.Healthy},
|
||||
}
|
||||
|
||||
socketPath := pluginSockDir + "dp." + fmt.Sprintf("%d", time.Now().Unix())
|
||||
@@ -79,7 +79,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Register resources")
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)
|
||||
err = dp1.Register(kubeletdevicepluginv1beta1.KubeletSocket, resourceName, pluginSockDir)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for the resource exported by the stub device plugin to become available on the local node")
|
||||
@@ -142,7 +142,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
||||
err = dp1.Start()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)
|
||||
err = dp1.Register(kubeletdevicepluginv1beta1.KubeletSocket, resourceName, pluginSockDir)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ensurePodContainerRestart(f, pod1.Name, pod1.Name)
|
||||
@@ -192,7 +192,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
|
||||
err = dp1.Start()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir)
|
||||
err = dp1.Register(kubeletdevicepluginv1beta1.KubeletSocket, resourceName, pluginSockDir)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting for the resource exported by the stub device plugin to become healthy on the local node")
|
||||
@@ -302,17 +302,17 @@ func numberOfDevicesAllocatable(node *v1.Node, resourceName string) int64 {
|
||||
}
|
||||
|
||||
// stubAllocFunc will pass to stub device plugin
|
||||
func stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Device) (*pluginapi.AllocateResponse, error) {
|
||||
var responses pluginapi.AllocateResponse
|
||||
func stubAllocFunc(r *kubeletdevicepluginv1beta1.AllocateRequest, devs map[string]kubeletdevicepluginv1beta1.Device) (*kubeletdevicepluginv1beta1.AllocateResponse, error) {
|
||||
var responses kubeletdevicepluginv1beta1.AllocateResponse
|
||||
for _, req := range r.ContainerRequests {
|
||||
response := &pluginapi.ContainerAllocateResponse{}
|
||||
response := &kubeletdevicepluginv1beta1.ContainerAllocateResponse{}
|
||||
for _, requestID := range req.DevicesIDs {
|
||||
dev, ok := devs[requestID]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid allocation request with non-existing device %s", requestID)
|
||||
}
|
||||
|
||||
if dev.Health != pluginapi.Healthy {
|
||||
if dev.Health != kubeletdevicepluginv1beta1.Healthy {
|
||||
return nil, fmt.Errorf("invalid allocation request with unhealthy device: %s", requestID)
|
||||
}
|
||||
|
||||
@@ -328,7 +328,7 @@ func stubAllocFunc(r *pluginapi.AllocateRequest, devs map[string]pluginapi.Devic
|
||||
|
||||
f.Close()
|
||||
|
||||
response.Mounts = append(response.Mounts, &pluginapi.Mount{
|
||||
response.Mounts = append(response.Mounts, &kubeletdevicepluginv1beta1.Mount{
|
||||
ContainerPath: fpath,
|
||||
HostPath: fpath,
|
||||
})
|
||||
|
@@ -24,7 +24,7 @@ import (
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -46,7 +46,7 @@ import (
|
||||
const itDescription = "status and events should match expectations"
|
||||
|
||||
type expectNodeConfigStatus struct {
|
||||
lastKnownGood *apiv1.NodeConfigSource
|
||||
lastKnownGood *v1.NodeConfigSource
|
||||
err string
|
||||
// If true, expect Status.Config.Active == Status.Config.LastKnownGood,
|
||||
// otherwise expect Status.Config.Active == Status.Config.Assigned.
|
||||
@@ -55,8 +55,8 @@ type expectNodeConfigStatus struct {
|
||||
|
||||
type nodeConfigTestCase struct {
|
||||
desc string
|
||||
configSource *apiv1.NodeConfigSource
|
||||
configMap *apiv1.ConfigMap
|
||||
configSource *v1.NodeConfigSource
|
||||
configMap *v1.ConfigMap
|
||||
expectConfigStatus expectNodeConfigStatus
|
||||
expectConfig *kubeletconfig.KubeletConfiguration
|
||||
// whether to expect this substring in an error returned from the API server when updating the config source
|
||||
@@ -71,8 +71,8 @@ type nodeConfigTestCase struct {
|
||||
// This test is marked [Disruptive] because the Kubelet restarts several times during this test.
|
||||
var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:DynamicKubeletConfig][Serial][Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test")
|
||||
var beforeNode *apiv1.Node
|
||||
var beforeConfigMap *apiv1.ConfigMap
|
||||
var beforeNode *v1.Node
|
||||
var beforeConfigMap *v1.ConfigMap
|
||||
var beforeKC *kubeletconfig.KubeletConfiguration
|
||||
var localKC *kubeletconfig.KubeletConfiguration
|
||||
|
||||
@@ -145,7 +145,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// fail to parse, we insert some bogus stuff into the configMap
|
||||
failParseConfigMap := &apiv1.ConfigMap{
|
||||
failParseConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-fail-parse"},
|
||||
Data: map[string]string{
|
||||
"kubelet": "{0xdeadbeef}",
|
||||
@@ -161,17 +161,17 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
failValidateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(failValidateConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
correctSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
correctSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: correctConfigMap.Namespace,
|
||||
Name: correctConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
}}
|
||||
failParseSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
failParseSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: failParseConfigMap.Namespace,
|
||||
Name: failParseConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
}}
|
||||
failValidateSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
failValidateSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: failValidateConfigMap.Namespace,
|
||||
Name: failValidateConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -187,12 +187,12 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource has all nil subfields",
|
||||
configSource: &apiv1.NodeConfigSource{},
|
||||
configSource: &v1.NodeConfigSource{},
|
||||
apierr: "exactly one reference subfield must be non-nil",
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource.ConfigMap is missing namespace",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Name: "bar",
|
||||
KubeletConfigKey: "kubelet",
|
||||
}}, // missing Namespace
|
||||
@@ -200,7 +200,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource.ConfigMap is missing name",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: "foo",
|
||||
KubeletConfigKey: "kubelet",
|
||||
}}, // missing Name
|
||||
@@ -208,7 +208,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource.ConfigMap is missing kubeletConfigKey",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: "foo",
|
||||
Name: "bar",
|
||||
}}, // missing KubeletConfigKey
|
||||
@@ -216,7 +216,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource.ConfigMap.UID is illegally specified",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
UID: "foo",
|
||||
Name: "bar",
|
||||
Namespace: "baz",
|
||||
@@ -226,7 +226,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource.ConfigMap.ResourceVersion is illegally specified",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Name: "bar",
|
||||
Namespace: "baz",
|
||||
ResourceVersion: "1",
|
||||
@@ -236,7 +236,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource.ConfigMap has invalid namespace",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Name: "bar",
|
||||
Namespace: "../baz",
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -245,7 +245,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource.ConfigMap has invalid name",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Name: "../bar",
|
||||
Namespace: "baz",
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -254,7 +254,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
},
|
||||
{
|
||||
desc: "Node.Spec.ConfigSource.ConfigMap has invalid kubeletConfigKey",
|
||||
configSource: &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
configSource: &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Name: "bar",
|
||||
Namespace: "baz",
|
||||
KubeletConfigKey: "../qux",
|
||||
@@ -310,7 +310,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// bad config map, we insert some bogus stuff into the configMap
|
||||
badConfigMap := &apiv1.ConfigMap{
|
||||
badConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "dynamic-kubelet-config-test-bad"},
|
||||
Data: map[string]string{
|
||||
"kubelet": "{0xdeadbeef}",
|
||||
@@ -319,7 +319,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
badConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(badConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
lkgSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: lkgConfigMap.Namespace,
|
||||
Name: lkgConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -328,7 +328,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
lkgStatus.ConfigMap.UID = lkgConfigMap.UID
|
||||
lkgStatus.ConfigMap.ResourceVersion = lkgConfigMap.ResourceVersion
|
||||
|
||||
badSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
badSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: badConfigMap.Namespace,
|
||||
Name: badConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -375,7 +375,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
combinedConfigMap, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(combinedConfigMap)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
lkgSource := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
lkgSource := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: combinedConfigMap.Namespace,
|
||||
Name: combinedConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -428,7 +428,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
lkgConfigMap1, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap1)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
lkgSource1 := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
lkgSource1 := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: lkgConfigMap1.Namespace,
|
||||
Name: lkgConfigMap1.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -441,7 +441,7 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
lkgConfigMap2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(lkgConfigMap2)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
lkgSource2 := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
lkgSource2 := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: lkgConfigMap2.Namespace,
|
||||
Name: lkgConfigMap2.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -500,13 +500,13 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
cm2, err = f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cm2)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
cm1Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
cm1Source := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: cm1.Namespace,
|
||||
Name: cm1.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
}}
|
||||
|
||||
cm2Source := &apiv1.NodeConfigSource{ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
cm2Source := &v1.NodeConfigSource{ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: cm2.Namespace,
|
||||
Name: cm2.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -563,8 +563,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
|
||||
// ensure node config source is set to the config map we will mutate in-place,
|
||||
// since updateConfigMapFunc doesn't mutate Node.Spec.ConfigSource
|
||||
source := &apiv1.NodeConfigSource{
|
||||
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
source := &v1.NodeConfigSource{
|
||||
ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: correctConfigMap.Namespace,
|
||||
Name: correctConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -635,8 +635,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
"kubelet": "{0xdeadbeef}",
|
||||
}
|
||||
// ensure node config source is set to the config map we will mutate in-place
|
||||
source := &apiv1.NodeConfigSource{
|
||||
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
source := &v1.NodeConfigSource{
|
||||
ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: lkgConfigMap.Namespace,
|
||||
Name: lkgConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -722,8 +722,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
|
||||
// ensure node config source is set to the config map we will mutate in-place,
|
||||
// since recreateConfigMapFunc doesn't mutate Node.Spec.ConfigSource
|
||||
source := &apiv1.NodeConfigSource{
|
||||
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
source := &v1.NodeConfigSource{
|
||||
ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: correctConfigMap.Namespace,
|
||||
Name: correctConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -790,8 +790,8 @@ var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:Dynam
|
||||
|
||||
// ensure node config source is set to the config map we will mutate in-place,
|
||||
// since our mutation functions don't mutate Node.Spec.ConfigSource
|
||||
source := &apiv1.NodeConfigSource{
|
||||
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
|
||||
source := &v1.NodeConfigSource{
|
||||
ConfigMap: &v1.ConfigMapNodeConfigSource{
|
||||
Namespace: correctConfigMap.Namespace,
|
||||
Name: correctConfigMap.Name,
|
||||
KubeletConfigKey: "kubelet",
|
||||
@@ -984,7 +984,7 @@ func (tc *nodeConfigTestCase) checkConfigStatus(f *framework.Framework) {
|
||||
}, timeout, interval).Should(BeNil())
|
||||
}
|
||||
|
||||
func expectConfigStatus(tc *nodeConfigTestCase, actual *apiv1.NodeConfigStatus) error {
|
||||
func expectConfigStatus(tc *nodeConfigTestCase, actual *v1.NodeConfigStatus) error {
|
||||
var errs []string
|
||||
if actual == nil {
|
||||
return fmt.Errorf("expectConfigStatus requires actual to be non-nil (possible Kubelet failed to update status)")
|
||||
@@ -1052,7 +1052,7 @@ func (tc *nodeConfigTestCase) checkEvent(f *framework.Framework) {
|
||||
return fmt.Errorf("checkEvent: case %s: %v", tc.desc, err)
|
||||
}
|
||||
// find config changed event with most recent timestamp
|
||||
var recent *apiv1.Event
|
||||
var recent *v1.Event
|
||||
for i := range events.Items {
|
||||
if events.Items[i].Reason == controller.KubeletConfigChangedEventReason {
|
||||
if recent == nil {
|
||||
@@ -1110,7 +1110,7 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
|
||||
}
|
||||
}
|
||||
// remote config helper
|
||||
mkRemoteSample := func(name model.LabelValue, source *apiv1.NodeConfigSource) *model.Sample {
|
||||
mkRemoteSample := func(name model.LabelValue, source *v1.NodeConfigSource) *model.Sample {
|
||||
return &model.Sample{
|
||||
Metric: model.Metric(map[model.LabelName]model.LabelValue{
|
||||
model.MetricNameLabel: name,
|
||||
@@ -1192,6 +1192,6 @@ func (tc *nodeConfigTestCase) checkConfigMetrics(f *framework.Framework) {
|
||||
}
|
||||
|
||||
// constructs the expected SelfLink for a config map
|
||||
func configMapAPIPath(cm *apiv1.ConfigMap) string {
|
||||
func configMapAPIPath(cm *v1.ConfigMap) string {
|
||||
return fmt.Sprintf("/api/v1/namespaces/%s/configmaps/%s", cm.Namespace, cm.Name)
|
||||
}
|
||||
|
@@ -24,7 +24,7 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
schedulerapi "k8s.io/api/scheduling/v1"
|
||||
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
BeforeEach(func() {
|
||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
|
||||
})
|
||||
AfterEach(func() {
|
||||
@@ -359,7 +359,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
BeforeEach(func() {
|
||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
|
||||
})
|
||||
AfterEach(func() {
|
||||
@@ -412,7 +412,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
|
||||
initialConfig.EvictionMinimumReclaim = map[string]string{}
|
||||
})
|
||||
BeforeEach(func() {
|
||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: highPriorityClassName}, Value: highPriority})
|
||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(BeTrue())
|
||||
})
|
||||
AfterEach(func() {
|
||||
|
@@ -23,7 +23,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
@@ -40,7 +40,7 @@ import (
|
||||
)
|
||||
|
||||
// makePodToVerifyHugePages returns a pod that verifies specified cgroup with hugetlb
|
||||
func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity) *apiv1.Pod {
|
||||
func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity) *v1.Pod {
|
||||
// convert the cgroup name to its literal form
|
||||
cgroupFsName := ""
|
||||
cgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, baseName)
|
||||
@@ -53,18 +53,18 @@ func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity)
|
||||
// this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes
|
||||
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName)
|
||||
e2elog.Logf("Pod to run command: %v", command)
|
||||
pod := &apiv1.Pod{
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
RestartPolicy: apiv1.RestartPolicyNever,
|
||||
Containers: []apiv1.Container{
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Command: []string{"sh", "-c", command},
|
||||
VolumeMounts: []apiv1.VolumeMount{
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
MountPath: "/tmp",
|
||||
@@ -72,11 +72,11 @@ func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity)
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []apiv1.Volume{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
VolumeSource: apiv1.VolumeSource{
|
||||
HostPath: &apiv1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -152,8 +152,8 @@ func pollResourceAsString(f *framework.Framework, resourceName string) string {
|
||||
}
|
||||
|
||||
// amountOfResourceAsString returns the amount of resourceName advertised by a node
|
||||
func amountOfResourceAsString(node *apiv1.Node, resourceName string) string {
|
||||
val, ok := node.Status.Capacity[apiv1.ResourceName(resourceName)]
|
||||
func amountOfResourceAsString(node *v1.Node, resourceName string) string {
|
||||
val, ok := node.Status.Capacity[v1.ResourceName(resourceName)]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
@@ -163,21 +163,21 @@ func amountOfResourceAsString(node *apiv1.Node, resourceName string) string {
|
||||
func runHugePagesTests(f *framework.Framework) {
|
||||
It("should assign hugepages as expected based on the Pod spec", func() {
|
||||
By("by running a G pod that requests hugepages")
|
||||
pod := f.PodClient().Create(&apiv1.Pod{
|
||||
pod := f.PodClient().Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: apiv1.ResourceRequirements{
|
||||
Limits: apiv1.ResourceList{
|
||||
apiv1.ResourceName("cpu"): resource.MustParse("10m"),
|
||||
apiv1.ResourceName("memory"): resource.MustParse("100Mi"),
|
||||
apiv1.ResourceName("hugepages-2Mi"): resource.MustParse("50Mi"),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceName("cpu"): resource.MustParse("10m"),
|
||||
v1.ResourceName("memory"): resource.MustParse("100Mi"),
|
||||
v1.ResourceName("hugepages-2Mi"): resource.MustParse("50Mi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
@@ -33,8 +33,8 @@ import (
|
||||
)
|
||||
|
||||
// makeNodePerfPod returns a pod with the information provided from the workload.
|
||||
func makeNodePerfPod(w workloads.NodePerfWorkload) *corev1.Pod {
|
||||
return &corev1.Pod{
|
||||
func makeNodePerfPod(w workloads.NodePerfWorkload) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-pod", w.Name()),
|
||||
},
|
||||
@@ -62,7 +62,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
|
||||
wl workloads.NodePerfWorkload
|
||||
oldCfg *kubeletconfig.KubeletConfiguration
|
||||
newCfg *kubeletconfig.KubeletConfiguration
|
||||
pod *corev1.Pod
|
||||
pod *v1.Pod
|
||||
)
|
||||
JustBeforeEach(func() {
|
||||
err := wl.PreTestExec()
|
||||
|
@@ -21,7 +21,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
@@ -39,19 +39,19 @@ func (w npbEPWorkload) Name() string {
|
||||
return "npb-ep"
|
||||
}
|
||||
|
||||
func (w npbEPWorkload) PodSpec() corev1.PodSpec {
|
||||
var containers []corev1.Container
|
||||
ctn := corev1.Container{
|
||||
func (w npbEPWorkload) PodSpec() v1.PodSpec {
|
||||
var containers []v1.Container
|
||||
ctn := v1.Container{
|
||||
Name: fmt.Sprintf("%s-ctn", w.Name()),
|
||||
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-ep:1.0",
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("15000m"),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
},
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("15000m"),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
},
|
||||
},
|
||||
Command: []string{"/bin/sh"},
|
||||
@@ -59,8 +59,8 @@ func (w npbEPWorkload) PodSpec() corev1.PodSpec {
|
||||
}
|
||||
containers = append(containers, ctn)
|
||||
|
||||
return corev1.PodSpec{
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
return v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: containers,
|
||||
}
|
||||
}
|
||||
|
@@ -21,7 +21,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
)
|
||||
@@ -37,19 +37,19 @@ func (w npbISWorkload) Name() string {
|
||||
return "npb-is"
|
||||
}
|
||||
|
||||
func (w npbISWorkload) PodSpec() corev1.PodSpec {
|
||||
var containers []corev1.Container
|
||||
ctn := corev1.Container{
|
||||
func (w npbISWorkload) PodSpec() v1.PodSpec {
|
||||
var containers []v1.Container
|
||||
ctn := v1.Container{
|
||||
Name: fmt.Sprintf("%s-ctn", w.Name()),
|
||||
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/npb-is:1.0",
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("16000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("16000m"),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
},
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("16000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("16000m"),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("48Gi"),
|
||||
},
|
||||
},
|
||||
Command: []string{"/bin/sh"},
|
||||
@@ -57,8 +57,8 @@ func (w npbISWorkload) PodSpec() corev1.PodSpec {
|
||||
}
|
||||
containers = append(containers, ctn)
|
||||
|
||||
return corev1.PodSpec{
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
return v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: containers,
|
||||
}
|
||||
}
|
||||
|
@@ -21,7 +21,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
@@ -39,19 +39,19 @@ func (w tfWideDeepWorkload) Name() string {
|
||||
return "tensorflow-wide-deep"
|
||||
}
|
||||
|
||||
func (w tfWideDeepWorkload) PodSpec() corev1.PodSpec {
|
||||
var containers []corev1.Container
|
||||
ctn := corev1.Container{
|
||||
func (w tfWideDeepWorkload) PodSpec() v1.PodSpec {
|
||||
var containers []v1.Container
|
||||
ctn := v1.Container{
|
||||
Name: fmt.Sprintf("%s-ctn", w.Name()),
|
||||
Image: "gcr.io/kubernetes-e2e-test-images/node-perf/tf-wide-deep-amd64:1.0",
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("16Gi"),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("15000m"),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("16Gi"),
|
||||
},
|
||||
Limits: corev1.ResourceList{
|
||||
corev1.ResourceName(corev1.ResourceCPU): resource.MustParse("15000m"),
|
||||
corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("16Gi"),
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("15000m"),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
Command: []string{"/bin/sh"},
|
||||
@@ -59,8 +59,8 @@ func (w tfWideDeepWorkload) PodSpec() corev1.PodSpec {
|
||||
}
|
||||
containers = append(containers, ctn)
|
||||
|
||||
return corev1.PodSpec{
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
return v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: containers,
|
||||
}
|
||||
}
|
||||
|
@@ -19,7 +19,7 @@ package workloads
|
||||
import (
|
||||
"time"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
)
|
||||
|
||||
@@ -29,7 +29,7 @@ type NodePerfWorkload interface {
|
||||
// Name of the workload.
|
||||
Name() string
|
||||
// PodSpec used to run this workload.
|
||||
PodSpec() corev1.PodSpec
|
||||
PodSpec() v1.PodSpec
|
||||
// Timeout provides the expected time to completion
|
||||
// for this workload.
|
||||
Timeout() time.Duration
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
@@ -37,7 +37,7 @@ import (
|
||||
)
|
||||
|
||||
// makePodToVerifyPids returns a pod that verifies specified cgroup with pids
|
||||
func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *apiv1.Pod {
|
||||
func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod {
|
||||
// convert the cgroup name to its literal form
|
||||
cgroupFsName := ""
|
||||
cgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup, baseName)
|
||||
@@ -50,18 +50,18 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *apiv1.Po
|
||||
// this command takes the expected value and compares it against the actual value for the pod cgroup pids.max
|
||||
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/pids/%v/pids.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", pidsLimit.Value(), cgroupFsName)
|
||||
e2elog.Logf("Pod to run command: %v", command)
|
||||
pod := &apiv1.Pod{
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
RestartPolicy: apiv1.RestartPolicyNever,
|
||||
Containers: []apiv1.Container{
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Command: []string{"sh", "-c", command},
|
||||
VolumeMounts: []apiv1.VolumeMount{
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
MountPath: "/tmp",
|
||||
@@ -69,11 +69,11 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *apiv1.Po
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []apiv1.Volume{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "sysfscgroup",
|
||||
VolumeSource: apiv1.VolumeSource{
|
||||
HostPath: &apiv1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -107,20 +107,20 @@ func enablePodPidsLimitInKubelet(f *framework.Framework) *kubeletconfig.KubeletC
|
||||
func runPodPidsLimitTests(f *framework.Framework) {
|
||||
It("should set pids.max for Pod", func() {
|
||||
By("by creating a G pod")
|
||||
pod := f.PodClient().Create(&apiv1.Pod{
|
||||
pod := f.PodClient().Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod" + string(uuid.NewUUID()),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Name: "container" + string(uuid.NewUUID()),
|
||||
Resources: apiv1.ResourceRequirements{
|
||||
Limits: apiv1.ResourceList{
|
||||
apiv1.ResourceName("cpu"): resource.MustParse("10m"),
|
||||
apiv1.ResourceName("memory"): resource.MustParse("100Mi"),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceName("cpu"): resource.MustParse("10m"),
|
||||
v1.ResourceName("memory"): resource.MustParse("100Mi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@@ -21,7 +21,7 @@ import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1"
|
||||
kubeletresourcemetricsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
@@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
|
||||
})
|
||||
|
||||
func getV1alpha1ResourceMetrics() (metrics.KubeletMetrics, error) {
|
||||
return metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+v1alpha1.Version)
|
||||
return metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics/resource/"+kubeletresourcemetricsv1alpha1.Version)
|
||||
}
|
||||
|
||||
func nodeId(element interface{}) string {
|
||||
|
@@ -30,7 +30,7 @@ import (
"golang.org/x/net/context"
"k8s.io/klog"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
@@ -42,7 +42,7 @@ import (
"k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
podresourcesapi "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
@@ -103,7 +103,7 @@ func getNodeSummary() (*kubeletstatsv1alpha1.Summary, error) {
return &summary, nil
}
func getNodeDevices() (*podresourcesapi.ListPodResourcesResponse, error) {
func getNodeDevices() (*kubeletpodresourcesv1alpha1.ListPodResourcesResponse, error) {
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
if err != nil {
return nil, fmt.Errorf("Error getting local endpoint: %v", err)
@@ -115,7 +115,7 @@ func getNodeDevices() (*podresourcesapi.ListPodResourcesResponse, error) {
defer conn.Close()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
resp, err := client.List(ctx, &podresourcesapi.ListPodResourcesRequest{})
resp, err := client.List(ctx, &kubeletpodresourcesv1alpha1.ListPodResourcesRequest{})
if err != nil {
return nil, fmt.Errorf("%v.Get(_) = _, %v", client, err)
}
@@ -200,8 +200,8 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
}
// create the reference and set Node.Spec.ConfigSource
src := &apiv1.NodeConfigSource{
ConfigMap: &apiv1.ConfigMapNodeConfigSource{
src := &v1.NodeConfigSource{
ConfigMap: &v1.ConfigMapNodeConfigSource{
Namespace: "kube-system",
Name: cm.Name,
KubeletConfigKey: "kubelet",
@@ -233,7 +233,7 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube
}
// sets the current node's configSource, this should only be called from Serial tests
func setNodeConfigSource(f *framework.Framework, source *apiv1.NodeConfigSource) error {
func setNodeConfigSource(f *framework.Framework, source *v1.NodeConfigSource) error {
// since this is a serial test, we just get the node, change the source, and then update it
// this prevents any issues with the patch API from affecting the test results
nodeclient := f.ClientSet.CoreV1().Nodes()
@@ -310,7 +310,7 @@ func decodeConfigz(resp *http.Response) (*kubeletconfig.KubeletConfiguration, er
}
// creates a configmap containing kubeCfg in kube-system namespace
func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*apiv1.ConfigMap, error) {
func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletConfiguration) (*v1.ConfigMap, error) {
cmap := newKubeletConfigMap("testcfg", internalKC)
cmap, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Create(cmap)
if err != nil {
@@ -320,11 +320,11 @@ func createConfigMap(f *framework.Framework, internalKC *kubeletconfig.KubeletCo
}
// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Always uses GenerateName to generate a suffix.
func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *apiv1.ConfigMap {
func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *v1.ConfigMap {
data, err := kubeletconfigcodec.EncodeKubeletConfig(internalKC, kubeletconfigv1beta1.SchemeGroupVersion)
framework.ExpectNoError(err)
cmap := &apiv1.ConfigMap{
cmap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{GenerateName: name + "-"},
Data: map[string]string{
"kubelet": string(data),
@@ -345,7 +345,7 @@ func logNodeEvents(f *framework.Framework) {
framework.ExpectNoError(err)
}
func getLocalNode(f *framework.Framework) *apiv1.Node {
func getLocalNode(f *framework.Framework) *v1.Node {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
return &nodeList.Items[0]