Merge branch 'master' of https://github.com/kubernetes/kubernetes into fix-golint-test/e2e/storage/utils
@@ -40,6 +40,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
// ensure libs have a chance to initialize
_ "github.com/stretchr/testify/assert"
)
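Most of this commit replaces dot imports of ginkgo and gomega with qualified imports, which is what golint asks for. For reference, a minimal sketch of the target style, assuming the standard ginkgo v1 and gomega packages (illustrative only, not code from this commit):

package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// A spec written with qualified identifiers; the package prefix stays visible
// at every call site instead of being hidden by a dot import.
var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("reads the same but keeps the ginkgo./gomega. prefixes", func() {
		ginkgo.By("using ginkgo.By instead of a dot-imported By")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})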
@@ -107,6 +108,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Feature:CustomResourceWebh
ginkgo.It("Should be able to convert from CR v1 to CR v2", func() {
testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com", func(crd *v1beta1.CustomResourceDefinition) {
crd.Spec.Versions = apiVersions
crd.Spec.Conversion = &v1beta1.CustomResourceConversion{
Strategy: v1beta1.WebhookConverter,
WebhookClientConfig: &v1beta1.WebhookClientConfig{
@@ -129,6 +131,7 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Feature:CustomResourceWebh
ginkgo.It("Should be able to convert a non homogeneous list of CRs", func() {
testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com", func(crd *v1beta1.CustomResourceDefinition) {
crd.Spec.Versions = apiVersions
crd.Spec.Conversion = &v1beta1.CustomResourceConversion{
Strategy: v1beta1.WebhookConverter,
WebhookClientConfig: &v1beta1.WebhookClientConfig{
@@ -384,6 +387,8 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
// After changing a CRD, the resources for versions will be re-created that can be result in
// cancelled connection (e.g. "grpc connection closed" or "context canceled").
// Just retrying fixes that.
//
// TODO: we have to wait for the storage version to become effective. Storage version changes are not instant.
for i := 0; i < 5; i++ {
_, err = customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
if err == nil {
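The comment above is the reason for the new retry loop: right after a CRD is updated, the per-version storage is re-created and a Create call can fail with a transient "context canceled" or "grpc connection closed" error. A small, self-contained sketch of that retry pattern (hypothetical helper, not part of the commit):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryOnTransient retries fn a fixed number of times, sleeping between
// attempts, and returns the last error if it never succeeds.
func retryOnTransient(attempts int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("still failing after %d attempts: %w", attempts, err)
}

func main() {
	calls := 0
	// Simulate a create call that fails twice with a transient error and then succeeds.
	err := retryOnTransient(5, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("context canceled")
		}
		return nil
	})
	fmt.Println("err:", err, "calls:", calls)
}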
@@ -32,6 +32,7 @@ import (
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/apiserver/validation"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
@@ -48,7 +49,7 @@ var (
metaPattern = `"kind":"%s","apiVersion":"%s/%s","metadata":{"name":"%s"}`
)
var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublishOpenAPI]", func() {
var _ = SIGDescribe("CustomResourcePublishOpenAPI", func() {
f := framework.NewDefaultFramework("crd-publish-openapi")
ginkgo.BeforeEach(func() {
@@ -309,6 +310,10 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Feature:CustomResourcePublish
}
ginkgo.By("mark a version not serverd")
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(crd.Crd.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("%v", err)
}
crd.Crd.Spec.Versions[1].Served = false
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd.Crd)
if err != nil {
@@ -336,28 +341,34 @@ func setupCRD(f *framework.Framework, schema []byte, groupSuffix string, version
return nil, fmt.Errorf("require at least one version for CRD")
}
if schema == nil {
schema = []byte(`type: object`)
}
expect := schema
props := &v1beta1.JSONSchemaProps{}
if err := yaml.Unmarshal(schema, props); err != nil {
return nil, err
if schema == nil {
// to be backwards compatible, we expect CRD controller to treat
// CRD with nil schema specially and publish an empty schema
expect = []byte(`type: object`)
} else {
if err := yaml.Unmarshal(schema, props); err != nil {
return nil, err
}
}
crd, err := crd.CreateMultiVersionTestCRD(f, group, func(crd *v1beta1.CustomResourceDefinition) {
apiVersions := []v1beta1.CustomResourceDefinitionVersion{}
for _, version := range versions {
v := v1beta1.CustomResourceDefinitionVersion{
var apiVersions []v1beta1.CustomResourceDefinitionVersion
for i, version := range versions {
apiVersions = append(apiVersions, v1beta1.CustomResourceDefinitionVersion{
Name: version,
Served: true,
Storage: false,
}
apiVersions = append(apiVersions, v)
Storage: i == 0,
})
}
apiVersions[0].Storage = true
crd.Spec.Versions = apiVersions
crd.Spec.Validation = &v1beta1.CustomResourceValidation{
OpenAPIV3Schema: props,
// set up validation when input schema isn't nil
if schema != nil {
crd.Spec.Validation = &v1beta1.CustomResourceValidation{
OpenAPIV3Schema: props,
}
}
})
if err != nil {
@@ -365,7 +376,7 @@ func setupCRD(f *framework.Framework, schema []byte, groupSuffix string, version
}
for _, v := range crd.Crd.Spec.Versions {
if err := waitForDefinition(f.ClientSet, definitionName(crd, v.Name), schema); err != nil {
if err := waitForDefinition(f.ClientSet, definitionName(crd, v.Name), expect); err != nil {
return nil, fmt.Errorf("%v", err)
}
}
@@ -580,9 +591,13 @@ properties:
properties:
dummy:
description: Dummy property.
type: object
status:
description: Status of Waldo
type: object
properties:
bars:
description: List of Bars and their statuses.`)
description: List of Bars and their statuses.
type: array
items:
type: object`)
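The setupCRD changes above build the version list so that every requested version is served and only the first one is the storage version (Storage: i == 0), and they skip the validation wiring when no schema is passed. A toy sketch of the same construction using local stand-in types (the real code uses v1beta1.CustomResourceDefinitionVersion):

package main

import "fmt"

// crdVersion is a stand-in for v1beta1.CustomResourceDefinitionVersion.
type crdVersion struct {
	Name    string
	Served  bool
	Storage bool
}

// buildVersions marks every version as served and only the first as storage,
// mirroring the `Storage: i == 0` pattern in the diff above.
func buildVersions(names []string) []crdVersion {
	var out []crdVersion
	for i, name := range names {
		out = append(out, crdVersion{Name: name, Served: true, Storage: i == 0})
	}
	return out
}

func main() {
	for _, v := range buildVersions([]string{"v2", "v3", "v4"}) {
		fmt.Printf("%+v\n", v)
	}
}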
@@ -25,15 +25,15 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
f := framework.NewDefaultFramework("autoscaling")
SIGDescribe("Autoscaling a service", func() {
BeforeEach(func() {
ginkgo.BeforeEach(func() {
// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
@@ -41,12 +41,12 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
}
})
Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
ginkgo.Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
const nodesNum = 3 // Expect there to be 3 nodes before and after the test.
var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test.
var nodes *v1.NodeList // Set by BeforeEach, used by Measure to calculate CPU request based on node's sizes.
BeforeEach(func() {
ginkgo.BeforeEach(func() {
// Make sure there is only 1 node group, otherwise this test becomes useless.
nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",")
if len(nodeGroups) != 1 {
@@ -64,10 +64,10 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
schedulableCount := len(nodes.Items)
Expect(schedulableCount).To(Equal(nodeGroupSize), "not all nodes are schedulable")
gomega.Expect(schedulableCount).To(gomega.Equal(nodeGroupSize), "not all nodes are schedulable")
})
AfterEach(func() {
ginkgo.AfterEach(func() {
// Attempt cleanup only if a node group was targeted for scale up.
// Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters.
if len(nodeGroupName) > 0 {
@@ -77,7 +77,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
}
})
Measure("takes less than 15 minutes", func(b Benchmarker) {
ginkgo.Measure("takes less than 15 minutes", func(b ginkgo.Benchmarker) {
// Measured over multiple samples, scaling takes 10 +/- 2 minutes, so 15 minutes should be fully sufficient.
const timeToWait = 15 * time.Minute
@@ -85,8 +85,8 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
// This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
// Make it so that 'nodesNum' pods fit perfectly per node.
nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
nodeCpuMillis := (&nodeCpus).MilliValue()
cpuRequestMillis := int64(nodeCpuMillis / nodesNum)
nodeCPUMillis := (&nodeCpus).MilliValue()
cpuRequestMillis := int64(nodeCPUMillis / nodesNum)
// Start the service we want to scale and wait for it to be up and running.
nodeMemoryBytes := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
@@ -99,10 +99,10 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
// Enable Horizontal Pod Autoscaler with 50% target utilization and
// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
targetCpuUtilizationPercent := int32(50)
hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCpuUtilizationPercent, 1, 10)
targetCPUUtilizationPercent := int32(50)
hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
defer common.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
cpuLoad := 8 * cpuRequestMillis * int64(targetCpuUtilizationPercent) / 100 // 8 pods utilized to the target level
cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
resourceConsumer.ConsumeCPU(int(cpuLoad))
// Measure the time it takes for the service to scale to 8 pods with 50% CPU utilization each.
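The load calculation in the hunk above sizes 8 pods so that 'nodesNum' of them fill one node, then drives CPU to 50% of the total request. A worked example with an assumed 4-core (4000m) node, for illustration only:

package main

import "fmt"

func main() {
	const nodesNum = 3
	nodeCPUMillis := int64(4000) // assumed node size, not taken from the test
	cpuRequestMillis := nodeCPUMillis / nodesNum
	targetCPUUtilizationPercent := int64(50)
	// 8 pods, each utilized to the target level.
	cpuLoad := 8 * cpuRequestMillis * targetCPUUtilizationPercent / 100
	fmt.Printf("per-pod request: %dm, CPU to consume: %dm\n", cpuRequestMillis, cpuLoad)
}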
@@ -33,8 +33,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/klog"
)
@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
var originalSizes map[string]int
var sum int
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke", "kubemark")
// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
@@ -81,7 +81,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
ginkgo.By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
@@ -91,13 +91,13 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
memCapacityMb = int((&mem).Value() / 1024 / 1024)
Expect(nodeCount).Should(Equal(sum))
gomega.Expect(nodeCount).Should(gomega.Equal(sum))
if framework.ProviderIs("gke") {
val, err := isAutoscalerEnabled(3)
@@ -109,8 +109,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
})
AfterEach(func() {
By(fmt.Sprintf("Restoring initial size of the cluster"))
ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
ginkgo.It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.95)
replicasPerNode := 10
@@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer testCleanup()
})
It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
ginkgo.It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.95)
replicasPerNode := 10
additionalNodes1 := int(math.Ceil(0.7 * maxNodes))
@@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
klog.Infof("Scaled up twice")
})
It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
ginkgo.It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
perNodeReservation := int(float64(memCapacityMb) * 0.7)
replicas := int(math.Ceil(maxNodes * 0.7))
totalNodes := maxNodes
@@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}, scaleDownTimeout))
})
It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
ginkgo.It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
perPodReservation := int(float64(memCapacityMb) * 0.01)
// underutilizedNodes are 10% full
underutilizedPerNodeReplicas := 10
@@ -291,7 +291,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}, timeout))
})
It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
ginkgo.It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
fullReservation := int(float64(memCapacityMb) * 0.9)
hostPortPodReservation := int(float64(memCapacityMb) * 0.3)
totalNodes := maxNodes
@@ -307,28 +307,28 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
fullNodesCount := divider
underutilizedNodesCount := totalNodes - fullNodesCount
By("Reserving full nodes")
ginkgo.By("Reserving full nodes")
// run RC1 w/o host port
cleanup := ReserveMemory(f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2)
defer cleanup()
By("Reserving host ports on remaining nodes")
ginkgo.By("Reserving host ports on remaining nodes")
// run RC2 w/ host port
cleanup2 := createHostPortPodsWithMemory(f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
defer cleanup2()
waitForAllCaPodsReadyInNamespace(f, c)
// wait and check scale down doesn't occur
By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
ginkgo.By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
time.Sleep(scaleDownTimeout)
By("Checking if the number of nodes is as expected")
ginkgo.By("Checking if the number of nodes is as expected")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
Expect(len(nodes.Items)).Should(Equal(totalNodes))
gomega.Expect(len(nodes.Items)).Should(gomega.Equal(totalNodes))
})
Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
ginkgo.Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
// Start a number of pods saturating existing nodes.
perNodeReservation := int(float64(memCapacityMb) * 0.80)
replicasPerNode := 10
@@ -348,7 +348,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
// Ensure that no new nodes have been added so far.
Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount))
gomega.Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(gomega.Equal(nodeCount))
// Start a number of schedulable pods to ensure CA reacts.
additionalNodes := maxNodes - nodeCount
@@ -375,7 +375,7 @@ func anyKey(input map[string]int) string {
func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error {
// resize cluster to start size
// run rc based on config
By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
start := time.Now()
framework.ExpectNoError(framework.RunRC(*config.extraPods))
// check results
@@ -461,7 +461,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e
}
func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error {
By(fmt.Sprintf("Running RC which reserves host port and memory"))
ginkgo.By(fmt.Sprintf("Running RC which reserves host port and memory"))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
@@ -48,8 +48,8 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/klog"
)
@@ -94,7 +94,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
var memAllocatableMb int
var originalSizes map[string]int
BeforeEach(func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
ginkgo.By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
@@ -117,12 +117,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
quantity := node.Status.Allocatable[v1.ResourceCPU]
coreCount += quantity.Value()
}
By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
Expect(nodeCount).NotTo(BeZero())
ginkgo.By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
memAllocatableMb = int((&mem).Value() / 1024 / 1024)
Expect(nodeCount).Should(Equal(sum))
gomega.Expect(nodeCount).Should(gomega.Equal(sum))
if framework.ProviderIs("gke") {
val, err := isAutoscalerEnabled(5)
@@ -134,9 +134,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
})
AfterEach(func() {
ginkgo.AfterEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
expectedNodes := 0
for _, size := range originalSizes {
@@ -163,29 +163,29 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("Creating unschedulable pod")
ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
By("Waiting for scale up hoping it won't happen")
ginkgo.By("Waiting for scale up hoping it won't happen")
// Verify that the appropriate event was generated
eventFound := false
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
By("Waiting for NotTriggerScaleUp event")
ginkgo.By("Waiting for NotTriggerScaleUp event")
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
By("NotTriggerScaleUp event found")
ginkgo.By("NotTriggerScaleUp event found")
eventFound = true
break EventsLoop
}
}
}
Expect(eventFound).Should(Equal(true))
gomega.Expect(eventFound).Should(gomega.Equal(true))
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size <= nodeCount }, time.Second))
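The test above polls namespace events and uses a labelled break (EventsLoop) to leave both loops as soon as a NotTriggerScaleUp event shows up. A self-contained sketch of that control flow, with a simulated event source standing in for the Events API:

package main

import (
	"fmt"
	"time"
)

// fetchEventReasons stands in for listing Events from the API server.
func fetchEventReasons() []string {
	return []string{"Scheduled", "NotTriggerScaleUp"}
}

func main() {
	eventFound := false
	deadline := time.Now().Add(200 * time.Millisecond)
EventsLoop:
	for time.Now().Before(deadline) {
		for _, reason := range fetchEventReasons() {
			if reason == "NotTriggerScaleUp" {
				eventFound = true
				break EventsLoop // leaves both loops at once
			}
		}
		time.Sleep(20 * time.Millisecond)
	}
	fmt.Println("event found:", eventFound)
}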
@@ -201,12 +201,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
}
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
ginkgo.It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
func() { simpleScaleUpTest(0) })
gpuType := os.Getenv("TESTED_GPU_TYPE")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
@@ -219,21 +219,21 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
By("Schedule a pod which requires GPU")
ginkgo.By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
})
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
@@ -246,24 +246,24 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
ginkgo.By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
By("Scale GPU deployment")
ginkgo.By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(2))
})
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
@@ -276,12 +276,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
@@ -289,10 +289,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
})
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
@@ -305,29 +305,29 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
ginkgo.By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
ginkgo.By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
By("Remove the only POD requiring GPU")
ginkgo.By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
})
It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
func() {
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
})
It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// Wait for the situation to stabilize - CA should be running and have up-to-date node readiness info.
status, err := waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.ready == s.target && s.ready <= nodeCount
@@ -336,7 +336,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
unmanagedNodes := nodeCount - status.ready
By("Schedule more pods than can fit and wait for cluster to scale-up")
ginkgo.By("Schedule more pods than can fit and wait for cluster to scale-up")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
@@ -347,7 +347,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
target := status.target
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("Expect no more scale-up to be happening after all pods are scheduled")
ginkgo.By("Expect no more scale-up to be happening after all pods are scheduled")
// wait for a while until scale-up finishes; we cannot read CA status immediately
// after pods are scheduled as status config map is updated by CA once every loop iteration
@@ -359,16 +359,16 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
if status.target != target {
klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
}
Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(Equal(false))
Expect(status.status).Should(Equal(caNoScaleUpStatus))
Expect(status.ready).Should(Equal(status.target))
Expect(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)).Should(Equal(status.target + unmanagedNodes))
gomega.Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(gomega.Equal(false))
gomega.Expect(status.status).Should(gomega.Equal(caNoScaleUpStatus))
gomega.Expect(status.ready).Should(gomega.Equal(status.target))
gomega.Expect(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)).Should(gomega.Equal(status.target + unmanagedNodes))
})
It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with n1-standard-4 machines")
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
@@ -379,16 +379,16 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
By("Getting memory available on new nodes, so we can account for it when creating RC")
ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC")
nodes := getPoolNodes(f, extraPoolName)
Expect(len(nodes)).Should(Equal(extraNodes))
gomega.Expect(len(nodes)).Should(gomega.Equal(extraNodes))
extraMemMb := 0
for _, node := range nodes {
mem := node.Status.Allocatable[v1.ResourceMemory]
extraMemMb += int((&mem).Value() / 1024 / 1024)
}
By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
ginkgo.By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
@@ -399,10 +399,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with n1-standard-4 machines")
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
@@ -412,7 +412,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
@@ -421,18 +421,18 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() {
pods := nodeCount
newPods := 2
labels := map[string]string{
"anti-affinity": "yes",
}
By("starting a pod with anti-affinity on each node")
ginkgo.By("starting a pod with anti-affinity on each node")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("scheduling extra pods with anti-affinity to existing ones")
ginkgo.By("scheduling extra pods with anti-affinity to existing ones")
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
@@ -440,8 +440,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("creating pods")
ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.By("creating pods")
pods := nodeCount
newPods := 1
labels := map[string]string{
@@ -450,10 +450,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
By("waiting for all pods before triggering scale up")
ginkgo.By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("creating a pod requesting EmptyDir")
ginkgo.By("creating a pod requesting EmptyDir")
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
@@ -461,7 +461,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gce", "gke")
volumeLabels := labels.Set{
@@ -469,7 +469,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
selector := metav1.SetAsLabelSelector(volumeLabels)
By("creating volume & pvc")
ginkgo.By("creating volume & pvc")
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err)
pvConfig := framework.PersistentVolumeConfig{
@@ -505,7 +505,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
}()
By("creating pods")
ginkgo.By("creating pods")
pods := nodeCount
labels := map[string]string{
"anti-affinity": "yes",
@@ -516,10 +516,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
klog.Infof("RC and pods not using volume deleted")
}()
By("waiting for all pods before triggering scale up")
ginkgo.By("waiting for all pods before triggering scale up")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("creating a pod requesting PVC")
ginkgo.By("creating a pod requesting PVC")
pvcPodName := "pvc-pod"
newPods := 1
volumes := buildVolumes(pv, pvc)
@@ -533,11 +533,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
|
||||
|
||||
It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
labelKey := "cluster-autoscaling-test.special-node"
|
||||
labelValue := "true"
|
||||
|
||||
By("Finding the smallest MIG")
|
||||
ginkgo.By("Finding the smallest MIG")
|
||||
minMig := ""
|
||||
minSize := nodeCount
|
||||
for mig, size := range originalSizes {
|
||||
@@ -557,7 +557,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
}
|
||||
|
||||
removeLabels := func(nodesToClean sets.String) {
|
||||
By("Removing labels from nodes")
|
||||
ginkgo.By("Removing labels from nodes")
|
||||
for node := range nodesToClean {
|
||||
framework.RemoveLabelOffNode(c, node, labelKey)
|
||||
}
|
||||
@@ -567,7 +567,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(err)
|
||||
nodesSet := sets.NewString(nodes...)
|
||||
defer removeLabels(nodesSet)
|
||||
By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
|
||||
ginkgo.By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
|
||||
|
||||
for node := range nodesSet {
|
||||
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
|
||||
@@ -575,7 +575,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false)
|
||||
|
||||
By("Waiting for new node to appear and annotating it")
|
||||
ginkgo.By("Waiting for new node to appear and annotating it")
|
||||
framework.WaitForGroupSize(minMig, int32(minSize+1))
|
||||
// Verify that cluster size is increased
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
@@ -586,7 +586,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
newNodesSet := sets.NewString(newNodes...)
|
||||
newNodesSet.Delete(nodes...)
|
||||
if len(newNodesSet) > 1 {
|
||||
By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
|
||||
ginkgo.By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
|
||||
klog.Infof("Usually only 1 new node is expected, investigating")
|
||||
klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
|
||||
if output, err := exec.Command("gcloud", "compute", "instances", "list",
|
||||
@@ -612,7 +612,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
// However at this moment we DO WANT it to crash so that we don't check all test runs for the
|
||||
// rare behavior, but only the broken ones.
|
||||
}
|
||||
By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
|
||||
ginkgo.By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
|
||||
registeredNodes := sets.NewString()
|
||||
for nodeName := range newNodesSet {
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
@@ -622,7 +622,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
klog.Errorf("Failed to get node %v: %v", nodeName, err)
|
||||
}
|
||||
}
|
||||
By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
|
||||
ginkgo.By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
|
||||
for node := range registeredNodes {
|
||||
framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
|
||||
}
|
||||
@@ -633,10 +633,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
|
||||
})
|
||||
|
||||
It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
|
||||
By("Creating new node-pool with n1-standard-4 machines")
|
||||
ginkgo.By("Creating new node-pool with n1-standard-4 machines")
|
||||
const extraPoolName = "extra-pool"
|
||||
addNodePool(extraPoolName, "n1-standard-4", 1)
|
||||
defer deleteNodePool(extraPoolName)
|
||||
@@ -647,7 +647,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
extraPods := extraNodes + 1
|
||||
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
|
||||
By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
|
||||
ginkgo.By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
|
||||
|
||||
@@ -663,7 +663,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
defer cleanup()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Manually increase cluster size")
|
||||
ginkgo.By("Manually increase cluster size")
|
||||
increasedSize := 0
|
||||
newSizes := make(map[string]int)
|
||||
for key, val := range originalSizes {
|
||||
@@ -674,20 +674,20 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
|
||||
func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready))
|
||||
|
||||
By("Some node should be removed")
|
||||
ginkgo.By("Some node should be removed")
|
||||
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
|
||||
func(size int) bool { return size < increasedSize }, scaleDownTimeout, unready))
|
||||
}
|
||||
|
||||
It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]",
|
||||
ginkgo.It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]",
|
||||
func() { simpleScaleDownTest(0) })
|
||||
|
||||
It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
|
||||
ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
|
||||
func() {
|
||||
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
|
||||
})
|
||||
|
||||
It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
|
||||
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
|
||||
@@ -700,7 +700,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size >= increasedSize+extraNodes }, scaleUpTimeout))
|
||||
|
||||
By("Some node should be removed")
|
||||
ginkgo.By("Some node should be removed")
|
||||
// Apparently GKE master is restarted couple minutes after the node pool is added
|
||||
// reseting all the timers in scale down code. Adding 10 extra minutes to workaround
|
||||
// this issue.
|
||||
@@ -709,44 +709,44 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute))
|
||||
})
|
||||
|
||||
It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) {
|
||||
By("Some node should be removed")
|
||||
ginkgo.By("Some node should be removed")
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
|
||||
})
|
||||
})
|
||||
|
||||
It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
|
||||
By("No nodes should be removed")
|
||||
ginkgo.By("No nodes should be removed")
|
||||
time.Sleep(scaleDownTimeout)
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodes.Items)).Should(Equal(increasedSize))
|
||||
gomega.Expect(len(nodes.Items)).Should(gomega.Equal(increasedSize))
|
||||
})
|
||||
})
|
||||
|
||||
It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) {
|
||||
By("Some node should be removed")
|
||||
ginkgo.By("Some node should be removed")
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
|
||||
})
|
||||
})
|
||||
|
||||
It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
ginkgo.It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) {
|
||||
By("Some node should be removed")
|
||||
ginkgo.By("Some node should be removed")
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
|
||||
})
|
||||
})
|
||||
|
||||
It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
ginkgo.It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
// Provider-specific setup
|
||||
if framework.ProviderIs("gke") {
|
||||
// GKE-specific setup
|
||||
By("Add a new node pool with 0 nodes and min size 0")
|
||||
ginkgo.By("Add a new node pool with 0 nodes and min size 0")
|
||||
const extraPoolName = "extra-pool"
|
||||
addNodePool(extraPoolName, "n1-standard-4", 0)
|
||||
defer deleteNodePool(extraPoolName)
|
||||
@@ -756,7 +756,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
// on GCE, run only if there are already at least 2 node groups
|
||||
framework.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")
|
||||
|
||||
By("Manually scale smallest node group to 0")
|
||||
ginkgo.By("Manually scale smallest node group to 0")
|
||||
minMig := ""
|
||||
minSize := nodeCount
|
||||
for mig, size := range originalSizes {
|
||||
@@ -769,7 +769,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
|
||||
}
|
||||
|
||||
By("Make remaining nodes unschedulable")
|
||||
ginkgo.By("Make remaining nodes unschedulable")
|
||||
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
@@ -785,7 +785,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
By("Run a scale-up test")
|
||||
ginkgo.By("Run a scale-up test")
|
||||
ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
@@ -807,7 +807,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
// verify the targeted node pool/MIG is of size 0
|
||||
gkeScaleToZero := func() {
|
||||
// GKE-specific setup
|
||||
By("Add a new node pool with size 1 and min size 0")
|
||||
ginkgo.By("Add a new node pool with size 1 and min size 0")
|
||||
const extraPoolName = "extra-pool"
|
||||
addNodePool(extraPoolName, "n1-standard-4", 1)
|
||||
defer deleteNodePool(extraPoolName)
|
||||
@@ -817,9 +817,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
defer disableAutoscaler(extraPoolName, 0, 1)
|
||||
|
||||
ngNodes := getPoolNodes(f, extraPoolName)
|
||||
Expect(len(ngNodes)).To(Equal(extraNodes))
|
||||
gomega.Expect(len(ngNodes)).To(gomega.Equal(extraNodes))
|
||||
for _, node := range ngNodes {
|
||||
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
|
||||
}
|
||||
|
||||
for _, node := range ngNodes {
|
||||
@@ -830,12 +830,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
// GKE-specific check
|
||||
newSize := getPoolSize(f, extraPoolName)
|
||||
Expect(newSize).Should(Equal(0))
|
||||
gomega.Expect(newSize).Should(gomega.Equal(0))
|
||||
}
|
||||
|
||||
gceScaleToZero := func() {
|
||||
// non-GKE only
|
||||
By("Find smallest node group and manually scale it to a single node")
|
||||
ginkgo.By("Find smallest node group and manually scale it to a single node")
|
||||
minMig := ""
|
||||
minSize := nodeCount
|
||||
for mig, size := range originalSizes {
|
||||
@@ -848,9 +848,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
|
||||
ngNodes, err := framework.GetGroupNodes(minMig)
|
||||
framework.ExpectNoError(err)
|
||||
Expect(len(ngNodes) == 1).To(BeTrue())
|
||||
gomega.Expect(len(ngNodes) == 1).To(gomega.BeTrue())
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{})
|
||||
By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// this part is identical
|
||||
@@ -861,10 +861,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
// non-GKE only
|
||||
newSize, err := framework.GroupSize(minMig)
|
||||
framework.ExpectNoError(err)
|
||||
Expect(newSize).Should(Equal(0))
|
||||
gomega.Expect(newSize).Should(gomega.Equal(0))
|
||||
}
|
||||
|
||||
It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
if framework.ProviderIs("gke") { // In GKE, we can just add a node pool
|
||||
gkeScaleToZero()
|
||||
} else if len(originalSizes) >= 2 {
|
||||
@@ -874,7 +874,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
clusterSize := nodeCount
|
||||
for clusterSize < unhealthyClusterThreshold+1 {
|
||||
clusterSize = manuallyIncreaseClusterSize(f, originalSizes)
|
||||
@@ -893,13 +893,13 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
// making no assumptions about minimal node startup time.
|
||||
time.Sleep(2 * time.Minute)

By("Block network connectivity to some nodes to simulate unhealthy cluster")
ginkgo.By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
	"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
Expect(nodesToBreakCount <= len(nodes.Items)).To(BeTrue())
gomega.Expect(nodesToBreakCount <= len(nodes.Items)).To(gomega.BeTrue())
nodesToBreak := nodes.Items[:nodesToBreakCount]

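// Worked example for the nodesToBreakCount formula above (illustrative numbers, not values
// asserted by this test): if unhealthyClusterThreshold were 4 and clusterSize were 6, then
// nodesToBreakCount = ceil(max(4, 0.5*6)) = ceil(4) = 4, i.e. connectivity is cut to whichever
// is larger: the unhealthy-cluster threshold or half of the current cluster.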
// TestUnderTemporaryNetworkFailure only removes connectivity to a single node,
|
||||
@@ -917,11 +917,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
time.Sleep(scaleUpTimeout)
|
||||
currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
|
||||
Expect(len(currentNodes.Items)).Should(Equal(len(nodes.Items) - nodesToBreakCount))
|
||||
gomega.Expect(len(currentNodes.Items)).Should(gomega.Equal(len(nodes.Items) - nodesToBreakCount))
|
||||
status, err := getClusterwideStatus(c)
|
||||
e2elog.Logf("Clusterwide status: %v", status)
|
||||
framework.ExpectNoError(err)
|
||||
Expect(status).Should(Equal("Unhealthy"))
|
||||
gomega.Expect(status).Should(gomega.Equal("Unhealthy"))
|
||||
}
|
||||
}
|
||||
testFunction()
|
||||
@@ -929,19 +929,19 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
|
||||
})
|
||||
|
||||
It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
defer createPriorityClasses(f)()
|
||||
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
|
||||
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
|
||||
defer cleanupFunc()
|
||||
By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
|
||||
ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
|
||||
time.Sleep(scaleUpTimeout)
|
||||
// Verify that cluster size is not changed
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount }, time.Second))
|
||||
})
|
||||
|
||||
It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
defer createPriorityClasses(f)()
|
||||
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
|
||||
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
|
||||
@@ -951,7 +951,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
func(size int) bool { return size > nodeCount }, time.Second))
|
||||
})
|
||||
|
||||
It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
defer createPriorityClasses(f)()
|
||||
// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
|
||||
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
|
||||
@@ -963,24 +963,24 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
func(size int) bool { return size == nodeCount }, time.Second))
|
||||
})
|
||||
|
||||
It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
defer createPriorityClasses(f)()
|
||||
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
|
||||
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
|
||||
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName)
|
||||
defer cleanupFunc()
|
||||
By("Waiting for scale down")
|
||||
ginkgo.By("Waiting for scale down")
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
|
||||
})
|
||||
|
||||
It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
defer createPriorityClasses(f)()
|
||||
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
|
||||
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
|
||||
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
|
||||
defer cleanupFunc()
|
||||
By(fmt.Sprintf("Waiting for scale down hoping it won't happen, sleep for %s", scaleDownTimeout.String()))
|
||||
ginkgo.By(fmt.Sprintf("Waiting for scale down hoping it won't happen, sleep for %s", scaleDownTimeout.String()))
|
||||
time.Sleep(scaleDownTimeout)
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == increasedSize }, time.Second))
|
||||
@@ -988,7 +988,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
})
|
||||
|
||||
func installNvidiaDriversDaemonSet() {
|
||||
By("Add daemonset which installs nvidia drivers")
|
||||
ginkgo.By("Add daemonset which installs nvidia drivers")
|
||||
// the link differs from one in GKE documentation; discussed with @mindprince this one should be used
|
||||
framework.RunKubectlOrDie("apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
|
||||
}
|
||||
@@ -1012,7 +1012,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
|
||||
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")
|
||||
|
||||
By("Create a PodDisruptionBudget")
|
||||
ginkgo.By("Create a PodDisruptionBudget")
|
||||
minAvailable := intstr.FromInt(numPods - pdbSize)
|
||||
pdb := &policy.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -1034,15 +1034,15 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
|
||||
verifyFunction(increasedSize)
|
||||
}
|
||||
|
||||
func getGkeApiEndpoint() string {
|
||||
gkeApiEndpoint := os.Getenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER")
|
||||
if gkeApiEndpoint == "" {
|
||||
gkeApiEndpoint = "https://test-container.sandbox.googleapis.com"
|
||||
func getGkeAPIEndpoint() string {
|
||||
gkeAPIEndpoint := os.Getenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER")
|
||||
if gkeAPIEndpoint == "" {
|
||||
gkeAPIEndpoint = "https://test-container.sandbox.googleapis.com"
|
||||
}
|
||||
if strings.HasSuffix(gkeApiEndpoint, "/") {
|
||||
gkeApiEndpoint = gkeApiEndpoint[:len(gkeApiEndpoint)-1]
|
||||
if strings.HasSuffix(gkeAPIEndpoint, "/") {
|
||||
gkeAPIEndpoint = gkeAPIEndpoint[:len(gkeAPIEndpoint)-1]
|
||||
}
|
||||
return gkeApiEndpoint
|
||||
return gkeAPIEndpoint
|
||||
}
|
||||
|
||||
func getGKEURL(apiVersion string, suffix string) string {
@@ -1051,7 +1051,7 @@ func getGKEURL(apiVersion string, suffix string) string {
token := strings.Replace(string(out), "\n", "", -1)

return fmt.Sprintf("%s/%s/%s?access_token=%s",
getGkeApiEndpoint(),
getGkeAPIEndpoint(),
	apiVersion,
	suffix,
	token)
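// The URL produced above has the shape <endpoint>/<apiVersion>/<suffix>?access_token=<token>,
// e.g. (illustrative, using the sandbox default endpoint and a made-up project/zone/cluster):
// https://test-container.sandbox.googleapis.com/v1/projects/my-project/zones/us-central1-a/clusters/my-cluster?access_token=<token>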
@@ -1064,12 +1064,11 @@ func getGKEClusterURL(apiVersion string) string {
|
||||
framework.TestContext.CloudConfig.ProjectID,
|
||||
framework.TestContext.CloudConfig.Region,
|
||||
framework.TestContext.CloudConfig.Cluster))
|
||||
} else {
|
||||
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/zones/%s/clusters/%s",
|
||||
framework.TestContext.CloudConfig.ProjectID,
|
||||
framework.TestContext.CloudConfig.Zone,
|
||||
framework.TestContext.CloudConfig.Cluster))
|
||||
}
|
||||
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/zones/%s/clusters/%s",
|
||||
framework.TestContext.CloudConfig.ProjectID,
|
||||
framework.TestContext.CloudConfig.Zone,
|
||||
framework.TestContext.CloudConfig.Cluster))
|
||||
}
|
||||
|
||||
func getCluster(apiVersion string) (string, error) {
|
||||
@@ -1107,9 +1106,8 @@ func isAutoscalerEnabled(expectedMaxNodeCountInTargetPool int) (bool, error) {
|
||||
func getClusterLocation() string {
|
||||
if isRegionalCluster() {
|
||||
return "--region=" + framework.TestContext.CloudConfig.Region
|
||||
} else {
|
||||
return "--zone=" + framework.TestContext.CloudConfig.Zone
|
||||
}
|
||||
return "--zone=" + framework.TestContext.CloudConfig.Zone
|
||||
}
|
||||
|
||||
func getGcloudCommandFromTrack(commandTrack string, args []string) []string {
|
||||
@@ -1248,7 +1246,7 @@ func getPoolInitialSize(poolName string) int {
|
||||
klog.Infof("Node-pool initial size: %s", output)
|
||||
framework.ExpectNoError(err, string(output))
|
||||
fields := strings.Fields(string(output))
|
||||
Expect(len(fields)).Should(Equal(1))
|
||||
gomega.Expect(len(fields)).Should(gomega.Equal(1))
|
||||
size, err := strconv.ParseInt(fields[0], 10, 64)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@@ -1274,7 +1272,7 @@ func getPoolSize(f *framework.Framework, poolName string) int {
|
||||
}
|
||||
|
||||
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
|
||||
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
|
||||
ginkgo.By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
|
||||
request := int64(1024 * 1024 * megabytes / replicas)
|
||||
config := &testutils.RCConfig{
|
||||
Client: f.ClientSet,
|
||||
@@ -1311,7 +1309,7 @@ func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, mega
|
||||
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}

// ReserveMemoryWithSelector creates a replication controller with pods with node selector that, in summation,
// ReserveMemoryWithSelectorAndTolerations creates a replication controller with pods with node selector that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
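// A minimal usage sketch of the helper above (hedged: the node selector, toleration and sizes
// here are illustrative assumptions, not values used elsewhere in this suite).
func exampleReserveOnLabeledNodes(f *framework.Framework) {
	// Reserve ~500 MB spread over 2 pods on nodes labeled pool=extra-pool, tolerating a
	// "dedicated" taint; the returned function cleans up the reservation RC.
	cleanup := ReserveMemoryWithSelectorAndTolerations(f, "memory-reservation", 2, 500, true, scaleUpTimeout,
		map[string]string{"pool": "extra-pool"},
		[]v1.Toleration{{Key: "dedicated", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule}})
	defer cleanup()
}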
@@ -1418,7 +1416,7 @@ func setMigSizes(sizes map[string]int) bool {
|
||||
currentSize, err := framework.GroupSize(mig)
|
||||
framework.ExpectNoError(err)
|
||||
if desiredSize != currentSize {
|
||||
By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
|
||||
ginkgo.By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
|
||||
err = framework.ResizeGroup(mig, int32(desiredSize))
|
||||
framework.ExpectNoError(err)
|
||||
madeChanges = true
|
||||
@@ -1428,10 +1426,10 @@ func setMigSizes(sizes map[string]int) bool {
|
||||
}
|
||||
|
||||
func drainNode(f *framework.Framework, node *v1.Node) {
|
||||
By("Make the single node unschedulable")
|
||||
ginkgo.By("Make the single node unschedulable")
|
||||
makeNodeUnschedulable(f.ClientSet, node)
|
||||
|
||||
By("Manually drain the single node")
|
||||
ginkgo.By("Manually drain the single node")
|
||||
podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
|
||||
framework.ExpectNoError(err)
|
||||
@@ -1442,7 +1440,7 @@ func drainNode(f *framework.Framework, node *v1.Node) {
|
||||
}
|
||||
|
||||
func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
|
||||
By(fmt.Sprintf("Taint node %s", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("Taint node %s", node.Name))
|
||||
for j := 0; j < 3; j++ {
|
||||
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -1479,7 +1477,7 @@ func (CriticalAddonsOnlyError) Error() string {
|
||||
}
|
||||
|
||||
func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error {
|
||||
By(fmt.Sprintf("Remove taint from node %s", node.Name))
|
||||
ginkgo.By(fmt.Sprintf("Remove taint from node %s", node.Name))
|
||||
for j := 0; j < 3; j++ {
|
||||
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -1634,7 +1632,7 @@ func buildAntiAffinity(labels map[string]string) *v1.Affinity {
|
||||
// 3a. enable scheduling on that node
|
||||
// 3b. increase number of replicas in RC by podsPerNode
|
||||
func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) error {
|
||||
By("Run a pod on each node")
|
||||
ginkgo.By("Run a pod on each node")
|
||||
for _, node := range nodes {
|
||||
err := makeNodeUnschedulable(f.ClientSet, &node)
|
||||
|
||||
@@ -1709,7 +1707,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
|
||||
// Increase cluster size by newNodesForScaledownTests to create some unused nodes
|
||||
// that can be later removed by cluster autoscaler.
|
||||
func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[string]int) int {
|
||||
By("Manually increase cluster size")
|
||||
ginkgo.By("Manually increase cluster size")
|
||||
increasedSize := 0
|
||||
newSizes := make(map[string]int)
|
||||
for key, val := range originalSizes {
|
||||
@@ -1857,13 +1855,13 @@ func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) boo
|
||||
// This is a temporary fix to allow CA to migrate some kube-system pods
|
||||
// TODO: Remove this when the PDB is added for some of those components
|
||||
func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
|
||||
By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")
|
||||
ginkgo.By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")
|
||||
|
||||
var newPdbs []string
|
||||
cleanup := func() {
|
||||
var finalErr error
|
||||
for _, newPdbName := range newPdbs {
|
||||
By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
|
||||
ginkgo.By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
|
||||
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
// log error, but attempt to remove other pdbs
|
||||
@@ -1888,7 +1886,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
|
||||
{label: "glbc", minAvailable: 0},
|
||||
}
|
||||
for _, pdbData := range pdbsToAdd {
|
||||
By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
|
||||
ginkgo.By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
|
||||
labelMap := map[string]string{"k8s-app": pdbData.label}
|
||||
pdbName := fmt.Sprintf("test-pdb-for-%v", pdbData.label)
|
||||
minAvailable := intstr.FromInt(pdbData.minAvailable)
|
||||
@@ -1922,7 +1920,7 @@ func createPriorityClasses(f *framework.Framework) func() {
|
||||
if err != nil {
|
||||
klog.Errorf("Error creating priority class: %v", err)
|
||||
}
|
||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
|
||||
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
|
||||
}
|
||||
|
||||
return func() {
|
||||
|
||||
@@ -33,7 +33,7 @@ import (
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
@@ -45,13 +45,13 @@ const (
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
ginkgo.It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
@@ -66,7 +66,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
ginkgo.It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
@@ -83,7 +83,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with External Metric with target value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
ginkgo.It("should scale down with External Metric with target value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := externalMetricValue
|
||||
@@ -106,7 +106,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with External Metric with target average value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
ginkgo.It("should scale down with External Metric with target average value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := externalMetricValue
|
||||
@@ -129,7 +129,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
|
||||
ginkgo.It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
@@ -144,7 +144,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
ginkgo.It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 1
|
||||
// metric 1 would cause a scale down, if not for metric 2
|
||||
metric1Value := int64(100)
|
||||
@@ -175,7 +175,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale up with two External metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
ginkgo.It("should scale up with two External metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 1
|
||||
// metric 1 would cause a scale down, if not for metric 2
|
||||
metric1Value := externalMetricValue
|
||||
@@ -216,6 +216,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
})
|
||||
})
|
||||
|
||||
// CustomMetricTestCase is a struct for test cases.
|
||||
type CustomMetricTestCase struct {
|
||||
framework *framework.Framework
|
||||
hpa *as.HorizontalPodAutoscaler
|
||||
@@ -226,8 +227,9 @@ type CustomMetricTestCase struct {
|
||||
scaledReplicas int
|
||||
}
|
||||
|
||||
// Run starts test case.
|
||||
func (tc *CustomMetricTestCase) Run() {
|
||||
projectId := framework.TestContext.CloudConfig.ProjectID
|
||||
projectID := framework.TestContext.CloudConfig.ProjectID
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
|
||||
@@ -251,11 +253,11 @@ func (tc *CustomMetricTestCase) Run() {
|
||||
}
|
||||
|
||||
// Set up a cluster: create a custom metric and set up k8s-sd adapter
|
||||
err = monitoring.CreateDescriptors(gcmService, projectId)
|
||||
err = monitoring.CreateDescriptors(gcmService, projectID)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create metric descriptor: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupDescriptors(gcmService, projectId)
|
||||
defer monitoring.CleanupDescriptors(gcmService, projectID)
|
||||
|
||||
err = monitoring.CreateAdapter(monitoring.AdapterDefault)
|
||||
if err != nil {
|
||||
|
||||
@@ -31,10 +31,11 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Constants used in dns-autoscaling test.
|
||||
const (
|
||||
DNSdefaultTimeout = 5 * time.Minute
|
||||
ClusterAddonLabelKey = "k8s-app"
|
||||
@@ -47,18 +48,18 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
var c clientset.Interface
|
||||
var previousParams map[string]string
|
||||
var originDNSReplicasCount int
|
||||
var DNSParams_1 DNSParamsLinear
|
||||
var DNSParams_2 DNSParamsLinear
|
||||
var DNSParams_3 DNSParamsLinear
|
||||
var DNSParams1 DNSParamsLinear
|
||||
var DNSParams2 DNSParamsLinear
|
||||
var DNSParams3 DNSParamsLinear
|
||||
|
||||
BeforeEach(func() {
|
||||
ginkgo.BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
c = f.ClientSet
|
||||
|
||||
nodeCount := len(framework.GetReadySchedulableNodesOrDie(c).Items)
|
||||
Expect(nodeCount).NotTo(BeZero())
|
||||
gomega.Expect(nodeCount).NotTo(gomega.BeZero())
|
||||
|
||||
By("Collecting original replicas count and DNS scaling params")
|
||||
ginkgo.By("Collecting original replicas count and DNS scaling params")
|
||||
var err error
|
||||
originDNSReplicasCount, err = getDNSReplicas(c)
|
||||
framework.ExpectNoError(err)
|
||||
@@ -68,13 +69,13 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
previousParams = pcm.Data
|
||||
|
||||
if nodeCount <= 500 {
|
||||
DNSParams_1 = DNSParamsLinear{
|
||||
DNSParams1 = DNSParamsLinear{
|
||||
nodesPerReplica: 1,
|
||||
}
|
||||
DNSParams_2 = DNSParamsLinear{
|
||||
DNSParams2 = DNSParamsLinear{
|
||||
nodesPerReplica: 2,
|
||||
}
|
||||
DNSParams_3 = DNSParamsLinear{
|
||||
DNSParams3 = DNSParamsLinear{
|
||||
nodesPerReplica: 3,
|
||||
coresPerReplica: 3,
|
||||
}
|
||||
@@ -84,13 +85,13 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
// The default setup is: 256 cores/replica, 16 nodes/replica.
// With nodeCount > 500, nodes/13, nodes/14, nodes/15 and nodes/16
// are different numbers.
DNSParams_1 = DNSParamsLinear{
DNSParams1 = DNSParamsLinear{
	nodesPerReplica: 13,
}
DNSParams_2 = DNSParamsLinear{
DNSParams2 = DNSParamsLinear{
	nodesPerReplica: 14,
}
DNSParams_3 = DNSParamsLinear{
DNSParams3 = DNSParamsLinear{
	nodesPerReplica: 15,
	coresPerReplica: 15,
}
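// For context (hedged: this describes the cluster-proportional-autoscaler's documented "linear"
// mode, whose implementation is not part of this diff): the expected replica count for a
// DNSParamsLinear is roughly max(ceil(cores/coresPerReplica), ceil(nodes/nodesPerReplica)),
// which is why the per-replica values above are picked so that different cluster sizes yield
// distinguishable replica counts.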
@@ -99,25 +100,25 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
|
||||
// This test is separated because it is slow and needs to run serially.
// Will take around 5 minutes to run on a 4-node cluster.
It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
|
||||
ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
|
||||
numNodes, err := framework.NumberOfRegisteredNodes(c)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Replace the dns autoscaling parameters with testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
|
||||
ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
By("Restoring initial dns autoscaling parameters")
|
||||
ginkgo.By("Restoring initial dns autoscaling parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Wait for number of running and ready kube-dns pods recover")
|
||||
ginkgo.By("Wait for number of running and ready kube-dns pods recover")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
|
||||
_, err := framework.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
}()
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
ginkgo.By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams1)
|
||||
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@@ -125,11 +126,11 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
|
||||
size, err := framework.GroupSize(mig)
|
||||
framework.ExpectNoError(err)
|
||||
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
|
||||
ginkgo.By(fmt.Sprintf("Initial size of %s: %d", mig, size))
|
||||
originalSizes[mig] = size
|
||||
}
|
||||
|
||||
By("Manually increase cluster size")
|
||||
ginkgo.By("Manually increase cluster size")
|
||||
increasedSizes := make(map[string]int)
|
||||
for key, val := range originalSizes {
|
||||
increasedSizes[key] = val + 1
|
||||
@@ -139,87 +140,88 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
func(size int) bool { return size == numNodes+len(originalSizes) }, scaleUpTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
ginkgo.By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams1)
|
||||
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
|
||||
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams3)))
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
|
||||
ginkgo.By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams3)
|
||||
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Restoring cluster size")
|
||||
ginkgo.By("Restoring cluster size")
|
||||
setMigSizes(originalSizes)
|
||||
err = framework.WaitForReadyNodes(c, numNodes, scaleDownTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
ginkgo.By("Wait for kube-dns scaled to expected number")
|
||||
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #55779 is fixed.
|
||||
It("[DisabledForLargeClusters] kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
|
||||
ginkgo.It("[DisabledForLargeClusters] kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
|
||||
|
||||
By("Replace the dns autoscaling parameters with testing parameters")
|
||||
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
|
||||
ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
|
||||
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
By("Restoring initial dns autoscaling parameters")
|
||||
ginkgo.By("Restoring initial dns autoscaling parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))
|
||||
framework.ExpectNoError(err)
|
||||
}()
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
ginkgo.By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams1)
|
||||
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("--- Scenario: should scale kube-dns based on changed parameters ---")
|
||||
By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
|
||||
ginkgo.By("--- Scenario: should scale kube-dns based on changed parameters ---")
|
||||
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams3)))
|
||||
framework.ExpectNoError(err)
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
|
||||
ginkgo.By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams3)
|
||||
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
|
||||
By("Delete the ConfigMap for autoscaler")
|
||||
ginkgo.By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
|
||||
ginkgo.By("Delete the ConfigMap for autoscaler")
|
||||
err = deleteDNSScalingConfigMap(c)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Wait for the ConfigMap got re-created")
|
||||
ginkgo.By("Wait for the ConfigMap got re-created")
|
||||
_, err = waitForDNSConfigMapCreated(c, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_2)))
|
||||
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams2)))
|
||||
framework.ExpectNoError(err)
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_2)
|
||||
ginkgo.By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams2)
|
||||
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("--- Scenario: should recover after autoscaler pod got deleted ---")
|
||||
By("Delete the autoscaler pod for kube-dns")
|
||||
ginkgo.By("--- Scenario: should recover after autoscaler pod got deleted ---")
|
||||
ginkgo.By("Delete the autoscaler pod for kube-dns")
|
||||
err = deleteDNSAutoscalerPod(c)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
|
||||
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
|
||||
framework.ExpectNoError(err)
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
ginkgo.By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams1)
|
||||
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
})
|
||||
|
||||
// DNSParamsLinear is a struct for number of DNS pods.
|
||||
type DNSParamsLinear struct {
|
||||
nodesPerReplica float64
|
||||
coresPerReplica float64
|
||||
|
||||
@@ -18,6 +18,7 @@ package autoscaling
|
||||
|
||||
import "github.com/onsi/ginkgo"
|
||||
|
||||
// SIGDescribe annotates the test with the SIG label.
|
||||
func SIGDescribe(text string, body func()) bool {
|
||||
return ginkgo.Describe("[sig-autoscaling] "+text, body)
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
// These tests don't seem to be running properly in parallel: issue: #20338.
|
||||
@@ -37,20 +37,20 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
|
||||
|
||||
SIGDescribe("[Serial] [Slow] Deployment", func() {
|
||||
// CPU tests via deployments
|
||||
It(titleUp, func() {
|
||||
ginkgo.It(titleUp, func() {
|
||||
scaleUp("test-deployment", common.KindDeployment, false, rc, f)
|
||||
})
|
||||
It(titleDown, func() {
|
||||
ginkgo.It(titleDown, func() {
|
||||
scaleDown("test-deployment", common.KindDeployment, false, rc, f)
|
||||
})
|
||||
})
|
||||
|
||||
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
|
||||
// CPU tests via ReplicaSets
|
||||
It(titleUp, func() {
|
||||
ginkgo.It(titleUp, func() {
|
||||
scaleUp("rs", common.KindReplicaSet, false, rc, f)
|
||||
})
|
||||
It(titleDown, func() {
|
||||
ginkgo.It(titleDown, func() {
|
||||
scaleDown("rs", common.KindReplicaSet, false, rc, f)
|
||||
})
|
||||
})
|
||||
@@ -58,16 +58,16 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
|
||||
// These tests take ~20 minutes each.
|
||||
SIGDescribe("[Serial] [Slow] ReplicationController", func() {
|
||||
// CPU tests via replication controllers
|
||||
It(titleUp+" and verify decision stability", func() {
|
||||
ginkgo.It(titleUp+" and verify decision stability", func() {
|
||||
scaleUp("rc", common.KindRC, true, rc, f)
|
||||
})
|
||||
It(titleDown+" and verify decision stability", func() {
|
||||
ginkgo.It(titleDown+" and verify decision stability", func() {
|
||||
scaleDown("rc", common.KindRC, true, rc, f)
|
||||
})
|
||||
})
|
||||
|
||||
SIGDescribe("ReplicationController light", func() {
|
||||
It("Should scale from 1 pod to 2 pods", func() {
|
||||
ginkgo.It("Should scale from 1 pod to 2 pods", func() {
|
||||
scaleTest := &HPAScaleTest{
|
||||
initPods: 1,
|
||||
totalInitialCPUUsage: 150,
|
||||
@@ -79,7 +79,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
|
||||
}
|
||||
scaleTest.run("rc-light", common.KindRC, rc, f)
|
||||
})
|
||||
It("Should scale from 2 pods to 1 pod", func() {
|
||||
ginkgo.It("Should scale from 2 pods to 1 pod", func() {
|
||||
scaleTest := &HPAScaleTest{
|
||||
initPods: 2,
|
||||
totalInitialCPUUsage: 50,
|
||||
|
||||
@@ -182,7 +182,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
configMap.ResourceVersion = "" // to force update
|
||||
configMap.Data["data-1"] = "value-2"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
|
||||
|
||||
By("waiting to observe update in volume")
|
||||
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
|
||||
@@ -446,14 +446,14 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
|
||||
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
|
||||
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
|
||||
updateConfigMap.ResourceVersion = "" // to force update
|
||||
delete(updateConfigMap.Data, "data-1")
|
||||
updateConfigMap.Data["data-3"] = "value-3"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
|
||||
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
|
||||
|
||||
@@ -163,7 +163,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
|
||||
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
|
||||
@@ -392,11 +392,11 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
|
||||
By("waiting for pod running")
|
||||
err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "while waiting for pod to be running")
|
||||
framework.ExpectNoError(err, "while waiting for pod to be running")
|
||||
|
||||
By("deleting the pod gracefully")
|
||||
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
|
||||
framework.ExpectNoError(err, "failed to delete pod")
|
||||
})
|
||||
|
||||
/*
|
||||
@@ -476,7 +476,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
|
||||
By("waiting for pod running")
|
||||
err := framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "while waiting for pod to be running")
|
||||
framework.ExpectNoError(err, "while waiting for pod to be running")
|
||||
|
||||
By("creating a file in subpath")
|
||||
cmd := "touch /volume_mount/mypath/foo/test.log"
|
||||
@@ -499,11 +499,11 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
|
||||
By("waiting for annotated pod running")
|
||||
err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
|
||||
Expect(err).NotTo(HaveOccurred(), "while waiting for annotated pod to be running")
|
||||
framework.ExpectNoError(err, "while waiting for annotated pod to be running")
|
||||
|
||||
By("deleting the pod gracefully")
|
||||
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
|
||||
framework.ExpectNoError(err, "failed to delete pod")
|
||||
})
|
||||
|
||||
/*
|
||||
|
||||
@@ -91,7 +91,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
e2elog.Logf("PodSpec: initContainers in spec.initContainers")
|
||||
startedPod := podClient.Create(pod)
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
framework.ExpectNoError(err, "error watching a pod")
|
||||
wr := watch.NewRecorder(w)
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
defer cancel()
|
||||
@@ -162,7 +162,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
e2elog.Logf("PodSpec: initContainers in spec.initContainers")
|
||||
startedPod := podClient.Create(pod)
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
framework.ExpectNoError(err, "error watching a pod")
|
||||
wr := watch.NewRecorder(w)
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
defer cancel()
|
||||
@@ -234,7 +234,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
e2elog.Logf("PodSpec: initContainers in spec.initContainers")
|
||||
startedPod := podClient.Create(pod)
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
framework.ExpectNoError(err, "error watching a pod")
|
||||
|
||||
wr := watch.NewRecorder(w)
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
@@ -352,7 +352,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
startedPod := podClient.Create(pod)
|
||||
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
framework.ExpectNoError(err, "error watching a pod")
|
||||
|
||||
wr := watch.NewRecorder(w)
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
|
||||
@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
|
||||
})
|
||||
// a timeout is acceptable, since it means we waited 5 minutes and didn't see any unwarranted node status updates
|
||||
if err != nil && err != wait.ErrWaitTimeout {
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for infrequent nodestatus update")
|
||||
framework.ExpectNoError(err, "error waiting for infrequent nodestatus update")
|
||||
}
|
||||
|
||||
By("verify node is still in ready status even though node status report is infrequent")
|
||||
|
||||
@@ -67,12 +67,12 @@ func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool)
|
||||
msg := fmt.Sprintf("cmd %v, stdout %q, stderr %q", cmd, stdout, stderr)
|
||||
|
||||
if expectSuccess {
|
||||
Expect(err).NotTo(HaveOccurred(), msg)
|
||||
framework.ExpectNoError(err, msg)
|
||||
// We need to clean up the dummy link that was created, as it
|
||||
// leaks out into the node level -- yuck.
|
||||
_, _, err := c.f.ExecCommandInContainerWithFullOutput(
|
||||
c.privilegedPod, containerName, reverseCmd...)
|
||||
Expect(err).NotTo(HaveOccurred(),
|
||||
framework.ExpectNoError(err,
|
||||
fmt.Sprintf("could not remove dummy1 link: %v", err))
|
||||
} else {
|
||||
Expect(err).To(HaveOccurred(), msg)
|
||||
|
||||
@@ -188,7 +188,7 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
|
||||
configMap.ResourceVersion = "" // to force update
|
||||
configMap.Data["data-1"] = "value-2"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
|
||||
|
||||
By("waiting to observe update in volume")
|
||||
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
|
||||
@@ -374,14 +374,14 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
|
||||
|
||||
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
|
||||
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
|
||||
updateConfigMap.ResourceVersion = "" // to force update
|
||||
delete(updateConfigMap.Data, "data-1")
|
||||
updateConfigMap.Data["data-3"] = "value-3"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
|
||||
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
|
||||
|
||||
@@ -163,7 +163,7 @@ var _ = Describe("[sig-storage] Projected downwardAPI", func() {
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
|
||||
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
|
||||
@@ -382,14 +382,14 @@ var _ = Describe("[sig-storage] Projected secret", func() {
|
||||
|
||||
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
|
||||
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
|
||||
updateSecret.ResourceVersion = "" // to force update
|
||||
delete(updateSecret.Data, "data-1")
|
||||
updateSecret.Data["data-3"] = []byte("value-3")
|
||||
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
|
||||
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
|
||||
|
||||
@@ -38,6 +38,9 @@ const (
|
||||
// PreconfiguredRuntimeHandler is the name of the runtime handler that is expected to be
|
||||
// preconfigured in the test environment.
|
||||
PreconfiguredRuntimeHandler = "test-handler"
|
||||
// DockerRuntimeHandler is a hardcoded runtime handler that is accepted by dockershim, and
|
||||
// treated equivalently to a nil runtime handler.
|
||||
DockerRuntimeHandler = "docker"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-node] RuntimeClass", func() {
|
||||
@@ -59,9 +62,12 @@ var _ = Describe("[sig-node] RuntimeClass", func() {
|
||||
// This test requires that the PreconfiguredRuntimeHandler has already been set up on nodes.
|
||||
It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func() {
|
||||
// The built-in docker runtime does not support configuring runtime handlers.
|
||||
framework.SkipIfContainerRuntimeIs("docker")
|
||||
handler := PreconfiguredRuntimeHandler
|
||||
if framework.TestContext.ContainerRuntime == "docker" {
|
||||
handler = DockerRuntimeHandler
|
||||
}
|
||||
|
||||
rcName := createRuntimeClass(f, "preconfigured-handler", PreconfiguredRuntimeHandler)
|
||||
rcName := createRuntimeClass(f, "preconfigured-handler", handler)
|
||||
pod := createRuntimeClassPod(f, rcName)
|
||||
expectPodSuccess(f, pod)
|
||||
})
|
||||
|
||||
@@ -347,14 +347,14 @@ var _ = Describe("[sig-storage] Secrets", func() {
|
||||
|
||||
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
|
||||
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
|
||||
updateSecret.ResourceVersion = "" // to force update
|
||||
delete(updateSecret.Data, "data-1")
|
||||
updateSecret.Data["data-3"] = []byte("value-3")
|
||||
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
|
||||
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
|
||||
|
||||
@@ -49,7 +49,6 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// These tests need privileged containers, which are disabled by default. Run
|
||||
@@ -130,7 +129,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
|
||||
defer func() {
|
||||
volume.TestCleanup(f, config)
|
||||
err := c.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
|
||||
framework.ExpectNoError(err, "defer: Gluster delete endpoints failed")
|
||||
}()
|
||||
|
||||
tests := []volume.Test{
|
||||
|
||||
@@ -8,7 +8,6 @@ go_library(
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/authorization/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@@ -68,15 +67,6 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews
|
||||
|
||||
err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
|
||||
response, err := c.SubjectAccessReviews().Create(review)
|
||||
// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
|
||||
// has adjusted as expected. In this case, simply wait one second and hope it's up to date
|
||||
// TODO: Should have a check for the provider here but that introduces too tight of
|
||||
// coupling with the `framework` package. See: https://github.com/kubernetes/kubernetes/issues/76726
|
||||
if apierrors.IsNotFound(err) {
|
||||
logf("SubjectAccessReview endpoint is missing")
|
||||
time.Sleep(1 * time.Second)
|
||||
return true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -2,14 +2,21 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["wait.go"],
|
||||
srcs = [
|
||||
"ports.go",
|
||||
"wait.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/endpoints",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
136
test/e2e/framework/endpoints/ports.go
Normal file
@@ -0,0 +1,136 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
This soak test places a specified number of pods on each node and then
repeatedly sends queries to a service running on these pods via
a service
*/

package endpoints
|
||||
|
||||
import (
"fmt"
"sort"
"time"

"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// ServiceStartTimeout is how long to wait for a service endpoint to be resolvable.
const ServiceStartTimeout = 3 * time.Minute

// PortsByPodName is a map that maps pod name to container ports.
type PortsByPodName map[string][]int

// PortsByPodUID is a map that maps pod UID to container ports.
type PortsByPodUID map[types.UID][]int

// GetContainerPortsByPodUID returns a PortsByPodUID map on the given endpoints.
func GetContainerPortsByPodUID(ep *v1.Endpoints) PortsByPodUID {
m := PortsByPodUID{}
for _, ss := range ep.Subsets {
for _, port := range ss.Ports {
for _, addr := range ss.Addresses {
containerPort := port.Port
if _, ok := m[addr.TargetRef.UID]; !ok {
m[addr.TargetRef.UID] = make([]int, 0)
}
m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], int(containerPort))
}
}
}
return m
}

func translatePodNameToUID(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) (PortsByPodUID, error) {
portsByUID := make(PortsByPodUID)
for name, portList := range expectedEndpoints {
pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
}
portsByUID[pod.ObjectMeta.UID] = portList
}
return portsByUID, nil
}

func validatePorts(ep PortsByPodUID, expectedEndpoints PortsByPodUID) error {
if len(ep) != len(expectedEndpoints) {
// should not happen because we check this condition before
return fmt.Errorf("invalid number of endpoints got %v, expected %v", ep, expectedEndpoints)
}
for podUID := range expectedEndpoints {
if _, ok := ep[podUID]; !ok {
return fmt.Errorf("endpoint %v not found", podUID)
}
if len(ep[podUID]) != len(expectedEndpoints[podUID]) {
return fmt.Errorf("invalid list of ports for uid %v. Got %v, expected %v", podUID, ep[podUID], expectedEndpoints[podUID])
}
sort.Ints(ep[podUID])
sort.Ints(expectedEndpoints[podUID])
for index := range ep[podUID] {
if ep[podUID][index] != expectedEndpoints[podUID][index] {
return fmt.Errorf("invalid list of ports for uid %v. Got %v, expected %v", podUID, ep[podUID], expectedEndpoints[podUID])
}
}
}
return nil
}

// ValidateEndpointsPorts validates that the given service exists and is served by the given expectedEndpoints.
func ValidateEndpointsPorts(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) error {
ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints))
i := 1
for start := time.Now(); time.Since(start) < ServiceStartTimeout; time.Sleep(1 * time.Second) {
ep, err := c.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
continue
}
portsByPodUID := GetContainerPortsByPodUID(ep)
expectedPortsByPodUID, err := translatePodNameToUID(c, namespace, expectedEndpoints)
if err != nil {
return err
}
if len(portsByPodUID) == len(expectedEndpoints) {
err := validatePorts(portsByPodUID, expectedPortsByPodUID)
if err != nil {
return err
}
e2elog.Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
serviceName, namespace, expectedEndpoints, time.Since(start))
return nil
}
if i%5 == 0 {
e2elog.Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
}
i++
}
if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
for _, pod := range pods.Items {
e2elog.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
}
} else {
e2elog.Logf("Can't list pod debug info: %v", err)
}
return fmt.Errorf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, ServiceStartTimeout)
}
@@ -16,7 +16,7 @@ limitations under the License.

package metrics

// ClusterAutoscalerMetrics is metrics for cluster autoscaller
// ClusterAutoscalerMetrics is metrics for cluster autoscaler
type ClusterAutoscalerMetrics Metrics

// Equal returns true if all metrics are the same as the arguments.

@@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
@@ -1238,104 +1237,6 @@ func UpdateService(c clientset.Interface, namespace, serviceName string, update
return service, err
}

// GetContainerPortsByPodUID returns a PortsByPodUID map on the given endpoints.
func GetContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID {
m := PortsByPodUID{}
for _, ss := range endpoints.Subsets {
for _, port := range ss.Ports {
for _, addr := range ss.Addresses {
containerPort := port.Port
if _, ok := m[addr.TargetRef.UID]; !ok {
m[addr.TargetRef.UID] = make([]int, 0)
}
m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], int(containerPort))
}
}
}
return m
}

// PortsByPodName maps pod name to ports.
type PortsByPodName map[string][]int

// PortsByPodUID maps UID to ports.
type PortsByPodUID map[types.UID][]int

func translatePodNameToUIDOrFail(c clientset.Interface, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
portsByUID := make(PortsByPodUID)

for name, portList := range expectedEndpoints {
pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
}
portsByUID[pod.ObjectMeta.UID] = portList
}
// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
return portsByUID
}

func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) {
if len(endpoints) != len(expectedEndpoints) {
// should not happen because we check this condition before
Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints)
}
for podUID := range expectedEndpoints {
if _, ok := endpoints[podUID]; !ok {
Failf("endpoint %v not found", podUID)
}
if len(endpoints[podUID]) != len(expectedEndpoints[podUID]) {
Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
}
sort.Ints(endpoints[podUID])
sort.Ints(expectedEndpoints[podUID])
for index := range endpoints[podUID] {
if endpoints[podUID][index] != expectedEndpoints[podUID][index] {
Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
}
}
}
}

// ValidateEndpointsOrFail validates that the given service exists and is served by the given expectedEndpoints.
func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName string, expectedEndpoints PortsByPodName) {
ginkgo.By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", ServiceStartTimeout, serviceName, namespace, expectedEndpoints))
i := 1
for start := time.Now(); time.Since(start) < ServiceStartTimeout; time.Sleep(1 * time.Second) {
endpoints, err := c.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
continue
}
// Logf("Found endpoints %v", endpoints)

portsByPodUID := GetContainerPortsByPodUID(endpoints)
// Logf("Found port by pod UID %v", portsByPodUID)

expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
if len(portsByPodUID) == len(expectedEndpoints) {
validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
serviceName, namespace, expectedEndpoints, time.Since(start))
return
}

if i%5 == 0 {
Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
}
i++
}

if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
for _, pod := range pods.Items {
Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
}
} else {
Logf("Can't list pod debug info: %v", err)
}
Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, ServiceStartTimeout)
}

// StartServeHostnameService creates a replication controller that serves its
// hostname and a service on top of it.
func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {

@@ -33,6 +33,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/endpoints:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",

@@ -40,8 +40,6 @@ import (
"time"

"github.com/elazarl/goproxy"
"sigs.k8s.io/yaml"

v1 "k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
@@ -60,6 +58,7 @@ import (
commonutils "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -67,6 +66,7 @@ import (
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/utils/crd"
uexec "k8s.io/utils/exec"
"sigs.k8s.io/yaml"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -1075,7 +1075,7 @@ metadata:
})
validateService := func(name string, servicePort int, timeout time.Duration) {
err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
endpoints, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
ep, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
if err != nil {
// log the real error
e2elog.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
@@ -1089,7 +1089,7 @@ metadata:
return false, err
}

uidToPort := framework.GetContainerPortsByPodUID(endpoints)
uidToPort := e2eendpoints.GetContainerPortsByPodUID(ep)
if len(uidToPort) == 0 {
e2elog.Logf("No endpoint found, retrying")
return false, nil

@@ -37,6 +37,7 @@ import (
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/test/e2e/framework"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -138,7 +139,8 @@ var _ = SIGDescribe("Services", func() {

framework.ExpectNoError(err, "failed to create service with ServicePorts in namespace: %s", ns)

framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

names := map[string]bool{}
defer func() {
@@ -153,19 +155,23 @@ var _ = SIGDescribe("Services", func() {

framework.CreatePodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}})
names[name1] = true
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{name1: {80}})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name1: {80}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.CreatePodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}})
names[name2] = true
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{name1: {80}, name2: {80}})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name1: {80}, name2: {80}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.DeletePodOrFail(cs, ns, name1)
delete(names, name1)
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{name2: {80}})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name2: {80}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.DeletePodOrFail(cs, ns, name2)
delete(names, name2)
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
})

/*
@@ -206,7 +212,8 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service with ServicePorts in namespace: %s", ns)
port1 := 100
port2 := 101
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

names := map[string]bool{}
defer func() {
@@ -234,19 +241,23 @@ var _ = SIGDescribe("Services", func() {

framework.CreatePodOrFail(cs, ns, podname1, labels, containerPorts1)
names[podname1] = true
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{podname1: {port1}})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname1: {port1}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.CreatePodOrFail(cs, ns, podname2, labels, containerPorts2)
names[podname2] = true
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{podname1: {port1}, podname2: {port2}})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname1: {port1}, podname2: {port2}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.DeletePodOrFail(cs, ns, podname1)
delete(names, podname1)
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{podname2: {port2}})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname2: {port2}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.DeletePodOrFail(cs, ns, podname2)
delete(names, podname2)
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{})
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
})

ginkgo.It("should preserve source pod IP for traffic thru service cluster IP", func() {
@@ -297,7 +308,8 @@ var _ = SIGDescribe("Services", func() {
}()

// Waiting for service to expose endpoint.
framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{serverPodName: {servicePort}})
err := e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{serverPodName: {servicePort}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

ginkgo.By("Retrieve sourceip from a pod on the same node")
sourceIP1, execPodIP1 := execSourceipTest(f, cs, ns, node1.Name, serviceIP, servicePort)

@@ -347,7 +347,7 @@ func createClients(numberOfClients int) ([]clientset.Interface, []scaleclient.Sc

for i := 0; i < numberOfClients; i++ {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(err)
config.QPS = 100
config.Burst = 200
if framework.TestContext.KubeAPIContentType != "" {

@@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
@@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
}

err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

for _, node := range nodeList.Items {
e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)

@@ -58,18 +58,18 @@ var _ = SIGDescribe("LimitRange", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for limitRanges")
framework.ExpectNoError(err, "failed to query for limitRanges")
gomega.Expect(len(limitRanges.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: limitRanges.ListMeta.ResourceVersion,
}
w, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to set up watch")
framework.ExpectNoError(err, "failed to set up watch")

ginkgo.By("Submitting a LimitRange")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(limitRange)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Verifying LimitRange creation was observed")
select {
@@ -83,37 +83,37 @@ var _ = SIGDescribe("LimitRange", func() {

ginkgo.By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Creating a Pod with no resource requirements")
pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}

ginkgo.By("Creating a Pod with partial resource requirements")
pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// This is an interesting case, so it's worth a comment
// If you specify a Limit, and no Request, the Limit will default to the Request
// This means that the LimitRange.DefaultRequest will ONLY take affect if a container.resources.limit is not supplied
@@ -123,7 +123,7 @@ var _ = SIGDescribe("LimitRange", func() {
if err != nil {
// Print the pod to help in debugging.
e2elog.Logf("Pod %+v does not have the expected requirements", pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}

@@ -141,19 +141,20 @@ var _ = SIGDescribe("LimitRange", func() {
newMin := getResourceList("9m", "49Mi", "49Gi")
limitRange.Spec.Limits[0].Min = newMin
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(limitRange)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Verifying LimitRange updating is effective")
gomega.Expect(wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(limitRange.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
})).NotTo(gomega.HaveOccurred())
})
framework.ExpectNoError(err)

ginkgo.By("Creating a Pod with less than former min resources")
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Failing to create a Pod with more than max resources")
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
@@ -162,10 +163,10 @@ var _ = SIGDescribe("LimitRange", func() {

ginkgo.By("Deleting a LimitRange")
err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(limitRange.Name, metav1.NewDeleteOptions(30))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

ginkgo.By("Verifying the LimitRange was deleted")
gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
err = wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": limitRange.Name}))
options := metav1.ListOptions{LabelSelector: selector.String()}
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options)
@@ -190,12 +191,13 @@ var _ = SIGDescribe("LimitRange", func() {

return false, nil

})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")
})
framework.ExpectNoError(err, "kubelet never observed the termination notice")

ginkgo.By("Creating a Pod with more than former max resources")
pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})

})

@@ -713,7 +713,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n
predicate = scheduleSuccessEvent(ns, podName, "" /* any node */)
}
success, err := common.ObserveEventAfterAction(f, predicate, action)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(success).To(gomega.Equal(true))
}

@@ -82,7 +82,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})

ginkgo.It("Pod should be scheduled to node that don't match the PodAntiAffinity terms", func() {
@@ -191,7 +191,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
return node.Annotations[v1.PreferAvoidPodsAnnotationKey] == string(val)
}
success, err := common.ObserveNodeUpdateAfterAction(f, nodeName, predicate, action)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(success).To(gomega.Equal(true))

defer framework.RemoveAvoidPodsOffNode(cs, nodeName)
@@ -202,7 +202,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{
LabelSelector: "name=scheduler-priority-avoid-pod",
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Verify the pods should not scheduled to the node: %s", nodeName))
for _, pod := range testPods.Items {
gomega.Expect(pod.Spec.NodeName).NotTo(gomega.Equal(nodeName))
@@ -235,7 +235,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {

ginkgo.By("Pod should prefer scheduled to the node don't have the taint.")
tolePod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))

ginkgo.By("Trying to apply 10 taint on the first node.")
@@ -255,7 +255,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {

ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.")
tolePod, err = cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(tolePod.Spec.NodeName).To(gomega.Equal(nodeName))
})
})
@@ -400,7 +400,7 @@ func createRC(ns, rsName string, replicas int32, rcPodLabels map[string]string,
},
}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(ns).Create(rc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
return rc
}

@@ -43,7 +43,7 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
framework.SkipUnlessProviderIs("gce", "gke", "aws")
if zoneCount <= 0 {
zoneCount, err = getZoneCount(f.ClientSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
@@ -80,7 +80,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(serviceSpec)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Now create some pods behind the service
podSpec := &v1.Pod{
@@ -107,11 +107,11 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
// Wait for all of them to be scheduled
selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Now make sure they're spread across zones
zoneNames, err := framework.GetClusterZones(f.ClientSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true))
}

@@ -139,7 +139,7 @@ func getZoneCount(c clientset.Interface) (int, error) {
func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) {
ginkgo.By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
return getZoneNameForNode(*node)
}

@@ -155,7 +155,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []str
continue
}
zoneName, err := getZoneNameForPod(c, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
podsPerZone[zoneName] = podsPerZone[zoneName] + 1
}
minPodsPerZone := math.MaxInt32
@@ -205,7 +205,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
},
},
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
@@ -216,15 +216,15 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
// List the pods, making sure we observe all the replicas.
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Wait for all of them to be scheduled
ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Now make sure they're spread across zones
zoneNames, err := framework.GetClusterZones(f.ClientSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
gomega.Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(gomega.Equal(true))
}

@@ -42,7 +42,7 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
framework.SkipUnlessProviderIs("gce", "gke")
if zoneCount <= 0 {
zoneCount, err = getZoneCount(f.ClientSet)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
@@ -61,17 +61,17 @@ var _ = SIGDescribe("Multi-AZ Cluster Volumes [sig-storage]", func() {
// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
gceCloud, err := gce.GetGCECloud()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Get all the zones that the nodes are in
expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
e2elog.Logf("Expected zones: %v", expectedZones)

// Get all the zones in this current region
region := gceCloud.Region()
allZonesInRegion, err := gceCloud.ListZonesInRegion(region)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

var extraZone string
for _, zone := range allZonesInRegion {
@@ -117,13 +117,13 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
}

err = gceCloud.InsertInstance(project, zone, rb)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

defer func() {
// Teardown of the compute instance
e2elog.Logf("Deleting compute resource: %v", name)
err := gceCloud.DeleteInstance(project, zone, name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}()

ginkgo.By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
@@ -136,7 +136,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
for index := 1; index <= zoneCount+1; index++ {
pvc := newNamedDefaultClaim(ns, index)
pvc, err = framework.CreatePVC(c, ns, pvc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
pvcList = append(pvcList, pvc)

// Defer the cleanup
@@ -152,7 +152,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
// Wait for all claims bound
for _, claim := range pvcList {
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}

pvZones := sets.NewString()
@@ -160,11 +160,12 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
for _, claim := range pvcList {
// Get a new copy of the claim to have all fields populated
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

// Get the related PV
pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
gomega.Expect(ok).To(gomega.BeTrue(), "PV has no LabelZone to be found")
@@ -188,7 +189,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
ns := f.Namespace.Name

zones, err := framework.GetClusterZones(c)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
zonelist := zones.List()
ginkgo.By("Creating static PVs across zones")
configs := make([]*staticPVTestConfig, podCount)
@@ -205,14 +206,14 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
framework.PVPVCCleanup(c, ns, config.pv, config.pvc)
err = framework.DeletePVSource(config.pvSource)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}()

for i, config := range configs {
zone := zonelist[i%len(zones)]
config.pvSource, err = framework.CreatePVSource(zone)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)

pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "multizone-pv",
@@ -223,7 +224,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &className}

config.pv, config.pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}

ginkgo.By("Waiting for all PVCs to be bound")
@@ -235,13 +236,13 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
for _, config := range configs {
podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}

ginkgo.By("Waiting for all pods to be running")
for _, config := range configs {
err = framework.WaitForPodRunningInNamespace(c, config.pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
}

@@ -51,7 +51,9 @@ import (
)

const (
// GCEPDCSIProvisionerName is the name of GCE Persistent Disk CSI provisioner
GCEPDCSIProvisionerName = "pd.csi.storage.gke.io"
// GCEPDCSIZoneTopologyKey is the key of GCE Persistent Disk CSI zone topology
GCEPDCSIZoneTopologyKey = "topology.gke.io/zone"
)

@@ -1207,7 +1207,7 @@ func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTest
return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
}

func (h *gcePdDriver) GetClaimSize() string {
func (g *gcePdDriver) GetClaimSize() string {
return "5Gi"
}

@@ -1652,6 +1652,7 @@ var _ testsuites.TestDriver = &localDriver{}
var _ testsuites.PreprovisionedVolumeTestDriver = &localDriver{}
var _ testsuites.PreprovisionedPVTestDriver = &localDriver{}

// InitLocalDriverWithVolumeType initializes the local driver based on the volume type.
func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() testsuites.TestDriver {
maxFileSize := defaultLocalVolumeMaxFileSize
if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok {

@@ -512,11 +512,12 @@ func getVolumeOpCounts(c clientset.Interface, pluginName string) opCounts {
metricsGrabber, err := metrics.NewMetricsGrabber(c, nil, true, false, true, false, false)

if err != nil {
framework.Failf("Error creating metrics grabber : %v", err)
framework.ExpectNoError(err, "Error creating metrics grabber: %v", err)
}

if !metricsGrabber.HasRegisteredMaster() {
framework.Skipf("Environment does not support getting controller-manager metrics - skipping")
e2elog.Logf("Warning: Environment does not support getting controller-manager metrics")
return opCounts{}
}

controllerMetrics, err := metricsGrabber.GrabFromControllerManager()

@@ -118,12 +118,11 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T

// Now do the more expensive test initialization.
l.config, l.testCleanup = driver.PrepareTest(f)
l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
if l.resource.volSource == nil {
framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
}

l.intreeOps, l.migratedOps = getMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName)
}

cleanup := func() {

@@ -46,24 +46,31 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA [Slow]", func(
container2Name := "container2"
container2Domain := "contoso.org"

containers := make([]corev1.Container, 2)
for i, name := range []string{container1Name, container2Name} {
containers[i] = corev1.Container{
Name: name,
Image: imageutils.GetPauseImageName(),
}
}

pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Annotations: map[string]string{
"pod.alpha.windows.kubernetes.io/gmsa-credential-spec": generateDummyCredSpecs(podDomain),
container2Name + ".container.alpha.windows.kubernetes.io/gmsa-credential-spec": generateDummyCredSpecs(container2Domain),
},
},
Spec: corev1.PodSpec{
Containers: containers,
Containers: []corev1.Container{
{
Name: container1Name,
Image: imageutils.GetPauseImageName(),
},
{
Name: container2Name,
Image: imageutils.GetPauseImageName(),
SecurityContext: &corev1.SecurityContext{
WindowsOptions: &corev1.WindowsSecurityContextOptions{
GMSACredentialSpec: generateDummyCredSpecs(container2Domain),
},
},
},
},
SecurityContext: &corev1.PodSecurityContext{
WindowsOptions: &corev1.WindowsSecurityContextOptions{
GMSACredentialSpec: generateDummyCredSpecs(podDomain),
},
},
},
}

@@ -108,10 +115,10 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA [Slow]", func(
})
})

func generateDummyCredSpecs(domain string) string {
func generateDummyCredSpecs(domain string) *string {
shortName := strings.ToUpper(strings.Split(domain, ".")[0])

return fmt.Sprintf(`{
credSpecs := fmt.Sprintf(`{
"ActiveDirectoryConfig":{
"GroupManagedServiceAccounts":[
{
@@ -136,4 +143,6 @@ func generateDummyCredSpecs(domain string) string {
"Sid":"S-1-5-21-2126729477-2524175714-3194792973"
}
}`, shortName, domain, domain, domain, shortName)

return &credSpecs
}

@@ -176,7 +176,6 @@ if [ ! -z $pid ]; then
fi

volume_stats_agg_period=10s
allow_privileged=true
serialize_image_pulls=false
config_dir=`mktemp -d`
file_check_frequency=10s
@@ -184,7 +183,6 @@ pod_cidr=10.100.0.0/24
log_level=4
start_kubelet --kubeconfig ${KUBELET_KUBECONFIG} \
--volume-stats-agg-period $volume_stats_agg_period \
--allow-privileged=$allow_privileged \
--serialize-image-pulls=$serialize_image_pulls \
--pod-manifest-path $config_dir \
--file-check-frequency $file_check_frequency \

@@ -21,6 +21,7 @@ limitations under the License.
package main

import (
"context"
"flag"
"fmt"
"io/ioutil"
@@ -152,6 +153,8 @@ type GCEImage struct {
// Defaults to using only the latest image. Acceptable values are [0, # of images that match the regex).
// If the number of existing previous images is less than what is desired, the test will use what is available.
PreviousImages int `json:"previous_images,omitempty"`
// ImageFamily is the image family to use. The latest image from the image family will be used.
ImageFamily string `json:"image_family,omitempty"`

Machine string `json:"machine,omitempty"`
Resources Resources `json:"resources,omitempty"`
@@ -229,11 +232,12 @@ func main() {
for shortName, imageConfig := range externalImageConfig.Images {
var images []string
isRegex, name := false, shortName
if imageConfig.ImageRegex != "" && imageConfig.Image == "" {
if (imageConfig.ImageRegex != "" || imageConfig.ImageFamily != "") && imageConfig.Image == "" {
isRegex = true
images, err = getGCEImages(imageConfig.ImageRegex, imageConfig.Project, imageConfig.PreviousImages)
images, err = getGCEImages(imageConfig.ImageRegex, imageConfig.ImageFamily, imageConfig.Project, imageConfig.PreviousImages)
if err != nil {
klog.Fatalf("Could not retrieve list of images based on image prefix %q: %v", imageConfig.ImageRegex, err)
klog.Fatalf("Could not retrieve list of images based on image prefix %q and family %q: %v",
imageConfig.ImageRegex, imageConfig.ImageFamily, err)
}
} else {
images = []string{imageConfig.Image}
@@ -468,26 +472,33 @@ func (a byCreationTime) Less(i, j int) bool { return a[i].creationTime.After(a[j
func (a byCreationTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Returns a list of image names based on regex and number of previous images requested.
func getGCEImages(imageRegex, project string, previousImages int) ([]string, error) {
ilc, err := computeService.Images.List(project).Do()
if err != nil {
return nil, fmt.Errorf("Failed to list images in project %q: %v", project, err)
}
func getGCEImages(imageRegex, imageFamily string, project string, previousImages int) ([]string, error) {
imageObjs := []imageObj{}
imageRe := regexp.MustCompile(imageRegex)
for _, instance := range ilc.Items {
if imageRe.MatchString(instance.Name) {
creationTime, err := time.Parse(time.RFC3339, instance.CreationTimestamp)
if err != nil {
return nil, fmt.Errorf("Failed to parse instance creation timestamp %q: %v", instance.CreationTimestamp, err)
if err := computeService.Images.List(project).Pages(context.Background(),
func(ilc *compute.ImageList) error {
for _, instance := range ilc.Items {
if imageRegex != "" && !imageRe.MatchString(instance.Name) {
continue
}
if imageFamily != "" && instance.Family != imageFamily {
continue
}
creationTime, err := time.Parse(time.RFC3339, instance.CreationTimestamp)
if err != nil {
return fmt.Errorf("failed to parse instance creation timestamp %q: %v", instance.CreationTimestamp, err)
}
io := imageObj{
creationTime: creationTime,
name: instance.Name,
}
klog.V(4).Infof("Found image %q based on regex %q and family %q in project %q", io.string(), imageRegex, imageFamily, project)
imageObjs = append(imageObjs, io)
}
io := imageObj{
creationTime: creationTime,
name: instance.Name,
}
klog.V(4).Infof("Found image %q based on regex %q in project %q", io.string(), imageRegex, project)
imageObjs = append(imageObjs, io)
}
return nil
},
); err != nil {
return nil, fmt.Errorf("failed to list images in project %q: %v", project, err)
}
sort.Sort(byCreationTime(imageObjs))
images := []string{}
@@ -547,7 +558,13 @@ func testImage(imageConfig *internalGCEImage, junitFilePrefix string) *TestResul

// Provision a gce instance using image
func createInstance(imageConfig *internalGCEImage) (string, error) {
klog.V(1).Infof("Creating instance %+v", *imageConfig)
p, err := computeService.Projects.Get(*project).Do()
if err != nil {
return "", fmt.Errorf("failed to get project info %q", *project)
}
// Use default service account
serviceAccount := p.DefaultServiceAccount
klog.V(1).Infof("Creating instance %+v with service account %q", *imageConfig, serviceAccount)
name := imageToInstanceName(imageConfig)
i := &compute.Instance{
Name: name,
@@ -572,6 +589,14 @@ func createInstance(imageConfig *internalGCEImage) (string, error) {
},
},
},
ServiceAccounts: []*compute.ServiceAccount{
{
Email: serviceAccount,
Scopes: []string{
"https://www.googleapis.com/auth/cloud-platform",
},
},
},
}

for _, accelerator := range imageConfig.resources.Accelerators {
@@ -591,7 +616,6 @@ func createInstance(imageConfig *internalGCEImage) (string, error) {
i.GuestAccelerators = append(i.GuestAccelerators, ac)
}

var err error
i.Metadata = imageConfig.metadata
if _, err := computeService.Instances.Get(*project, *zone, i.Name).Do(); err != nil {
op, err := computeService.Instances.Insert(*project, *zone, i).Do()

@@ -260,7 +260,6 @@ func (e *E2EServices) startKubelet() (*server, error) {
"--kubeconfig", kubeconfigPath,
"--root-dir", KubeletRootDirectory,
"--v", LogVerbosityLevel, "--logtostderr",
"--allow-privileged=true",
)

// Apply test framework feature gates by default. This could also be overridden

@@ -45,6 +45,7 @@ filegroup(
"//test/integration/daemonset:all-srcs",
"//test/integration/defaulttolerationseconds:all-srcs",
"//test/integration/deployment:all-srcs",
"//test/integration/disruption:all-srcs",
"//test/integration/dryrun:all-srcs",
"//test/integration/etcd:all-srcs",
"//test/integration/evictions:all-srcs",

47 test/integration/disruption/BUILD Normal file
@@ -0,0 +1,47 @@
load("@io_bazel_rules_go//go:def.bzl", "go_test")

go_test(
name = "go_default_test",
srcs = [
"disruption_test.go",
"main_test.go",
],
tags = ["integration"],
deps = [
"//cmd/kube-apiserver/app/testing:go_default_library",
"//pkg/controller/disruption:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//test/integration/etcd:go_default_library",
"//test/integration/framework:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
284 test/integration/disruption/disruption_test.go Normal file
@@ -0,0 +1,284 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package disruption

import (
	"fmt"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/api/policy/v1beta1"
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/scale"
	"k8s.io/client-go/tools/cache"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/pkg/controller/disruption"
	"k8s.io/kubernetes/test/integration/etcd"
	"k8s.io/kubernetes/test/integration/framework"
)

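// setup starts an in-process kube-apiserver backed by a shared etcd and builds the
// typed, apiextensions, dynamic, and scale clients the disruption controller needs,
// returning the controller ready for the individual tests to run.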
func setup(t *testing.T) (*kubeapiservertesting.TestServer, *disruption.DisruptionController, informers.SharedInformerFactory, clientset.Interface, *apiextensionsclientset.Clientset, dynamic.Interface) {
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins", "ServiceAccount"}, framework.SharedEtcd())

	clientSet, err := clientset.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatalf("Error creating clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(server.ClientConfig, "pdb-informers")), resyncPeriod)

	client := clientset.NewForConfigOrDie(restclient.AddUserAgent(server.ClientConfig, "disruption-controller"))

	discoveryClient := cacheddiscovery.NewMemCacheClient(clientSet.Discovery())
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)

	scaleKindResolver := scale.NewDiscoveryScaleKindResolver(client.Discovery())
	scaleClient, err := scale.NewForConfig(server.ClientConfig, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
	if err != nil {
		t.Fatalf("Error creating scaleClient: %v", err)
	}

	apiExtensionClient, err := apiextensionsclientset.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatalf("Error creating extension clientset: %v", err)
	}

	dynamicClient, err := dynamic.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatalf("Error creating dynamicClient: %v", err)
	}

	pdbc := disruption.NewDisruptionController(
		informers.Core().V1().Pods(),
		informers.Policy().V1beta1().PodDisruptionBudgets(),
		informers.Core().V1().ReplicationControllers(),
		informers.Apps().V1().ReplicaSets(),
		informers.Apps().V1().Deployments(),
		informers.Apps().V1().StatefulSets(),
		client,
		mapper,
		scaleClient,
	)
	return server, pdbc, informers, clientSet, apiExtensionClient, dynamicClient
}

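// TestPDBWithScaleSubresource verifies that a PodDisruptionBudget tracks pods owned
// by a custom resource, using the CRD's scale subresource to find the expected
// replica count.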
func TestPDBWithScaleSubresource(t *testing.T) {
	s, pdbc, informers, clientSet, apiExtensionClient, dynamicClient := setup(t)
	defer s.TearDownFn()

	nsName := "pdb-scale-subresource"
	createNs(t, nsName, clientSet)

	stopCh := make(chan struct{})
	informers.Start(stopCh)
	go pdbc.Run(stopCh)
	defer close(stopCh)

	crdDefinition := newCustomResourceDefinition()
	etcd.CreateTestCRDs(t, apiExtensionClient, true, crdDefinition)
	gvr := schema.GroupVersionResource{Group: crdDefinition.Spec.Group, Version: crdDefinition.Spec.Version, Resource: crdDefinition.Spec.Names.Plural}
	resourceClient := dynamicClient.Resource(gvr).Namespace(nsName)

	replicas := 4
	maxUnavailable := int32(2)
	podLabelValue := "test-crd"

	resource := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       crdDefinition.Spec.Names.Kind,
			"apiVersion": crdDefinition.Spec.Group + "/" + crdDefinition.Spec.Version,
			"metadata": map[string]interface{}{
				"name":      "resource",
				"namespace": nsName,
			},
			"spec": map[string]interface{}{
				"replicas": replicas,
			},
		},
	}
	createdResource, err := resourceClient.Create(resource, metav1.CreateOptions{})
	if err != nil {
		t.Error(err)
	}

	trueValue := true
	ownerRef := metav1.OwnerReference{
		Name:       resource.GetName(),
		Kind:       crdDefinition.Spec.Names.Kind,
		APIVersion: crdDefinition.Spec.Group + "/" + crdDefinition.Spec.Version,
		UID:        createdResource.GetUID(),
		Controller: &trueValue,
	}
	for i := 0; i < replicas; i++ {
		createPod(t, fmt.Sprintf("pod-%d", i), nsName, podLabelValue, clientSet, ownerRef)
	}

	waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4, v1.PodRunning)

	pdb := &v1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-pdb",
		},
		Spec: v1beta1.PodDisruptionBudgetSpec{
			MaxUnavailable: &intstr.IntOrString{
				Type:   intstr.Int,
				IntVal: maxUnavailable,
			},
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": podLabelValue},
			},
		},
	}
	if _, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(nsName).Create(pdb); err != nil {
		t.Errorf("Error creating PodDisruptionBudget: %v", err)
	}

	waitPDBStable(t, clientSet, 4, nsName, pdb.Name)

	newPdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(nsName).Get(pdb.Name, metav1.GetOptions{})

	if expected, found := int32(replicas), newPdb.Status.ExpectedPods; expected != found {
		t.Errorf("Expected %d, but found %d", expected, found)
	}
	if expected, found := int32(replicas)-maxUnavailable, newPdb.Status.DesiredHealthy; expected != found {
		t.Errorf("Expected %d, but found %d", expected, found)
	}
	if expected, found := maxUnavailable, newPdb.Status.PodDisruptionsAllowed; expected != found {
		t.Errorf("Expected %d, but found %d", expected, found)
	}
}

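// createPod creates a pod owned by the given controller reference and labelled so the
// test PDB selects it, then marks it Running and Ready via a status update.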
func createPod(t *testing.T, name, namespace, labelValue string, clientSet clientset.Interface, ownerRef metav1.OwnerReference) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels:    map[string]string{"app": labelValue},
			OwnerReferences: []metav1.OwnerReference{
				ownerRef,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
				},
			},
		},
	}
	_, err := clientSet.CoreV1().Pods(namespace).Create(pod)
	if err != nil {
		t.Error(err)
	}
	addPodConditionReady(pod)
	if _, err := clientSet.CoreV1().Pods(namespace).UpdateStatus(pod); err != nil {
		t.Error(err)
	}
}

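// createNs creates the namespace the test runs in.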
func createNs(t *testing.T, name string, clientSet clientset.Interface) {
	_, err := clientSet.CoreV1().Namespaces().Create(&v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	})
	if err != nil {
		t.Errorf("Error creating namespace: %v", err)
	}
}

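// addPodConditionReady sets the pod phase to Running with a Ready condition so the
// disruption controller counts the pod as healthy.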
func addPodConditionReady(pod *v1.Pod) {
	pod.Status = v1.PodStatus{
		Phase: v1.PodRunning,
		Conditions: []v1.PodCondition{
			{
				Type:   v1.PodReady,
				Status: v1.ConditionTrue,
			},
		},
	}
}

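// newCustomResourceDefinition returns a namespaced CRD whose scale subresource maps
// .spec.replicas and .status.replicas, letting the scale client resolve replica
// counts for the custom resource.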
func newCustomResourceDefinition() *apiextensionsv1beta1.CustomResourceDefinition {
	return &apiextensionsv1beta1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{Name: "crds.mygroup.example.com"},
		Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
			Group:   "mygroup.example.com",
			Version: "v1beta1",
			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
				Plural:   "crds",
				Singular: "crd",
				Kind:     "Crd",
				ListKind: "CrdList",
			},
			Scope: apiextensionsv1beta1.NamespaceScoped,
			Subresources: &apiextensionsv1beta1.CustomResourceSubresources{
				Scale: &apiextensionsv1beta1.CustomResourceSubresourceScale{
					SpecReplicasPath:   ".spec.replicas",
					StatusReplicasPath: ".status.replicas",
				},
			},
		},
	}
}

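// waitPDBStable polls until the PDB status reports podNum currently healthy pods.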
func waitPDBStable(t *testing.T, clientSet clientset.Interface, podNum int32, ns, pdbName string) {
	if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
		pdb, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(ns).Get(pdbName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if pdb.Status.CurrentHealthy != podNum {
			return false, nil
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

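// waitToObservePods waits until the pod informer cache holds exactly podNum pods, all
// in the given phase.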
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int, phase v1.PodPhase) {
	if err := wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		if len(objects) != podNum {
			return false, nil
		}
		for _, obj := range objects {
			pod := obj.(*v1.Pod)
			if pod.Status.Phase != phase {
				return false, nil
			}
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

test/integration/disruption/main_test.go (new file, 26 lines)
@@ -0,0 +1,26 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package disruption

import (
	"k8s.io/kubernetes/test/integration/framework"
	"testing"
)

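// TestMain brings up a standalone etcd for the package before running the tests.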
func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}

@@ -22,9 +22,13 @@ go_test(
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/restmapper:go_default_library",
        "//staging/src/k8s.io/client-go/scale:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//test/integration/framework:go_default_library",
    ],

@@ -18,6 +18,7 @@ package evictions

import (
	"fmt"

	"net/http/httptest"
	"reflect"
	"sync"
@@ -32,9 +33,13 @@ import (
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/scale"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller/disruption"
	"k8s.io/kubernetes/test/integration/framework"
@@ -329,6 +334,17 @@ func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *disruption.D
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pdb-informers")), resyncPeriod)

	client := clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "disruption-controller"))

	discoveryClient := cacheddiscovery.NewMemCacheClient(clientSet.Discovery())
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)

	scaleKindResolver := scale.NewDiscoveryScaleKindResolver(client.Discovery())
	scaleClient, err := scale.NewForConfig(&config, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
	if err != nil {
		t.Fatalf("Error in create scaleClient: %v", err)
	}

	rm := disruption.NewDisruptionController(
		informers.Core().V1().Pods(),
		informers.Policy().V1beta1().PodDisruptionBudgets(),
@@ -336,7 +352,9 @@ func rmSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *disruption.D
		informers.Apps().V1().ReplicaSets(),
		informers.Apps().V1().Deployments(),
		informers.Apps().V1().StatefulSets(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "disruption-controller")),
		client,
		mapper,
		scaleClient,
	)
	return s, closeFn, rm, informers, clientSet
}

@@ -135,7 +135,7 @@ func TestCRD(t *testing.T) {
}

func TestCRDOpenAPI(t *testing.T) {
	result := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--feature-gates=CustomResourcePublishOpenAPI=true"}, framework.SharedEtcd())
	result := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
	defer result.TearDownFn()
	kubeclient, err := kubernetes.NewForConfig(result.ClientConfig)
	if err != nil {
@@ -160,6 +160,7 @@ func TestCRDOpenAPI(t *testing.T) {
		},
		Validation: &apiextensionsv1beta1.CustomResourceValidation{
			OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{
				Type: "object",
				Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{
					"foo": {Type: "string"},
				},

@@ -109,12 +109,16 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
        "//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/restmapper:go_default_library",
        "//staging/src/k8s.io/client-go/scale:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils/image:go_default_library",

@@ -736,7 +736,7 @@ func TestPDBInPreemption(t *testing.T) {
	defer cleanupTest(t, context)
	cs := context.clientSet

	initDisruptionController(context)
	initDisruptionController(t, context)

	defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
		v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),

@@ -33,12 +33,16 @@ import (
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/admission"
	cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/scale"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -238,9 +242,19 @@ func initTestSchedulerWithOptions(

// initDisruptionController initializes and runs a Disruption Controller to properly
// update PodDisruptionBudget objects.
func initDisruptionController(context *testContext) *disruption.DisruptionController {
func initDisruptionController(t *testing.T, context *testContext) *disruption.DisruptionController {
	informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour)

	discoveryClient := cacheddiscovery.NewMemCacheClient(context.clientSet.Discovery())
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)

	config := restclient.Config{Host: context.httpServer.URL}
	scaleKindResolver := scale.NewDiscoveryScaleKindResolver(context.clientSet.Discovery())
	scaleClient, err := scale.NewForConfig(&config, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
	if err != nil {
		t.Fatalf("Error in create scaleClient: %v", err)
	}

	dc := disruption.NewDisruptionController(
		informers.Core().V1().Pods(),
		informers.Policy().V1beta1().PodDisruptionBudgets(),
@@ -248,7 +262,9 @@ func initDisruptionController(context *testContext) *disruption.DisruptionContro
		informers.Apps().V1().ReplicaSets(),
		informers.Apps().V1().Deployments(),
		informers.Apps().V1().StatefulSets(),
		context.clientSet)
		context.clientSet,
		mapper,
		scaleClient)

	informers.Start(context.schedulerConfig.StopEverything)
	informers.WaitForCacheSync(context.schedulerConfig.StopEverything)

@@ -43,9 +43,6 @@ import (
const (
	pollInterval = 100 * time.Millisecond
	pollTimeout  = 60 * time.Second

	fakeImageName = "fake-name"
	fakeImage     = "fakeimage"
)

func labelMap() map[string]string {

@@ -257,7 +257,6 @@ function load-docker-images {
# Computes command line arguments to be passed to kubelet.
function compute-kubelet-params {
  local params="${KUBELET_TEST_ARGS:-}"
  params+=" --allow-privileged=true"
  params+=" --cgroup-root=/"
  params+=" --cloud-provider=gce"
  params+=" --pod-manifest-path=/etc/kubernetes/manifests"