
This is the result of automatically editing source files like this: go install golang.org/x/tools/cmd/goimports@latest find ./test/e2e* -name "*.go" | xargs env PATH=$GOPATH/bin:$PATH ./e2e-framework-sed.sh with e2e-framework-sed.sh containing this: sed -i \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainer(/e2epod.ExecCommandInContainer(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainerWithFullOutput(/e2epod.ExecCommandInContainerWithFullOutput(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInContainer(/e2epod.ExecShellInContainer(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPod(/e2epod.ExecShellInPod(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPodWithFullOutput(/e2epod.ExecShellInPodWithFullOutput(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecWithOptions(/e2epod.ExecWithOptions(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.MatchContainerOutput(/e2eoutput.MatchContainerOutput(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClient(/e2epod.NewPodClient(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClientNS(/e2epod.PodClientNS(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutput(/e2eoutput.TestContainerOutput(\1, /" \ -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutputRegexp(/e2eoutput.TestContainerOutputRegexp(\1, /" \ -e "s/framework.AddOrUpdateLabelOnNode\b/e2enode.AddOrUpdateLabelOnNode/" \ -e "s/framework.AllNodes\b/e2edebug.AllNodes/" \ -e "s/framework.AllNodesReady\b/e2enode.AllNodesReady/" \ -e "s/framework.ContainerResourceGatherer\b/e2edebug.ContainerResourceGatherer/" \ -e "s/framework.ContainerResourceUsage\b/e2edebug.ContainerResourceUsage/" \ -e "s/framework.CreateEmptyFileOnPod\b/e2eoutput.CreateEmptyFileOnPod/" \ -e "s/framework.DefaultPodDeletionTimeout\b/e2epod.DefaultPodDeletionTimeout/" \ -e "s/framework.DumpAllNamespaceInfo\b/e2edebug.DumpAllNamespaceInfo/" \ -e "s/framework.DumpDebugInfo\b/e2eoutput.DumpDebugInfo/" \ -e "s/framework.DumpNodeDebugInfo\b/e2edebug.DumpNodeDebugInfo/" \ -e 
"s/framework.EtcdUpgrade\b/e2eproviders.EtcdUpgrade/" \ -e "s/framework.EventsLister\b/e2edebug.EventsLister/" \ -e "s/framework.ExecOptions\b/e2epod.ExecOptions/" \ -e "s/framework.ExpectNodeHasLabel\b/e2enode.ExpectNodeHasLabel/" \ -e "s/framework.ExpectNodeHasTaint\b/e2enode.ExpectNodeHasTaint/" \ -e "s/framework.GCEUpgradeScript\b/e2eproviders.GCEUpgradeScript/" \ -e "s/framework.ImagePrePullList\b/e2epod.ImagePrePullList/" \ -e "s/framework.KubectlBuilder\b/e2ekubectl.KubectlBuilder/" \ -e "s/framework.LocationParamGKE\b/e2eproviders.LocationParamGKE/" \ -e "s/framework.LogSizeDataTimeseries\b/e2edebug.LogSizeDataTimeseries/" \ -e "s/framework.LogSizeGatherer\b/e2edebug.LogSizeGatherer/" \ -e "s/framework.LogsSizeData\b/e2edebug.LogsSizeData/" \ -e "s/framework.LogsSizeDataSummary\b/e2edebug.LogsSizeDataSummary/" \ -e "s/framework.LogsSizeVerifier\b/e2edebug.LogsSizeVerifier/" \ -e "s/framework.LookForStringInLog\b/e2eoutput.LookForStringInLog/" \ -e "s/framework.LookForStringInPodExec\b/e2eoutput.LookForStringInPodExec/" \ -e "s/framework.LookForStringInPodExecToContainer\b/e2eoutput.LookForStringInPodExecToContainer/" \ -e "s/framework.MasterAndDNSNodes\b/e2edebug.MasterAndDNSNodes/" \ -e "s/framework.MasterNodes\b/e2edebug.MasterNodes/" \ -e "s/framework.MasterUpgradeGKE\b/e2eproviders.MasterUpgradeGKE/" \ -e "s/framework.NewKubectlCommand\b/e2ekubectl.NewKubectlCommand/" \ -e "s/framework.NewLogsVerifier\b/e2edebug.NewLogsVerifier/" \ -e "s/framework.NewNodeKiller\b/e2enode.NewNodeKiller/" \ -e "s/framework.NewResourceUsageGatherer\b/e2edebug.NewResourceUsageGatherer/" \ -e "s/framework.NodeHasTaint\b/e2enode.NodeHasTaint/" \ -e "s/framework.NodeKiller\b/e2enode.NodeKiller/" \ -e "s/framework.NodesSet\b/e2edebug.NodesSet/" \ -e "s/framework.PodClient\b/e2epod.PodClient/" \ -e "s/framework.RemoveLabelOffNode\b/e2enode.RemoveLabelOffNode/" \ -e "s/framework.ResourceConstraint\b/e2edebug.ResourceConstraint/" \ -e 
"s/framework.ResourceGathererOptions\b/e2edebug.ResourceGathererOptions/" \ -e "s/framework.ResourceUsagePerContainer\b/e2edebug.ResourceUsagePerContainer/" \ -e "s/framework.ResourceUsageSummary\b/e2edebug.ResourceUsageSummary/" \ -e "s/framework.RunHostCmd\b/e2eoutput.RunHostCmd/" \ -e "s/framework.RunHostCmdOrDie\b/e2eoutput.RunHostCmdOrDie/" \ -e "s/framework.RunHostCmdWithFullOutput\b/e2eoutput.RunHostCmdWithFullOutput/" \ -e "s/framework.RunHostCmdWithRetries\b/e2eoutput.RunHostCmdWithRetries/" \ -e "s/framework.RunKubectl\b/e2ekubectl.RunKubectl/" \ -e "s/framework.RunKubectlInput\b/e2ekubectl.RunKubectlInput/" \ -e "s/framework.RunKubectlOrDie\b/e2ekubectl.RunKubectlOrDie/" \ -e "s/framework.RunKubectlOrDieInput\b/e2ekubectl.RunKubectlOrDieInput/" \ -e "s/framework.RunKubectlWithFullOutput\b/e2ekubectl.RunKubectlWithFullOutput/" \ -e "s/framework.RunKubemciCmd\b/e2ekubectl.RunKubemciCmd/" \ -e "s/framework.RunKubemciWithKubeconfig\b/e2ekubectl.RunKubemciWithKubeconfig/" \ -e "s/framework.SingleContainerSummary\b/e2edebug.SingleContainerSummary/" \ -e "s/framework.SingleLogSummary\b/e2edebug.SingleLogSummary/" \ -e "s/framework.TimestampedSize\b/e2edebug.TimestampedSize/" \ -e "s/framework.WaitForAllNodesSchedulable\b/e2enode.WaitForAllNodesSchedulable/" \ -e "s/framework.WaitForSSHTunnels\b/e2enode.WaitForSSHTunnels/" \ -e "s/framework.WorkItem\b/e2edebug.WorkItem/" \ "$@" for i in "$@"; do # Import all sub packages and let goimports figure out which of those # are redundant (= already imported) or not needed. 
sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2enode "k8s.io/kubernetes/test/e2e/framework/node"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2epod "k8s.io/kubernetes/test/e2e/framework/pod"' "$i" sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"' "$i" goimports -w "$i" done
286 lines
8.0 KiB
Go
286 lines
8.0 KiB
Go
/*
|
|
Copyright 2018 The Kubernetes Authors.
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
*/
|
|
|
|
package windows
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"sort"
|
|
"sync"
|
|
"time"
|
|
|
|
v1 "k8s.io/api/core/v1"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"k8s.io/apimachinery/pkg/labels"
|
|
"k8s.io/apimachinery/pkg/runtime"
|
|
"k8s.io/apimachinery/pkg/util/uuid"
|
|
"k8s.io/apimachinery/pkg/watch"
|
|
"k8s.io/client-go/tools/cache"
|
|
"k8s.io/kubernetes/test/e2e/framework"
|
|
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
|
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
|
admissionapi "k8s.io/pod-security-admission/api"
|
|
|
|
"github.com/onsi/ginkgo/v2"
|
|
"github.com/onsi/gomega"
|
|
)
|
|
|
|
// Serial/Slow density test for Windows nodes: creates a batch of pods and
// checks that startup latency stays within the configured limits.
var _ = SIGDescribe("[Feature:Windows] Density [Serial] [Slow]", func() {
	f := framework.NewDefaultFramework("density-test-windows")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

	ginkgo.Context("create a batch of pods", func() {
		// TODO(coufon): the values are generous, set more precise limits with benchmark data
		// and add more tests
		dTests := []densityTest{
			{
				podsNr:   10,
				interval: 0 * time.Millisecond,
				// percentile limit of single pod startup latency
				podStartupLimits: e2emetrics.LatencyMetric{
					Perc50: 30 * time.Second,
					Perc90: 54 * time.Second,
					Perc99: 59 * time.Second,
				},
				// upbound of startup latency of a batch of pods
				podBatchStartupLimit: 10 * time.Minute,
			},
		}

		for _, testArg := range dTests {
			// Copy the loop variable so each It closure captures its own value
			// (required before Go 1.22 per-iteration loop variables).
			itArg := testArg
			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
			ginkgo.It(desc, func() {
				itArg.createMethod = "batch"
				runDensityBatchTest(f, itArg)
			})
		}
	})

})
|
|
|
|
// densityTest describes one density test case: how many pods to create, how
// fast to create them, and the latency limits the run must stay within.
type densityTest struct {
	// number of pods
	podsNr int
	// interval between creating pod (rate control)
	interval time.Duration
	// create pods in 'batch' or 'sequence'
	createMethod string
	// API QPS limit
	APIQPSLimit int
	// performance limits
	// podStartupLimits holds the per-percentile ceilings for single-pod startup latency.
	podStartupLimits e2emetrics.LatencyMetric
	// podBatchStartupLimit bounds the time from the first creation to the last pod running.
	podBatchStartupLimit time.Duration
}
|
|
|
|
// runDensityBatchTest runs the density batch pod creation test
|
|
func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Duration, []e2emetrics.PodLatencyData) {
|
|
const (
|
|
podType = "density_test_pod"
|
|
)
|
|
var (
|
|
mutex = &sync.Mutex{}
|
|
watchTimes = make(map[string]metav1.Time)
|
|
stopCh = make(chan struct{})
|
|
)
|
|
|
|
// create test pod data structure
|
|
pods := newDensityTestPods(testArg.podsNr, false, imageutils.GetPauseImageName(), podType)
|
|
|
|
// the controller watches the change of pod status
|
|
controller := newInformerWatchPod(f, mutex, watchTimes, podType)
|
|
go controller.Run(stopCh)
|
|
defer close(stopCh)
|
|
|
|
ginkgo.By("Creating a batch of pods")
|
|
// It returns a map['pod name']'creation time' containing the creation timestamps
|
|
createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)
|
|
|
|
ginkgo.By("Waiting for all Pods to be observed by the watch...")
|
|
|
|
gomega.Eventually(func() bool {
|
|
return len(watchTimes) == testArg.podsNr
|
|
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
|
|
|
|
if len(watchTimes) < testArg.podsNr {
|
|
framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
|
|
}
|
|
|
|
// Analyze results
|
|
var (
|
|
firstCreate metav1.Time
|
|
lastRunning metav1.Time
|
|
init = true
|
|
e2eLags = make([]e2emetrics.PodLatencyData, 0)
|
|
)
|
|
|
|
for name, create := range createTimes {
|
|
watch, ok := watchTimes[name]
|
|
if !ok {
|
|
framework.Failf("pod %s failed to be observed by the watch", name)
|
|
}
|
|
|
|
e2eLags = append(e2eLags,
|
|
e2emetrics.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})
|
|
|
|
if !init {
|
|
if firstCreate.Time.After(create.Time) {
|
|
firstCreate = create
|
|
}
|
|
if lastRunning.Time.Before(watch.Time) {
|
|
lastRunning = watch
|
|
}
|
|
} else {
|
|
init = false
|
|
firstCreate, lastRunning = create, watch
|
|
}
|
|
}
|
|
|
|
sort.Sort(e2emetrics.LatencySlice(e2eLags))
|
|
batchLag := lastRunning.Time.Sub(firstCreate.Time)
|
|
|
|
deletePodsSync(f, pods)
|
|
|
|
return batchLag, e2eLags
|
|
}
|
|
|
|
// createBatchPodWithRateControl creates a batch of pods concurrently, uses one goroutine for each creation.
|
|
// between creations there is an interval for throughput control
|
|
func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
|
|
createTimes := make(map[string]metav1.Time)
|
|
for _, pod := range pods {
|
|
createTimes[pod.ObjectMeta.Name] = metav1.Now()
|
|
go e2epod.NewPodClient(f).Create(pod)
|
|
time.Sleep(interval)
|
|
}
|
|
return createTimes
|
|
}
|
|
|
|
// newInformerWatchPod creates an informer to check whether all pods are running.
|
|
func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
|
|
ns := f.Namespace.Name
|
|
checkPodRunning := func(p *v1.Pod) {
|
|
mutex.Lock()
|
|
defer mutex.Unlock()
|
|
defer ginkgo.GinkgoRecover()
|
|
|
|
if p.Status.Phase == v1.PodRunning {
|
|
if _, found := watchTimes[p.Name]; !found {
|
|
watchTimes[p.Name] = metav1.Now()
|
|
}
|
|
}
|
|
}
|
|
|
|
_, controller := cache.NewInformer(
|
|
&cache.ListWatch{
|
|
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
|
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
|
|
obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options)
|
|
return runtime.Object(obj), err
|
|
},
|
|
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
|
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
|
|
return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options)
|
|
},
|
|
},
|
|
&v1.Pod{},
|
|
0,
|
|
cache.ResourceEventHandlerFuncs{
|
|
AddFunc: func(obj interface{}) {
|
|
p, ok := obj.(*v1.Pod)
|
|
if !ok {
|
|
framework.Failf("expected Pod, got %T", obj)
|
|
}
|
|
go checkPodRunning(p)
|
|
},
|
|
UpdateFunc: func(oldObj, newObj interface{}) {
|
|
p, ok := newObj.(*v1.Pod)
|
|
if !ok {
|
|
framework.Failf("expected Pod, got %T", newObj)
|
|
}
|
|
go checkPodRunning(p)
|
|
},
|
|
},
|
|
)
|
|
return controller
|
|
}
|
|
|
|
// newDensityTestPods creates a list of pods (specification) for test.
|
|
func newDensityTestPods(numPods int, volume bool, imageName, podType string) []*v1.Pod {
|
|
var pods []*v1.Pod
|
|
|
|
for i := 0; i < numPods; i++ {
|
|
|
|
podName := "test-" + string(uuid.NewUUID())
|
|
pod := v1.Pod{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: podName,
|
|
Labels: map[string]string{
|
|
"type": podType,
|
|
"name": podName,
|
|
},
|
|
},
|
|
Spec: v1.PodSpec{
|
|
// Restart policy is always (default).
|
|
Containers: []v1.Container{
|
|
{
|
|
Image: imageName,
|
|
Name: podName,
|
|
},
|
|
},
|
|
NodeSelector: map[string]string{
|
|
"kubernetes.io/os": "windows",
|
|
},
|
|
},
|
|
}
|
|
|
|
if volume {
|
|
pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
|
|
{MountPath: "/test-volume-mnt", Name: podName + "-volume"},
|
|
}
|
|
pod.Spec.Volumes = []v1.Volume{
|
|
{Name: podName + "-volume", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
|
|
}
|
|
}
|
|
|
|
pods = append(pods, &pod)
|
|
}
|
|
|
|
return pods
|
|
}
|
|
|
|
// deletePodsSync deletes a list of pods and block until pods disappear.
|
|
func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
|
|
var wg sync.WaitGroup
|
|
for _, pod := range pods {
|
|
wg.Add(1)
|
|
go func(pod *v1.Pod) {
|
|
defer ginkgo.GinkgoRecover()
|
|
defer wg.Done()
|
|
|
|
err := e2epod.NewPodClient(f).Delete(context.TODO(), pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
|
|
framework.ExpectNoError(err)
|
|
|
|
err = e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
|
|
30*time.Second, 10*time.Minute)
|
|
framework.ExpectNoError(err)
|
|
}(pod)
|
|
}
|
|
wg.Wait()
|
|
}
|