Merge pull request #122419 from pohly/scheduler-perf-create-any
scheduler-perf: create any object from YAML
@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
 kind: ResourceClaimTemplate
 metadata:
   name: test-claim-template
@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha1
+apiVersion: resource.k8s.io/v1alpha2
 kind: ResourceClass
 metadata:
   name: test-class
@@ -729,16 +729,16 @@
   driverName: test-driver.cdi.k8s.io
   nodes: scheduler-perf-dra-*
   maxClaimsPerNodeParam: $maxClaimsPerNode
-- opcode: createResourceClass
+- opcode: createAny
   templatePath: config/dra/resourceclass.yaml
-- opcode: createResourceClaimTemplate
+- opcode: createAny
   templatePath: config/dra/resourceclaimtemplate.yaml
   namespace: init
 - opcode: createPods
   namespace: init
   countParam: $initPods
   podTemplatePath: config/dra/pod-with-claim-template.yaml
-- opcode: createResourceClaimTemplate
+- opcode: createAny
   templatePath: config/dra/resourceclaimtemplate.yaml
   namespace: test
 - opcode: createPods
@@ -799,24 +799,24 @@
   driverName: another-test-driver.cdi.k8s.io
   nodes: scheduler-perf-dra-*
   maxClaimsPerNodeParam: $maxClaimsPerNode
-- opcode: createResourceClass
+- opcode: createAny
   templatePath: config/dra/resourceclass.yaml
-- opcode: createResourceClass
+- opcode: createAny
   templatePath: config/dra/another-resourceclass.yaml
-- opcode: createResourceClaimTemplate
+- opcode: createAny
   templatePath: config/dra/resourceclaimtemplate.yaml
   namespace: init
-- opcode: createResourceClaimTemplate
+- opcode: createAny
   templatePath: config/dra/another-resourceclaimtemplate.yaml
   namespace: init
 - opcode: createPods
   namespace: init
   countParam: $initPods
   podTemplatePath: config/dra/pod-with-many-claim-templates.yaml
-- opcode: createResourceClaimTemplate
+- opcode: createAny
   templatePath: config/dra/resourceclaimtemplate.yaml
   namespace: test
-- opcode: createResourceClaimTemplate
+- opcode: createAny
   templatePath: config/dra/another-resourceclaimtemplate.yaml
   namespace: test
 - opcode: createPods
@@ -19,17 +19,22 @@ package benchmark
 import (
     "context"
     "fmt"
-    "testing"
+    "time"
 
+    "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/client-go/discovery/cached/memory"
+    "k8s.io/client-go/restmapper"
+    "k8s.io/klog/v2"
+    "k8s.io/kubernetes/test/utils/ktesting"
 )
 
-// createOp defines an op where some object gets created from a template.
-// Everything specific for that object (create call, op code, names) gets
-// provided through a type.
-type createOp[T interface{}, P createOpType[T]] struct {
-    // Must match createOpType.Opcode().
+// createAny defines an op where some object gets created from a YAML file.
+// The namespace can be specified.
+type createAny struct {
+    // Must match createAnyOpcode.
     Opcode operationCode
     // Namespace the object should be created in. Must be empty for cluster-scoped objects.
     Namespace string
@@ -37,53 +42,85 @@ type createOp[T interface{}, P createOpType[T]] struct {
     TemplatePath string
 }
 
-func (cro *createOp[T, P]) isValid(allowParameterization bool) error {
-    var p P
-    if cro.Opcode != p.Opcode() {
-        return fmt.Errorf("invalid opcode %q; expected %q", cro.Opcode, p.Opcode())
+var _ runnableOp = &createAny{}
+
+func (c *createAny) isValid(allowParameterization bool) error {
+    if c.Opcode != createAnyOpcode {
+        return fmt.Errorf("invalid opcode %q; expected %q", c.Opcode, createAnyOpcode)
     }
-    if p.Namespaced() && cro.Namespace == "" {
-        return fmt.Errorf("Namespace must be set")
-    }
-    if !p.Namespaced() && cro.Namespace != "" {
-        return fmt.Errorf("Namespace must not be set")
-    }
-    if cro.TemplatePath == "" {
+    if c.TemplatePath == "" {
         return fmt.Errorf("TemplatePath must be set")
     }
+    // The namespace can only be checked later because we don't know yet
+    // whether the object is namespaced or cluster-scoped.
     return nil
 }
 
-func (cro *createOp[T, P]) collectsMetrics() bool {
+func (c *createAny) collectsMetrics() bool {
     return false
 }
 
-func (cro *createOp[T, P]) patchParams(w *workload) (realOp, error) {
-    return cro, cro.isValid(false)
+func (c *createAny) patchParams(w *workload) (realOp, error) {
+    return c, c.isValid(false)
 }
 
-func (cro *createOp[T, P]) requiredNamespaces() []string {
-    if cro.Namespace == "" {
+func (c *createAny) requiredNamespaces() []string {
+    if c.Namespace == "" {
         return nil
     }
-    return []string{cro.Namespace}
+    return []string{c.Namespace}
 }
 
-func (cro *createOp[T, P]) run(ctx context.Context, tb testing.TB, client clientset.Interface) {
-    var obj *T
-    var p P
-    if err := getSpecFromFile(&cro.TemplatePath, &obj); err != nil {
-        tb.Fatalf("parsing %s %q: %v", p.Name(), cro.TemplatePath, err)
+func (c *createAny) run(tCtx ktesting.TContext) {
+    var obj *unstructured.Unstructured
+    if err := getSpecFromFile(&c.TemplatePath, &obj); err != nil {
+        tCtx.Fatalf("%s: parsing failed: %v", c.TemplatePath, err)
     }
-    if _, err := p.CreateCall(client, cro.Namespace)(ctx, obj, metav1.CreateOptions{}); err != nil {
-        tb.Fatalf("create %s: %v", p.Name(), err)
-    }
-}
 
-// createOpType provides type-specific values for the generic createOp.
-type createOpType[T interface{}] interface {
-    Opcode() operationCode
-    Name() string
-    Namespaced() bool
-    CreateCall(client clientset.Interface, namespace string) func(context.Context, *T, metav1.CreateOptions) (*T, error)
+    // Not caching the discovery result isn't very efficient, but good enough when
+    // createAny isn't done often.
+    discoveryCache := memory.NewMemCacheClient(tCtx.Client().Discovery())
+    restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryCache)
+    gv, err := schema.ParseGroupVersion(obj.GetAPIVersion())
+    if err != nil {
+        tCtx.Fatalf("%s: extract group+version from object %q: %v", c.TemplatePath, klog.KObj(obj), err)
+    }
+    gk := schema.GroupKind{Group: gv.Group, Kind: obj.GetKind()}
+
+    create := func() error {
+        mapping, err := restMapper.RESTMapping(gk, gv.Version)
+        if err != nil {
+            // Cached mapping might be stale, refresh on next try.
+            restMapper.Reset()
+            return fmt.Errorf("map %q to resource: %v", gk, err)
+        }
+        resourceClient := tCtx.Dynamic().Resource(mapping.Resource)
+
+        if c.Namespace != "" {
+            if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
+                return fmt.Errorf("namespace %q set for %q, but %q has scope %q", c.Namespace, c.TemplatePath, gk, mapping.Scope.Name())
+            }
+            _, err = resourceClient.Namespace(c.Namespace).Create(tCtx, obj, metav1.CreateOptions{})
+        } else {
+            if mapping.Scope.Name() != meta.RESTScopeNameRoot {
+                return fmt.Errorf("namespace not set for %q, but %q has scope %q", c.TemplatePath, gk, mapping.Scope.Name())
+            }
+            _, err = resourceClient.Create(tCtx, obj, metav1.CreateOptions{})
+        }
+        return err
+    }
+    // Retry, some errors (like CRD just created and type not ready for use yet) are temporary.
+    ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
+    defer cancel()
+    for {
+        err := create()
+        if err == nil {
+            return
+        }
+        select {
+        case <-ctx.Done():
+            tCtx.Fatalf("%s: timed out (%q) while creating %q, last error was: %v", c.TemplatePath, context.Cause(ctx), klog.KObj(obj), err)
+        case <-time.After(time.Second):
+        }
+    }
 }
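
The core technique behind the new createAny op — decode arbitrary YAML into an unstructured object, resolve its group/kind through cached discovery, and create it via the dynamic client — also works outside this test framework. A minimal standalone sketch follows (not part of this change; the kubeconfig handling and the ConfigMap spec are illustrative assumptions):

// Sketch: create an arbitrary object from a YAML document with the dynamic client.
package main

import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/discovery/cached/memory"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/restmapper"
    "k8s.io/client-go/tools/clientcmd"
    "sigs.k8s.io/yaml"
)

func createAnyFromYAML(ctx context.Context, dyn dynamic.Interface, mapper meta.RESTMapper, spec []byte, namespace string) error {
    // Decode YAML into an unstructured object; no typed client or scheme needed.
    obj := &unstructured.Unstructured{}
    if err := yaml.Unmarshal(spec, &obj.Object); err != nil {
        return fmt.Errorf("parse YAML: %v", err)
    }
    gv, err := schema.ParseGroupVersion(obj.GetAPIVersion())
    if err != nil {
        return fmt.Errorf("parse apiVersion %q: %v", obj.GetAPIVersion(), err)
    }
    gk := schema.GroupKind{Group: gv.Group, Kind: obj.GetKind()}
    // Map group/kind to a resource via discovery.
    mapping, err := mapper.RESTMapping(gk, gv.Version)
    if err != nil {
        return fmt.Errorf("map %q to a resource: %v", gk, err)
    }
    // Namespaced objects need .Namespace(...); cluster-scoped ones must not use it.
    if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
        _, err = dyn.Resource(mapping.Resource).Namespace(namespace).Create(ctx, obj, metav1.CreateOptions{})
    } else {
        _, err = dyn.Resource(mapping.Resource).Create(ctx, obj, metav1.CreateOptions{})
    }
    return err
}

func main() {
    // Assumption: a reachable cluster configured in ~/.kube/config.
    cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    dyn := dynamic.NewForConfigOrDie(cfg)
    disc := discovery.NewDiscoveryClientForConfigOrDie(cfg)
    mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(disc))

    // Hypothetical object purely for illustration.
    spec := []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\ndata:\n  key: value\n")
    if err := createAnyFromYAML(context.Background(), dyn, mapper, spec, "default"); err != nil {
        panic(err)
    }
    fmt.Println("created")
}

The deferred REST mapper mirrors what run() does above: mappings come from cached discovery, and when a just-installed CRD is not known yet, the cache can be reset and the create retried.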
@@ -21,14 +21,13 @@ import (
     "fmt"
     "path/filepath"
     "sync"
-    "testing"
 
     resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/util/workqueue"
     "k8s.io/klog/v2"
     draapp "k8s.io/kubernetes/test/e2e/dra/test-driver/app"
+    "k8s.io/kubernetes/test/utils/ktesting"
 )
 
 // createResourceClaimsOp defines an op where resource claims are created.
@@ -82,18 +81,18 @@ func (op *createResourceClaimsOp) requiredNamespaces() []string {
     return []string{op.Namespace}
 }
 
-func (op *createResourceClaimsOp) run(ctx context.Context, tb testing.TB, clientset clientset.Interface) {
-    tb.Logf("creating %d claims in namespace %q", op.Count, op.Namespace)
+func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
+    tCtx.Logf("creating %d claims in namespace %q", op.Count, op.Namespace)
 
     var claimTemplate *resourcev1alpha2.ResourceClaim
     if err := getSpecFromFile(&op.TemplatePath, &claimTemplate); err != nil {
-        tb.Fatalf("parsing ResourceClaim %q: %v", op.TemplatePath, err)
+        tCtx.Fatalf("parsing ResourceClaim %q: %v", op.TemplatePath, err)
     }
     var createErr error
     var mutex sync.Mutex
     create := func(i int) {
         err := func() error {
-            if _, err := clientset.ResourceV1alpha2().ResourceClaims(op.Namespace).Create(ctx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
+            if _, err := tCtx.Client().ResourceV1alpha2().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
                 return fmt.Errorf("create claim: %v", err)
             }
             return nil
@@ -109,34 +108,12 @@ func (op *createResourceClaimsOp) run(ctx context.Context, tb testing.TB, client
     if workers > 30 {
         workers = 30
     }
-    workqueue.ParallelizeUntil(ctx, workers, op.Count, create)
+    workqueue.ParallelizeUntil(tCtx, workers, op.Count, create)
     if createErr != nil {
-        tb.Fatal(createErr.Error())
+        tCtx.Fatal(createErr.Error())
     }
 }
 
-// createResourceClassOpType customizes createOp for creating a ResourceClass.
-type createResourceClassOpType struct{}
-
-func (c createResourceClassOpType) Opcode() operationCode { return createResourceClassOpcode }
-func (c createResourceClassOpType) Name() string { return "ResourceClass" }
-func (c createResourceClassOpType) Namespaced() bool { return false }
-func (c createResourceClassOpType) CreateCall(client clientset.Interface, namespace string) func(context.Context, *resourcev1alpha2.ResourceClass, metav1.CreateOptions) (*resourcev1alpha2.ResourceClass, error) {
-    return client.ResourceV1alpha2().ResourceClasses().Create
-}
-
-// createResourceClassOpType customizes createOp for creating a ResourceClaim.
-type createResourceClaimTemplateOpType struct{}
-
-func (c createResourceClaimTemplateOpType) Opcode() operationCode {
-    return createResourceClaimTemplateOpcode
-}
-func (c createResourceClaimTemplateOpType) Name() string { return "ResourceClaimTemplate" }
-func (c createResourceClaimTemplateOpType) Namespaced() bool { return true }
-func (c createResourceClaimTemplateOpType) CreateCall(client clientset.Interface, namespace string) func(context.Context, *resourcev1alpha2.ResourceClaimTemplate, metav1.CreateOptions) (*resourcev1alpha2.ResourceClaimTemplate, error) {
-    return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).Create
-}
-
 // createResourceDriverOp defines an op where resource claims are created.
 type createResourceDriverOp struct {
     // Must be createResourceDriverOpcode.
@@ -186,8 +163,8 @@ func (op *createResourceDriverOp) patchParams(w *workload) (realOp, error) {
 
 func (op *createResourceDriverOp) requiredNamespaces() []string { return nil }
 
-func (op *createResourceDriverOp) run(ctx context.Context, tb testing.TB, clientset clientset.Interface) {
-    tb.Logf("creating resource driver %q for nodes matching %q", op.DriverName, op.Nodes)
+func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
+    tCtx.Logf("creating resource driver %q for nodes matching %q", op.DriverName, op.Nodes)
 
     // Start the controller side of the DRA test driver such that it simulates
     // per-node resources.
@@ -197,22 +174,22 @@ func (op *createResourceDriverOp) run(ctx context.Context, tb testing.TB, client
         MaxAllocations: op.MaxClaimsPerNode,
     }
 
-    nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+    nodes, err := tCtx.Client().CoreV1().Nodes().List(tCtx, metav1.ListOptions{})
     if err != nil {
-        tb.Fatalf("list nodes: %v", err)
+        tCtx.Fatalf("list nodes: %v", err)
     }
     for _, node := range nodes.Items {
         match, err := filepath.Match(op.Nodes, node.Name)
         if err != nil {
-            tb.Fatalf("matching glob pattern %q against node name %q: %v", op.Nodes, node.Name, err)
+            tCtx.Fatalf("matching glob pattern %q against node name %q: %v", op.Nodes, node.Name, err)
         }
         if match {
             resources.Nodes = append(resources.Nodes, node.Name)
         }
     }
 
-    controller := draapp.NewController(clientset, resources)
-    ctx, cancel := context.WithCancel(ctx)
+    controller := draapp.NewController(tCtx.Client(), resources)
+    ctx, cancel := context.WithCancel(tCtx)
     var wg sync.WaitGroup
     wg.Add(1)
     go func() {
@@ -220,11 +197,11 @@ func (op *createResourceDriverOp) run(ctx context.Context, tb testing.TB, client
         ctx := klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), op.DriverName))
         controller.Run(ctx, 5 /* workers */)
     }()
-    tb.Cleanup(func() {
-        tb.Logf("stopping resource driver %q", op.DriverName)
+    tCtx.Cleanup(func() {
+        tCtx.Logf("stopping resource driver %q", op.DriverName)
         // We must cancel before waiting.
         cancel()
         wg.Wait()
-        tb.Logf("stopped resource driver %q", op.DriverName)
+        tCtx.Logf("stopped resource driver %q", op.DriverName)
     })
 }
@@ -33,7 +33,6 @@ import (
     "github.com/google/go-cmp/cmp"
 
     v1 "k8s.io/api/core/v1"
-    resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -59,23 +58,23 @@ import (
     "k8s.io/kubernetes/test/integration/framework"
     testutils "k8s.io/kubernetes/test/utils"
     "k8s.io/kubernetes/test/utils/ktesting"
+    "k8s.io/kubernetes/test/utils/ktesting/initoption"
     "sigs.k8s.io/yaml"
 )
 
 type operationCode string
 
 const (
+    createAnyOpcode            operationCode = "createAny"
     createNodesOpcode          operationCode = "createNodes"
     createNamespacesOpcode     operationCode = "createNamespaces"
     createPodsOpcode           operationCode = "createPods"
     createPodSetsOpcode        operationCode = "createPodSets"
     createResourceClaimsOpcode operationCode = "createResourceClaims"
-    createResourceClaimTemplateOpcode operationCode = "createResourceClaimTemplate"
-    createResourceClassOpcode  operationCode = "createResourceClass"
     createResourceDriverOpcode operationCode = "createResourceDriver"
     churnOpcode                operationCode = "churn"
     barrierOpcode              operationCode = "barrier"
     sleepOpcode                operationCode = "sleep"
 )
 
 const (
@@ -238,13 +237,12 @@ type op struct {
 // which op we're decoding at runtime.
 func (op *op) UnmarshalJSON(b []byte) error {
     possibleOps := []realOp{
+        &createAny{},
         &createNodesOp{},
         &createNamespacesOp{},
         &createPodsOp{},
         &createPodSetsOp{},
         &createResourceClaimsOp{},
-        &createOp[resourcev1alpha2.ResourceClaimTemplate, createResourceClaimTemplateOpType]{},
-        &createOp[resourcev1alpha2.ResourceClass, createResourceClassOpType]{},
         &createResourceDriverOp{},
         &churnOp{},
         &barrierOp{},
@@ -293,7 +291,7 @@ type runnableOp interface {
     // before running the operation.
     requiredNamespaces() []string
     // run executes the steps provided by the operation.
-    run(context.Context, testing.TB, clientset.Interface)
+    run(ktesting.TContext)
 }
 
 func isValidParameterizable(val string) bool {
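
With run() now taking a single ktesting.TContext, a custom op no longer needs a context, a testing.TB and a clientset threaded through separately. A hypothetical op sketch (the name, the "logCluster" opcode and the body are invented for illustration; it assumes the surrounding scheduler_perf package types operationCode, realOp and workload, plus the fmt, metav1 and ktesting imports already used in this file):

// logClusterOp is a made-up op that only logs the node count.
type logClusterOp struct {
    Opcode operationCode
}

func (op *logClusterOp) isValid(allowParameterization bool) error {
    if op.Opcode != "logCluster" { // assumed opcode string
        return fmt.Errorf("invalid opcode %q", op.Opcode)
    }
    return nil
}

func (op *logClusterOp) collectsMetrics() bool                   { return false }
func (op *logClusterOp) patchParams(w *workload) (realOp, error) { return op, op.isValid(false) }
func (op *logClusterOp) requiredNamespaces() []string            { return nil }

// run receives logging, cancellation and all cluster clients through tCtx.
func (op *logClusterOp) run(tCtx ktesting.TContext) {
    nodes, err := tCtx.Client().CoreV1().Nodes().List(tCtx, metav1.ListOptions{})
    if err != nil {
        tCtx.Fatalf("list nodes: %v", err)
    }
    tCtx.Logf("cluster has %d nodes", len(nodes.Items))
}

Registering such an op would mean adding &logClusterOp{} to possibleOps in UnmarshalJSON, exactly like &createAny{} above.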
@@ -674,6 +672,7 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr
     if !enabled(*perfSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
         b.Skipf("disabled by label filter %q", *perfSchedulingLabelFilter)
     }
+    tCtx := ktesting.Init(b, initoption.PerTestOutput(*useTestingLog))
 
     // Ensure that there are no leaked
     // goroutines. They could influence
@@ -684,28 +683,19 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr
     // quit *before* restoring klog settings.
     framework.GoleakCheck(b)
 
-    ctx := context.Background()
-
-    if *useTestingLog {
-        // In addition to redirection klog
-        // output, also enable contextual
-        // logging.
-        _, ctx = ktesting.NewTestContext(b)
-    }
-
     // Now that we are ready to run, start
     // etcd.
     framework.StartEtcd(b, output)
 
     // 30 minutes should be plenty enough even for the 5000-node tests.
-    ctx, cancel := context.WithTimeout(ctx, 30*time.Minute)
-    b.Cleanup(cancel)
+    timeout := 30 * time.Minute
+    tCtx = ktesting.WithTimeout(tCtx, timeout, fmt.Sprintf("timed out after the %s per-test timeout", timeout))
 
     for feature, flag := range tc.FeatureGates {
         defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
     }
-    informerFactory, client, dyncClient := setupClusterForWorkload(ctx, b, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry)
-    results := runWorkload(ctx, b, tc, w, informerFactory, client, dyncClient, false)
+    informerFactory, tCtx := setupClusterForWorkload(tCtx, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry)
+    results := runWorkload(tCtx, tc, w, informerFactory, false)
     dataItems.DataItems = append(dataItems.DataItems, results...)
 
     if len(results) > 0 {
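
The replacement for the removed context/cancel plumbing is the ktesting.TContext lifecycle used throughout this file: Init wraps the testing.TB, WithTimeout and WithCancel derive new contexts, and Cancel takes a reason string. A minimal standalone sketch (a plain test, not part of this change; the package and test names are illustrative):

package benchmark_test

import (
    "context"
    "testing"
    "time"

    "k8s.io/kubernetes/test/utils/ktesting"
)

func TestTContextLifecycle(t *testing.T) {
    tCtx := ktesting.Init(t) // wraps *testing.T: logging, cleanup and a context in one value
    tCtx = ktesting.WithTimeout(tCtx, time.Minute, "per-test timeout expired")
    tCtx = ktesting.WithCancel(tCtx)
    defer tCtx.Cancel("test is done")

    // TContext implements context.Context, so it can be passed anywhere a
    // context is expected and checked for cancellation as usual.
    select {
    case <-tCtx.Done():
        tCtx.Fatalf("unexpected cancellation: %v", context.Cause(tCtx))
    default:
        tCtx.Logf("still running")
    }
}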
@@ -771,7 +761,7 @@ func loadSchedulerConfig(file string) (*config.KubeSchedulerConfiguration, error
     return nil, fmt.Errorf("couldn't decode as KubeSchedulerConfiguration, got %s: ", gvk)
 }
 
-func unrollWorkloadTemplate(tb testing.TB, wt []op, w *workload) []op {
+func unrollWorkloadTemplate(tb ktesting.TB, wt []op, w *workload) []op {
     var unrolled []op
     for opIndex, o := range wt {
         realOp, err := o.realOp.patchParams(w)
@@ -794,23 +784,23 @@ func unrollWorkloadTemplate(tb testing.TB, wt []op, w *workload) []op {
     return unrolled
 }
 
-func setupClusterForWorkload(ctx context.Context, tb testing.TB, configPath string, featureGates map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
+func setupClusterForWorkload(tCtx ktesting.TContext, configPath string, featureGates map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, ktesting.TContext) {
     var cfg *config.KubeSchedulerConfiguration
     var err error
     if configPath != "" {
         cfg, err = loadSchedulerConfig(configPath)
         if err != nil {
-            tb.Fatalf("error loading scheduler config file: %v", err)
+            tCtx.Fatalf("error loading scheduler config file: %v", err)
         }
         if err = validation.ValidateKubeSchedulerConfiguration(cfg); err != nil {
-            tb.Fatalf("validate scheduler config file failed: %v", err)
+            tCtx.Fatalf("validate scheduler config file failed: %v", err)
         }
     }
-    return mustSetupCluster(ctx, tb, cfg, featureGates, outOfTreePluginRegistry)
+    return mustSetupCluster(tCtx, cfg, featureGates, outOfTreePluginRegistry)
 }
 
-func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, client clientset.Interface, dynClient dynamic.Interface, cleanup bool) []DataItem {
-    b, benchmarking := tb.(*testing.B)
+func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, cleanup bool) []DataItem {
+    b, benchmarking := tCtx.TB().(*testing.B)
     if benchmarking {
         start := time.Now()
         b.Cleanup(func() {
@@ -839,10 +829,10 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
     podInformer := informerFactory.Core().V1().Pods()
 
     // Everything else started by this function gets stopped before it returns.
-    ctx, cancel := context.WithCancel(ctx)
+    tCtx = ktesting.WithCancel(tCtx)
     var wg sync.WaitGroup
     defer wg.Wait()
-    defer cancel()
+    defer tCtx.Cancel("workload is done")
 
     var mu sync.Mutex
     var dataItems []DataItem
@@ -853,48 +843,48 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
 
     if cleanup {
         // This must run before controllers get shut down.
-        defer cleanupWorkload(ctx, tb, tc, client, numPodsScheduledPerNamespace)
+        defer cleanupWorkload(tCtx, tc, numPodsScheduledPerNamespace)
     }
 
-    for opIndex, op := range unrollWorkloadTemplate(tb, tc.WorkloadTemplate, w) {
+    for opIndex, op := range unrollWorkloadTemplate(tCtx, tc.WorkloadTemplate, w) {
         realOp, err := op.realOp.patchParams(w)
         if err != nil {
-            tb.Fatalf("op %d: %v", opIndex, err)
+            tCtx.Fatalf("op %d: %v", opIndex, err)
         }
         select {
-        case <-ctx.Done():
-            tb.Fatalf("op %d: %v", opIndex, ctx.Err())
+        case <-tCtx.Done():
+            tCtx.Fatalf("op %d: %v", opIndex, context.Cause(tCtx))
         default:
         }
         switch concreteOp := realOp.(type) {
         case *createNodesOp:
-            nodePreparer, err := getNodePreparer(fmt.Sprintf("node-%d-", opIndex), concreteOp, client)
+            nodePreparer, err := getNodePreparer(fmt.Sprintf("node-%d-", opIndex), concreteOp, tCtx.Client())
             if err != nil {
-                tb.Fatalf("op %d: %v", opIndex, err)
+                tCtx.Fatalf("op %d: %v", opIndex, err)
             }
-            if err := nodePreparer.PrepareNodes(ctx, nextNodeIndex); err != nil {
-                tb.Fatalf("op %d: %v", opIndex, err)
+            if err := nodePreparer.PrepareNodes(tCtx, nextNodeIndex); err != nil {
+                tCtx.Fatalf("op %d: %v", opIndex, err)
             }
             if cleanup {
                 defer func() {
-                    if err := nodePreparer.CleanupNodes(ctx); err != nil {
-                        tb.Fatalf("failed to clean up nodes, error: %v", err)
+                    if err := nodePreparer.CleanupNodes(tCtx); err != nil {
+                        tCtx.Fatalf("failed to clean up nodes, error: %v", err)
                     }
                 }()
             }
             nextNodeIndex += concreteOp.Count
 
         case *createNamespacesOp:
-            nsPreparer, err := newNamespacePreparer(concreteOp, client, tb)
+            nsPreparer, err := newNamespacePreparer(tCtx, concreteOp)
             if err != nil {
-                tb.Fatalf("op %d: %v", opIndex, err)
+                tCtx.Fatalf("op %d: %v", opIndex, err)
             }
-            if err := nsPreparer.prepare(ctx); err != nil {
-                err2 := nsPreparer.cleanup(ctx)
+            if err := nsPreparer.prepare(tCtx); err != nil {
+                err2 := nsPreparer.cleanup(tCtx)
                 if err2 != nil {
                     err = fmt.Errorf("prepare: %v; cleanup: %v", err, err2)
                 }
-                tb.Fatalf("op %d: %v", opIndex, err)
+                tCtx.Fatalf("op %d: %v", opIndex, err)
             }
             for _, n := range nsPreparer.namespaces() {
                 if _, ok := numPodsScheduledPerNamespace[n]; ok {
@@ -911,7 +901,7 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
             if concreteOp.Namespace != nil {
                 namespace = *concreteOp.Namespace
             }
-            createNamespaceIfNotPresent(ctx, tb, client, namespace, &numPodsScheduledPerNamespace)
+            createNamespaceIfNotPresent(tCtx, namespace, &numPodsScheduledPerNamespace)
             if concreteOp.PodTemplatePath == nil {
                 concreteOp.PodTemplatePath = tc.DefaultPodTemplatePath
             }
@@ -919,18 +909,17 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
             // This needs a separate context and wait group because
             // the code below needs to be sure that the goroutines
             // are stopped.
-            var collectorCtx context.Context
-            var collectorCancel func()
+            var collectorCtx ktesting.TContext
             var collectorWG sync.WaitGroup
             defer collectorWG.Wait()
 
             if concreteOp.CollectMetrics {
-                collectorCtx, collectorCancel = context.WithCancel(ctx)
-                defer collectorCancel()
-                name := tb.Name()
+                collectorCtx = ktesting.WithCancel(tCtx)
+                defer collectorCtx.Cancel("cleaning up")
+                name := tCtx.Name()
                 // The first part is the same for each work load, therefore we can strip it.
                 name = name[strings.Index(name, "/")+1:]
-                collectors = getTestDataCollectors(tb, podInformer, fmt.Sprintf("%s/%s", name, namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
+                collectors = getTestDataCollectors(collectorCtx, podInformer, fmt.Sprintf("%s/%s", name, namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
                 for _, collector := range collectors {
                     // Need loop-local variable for function below.
                     collector := collector
@@ -941,23 +930,23 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
                     }()
                 }
             }
-            if err := createPods(ctx, tb, namespace, concreteOp, client); err != nil {
-                tb.Fatalf("op %d: %v", opIndex, err)
+            if err := createPods(tCtx, namespace, concreteOp); err != nil {
+                tCtx.Fatalf("op %d: %v", opIndex, err)
             }
             if concreteOp.SkipWaitToCompletion {
                 // Only record those namespaces that may potentially require barriers
                 // in the future.
                 numPodsScheduledPerNamespace[namespace] += concreteOp.Count
             } else {
-                if err := waitUntilPodsScheduledInNamespace(ctx, tb, podInformer, namespace, concreteOp.Count); err != nil {
-                    tb.Fatalf("op %d: error in waiting for pods to get scheduled: %v", opIndex, err)
+                if err := waitUntilPodsScheduledInNamespace(tCtx, podInformer, namespace, concreteOp.Count); err != nil {
+                    tCtx.Fatalf("op %d: error in waiting for pods to get scheduled: %v", opIndex, err)
                 }
             }
             if concreteOp.CollectMetrics {
                 // CollectMetrics and SkipWaitToCompletion can never be true at the
                 // same time, so if we're here, it means that all pods have been
                 // scheduled.
-                collectorCancel()
+                collectorCtx.Cancel("collecting metrics, collector must stop first")
                 collectorWG.Wait()
                 mu.Lock()
                 for _, collector := range collectors {
@@ -980,11 +969,11 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
             } else {
                 namespace = fmt.Sprintf("namespace-%d", opIndex)
             }
-            restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cacheddiscovery.NewMemCacheClient(client.Discovery()))
+            restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cacheddiscovery.NewMemCacheClient(tCtx.Client().Discovery()))
             // Ensure the namespace exists.
             nsObj := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
-            if _, err := client.CoreV1().Namespaces().Create(ctx, nsObj, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
-                tb.Fatalf("op %d: unable to create namespace %v: %v", opIndex, namespace, err)
+            if _, err := tCtx.Client().CoreV1().Namespaces().Create(tCtx, nsObj, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
+                tCtx.Fatalf("op %d: unable to create namespace %v: %v", opIndex, namespace, err)
             }
 
             var churnFns []func(name string) string
@@ -992,31 +981,31 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
             for i, path := range concreteOp.TemplatePaths {
                 unstructuredObj, gvk, err := getUnstructuredFromFile(path)
                 if err != nil {
-                    tb.Fatalf("op %d: unable to parse the %v-th template path: %v", opIndex, i, err)
+                    tCtx.Fatalf("op %d: unable to parse the %v-th template path: %v", opIndex, i, err)
                 }
                 // Obtain GVR.
                 mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
                 if err != nil {
-                    tb.Fatalf("op %d: unable to find GVR for %v: %v", opIndex, gvk, err)
+                    tCtx.Fatalf("op %d: unable to find GVR for %v: %v", opIndex, gvk, err)
                 }
                 gvr := mapping.Resource
                 // Distinguish cluster-scoped with namespaced API objects.
                 var dynRes dynamic.ResourceInterface
                 if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
-                    dynRes = dynClient.Resource(gvr).Namespace(namespace)
+                    dynRes = tCtx.Dynamic().Resource(gvr).Namespace(namespace)
                 } else {
-                    dynRes = dynClient.Resource(gvr)
+                    dynRes = tCtx.Dynamic().Resource(gvr)
                 }
 
                 churnFns = append(churnFns, func(name string) string {
                     if name != "" {
-                        if err := dynRes.Delete(ctx, name, metav1.DeleteOptions{}); err != nil {
-                            tb.Errorf("op %d: unable to delete %v: %v", opIndex, name, err)
+                        if err := dynRes.Delete(tCtx, name, metav1.DeleteOptions{}); err != nil {
+                            tCtx.Errorf("op %d: unable to delete %v: %v", opIndex, name, err)
                         }
                         return ""
                     }
 
-                    live, err := dynRes.Create(ctx, unstructuredObj, metav1.CreateOptions{})
+                    live, err := dynRes.Create(tCtx, unstructuredObj, metav1.CreateOptions{})
                     if err != nil {
                         return ""
                     }
@@ -1047,7 +1036,7 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
                         churnFns[i]("")
                     }
                     count++
-                case <-ctx.Done():
+                case <-tCtx.Done():
                     return
                 }
             }
@@ -1070,7 +1059,7 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
                         retVals[i][count%concreteOp.Number] = churnFns[i](retVals[i][count%concreteOp.Number])
                     }
                     count++
-                case <-ctx.Done():
+                case <-tCtx.Done():
                     return
                 }
             }
@@ -1080,11 +1069,11 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
         case *barrierOp:
             for _, namespace := range concreteOp.Namespaces {
                 if _, ok := numPodsScheduledPerNamespace[namespace]; !ok {
-                    tb.Fatalf("op %d: unknown namespace %s", opIndex, namespace)
+                    tCtx.Fatalf("op %d: unknown namespace %s", opIndex, namespace)
                 }
             }
-            if err := waitUntilPodsScheduled(ctx, tb, podInformer, concreteOp.Namespaces, numPodsScheduledPerNamespace); err != nil {
-                tb.Fatalf("op %d: %v", opIndex, err)
+            if err := waitUntilPodsScheduled(tCtx, podInformer, concreteOp.Namespaces, numPodsScheduledPerNamespace); err != nil {
+                tCtx.Fatalf("op %d: %v", opIndex, err)
             }
             // At the end of the barrier, we can be sure that there are no pods
             // pending scheduling in the namespaces that we just blocked on.
@@ -1098,25 +1087,25 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
 
         case *sleepOp:
             select {
-            case <-ctx.Done():
+            case <-tCtx.Done():
             case <-time.After(concreteOp.Duration):
             }
         default:
             runable, ok := concreteOp.(runnableOp)
             if !ok {
-                tb.Fatalf("op %d: invalid op %v", opIndex, concreteOp)
+                tCtx.Fatalf("op %d: invalid op %v", opIndex, concreteOp)
             }
             for _, namespace := range runable.requiredNamespaces() {
-                createNamespaceIfNotPresent(ctx, tb, client, namespace, &numPodsScheduledPerNamespace)
+                createNamespaceIfNotPresent(tCtx, namespace, &numPodsScheduledPerNamespace)
             }
-            runable.run(ctx, tb, client)
+            runable.run(tCtx)
         }
     }
 
     // check unused params and inform users
     unusedParams := w.unusedParams()
     if len(unusedParams) != 0 {
-        tb.Fatalf("the parameters %v are defined on workload %s, but unused.\nPlease make sure there are no typos.", unusedParams, w.Name)
+        tCtx.Fatalf("the parameters %v are defined on workload %s, but unused.\nPlease make sure there are no typos.", unusedParams, w.Name)
     }
 
     // Some tests have unschedulable pods. Do not add an implicit barrier at the
@@ -1133,17 +1122,17 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
 //
 // Calling cleanupWorkload can be skipped if it is known that the next workload
 // will run with a fresh etcd instance.
-func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client clientset.Interface, numPodsScheduledPerNamespace map[string]int) {
+func cleanupWorkload(tCtx ktesting.TContext, tc *testCase, numPodsScheduledPerNamespace map[string]int) {
     deleteNow := *metav1.NewDeleteOptions(0)
     for namespace := range numPodsScheduledPerNamespace {
         // Pods have to be deleted explicitly, with no grace period. Normally
         // kubelet will set the DeletionGracePeriodSeconds to zero when it's okay
         // to remove a deleted pod, but we don't run kubelet...
-        if err := client.CoreV1().Pods(namespace).DeleteCollection(ctx, deleteNow, metav1.ListOptions{}); err != nil {
-            tb.Fatalf("failed to delete pods in namespace %q: %v", namespace, err)
+        if err := tCtx.Client().CoreV1().Pods(namespace).DeleteCollection(tCtx, deleteNow, metav1.ListOptions{}); err != nil {
+            tCtx.Fatalf("failed to delete pods in namespace %q: %v", namespace, err)
         }
-        if err := client.CoreV1().Namespaces().Delete(ctx, namespace, deleteNow); err != nil {
-            tb.Fatalf("Deleting Namespace %q in numPodsScheduledPerNamespace: %v", namespace, err)
+        if err := tCtx.Client().CoreV1().Namespaces().Delete(tCtx, namespace, deleteNow); err != nil {
+            tCtx.Fatalf("Deleting Namespace %q in numPodsScheduledPerNamespace: %v", namespace, err)
         }
     }
 
@@ -1151,8 +1140,8 @@ func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client cl
     // actually removing a namespace can take some time (garbage collecting
     // other generated object like secrets, etc.) and we don't want to
     // start the next workloads while that cleanup is still going on.
-    if err := wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
-        namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+    if err := wait.PollUntilContextTimeout(tCtx, time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
+        namespaces, err := tCtx.Client().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
         if err != nil {
             return false, err
         }
@@ -1165,33 +1154,33 @@ func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client cl
         // All namespaces gone.
         return true, nil
     }); err != nil {
-        tb.Fatalf("failed while waiting for namespace removal: %v", err)
+        tCtx.Fatalf("failed while waiting for namespace removal: %v", err)
     }
 }
 
-func createNamespaceIfNotPresent(ctx context.Context, tb testing.TB, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
+func createNamespaceIfNotPresent(tCtx ktesting.TContext, namespace string, podsPerNamespace *map[string]int) {
     if _, ok := (*podsPerNamespace)[namespace]; !ok {
         // The namespace has not created yet.
         // So, create that and register it.
-        _, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
+        _, err := tCtx.Client().CoreV1().Namespaces().Create(tCtx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
         if err != nil {
-            tb.Fatalf("failed to create namespace for Pod: %v", namespace)
+            tCtx.Fatalf("failed to create namespace for Pod: %v", namespace)
         }
         (*podsPerNamespace)[namespace] = 0
     }
 }
 
 type testDataCollector interface {
-    run(ctx context.Context)
+    run(tCtx ktesting.TContext)
     collect() []DataItem
 }
 
-func getTestDataCollectors(tb testing.TB, podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
+func getTestDataCollectors(tCtx ktesting.TContext, podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
     if mcc == nil {
         mcc = &defaultMetricsCollectorConfig
     }
     return []testDataCollector{
-        newThroughputCollector(tb, podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin),
+        newThroughputCollector(tCtx, podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin),
         newMetricsCollector(mcc, map[string]string{"Name": name}),
     }
 }
@@ -1224,25 +1213,25 @@ func getNodePreparer(prefix string, cno *createNodesOp, clientset clientset.Inte
     ), nil
 }
 
-func createPods(ctx context.Context, tb testing.TB, namespace string, cpo *createPodsOp, clientset clientset.Interface) error {
+func createPods(tCtx ktesting.TContext, namespace string, cpo *createPodsOp) error {
     strategy, err := getPodStrategy(cpo)
     if err != nil {
         return err
     }
-    tb.Logf("creating %d pods in namespace %q", cpo.Count, namespace)
+    tCtx.Logf("creating %d pods in namespace %q", cpo.Count, namespace)
     config := testutils.NewTestPodCreatorConfig()
     config.AddStrategy(namespace, cpo.Count, strategy)
-    podCreator := testutils.NewTestPodCreator(clientset, config)
-    return podCreator.CreatePods(ctx)
+    podCreator := testutils.NewTestPodCreator(tCtx.Client(), config)
+    return podCreator.CreatePods(tCtx)
 }
 
 // waitUntilPodsScheduledInNamespace blocks until all pods in the given
 // namespace are scheduled. Times out after 10 minutes because even at the
 // lowest observed QPS of ~10 pods/sec, a 5000-node test should complete.
-func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podInformer coreinformers.PodInformer, namespace string, wantCount int) error {
+func waitUntilPodsScheduledInNamespace(tCtx ktesting.TContext, podInformer coreinformers.PodInformer, namespace string, wantCount int) error {
     var pendingPod *v1.Pod
 
-    err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) {
+    err := wait.PollUntilContextTimeout(tCtx, 1*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) {
         select {
         case <-ctx.Done():
             return true, ctx.Err()
@@ -1253,10 +1242,10 @@ func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podIn
             return false, err
         }
         if len(scheduled) >= wantCount {
-            tb.Logf("scheduling succeed")
+            tCtx.Logf("scheduling succeed")
             return true, nil
         }
-        tb.Logf("namespace: %s, pods: want %d, got %d", namespace, wantCount, len(scheduled))
+        tCtx.Logf("namespace: %s, pods: want %d, got %d", namespace, wantCount, len(scheduled))
         if len(unscheduled) > 0 {
             pendingPod = unscheduled[0]
         } else {
@@ -1273,7 +1262,7 @@ func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podIn
 
 // waitUntilPodsScheduled blocks until the all pods in the given namespaces are
 // scheduled.
-func waitUntilPodsScheduled(ctx context.Context, tb testing.TB, podInformer coreinformers.PodInformer, namespaces []string, numPodsScheduledPerNamespace map[string]int) error {
+func waitUntilPodsScheduled(tCtx ktesting.TContext, podInformer coreinformers.PodInformer, namespaces []string, numPodsScheduledPerNamespace map[string]int) error {
     // If unspecified, default to all known namespaces.
     if len(namespaces) == 0 {
         for namespace := range numPodsScheduledPerNamespace {
@@ -1282,15 +1271,15 @@ func waitUntilPodsScheduled(ctx context.Context, tb testing.TB, podInformer core
     }
     for _, namespace := range namespaces {
         select {
-        case <-ctx.Done():
-            return ctx.Err()
+        case <-tCtx.Done():
+            return context.Cause(tCtx)
         default:
         }
         wantCount, ok := numPodsScheduledPerNamespace[namespace]
         if !ok {
             return fmt.Errorf("unknown namespace %s", namespace)
         }
-        if err := waitUntilPodsScheduledInNamespace(ctx, tb, podInformer, namespace, wantCount); err != nil {
+        if err := waitUntilPodsScheduledInNamespace(tCtx, podInformer, namespace, wantCount); err != nil {
             return fmt.Errorf("error waiting for pods in namespace %q: %w", namespace, err)
         }
     }
@@ -1440,14 +1429,12 @@ func getCustomVolumeFactory(pvTemplate *v1.PersistentVolume) func(id int) *v1.Pe
 
 // namespacePreparer holds configuration information for the test namespace preparer.
 type namespacePreparer struct {
-    client clientset.Interface
     count  int
     prefix string
     spec   *v1.Namespace
-    tb     testing.TB
 }
 
-func newNamespacePreparer(cno *createNamespacesOp, clientset clientset.Interface, tb testing.TB) (*namespacePreparer, error) {
+func newNamespacePreparer(tCtx ktesting.TContext, cno *createNamespacesOp) (*namespacePreparer, error) {
     ns := &v1.Namespace{}
     if cno.NamespaceTemplatePath != nil {
         if err := getSpecFromFile(cno.NamespaceTemplatePath, ns); err != nil {
@@ -1456,11 +1443,9 @@ func newNamespacePreparer(cno *createNamespacesOp, clientset clientset.Interface
     }
 
     return &namespacePreparer{
-        client: clientset,
         count:  cno.Count,
         prefix: cno.Prefix,
         spec:   ns,
-        tb:     tb,
     }, nil
 }
 
@@ -1474,17 +1459,17 @@ func (p *namespacePreparer) namespaces() []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// prepare creates the namespaces.
|
// prepare creates the namespaces.
|
||||||
func (p *namespacePreparer) prepare(ctx context.Context) error {
|
func (p *namespacePreparer) prepare(tCtx ktesting.TContext) error {
|
||||||
base := &v1.Namespace{}
|
base := &v1.Namespace{}
|
||||||
if p.spec != nil {
|
if p.spec != nil {
|
||||||
base = p.spec
|
base = p.spec
|
||||||
}
|
}
|
||||||
p.tb.Logf("Making %d namespaces with prefix %q and template %v", p.count, p.prefix, *base)
|
tCtx.Logf("Making %d namespaces with prefix %q and template %v", p.count, p.prefix, *base)
|
||||||
for i := 0; i < p.count; i++ {
|
for i := 0; i < p.count; i++ {
|
||||||
n := base.DeepCopy()
|
n := base.DeepCopy()
|
||||||
n.Name = fmt.Sprintf("%s-%d", p.prefix, i)
|
n.Name = fmt.Sprintf("%s-%d", p.prefix, i)
|
||||||
if err := testutils.RetryWithExponentialBackOff(func() (bool, error) {
|
if err := testutils.RetryWithExponentialBackOff(func() (bool, error) {
|
||||||
_, err := p.client.CoreV1().Namespaces().Create(ctx, n, metav1.CreateOptions{})
|
_, err := tCtx.Client().CoreV1().Namespaces().Create(tCtx, n, metav1.CreateOptions{})
|
||||||
return err == nil || apierrors.IsAlreadyExists(err), nil
|
return err == nil || apierrors.IsAlreadyExists(err), nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1494,12 +1479,12 @@ func (p *namespacePreparer) prepare(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// cleanup deletes existing test namespaces.
|
// cleanup deletes existing test namespaces.
|
||||||
func (p *namespacePreparer) cleanup(ctx context.Context) error {
|
func (p *namespacePreparer) cleanup(tCtx ktesting.TContext) error {
|
||||||
var errRet error
|
var errRet error
|
||||||
for i := 0; i < p.count; i++ {
|
for i := 0; i < p.count; i++ {
|
||||||
n := fmt.Sprintf("%s-%d", p.prefix, i)
|
n := fmt.Sprintf("%s-%d", p.prefix, i)
|
||||||
if err := p.client.CoreV1().Namespaces().Delete(ctx, n, metav1.DeleteOptions{}); err != nil {
|
if err := tCtx.Client().CoreV1().Namespaces().Delete(tCtx, n, metav1.DeleteOptions{}); err != nil {
|
||||||
p.tb.Errorf("Deleting Namespace: %v", err)
|
tCtx.Errorf("Deleting Namespace: %v", err)
|
||||||
errRet = err
|
errRet = err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -17,7 +17,6 @@ limitations under the License.
|
|||||||
package benchmark
|
package benchmark
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||||
@@ -70,16 +69,15 @@ func TestScheduling(t *testing.T) {
|
|||||||
for _, config := range configs {
|
for _, config := range configs {
|
||||||
// Not a sub test because we don't have a good name for it.
|
// Not a sub test because we don't have a good name for it.
|
||||||
func() {
|
func() {
|
||||||
_, ctx := ktesting.NewTestContext(t)
|
tCtx := ktesting.Init(t)
|
||||||
|
|
||||||
// No timeout here because the `go test -timeout` will ensure that
|
// No timeout here because the `go test -timeout` will ensure that
|
||||||
// the test doesn't get stuck forever.
|
// the test doesn't get stuck forever.
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
for feature, flag := range config.featureGates {
|
for feature, flag := range config.featureGates {
|
||||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, feature, flag)()
|
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, feature, flag)()
|
||||||
}
|
}
|
||||||
informerFactory, client, dynClient := setupClusterForWorkload(ctx, t, config.schedulerConfigPath, config.featureGates, nil)
|
informerFactory, tCtx := setupClusterForWorkload(tCtx, config.schedulerConfigPath, config.featureGates, nil)
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
if !config.equals(tc) {
|
if !config.equals(tc) {
|
||||||
@@ -93,8 +91,8 @@ func TestScheduling(t *testing.T) {
|
|||||||
if !enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
|
if !enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
|
||||||
t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
|
t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
|
||||||
}
|
}
|
||||||
_, ctx := ktesting.NewTestContext(t)
|
tCtx := ktesting.WithTB(tCtx, t)
|
||||||
runWorkload(ctx, t, tc, w, informerFactory, client, dynClient, true)
|
runWorkload(tCtx, tc, w, informerFactory, true)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
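For TestScheduling the manual context.WithCancel plumbing goes away: ktesting.Init creates one root TContext per config, and ktesting.WithTB rebinds it to each subtest's *testing.T. A rough sketch of that shape, where runOneWorkload is a hypothetical placeholder for the real runWorkload call:

package benchmark

import (
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// testShape sketches the Init/WithTB pattern used in TestScheduling.
func testShape(t *testing.T, workloads []string) {
	tCtx := ktesting.Init(t) // root context, canceled automatically during cleanup

	for _, name := range workloads {
		t.Run(name, func(t *testing.T) {
			// Rebind the shared context to this subtest so failures,
			// logs and cleanup are attributed to it.
			wCtx := ktesting.WithTB(tCtx, t)
			runOneWorkload(wCtx, name)
		})
	}
}

// runOneWorkload is a hypothetical placeholder for runWorkload.
func runOneWorkload(tCtx ktesting.TContext, name string) {
	tCtx.Logf("running workload %s", name)
}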
@@ -18,7 +18,6 @@ package benchmark
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -26,26 +25,22 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
"k8s.io/client-go/dynamic"
|
|
||||||
"k8s.io/client-go/informers"
|
"k8s.io/client-go/informers"
|
||||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||||
clientset "k8s.io/client-go/kubernetes"
|
|
||||||
restclient "k8s.io/client-go/rest"
|
restclient "k8s.io/client-go/rest"
|
||||||
cliflag "k8s.io/component-base/cli/flag"
|
|
||||||
"k8s.io/component-base/featuregate"
|
"k8s.io/component-base/featuregate"
|
||||||
"k8s.io/component-base/metrics/legacyregistry"
|
"k8s.io/component-base/metrics/legacyregistry"
|
||||||
"k8s.io/component-base/metrics/testutil"
|
"k8s.io/component-base/metrics/testutil"
|
||||||
"k8s.io/klog/v2"
|
"k8s.io/klog/v2"
|
||||||
kubeschedulerconfigv1 "k8s.io/kube-scheduler/config/v1"
|
kubeschedulerconfigv1 "k8s.io/kube-scheduler/config/v1"
|
||||||
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
|
apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
|
||||||
"k8s.io/kubernetes/pkg/features"
|
"k8s.io/kubernetes/pkg/features"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/apis/config"
|
"k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||||
kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
|
kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
|
||||||
@@ -53,6 +48,7 @@ import (
|
|||||||
"k8s.io/kubernetes/test/integration/framework"
|
"k8s.io/kubernetes/test/integration/framework"
|
||||||
"k8s.io/kubernetes/test/integration/util"
|
"k8s.io/kubernetes/test/integration/util"
|
||||||
testutils "k8s.io/kubernetes/test/utils"
|
testutils "k8s.io/kubernetes/test/utils"
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -83,33 +79,34 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) {
|
|||||||
// remove resources after finished.
|
// remove resources after finished.
|
||||||
// Notes on rate limiter:
|
// Notes on rate limiter:
|
||||||
// - client rate limit is set to 5000.
|
// - client rate limit is set to 5000.
|
||||||
func mustSetupCluster(ctx context.Context, tb testing.TB, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
|
func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, ktesting.TContext) {
|
||||||
// Run API server with minimal logging by default. Can be raised with -v.
|
// Run API server with minimal logging by default. Can be raised with -v.
|
||||||
framework.MinVerbosity = 0
|
framework.MinVerbosity = 0
|
||||||
|
|
||||||
_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, tb, framework.TestServerSetup{
|
// No alpha APIs (overrides api/all=true in https://github.com/kubernetes/kubernetes/blob/d647d19f6aef811bace300eec96a67644ff303d4/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go#L136),
|
||||||
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
|
// except for DRA API group when needed.
|
||||||
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
|
runtimeConfig := []string{"api/alpha=false"}
|
||||||
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority"}
|
if enabledFeatures[features.DynamicResourceAllocation] {
|
||||||
|
runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha2=true")
|
||||||
// Enable DRA API group.
|
}
|
||||||
if enabledFeatures[features.DynamicResourceAllocation] {
|
customFlags := []string{
|
||||||
opts.APIEnablement.RuntimeConfig = cliflag.ConfigurationMap{
|
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
|
||||||
resourcev1alpha2.SchemeGroupVersion.String(): "true",
|
"--disable-admission-plugins=ServiceAccount,TaintNodesByCondition,Priority",
|
||||||
}
|
"--runtime-config=" + strings.Join(runtimeConfig, ","),
|
||||||
}
|
}
|
||||||
},
|
server, err := apiservertesting.StartTestServer(tCtx, apiservertesting.NewDefaultTestServerOptions(), customFlags, framework.SharedEtcd())
|
||||||
})
|
if err != nil {
|
||||||
tb.Cleanup(tearDownFn)
|
tCtx.Fatalf("start apiserver: %v", err)
|
||||||
|
}
|
||||||
|
tCtx.Cleanup(server.TearDownFn)
|
||||||
|
|
||||||
// Cleanup will be in reverse order: first the clients get cancelled,
|
// Cleanup will be in reverse order: first the clients get cancelled,
|
||||||
// then the apiserver is torn down.
|
// then the apiserver is torn down via the automatic cancelation of
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
// tCtx.
|
||||||
tb.Cleanup(cancel)
|
|
||||||
|
|
||||||
// TODO: client connection configuration, such as QPS or Burst is configurable in theory, this could be derived from the `config`, need to
|
// TODO: client connection configuration, such as QPS or Burst is configurable in theory, this could be derived from the `config`, need to
|
||||||
// support this when there is any testcase that depends on such configuration.
|
// support this when there is any testcase that depends on such configuration.
|
||||||
cfg := restclient.CopyConfig(kubeConfig)
|
cfg := restclient.CopyConfig(server.ClientConfig)
|
||||||
cfg.QPS = 5000.0
|
cfg.QPS = 5000.0
|
||||||
cfg.Burst = 5000
|
cfg.Burst = 5000
|
||||||
|
|
||||||
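The rewritten setup starts the test apiserver with alpha APIs disabled and re-enables only the DRA group when the DynamicResourceAllocation feature gate is on. A hedged sketch of just the flag assembly, where draEnabled stands in for the real feature-gate lookup:

package benchmark

import "strings"

// buildAPIServerFlags sketches the customFlags construction shown above.
// draEnabled corresponds to enabledFeatures[features.DynamicResourceAllocation].
func buildAPIServerFlags(draEnabled bool) []string {
	runtimeConfig := []string{"api/alpha=false"}
	if draEnabled {
		// Only the DRA API group is turned back on.
		runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha2=true")
	}
	return []string{
		// No serviceaccount controller is running in these benchmarks.
		"--disable-admission-plugins=ServiceAccount,TaintNodesByCondition,Priority",
		"--runtime-config=" + strings.Join(runtimeConfig, ","),
	}
}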
@@ -118,34 +115,33 @@ func mustSetupCluster(ctx context.Context, tb testing.TB, config *config.KubeSch
|
|||||||
var err error
|
var err error
|
||||||
config, err = newDefaultComponentConfig()
|
config, err = newDefaultComponentConfig()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tb.Fatalf("Error creating default component config: %v", err)
|
tCtx.Fatalf("Error creating default component config: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
client := clientset.NewForConfigOrDie(cfg)
|
tCtx = ktesting.WithRESTConfig(tCtx, cfg)
|
||||||
dynClient := dynamic.NewForConfigOrDie(cfg)
|
|
||||||
|
|
||||||
// Not all config options will be effective but only those mostly related with scheduler performance will
|
// Not all config options will be effective but only those mostly related with scheduler performance will
|
||||||
// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
|
// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
|
||||||
_, informerFactory := util.StartScheduler(ctx, client, cfg, config, outOfTreePluginRegistry)
|
_, informerFactory := util.StartScheduler(tCtx, tCtx.Client(), cfg, config, outOfTreePluginRegistry)
|
||||||
util.StartFakePVController(ctx, client, informerFactory)
|
util.StartFakePVController(tCtx, tCtx.Client(), informerFactory)
|
||||||
runGC := util.CreateGCController(ctx, tb, *cfg, informerFactory)
|
runGC := util.CreateGCController(tCtx, tCtx, *cfg, informerFactory)
|
||||||
runNS := util.CreateNamespaceController(ctx, tb, *cfg, informerFactory)
|
runNS := util.CreateNamespaceController(tCtx, tCtx, *cfg, informerFactory)
|
||||||
|
|
||||||
runResourceClaimController := func() {}
|
runResourceClaimController := func() {}
|
||||||
if enabledFeatures[features.DynamicResourceAllocation] {
|
if enabledFeatures[features.DynamicResourceAllocation] {
|
||||||
// Testing of DRA with inline resource claims depends on this
|
// Testing of DRA with inline resource claims depends on this
|
||||||
// controller for creating and removing ResourceClaims.
|
// controller for creating and removing ResourceClaims.
|
||||||
runResourceClaimController = util.CreateResourceClaimController(ctx, tb, client, informerFactory)
|
runResourceClaimController = util.CreateResourceClaimController(tCtx, tCtx, tCtx.Client(), informerFactory)
|
||||||
}
|
}
|
||||||
|
|
||||||
informerFactory.Start(ctx.Done())
|
informerFactory.Start(tCtx.Done())
|
||||||
informerFactory.WaitForCacheSync(ctx.Done())
|
informerFactory.WaitForCacheSync(tCtx.Done())
|
||||||
go runGC()
|
go runGC()
|
||||||
go runNS()
|
go runNS()
|
||||||
go runResourceClaimController()
|
go runResourceClaimController()
|
||||||
|
|
||||||
return informerFactory, client, dynClient
|
return informerFactory, tCtx
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the list of scheduled and unscheduled pods in the specified namespaces.
|
// Returns the list of scheduled and unscheduled pods in the specified namespaces.
|
||||||
@@ -268,7 +264,7 @@ func newMetricsCollector(config *metricsCollectorConfig, labels map[string]strin
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*metricsCollector) run(ctx context.Context) {
|
func (*metricsCollector) run(tCtx ktesting.TContext) {
|
||||||
// metricCollector doesn't need to start before the tests, so nothing to do here.
|
// metricCollector doesn't need to start before the tests, so nothing to do here.
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -342,7 +338,6 @@ func collectHistogramVec(metric string, labels map[string]string, lvMap map[stri
|
|||||||
}
|
}
|
||||||
|
|
||||||
type throughputCollector struct {
|
type throughputCollector struct {
|
||||||
tb testing.TB
|
|
||||||
podInformer coreinformers.PodInformer
|
podInformer coreinformers.PodInformer
|
||||||
schedulingThroughputs []float64
|
schedulingThroughputs []float64
|
||||||
labels map[string]string
|
labels map[string]string
|
||||||
@@ -350,9 +345,8 @@ type throughputCollector struct {
|
|||||||
errorMargin float64
|
errorMargin float64
|
||||||
}
|
}
|
||||||
|
|
||||||
func newThroughputCollector(tb testing.TB, podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
|
func newThroughputCollector(tb ktesting.TB, podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
|
||||||
return &throughputCollector{
|
return &throughputCollector{
|
||||||
tb: tb,
|
|
||||||
podInformer: podInformer,
|
podInformer: podInformer,
|
||||||
labels: labels,
|
labels: labels,
|
||||||
namespaces: namespaces,
|
namespaces: namespaces,
|
||||||
@@ -360,7 +354,7 @@ func newThroughputCollector(tb testing.TB, podInformer coreinformers.PodInformer
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tc *throughputCollector) run(ctx context.Context) {
|
func (tc *throughputCollector) run(tCtx ktesting.TContext) {
|
||||||
podsScheduled, _, err := getScheduledPods(tc.podInformer, tc.namespaces...)
|
podsScheduled, _, err := getScheduledPods(tc.podInformer, tc.namespaces...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
klog.Fatalf("%v", err)
|
klog.Fatalf("%v", err)
|
||||||
@@ -374,7 +368,7 @@ func (tc *throughputCollector) run(ctx context.Context) {
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-tCtx.Done():
|
||||||
return
|
return
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
@@ -419,7 +413,7 @@ func (tc *throughputCollector) run(ctx context.Context) {
|
|||||||
errorMargin := (duration - expectedDuration).Seconds() / expectedDuration.Seconds() * 100
|
errorMargin := (duration - expectedDuration).Seconds() / expectedDuration.Seconds() * 100
|
||||||
if tc.errorMargin > 0 && math.Abs(errorMargin) > tc.errorMargin {
|
if tc.errorMargin > 0 && math.Abs(errorMargin) > tc.errorMargin {
|
||||||
// This might affect the result, report it.
|
// This might affect the result, report it.
|
||||||
tc.tb.Errorf("ERROR: Expected throuput collector to sample at regular time intervals. The %d most recent intervals took %s instead of %s, a difference of %0.1f%%.", skipped+1, duration, expectedDuration, errorMargin)
|
tCtx.Errorf("ERROR: Expected throuput collector to sample at regular time intervals. The %d most recent intervals took %s instead of %s, a difference of %0.1f%%.", skipped+1, duration, expectedDuration, errorMargin)
|
||||||
}
|
}
|
||||||
|
|
||||||
// To keep percentiles accurate, we have to record multiple samples with the same
|
// To keep percentiles accurate, we have to record multiple samples with the same
|
||||||
|
@@ -128,7 +128,7 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf
|
|||||||
return sched, informerFactory
|
return sched, informerFactory
|
||||||
}
|
}
|
||||||
|
|
||||||
func CreateResourceClaimController(ctx context.Context, tb testing.TB, clientSet clientset.Interface, informerFactory informers.SharedInformerFactory) func() {
|
func CreateResourceClaimController(ctx context.Context, tb ktesting.TB, clientSet clientset.Interface, informerFactory informers.SharedInformerFactory) func() {
|
||||||
podInformer := informerFactory.Core().V1().Pods()
|
podInformer := informerFactory.Core().V1().Pods()
|
||||||
schedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
|
schedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
|
||||||
claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
|
claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
|
||||||
@@ -190,7 +190,7 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface, i
|
|||||||
// CreateGCController creates a garbage controller and returns a run function
|
// CreateGCController creates a garbage controller and returns a run function
|
||||||
// for it. The informer factory needs to be started before invoking that
|
// for it. The informer factory needs to be started before invoking that
|
||||||
// function.
|
// function.
|
||||||
func CreateGCController(ctx context.Context, tb testing.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() {
|
func CreateGCController(ctx context.Context, tb ktesting.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() {
|
||||||
restclient.AddUserAgent(&restConfig, "gc-controller")
|
restclient.AddUserAgent(&restConfig, "gc-controller")
|
||||||
clientSet := clientset.NewForConfigOrDie(&restConfig)
|
clientSet := clientset.NewForConfigOrDie(&restConfig)
|
||||||
metadataClient, err := metadata.NewForConfig(&restConfig)
|
metadataClient, err := metadata.NewForConfig(&restConfig)
|
||||||
@@ -227,7 +227,7 @@ func CreateGCController(ctx context.Context, tb testing.TB, restConfig restclien
|
|||||||
// CreateNamespaceController creates a namespace controller and returns a run
|
// CreateNamespaceController creates a namespace controller and returns a run
|
||||||
// function for it. The informer factory needs to be started before invoking
|
// function for it. The informer factory needs to be started before invoking
|
||||||
// that function.
|
// that function.
|
||||||
func CreateNamespaceController(ctx context.Context, tb testing.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() {
|
func CreateNamespaceController(ctx context.Context, tb ktesting.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() {
|
||||||
restclient.AddUserAgent(&restConfig, "namespace-controller")
|
restclient.AddUserAgent(&restConfig, "namespace-controller")
|
||||||
clientSet := clientset.NewForConfigOrDie(&restConfig)
|
clientSet := clientset.NewForConfigOrDie(&restConfig)
|
||||||
metadataClient, err := metadata.NewForConfig(&restConfig)
|
metadataClient, err := metadata.NewForConfig(&restConfig)
|
||||||
|
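Several helpers in test/integration/util now take a ktesting.TB instead of testing.TB; because a TContext embeds context.Context and also implements ktesting.TB, callers such as mustSetupCluster can pass the same tCtx for both parameters. A tiny, illustrative sketch of why that compiles (startHelper is hypothetical):

package benchmark

import (
	"context"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// startHelper mimics the CreateGCController-style signature: a context for
// shutdown plus a ktesting.TB for test plumbing. Illustrative only.
func startHelper(ctx context.Context, tb ktesting.TB) func() {
	tb.Helper()
	return func() {
		<-ctx.Done() // a real helper would do work until the context ends
	}
}

func useHelpers(tCtx ktesting.TContext) {
	// The same value satisfies both parameters, mirroring
	// util.CreateGCController(tCtx, tCtx, ...).
	run := startHelper(tCtx, tCtx)
	go run()
}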
test/utils/ktesting/assert.go (new file, 183 lines)
@@ -0,0 +1,183 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/gomega"
|
||||||
|
"github.com/onsi/gomega/format"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FailureError is an error where the error string is meant to be passed to
|
||||||
|
// [TContext.Fatal] directly, i.e. adding some prefix like "unexpected error" is not
|
||||||
|
// necessary. It is also not necessary to dump the error struct.
|
||||||
|
type FailureError struct {
|
||||||
|
Msg string
|
||||||
|
FullStackTrace string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f FailureError) Error() string {
|
||||||
|
return f.Msg
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f FailureError) Backtrace() string {
|
||||||
|
return f.FullStackTrace
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f FailureError) Is(target error) bool {
|
||||||
|
return target == ErrFailure
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrFailure is an empty error that can be wrapped to indicate that an error
|
||||||
|
// is a FailureError. It can also be used to test for a FailureError:
|
||||||
|
//
|
||||||
|
// return fmt.Errorf("some problem%w", ErrFailure)
|
||||||
|
// ...
|
||||||
|
// err := someOperation()
|
||||||
|
// if errors.Is(err, ErrFailure) {
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
var ErrFailure error = FailureError{}
|
||||||
|
|
||||||
|
func expect(tCtx TContext, actual interface{}, extra ...interface{}) gomega.Assertion {
|
||||||
|
tCtx.Helper()
|
||||||
|
return gomega.NewWithT(tCtx).Expect(actual, extra...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func expectNoError(tCtx TContext, err error, explain ...interface{}) {
|
||||||
|
tCtx.Helper()
|
||||||
|
|
||||||
|
description := buildDescription(explain...)
|
||||||
|
|
||||||
|
var failure FailureError
|
||||||
|
if errors.As(err, &failure) {
|
||||||
|
if backtrace := failure.Backtrace(); backtrace != "" {
|
||||||
|
if description != "" {
|
||||||
|
tCtx.Log(description)
|
||||||
|
}
|
||||||
|
tCtx.Logf("Failed at:\n %s", strings.ReplaceAll(backtrace, "\n", "\n "))
|
||||||
|
}
|
||||||
|
if description != "" {
|
||||||
|
tCtx.Fatalf("%s: %s", description, err.Error())
|
||||||
|
}
|
||||||
|
tCtx.Fatal(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if description == "" {
|
||||||
|
description = "Unexpected error"
|
||||||
|
}
|
||||||
|
tCtx.Logf("%s: %s\n%s", description, format.Object(err, 1))
|
||||||
|
tCtx.Fatalf("%s: %v", description, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildDescription(explain ...interface{}) string {
|
||||||
|
switch len(explain) {
|
||||||
|
case 0:
|
||||||
|
return ""
|
||||||
|
case 1:
|
||||||
|
if describe, ok := explain[0].(func() string); ok {
|
||||||
|
return describe()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Sprintf(explain[0].(string), explain[1:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Eventually wraps [gomega.Eventually] such that a failure will be reported via
|
||||||
|
// TContext.Fatal.
|
||||||
|
//
|
||||||
|
// In contrast to [gomega.Eventually], the parameter is strongly typed. It must
|
||||||
|
// accept a TContext as first argument and return one value, the one which is
|
||||||
|
// then checked with the matcher.
|
||||||
|
//
|
||||||
|
// In contrast to direct usage of [gomega.Eventually], making additional
|
||||||
|
// assertions inside the callback is okay as long as they use the TContext that
|
||||||
|
// is passed in. For example, errors can be checked with ExpectNoError:
|
||||||
|
//
|
||||||
|
// cb := func(tCtx ktesting.TContext) int {
|
||||||
|
// value, err := doSomething(...)
|
||||||
|
// ktesting.ExpectNoError(tCtx, err, "something failed")
|
||||||
|
// return value
|
||||||
|
// }
|
||||||
|
// tCtx.Eventually(cb).Should(gomega.Equal(42), "should be the answer to everything")
|
||||||
|
//
|
||||||
|
// If there is no value, then an error can be returned:
|
||||||
|
//
|
||||||
|
// cb := func(tCtx ktesting.TContext) error {
|
||||||
|
// err := doSomething(...)
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
// tCtx.Eventually(cb).Should(gomega.Succeed(), "foobar should succeed")
|
||||||
|
//
|
||||||
|
// The default Gomega poll interval and timeout are used. Setting a specific
|
||||||
|
// timeout may be useful:
|
||||||
|
//
|
||||||
|
// tCtx.Eventually(cb).Timeout(5 * time.Second).Should(gomega.Succeed(), "foobar should succeed")
|
||||||
|
//
|
||||||
|
// Canceling the context in the callback only affects code in the callback. The
|
||||||
|
// context passed to Eventually is not getting canceled. To abort polling
|
||||||
|
// immediately because the expected condition is known to not be reached
|
||||||
|
// anymore, use [gomega.StopTrying]:
|
||||||
|
//
|
||||||
|
// cb := func(tCtx ktesting.TContext) int {
|
||||||
|
// value, err := doSomething(...)
|
||||||
|
// if errors.Is(err, SomeFinalErr) {
|
||||||
|
// gomega.StopTrying("permanent failure").Wrap(err).Now()
|
||||||
|
// }
|
||||||
|
// ktesting.ExpectNoError(tCtx, err, "something failed")
|
||||||
|
// return value
|
||||||
|
// }
|
||||||
|
// tCtx.Eventually(cb).Should(gomega.Equal(42), "should be the answer to everything")
|
||||||
|
//
|
||||||
|
// To poll again after some specific timeout, use [gomega.TryAgainAfter]. This is
|
||||||
|
// particularly useful in [Consistently] to ignore some intermittent error.
|
||||||
|
//
|
||||||
|
// cb := func(tCtx ktesting.TContext) int {
|
||||||
|
// value, err := doSomething(...)
|
||||||
|
// var intermittentErr SomeIntermittentError
|
||||||
|
// if errors.As(err, &intermittentErr) {
|
||||||
|
// gomega.TryAgainAfter(intermittentErr.RetryPeriod).Wrap(err).Now()
|
||||||
|
// }
|
||||||
|
// ktesting.ExpectNoError(tCtx, err, "something failed")
|
||||||
|
// return value
|
||||||
|
// }
|
||||||
|
// tCtx.Eventually(cb).Should(gomega.Equal(42), "should be the answer to everything")
|
||||||
|
func Eventually[T any](tCtx TContext, cb func(TContext) T) gomega.AsyncAssertion {
|
||||||
|
tCtx.Helper()
|
||||||
|
return gomega.NewWithT(tCtx).Eventually(tCtx, func(ctx context.Context) (val T, err error) {
|
||||||
|
tCtx := WithContext(tCtx, ctx)
|
||||||
|
tCtx, finalize := WithError(tCtx, &err)
|
||||||
|
defer finalize()
|
||||||
|
tCtx = WithCancel(tCtx)
|
||||||
|
return cb(tCtx), nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consistently wraps [gomega.Consistently] the same way as [Eventually] wraps
|
||||||
|
// [gomega.Eventually].
|
||||||
|
func Consistently[T any](tCtx TContext, cb func(TContext) T) gomega.AsyncAssertion {
|
||||||
|
tCtx.Helper()
|
||||||
|
return gomega.NewWithT(tCtx).Consistently(tCtx, func(ctx context.Context) (val T, err error) {
|
||||||
|
tCtx := WithContext(tCtx, ctx)
|
||||||
|
tCtx, finalize := WithError(tCtx, &err)
|
||||||
|
defer finalize()
|
||||||
|
return cb(tCtx), nil
|
||||||
|
})
|
||||||
|
}
|
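Put together, the new assertions let integration code poll with Gomega matchers while reporting failures through the TContext. A small usage sketch, assuming a hypothetical countReadyPods helper:

package ktesting_test

import (
	"testing"

	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// countReadyPods is a hypothetical helper used only for this sketch.
func countReadyPods(tCtx ktesting.TContext) (int, error) { return 3, nil }

func TestEventuallySketch(t *testing.T) {
	tCtx := ktesting.Init(t)

	ktesting.Eventually(tCtx, func(tCtx ktesting.TContext) int {
		n, err := countReadyPods(tCtx)
		// A failure here ends this polling attempt and is reported
		// through the TContext, as described in the comments above.
		ktesting.ExpectNoError(tCtx, err, "count ready pods")
		return n
	}).Should(gomega.Equal(3), "all pods should become ready")
}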
test/utils/ktesting/assert_test.go (new file, 194 lines)
@@ -0,0 +1,194 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/gomega"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAsync(t *testing.T) {
|
||||||
|
for name, tc := range map[string]struct {
|
||||||
|
cb func(TContext)
|
||||||
|
expectNoFail bool
|
||||||
|
expectError string
|
||||||
|
expectDuration time.Duration
|
||||||
|
}{
|
||||||
|
"eventually-timeout": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Eventually(tCtx, func(tCtx TContext) int {
|
||||||
|
// Canceling here is a nop.
|
||||||
|
tCtx.Cancel("testing")
|
||||||
|
return 0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1))
|
||||||
|
},
|
||||||
|
expectDuration: time.Second,
|
||||||
|
expectError: `Timed out after x.y s.
|
||||||
|
Expected
|
||||||
|
<int>: 0
|
||||||
|
to equal
|
||||||
|
<int>: 1`,
|
||||||
|
},
|
||||||
|
"eventually-final": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Eventually(tCtx, func(tCtx TContext) float64 {
|
||||||
|
gomega.StopTrying("final error").Now()
|
||||||
|
return 0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: 0,
|
||||||
|
expectError: `Told to stop trying after x.y s.
|
||||||
|
final error`,
|
||||||
|
},
|
||||||
|
"eventually-error": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Eventually(tCtx, func(tCtx TContext) float64 {
|
||||||
|
tCtx.Fatal("some error")
|
||||||
|
return 0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: time.Second,
|
||||||
|
expectError: `Timed out after x.y s.
|
||||||
|
The function passed to Eventually returned the following error:
|
||||||
|
<*errors.joinError | 0xXXXX>:
|
||||||
|
some error
|
||||||
|
{
|
||||||
|
errs: [
|
||||||
|
<*errors.errorString | 0xXXXX>{s: "some error"},
|
||||||
|
],
|
||||||
|
}`,
|
||||||
|
},
|
||||||
|
"eventually-success": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Eventually(tCtx, func(tCtx TContext) float64 {
|
||||||
|
return 1.0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: 0,
|
||||||
|
expectNoFail: true,
|
||||||
|
expectError: ``,
|
||||||
|
},
|
||||||
|
"eventually-retry": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Eventually(tCtx, func(tCtx TContext) float64 {
|
||||||
|
gomega.TryAgainAfter(time.Millisecond).Now()
|
||||||
|
return 0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: time.Second,
|
||||||
|
expectError: `Timed out after x.y s.
|
||||||
|
told to try again after 1ms`,
|
||||||
|
},
|
||||||
|
"consistently-timeout": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Consistently(tCtx, func(tCtx TContext) float64 {
|
||||||
|
// Canceling here is a nop.
|
||||||
|
tCtx.Cancel("testing")
|
||||||
|
return 0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: 0,
|
||||||
|
expectError: `Failed after x.y s.
|
||||||
|
Expected
|
||||||
|
<float64>: 0
|
||||||
|
to equal
|
||||||
|
<float64>: 1`,
|
||||||
|
},
|
||||||
|
"consistently-final": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Consistently(tCtx, func(tCtx TContext) float64 {
|
||||||
|
gomega.StopTrying("final error").Now()
|
||||||
|
tCtx.FailNow()
|
||||||
|
return 0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: 0,
|
||||||
|
expectError: `Told to stop trying after x.y s.
|
||||||
|
final error`,
|
||||||
|
},
|
||||||
|
"consistently-error": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Consistently(tCtx, func(tCtx TContext) float64 {
|
||||||
|
tCtx.Fatal("some error")
|
||||||
|
return 0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: 0,
|
||||||
|
expectError: `Failed after x.y s.
|
||||||
|
The function passed to Consistently returned the following error:
|
||||||
|
<*errors.joinError | 0xXXXX>:
|
||||||
|
some error
|
||||||
|
{
|
||||||
|
errs: [
|
||||||
|
<*errors.errorString | 0xXXXX>{s: "some error"},
|
||||||
|
],
|
||||||
|
}`,
|
||||||
|
},
|
||||||
|
"consistently-success": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Consistently(tCtx, func(tCtx TContext) float64 {
|
||||||
|
return 1.0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: time.Second,
|
||||||
|
expectNoFail: true,
|
||||||
|
expectError: ``,
|
||||||
|
},
|
||||||
|
"consistently-retry": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
Consistently(tCtx, func(tCtx TContext) float64 {
|
||||||
|
gomega.TryAgainAfter(time.Millisecond).Wrap(errors.New("intermittent error")).Now()
|
||||||
|
return 0
|
||||||
|
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
|
||||||
|
},
|
||||||
|
expectDuration: time.Second,
|
||||||
|
expectError: `Timed out while waiting on TryAgainAfter after x.y s.
|
||||||
|
told to try again after 1ms: intermittent error`,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
tc := tc
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
tCtx := Init(t)
|
||||||
|
var err error
|
||||||
|
tCtx, finalize := WithError(tCtx, &err)
|
||||||
|
start := time.Now()
|
||||||
|
func() {
|
||||||
|
defer finalize()
|
||||||
|
tc.cb(tCtx)
|
||||||
|
}()
|
||||||
|
duration := time.Since(start)
|
||||||
|
assert.InDelta(t, tc.expectDuration.Seconds(), duration.Seconds(), 0.1, fmt.Sprintf("callback invocation duration %s", duration))
|
||||||
|
assert.Equal(t, !tc.expectNoFail, tCtx.Failed(), "Failed()")
|
||||||
|
if tc.expectError == "" {
|
||||||
|
assert.NoError(t, err)
|
||||||
|
} else if assert.NotNil(t, err) {
|
||||||
|
t.Logf("Result:\n%s", err.Error())
|
||||||
|
errMsg := err.Error()
|
||||||
|
errMsg = regexp.MustCompile(`[[:digit:]]+\.[[:digit:]]+s`).ReplaceAllString(errMsg, "x.y s")
|
||||||
|
errMsg = regexp.MustCompile(`0x[[:xdigit:]]+`).ReplaceAllString(errMsg, "0xXXXX")
|
||||||
|
assert.Equal(t, tc.expectError, errMsg)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
test/utils/ktesting/clientcontext.go (new file, 114 lines)
@@ -0,0 +1,114 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/onsi/gomega"
|
||||||
|
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||||
|
"k8s.io/client-go/discovery/cached/memory"
|
||||||
|
"k8s.io/client-go/dynamic"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
|
"k8s.io/client-go/restmapper"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithRESTConfig initializes all client-go clients with new clients
|
||||||
|
// created for the config. The current test name gets included in the UserAgent.
|
||||||
|
func WithRESTConfig(tCtx TContext, cfg *rest.Config) TContext {
|
||||||
|
cfg = rest.CopyConfig(cfg)
|
||||||
|
cfg.UserAgent = fmt.Sprintf("%s -- %s", rest.DefaultKubernetesUserAgent(), tCtx.Name())
|
||||||
|
|
||||||
|
cCtx := clientContext{
|
||||||
|
TContext: tCtx,
|
||||||
|
restConfig: cfg,
|
||||||
|
client: clientset.NewForConfigOrDie(cfg),
|
||||||
|
dynamic: dynamic.NewForConfigOrDie(cfg),
|
||||||
|
apiextensions: apiextensions.NewForConfigOrDie(cfg),
|
||||||
|
}
|
||||||
|
|
||||||
|
cachedDiscovery := memory.NewMemCacheClient(cCtx.client.Discovery())
|
||||||
|
cCtx.restMapper = restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscovery)
|
||||||
|
|
||||||
|
return &cCtx
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithClients uses an existing config and clients.
|
||||||
|
func WithClients(tCtx TContext, cfg *rest.Config, mapper *restmapper.DeferredDiscoveryRESTMapper, client clientset.Interface, dynamic dynamic.Interface, apiextensions apiextensions.Interface) TContext {
|
||||||
|
return clientContext{
|
||||||
|
TContext: tCtx,
|
||||||
|
restConfig: cfg,
|
||||||
|
restMapper: mapper,
|
||||||
|
client: client,
|
||||||
|
dynamic: dynamic,
|
||||||
|
apiextensions: apiextensions,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type clientContext struct {
|
||||||
|
TContext
|
||||||
|
|
||||||
|
restConfig *rest.Config
|
||||||
|
restMapper *restmapper.DeferredDiscoveryRESTMapper
|
||||||
|
client clientset.Interface
|
||||||
|
dynamic dynamic.Interface
|
||||||
|
apiextensions apiextensions.Interface
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) CleanupCtx(cb func(TContext)) {
|
||||||
|
cCtx.Helper()
|
||||||
|
cleanupCtx(cCtx, cb)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) Expect(actual interface{}, extra ...interface{}) gomega.Assertion {
|
||||||
|
cCtx.Helper()
|
||||||
|
return expect(cCtx, actual, extra...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) ExpectNoError(err error, explain ...interface{}) {
|
||||||
|
cCtx.Helper()
|
||||||
|
expectNoError(cCtx, err, explain...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) Logger() klog.Logger {
|
||||||
|
return klog.FromContext(cCtx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) RESTConfig() *rest.Config {
|
||||||
|
if cCtx.restConfig == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return rest.CopyConfig(cCtx.restConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) RESTMapper() *restmapper.DeferredDiscoveryRESTMapper {
|
||||||
|
return cCtx.restMapper
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) Client() clientset.Interface {
|
||||||
|
return cCtx.client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) Dynamic() dynamic.Interface {
|
||||||
|
return cCtx.dynamic
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cCtx clientContext) APIExtensions() apiextensions.Interface {
|
||||||
|
return cCtx.apiextensions
|
||||||
|
}
|
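WithRESTConfig is what mustSetupCluster now calls so that later code only needs the TContext to reach the cluster. A rough usage sketch (the rest.Config is assumed to come from a freshly started test apiserver):

package ktesting_test

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// listNodes is a sketch: cfg is assumed to point at a running test apiserver.
func listNodes(tCtx ktesting.TContext, cfg *rest.Config) {
	// Attach typed, dynamic and apiextensions clients plus a REST mapper.
	tCtx = ktesting.WithRESTConfig(tCtx, cfg)

	nodes, err := tCtx.Client().CoreV1().Nodes().List(tCtx, metav1.ListOptions{})
	if err != nil {
		tCtx.Fatalf("list nodes: %v", err)
	}
	tCtx.Logf("cluster has %d nodes", len(nodes.Items))
}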
test/utils/ktesting/contexthelper.go (new file, 91 lines)
@@ -0,0 +1,91 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cleanupErr creates a cause when canceling a context because the test has completed.
|
||||||
|
// It is a context.Canceled error.
|
||||||
|
func cleanupErr(testName string) error {
|
||||||
|
return canceledError(fmt.Sprintf("test %s is cleaning up", testName))
|
||||||
|
}
|
||||||
|
|
||||||
|
type canceledError string
|
||||||
|
|
||||||
|
func (c canceledError) Error() string { return string(c) }
|
||||||
|
|
||||||
|
func (c canceledError) Is(target error) bool {
|
||||||
|
return target == context.Canceled
|
||||||
|
}
|
||||||
|
|
||||||
|
// withTimeout corresponds to [context.WithTimeout]. In contrast to
|
||||||
|
// [context.WithTimeout], it automatically cancels during test cleanup, provides
|
||||||
|
// the given cause when the deadline is reached, and its cancel function
|
||||||
|
// requires a cause.
|
||||||
|
func withTimeout(ctx context.Context, tb TB, timeout time.Duration, timeoutCause string) (context.Context, func(cause string)) {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
cancelCtx, cancel := context.WithCancelCause(ctx)
|
||||||
|
after := time.NewTimer(timeout)
|
||||||
|
stopCtx, stop := context.WithCancel(ctx) // Only used internally, doesn't need a cause.
|
||||||
|
tb.Cleanup(func() {
|
||||||
|
cancel(cleanupErr(tb.Name()))
|
||||||
|
stop()
|
||||||
|
})
|
||||||
|
go func() {
|
||||||
|
select {
|
||||||
|
case <-stopCtx.Done():
|
||||||
|
after.Stop()
|
||||||
|
// No need to set a cause here. The cause or error of
|
||||||
|
// the parent context will be used.
|
||||||
|
case <-after.C:
|
||||||
|
cancel(canceledError(timeoutCause))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Determine which deadline is sooner: ours or that of our parent.
|
||||||
|
deadline := now.Add(timeout)
|
||||||
|
if parentDeadline, ok := ctx.Deadline(); ok {
|
||||||
|
if deadline.After(parentDeadline) {
|
||||||
|
deadline = parentDeadline
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We always have a deadline.
|
||||||
|
return deadlineContext{Context: cancelCtx, deadline: deadline}, func(cause string) {
|
||||||
|
var cancelCause error
|
||||||
|
if cause != "" {
|
||||||
|
cancelCause = canceledError(cause)
|
||||||
|
}
|
||||||
|
cancel(cancelCause)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type deadlineContext struct {
|
||||||
|
context.Context
|
||||||
|
deadline time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d deadlineContext) Deadline() (time.Time, bool) {
|
||||||
|
return d.deadline, true
|
||||||
|
}
|
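withTimeout builds on context.WithCancelCause, so whoever cancels first (the timeout, test cleanup, or an explicit caller) records a readable reason that context.Cause later reports. A standard-library-only sketch of that mechanism:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Cancel with an explanatory cause, as withTimeout does when the
	// timeout fires or the test cleans up.
	cancel(errors.New("test example is cleaning up"))

	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // test example is cleaning up
}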
test/utils/ktesting/contexthelper_test.go (new file, 126 lines)
@@ -0,0 +1,126 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCleanupErr(t *testing.T) {
|
||||||
|
actual := cleanupErr(t.Name())
|
||||||
|
if !errors.Is(actual, context.Canceled) {
|
||||||
|
t.Errorf("cleanupErr %T should be a %T", actual, context.Canceled)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCause(t *testing.T) {
|
||||||
|
timeoutCause := canceledError("I timed out")
|
||||||
|
parentCause := errors.New("parent canceled")
|
||||||
|
|
||||||
|
t.Parallel()
|
||||||
|
for name, tt := range map[string]struct {
|
||||||
|
parentCtx context.Context
|
||||||
|
timeout time.Duration
|
||||||
|
sleep time.Duration
|
||||||
|
cancelCause string
|
||||||
|
expectErr, expectCause error
|
||||||
|
expectDeadline time.Duration
|
||||||
|
}{
|
||||||
|
"nothing": {
|
||||||
|
parentCtx: context.Background(),
|
||||||
|
timeout: 5 * time.Millisecond,
|
||||||
|
sleep: time.Millisecond,
|
||||||
|
},
|
||||||
|
"timeout": {
|
||||||
|
parentCtx: context.Background(),
|
||||||
|
timeout: time.Millisecond,
|
||||||
|
sleep: 5 * time.Millisecond,
|
||||||
|
expectErr: context.Canceled,
|
||||||
|
expectCause: canceledError(timeoutCause),
|
||||||
|
},
|
||||||
|
"parent-canceled": {
|
||||||
|
parentCtx: func() context.Context {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
cancel()
|
||||||
|
return ctx
|
||||||
|
}(),
|
||||||
|
timeout: time.Millisecond,
|
||||||
|
sleep: 5 * time.Millisecond,
|
||||||
|
expectErr: context.Canceled,
|
||||||
|
expectCause: context.Canceled,
|
||||||
|
},
|
||||||
|
"parent-cause": {
|
||||||
|
parentCtx: func() context.Context {
|
||||||
|
ctx, cancel := context.WithCancelCause(context.Background())
|
||||||
|
cancel(parentCause)
|
||||||
|
return ctx
|
||||||
|
}(),
|
||||||
|
timeout: time.Millisecond,
|
||||||
|
sleep: 5 * time.Millisecond,
|
||||||
|
expectErr: context.Canceled,
|
||||||
|
expectCause: parentCause,
|
||||||
|
},
|
||||||
|
"deadline-no-parent": {
|
||||||
|
parentCtx: context.Background(),
|
||||||
|
timeout: time.Minute,
|
||||||
|
expectDeadline: time.Minute,
|
||||||
|
},
|
||||||
|
"deadline-parent": {
|
||||||
|
parentCtx: func() context.Context {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||||
|
t.Cleanup(cancel)
|
||||||
|
return ctx
|
||||||
|
}(),
|
||||||
|
timeout: 2 * time.Minute,
|
||||||
|
expectDeadline: time.Minute,
|
||||||
|
},
|
||||||
|
"deadline-child": {
|
||||||
|
parentCtx: func() context.Context {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||||
|
t.Cleanup(cancel)
|
||||||
|
return ctx
|
||||||
|
}(),
|
||||||
|
timeout: time.Minute,
|
||||||
|
expectDeadline: time.Minute,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
tt := tt
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
ctx, cancel := withTimeout(tt.parentCtx, t, tt.timeout, timeoutCause.Error())
|
||||||
|
if tt.cancelCause != "" {
|
||||||
|
cancel(tt.cancelCause)
|
||||||
|
}
|
||||||
|
if tt.expectDeadline != 0 {
|
||||||
|
actualDeadline, ok := ctx.Deadline()
|
||||||
|
if assert.True(t, ok, "should have had a deadline") {
|
||||||
|
assert.InDelta(t, time.Until(actualDeadline), tt.expectDeadline, float64(time.Second), "remaining time till Deadline()")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
time.Sleep(tt.sleep)
|
||||||
|
actualErr := ctx.Err()
|
||||||
|
actualCause := context.Cause(ctx)
|
||||||
|
assert.Equal(t, tt.expectErr, actualErr, "ctx.Err()")
|
||||||
|
assert.Equal(t, tt.expectCause, actualCause, "context.Cause()")
|
||||||
|
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
test/utils/ktesting/doc.go (new file, 32 lines)
@@ -0,0 +1,32 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package ktesting is a wrapper around k8s.io/klog/v2/ktesting. In contrast
|
||||||
|
// to the klog package, this one is opinionated and tailored towards testing
|
||||||
|
// Kubernetes.
|
||||||
|
//
|
||||||
|
// Importing it
|
||||||
|
// - adds the -v command line flag
|
||||||
|
// - enables better dumping of complex datatypes
|
||||||
|
// - sets the default verbosity to 5 (can be changed with [SetDefaultVerbosity])
|
||||||
|
//
|
||||||
|
// It also adds additional APIs and types for unit and integration tests
|
||||||
|
// which are too experimental for klog and/or are unrelated to logging.
|
||||||
|
// The ktesting package itself takes care of managing a test context
|
||||||
|
// with deadlines, timeouts, cancellation, and some common attributes
|
||||||
|
// as first-class members of the API. Sub-packages have additional APIs
|
||||||
|
// for propagating values via the context, implemented via [WithValue].
|
||||||
|
package ktesting
|
test/utils/ktesting/errorcontext.go (new file, 153 lines)
@@ -0,0 +1,153 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithError creates a context where test failures are collected and stored in
|
||||||
|
// the provided error instance when the caller is done. Use it like this:
|
||||||
|
//
|
||||||
|
// func doSomething(tCtx ktesting.TContext) (finalErr error) {
|
||||||
|
// tCtx, finalize := WithError(tCtx, &finalErr)
|
||||||
|
// defer finalize()
|
||||||
|
// ...
|
||||||
|
// tCtx.Fatal("some failure")
|
||||||
|
//
|
||||||
|
// Any error already stored in the variable will get overwritten by finalize if
|
||||||
|
// there were test failures, otherwise the variable is left unchanged.
|
||||||
|
// If there were multiple test errors, then the error will wrap all of
|
||||||
|
// them with errors.Join.
|
||||||
|
//
|
||||||
|
// Test failures are not propagated to the parent context.
|
||||||
|
func WithError(tCtx TContext, err *error) (TContext, func()) {
|
||||||
|
eCtx := &errorContext{
|
||||||
|
TContext: tCtx,
|
||||||
|
}
|
||||||
|
|
||||||
|
return eCtx, func() {
|
||||||
|
// Recover has to be called in the deferred function. When called inside
|
||||||
|
// a function called by a deferred function (like finalize below), it
|
||||||
|
// returns nil.
|
||||||
|
if e := recover(); e != nil {
|
||||||
|
if _, ok := e.(fatalWithError); !ok {
|
||||||
|
// Not our own panic, pass it on instead of setting the error.
|
||||||
|
panic(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
eCtx.finalize(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type errorContext struct {
|
||||||
|
TContext
|
||||||
|
|
||||||
|
mutex sync.Mutex
|
||||||
|
errors []error
|
||||||
|
failed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) finalize(err *error) {
|
||||||
|
eCtx.mutex.Lock()
|
||||||
|
defer eCtx.mutex.Unlock()
|
||||||
|
|
||||||
|
if !eCtx.failed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
errs := eCtx.errors
|
||||||
|
if len(errs) == 0 {
|
||||||
|
errs = []error{errFailedWithNoExplanation}
|
||||||
|
}
|
||||||
|
*err = errors.Join(errs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) Error(args ...any) {
|
||||||
|
eCtx.mutex.Lock()
|
||||||
|
defer eCtx.mutex.Unlock()
|
||||||
|
|
||||||
|
// Gomega adds a leading newline in https://github.com/onsi/gomega/blob/f804ac6ada8d36164ecae0513295de8affce1245/internal/gomega.go#L37
|
||||||
|
// Let's strip that at start and end because ktesting will make errors
|
||||||
|
// stand out more with the "ERROR" prefix, so there's no need for additional
|
||||||
|
// line breaks.
|
||||||
|
eCtx.errors = append(eCtx.errors, errors.New(strings.TrimSpace(fmt.Sprintln(args...))))
|
||||||
|
eCtx.failed = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) Errorf(format string, args ...any) {
|
||||||
|
eCtx.mutex.Lock()
|
||||||
|
defer eCtx.mutex.Unlock()
|
||||||
|
|
||||||
|
eCtx.errors = append(eCtx.errors, errors.New(strings.TrimSpace(fmt.Sprintf(format, args...))))
|
||||||
|
eCtx.failed = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) Fail() {
|
||||||
|
eCtx.mutex.Lock()
|
||||||
|
defer eCtx.mutex.Unlock()
|
||||||
|
|
||||||
|
eCtx.failed = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) FailNow() {
|
||||||
|
eCtx.Helper()
|
||||||
|
eCtx.Fail()
|
||||||
|
panic(failed)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) Failed() bool {
|
||||||
|
eCtx.mutex.Lock()
|
||||||
|
defer eCtx.mutex.Unlock()
|
||||||
|
|
||||||
|
return eCtx.failed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) Fatal(args ...any) {
|
||||||
|
eCtx.Error(args...)
|
||||||
|
eCtx.FailNow()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) Fatalf(format string, args ...any) {
|
||||||
|
eCtx.Errorf(format, args...)
|
||||||
|
eCtx.FailNow()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) CleanupCtx(cb func(TContext)) {
|
||||||
|
eCtx.Helper()
|
||||||
|
cleanupCtx(eCtx, cb)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (eCtx *errorContext) Logger() klog.Logger {
|
||||||
|
return klog.FromContext(eCtx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// fatalWithError is the internal type that should never get propagated up. The
|
||||||
|
// only case where that can happen is when the developer forgot to call
|
||||||
|
// finalize via defer. The string explains that, in case that developers get to
|
||||||
|
// see it.
|
||||||
|
type fatalWithError string
|
||||||
|
|
||||||
|
const failed = fatalWithError("WithError TContext encountered a fatal error, but the finalize function was not called via defer as it should have been.")
|
||||||
|
|
||||||
|
var errFailedWithNoExplanation = errors.New("WithError context was marked as failed without recording an error")
|
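WithError is what allows Eventually and Consistently to turn Fatal/Error calls inside a callback into an ordinary error for the caller. A complete version of the pattern from the doc comment above, with doSomething as a hypothetical operation:

package ktesting_test

import (
	"errors"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// doSomething is a hypothetical operation used only for illustration.
func doSomething() error { return errors.New("boom") }

// runStep reports failures as a returned error instead of failing the whole
// test immediately, mirroring the WithError doc comment.
func runStep(tCtx ktesting.TContext) (finalErr error) {
	tCtx, finalize := ktesting.WithError(tCtx, &finalErr)
	defer finalize()

	if err := doSomething(); err != nil {
		// Recorded by the error context; finalize stores it in finalErr
		// and the panic from the implicit FailNow is swallowed.
		tCtx.Fatalf("doSomething failed: %v", err)
	}
	return nil
}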
test/utils/ktesting/errorcontext_test.go (new file, 116 lines)
@@ -0,0 +1,116 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWithError(t *testing.T) {
|
||||||
|
t.Run("panic", func(t *testing.T) {
|
||||||
|
assert.Panics(t, func() {
|
||||||
|
tCtx := Init(t)
|
||||||
|
var err error
|
||||||
|
_, finalize := WithError(tCtx, &err)
|
||||||
|
defer finalize()
|
||||||
|
|
||||||
|
panic("pass me through")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
normalErr := errors.New("normal error")
|
||||||
|
|
||||||
|
for name, tc := range map[string]struct {
|
||||||
|
cb func(TContext)
|
||||||
|
expectNoFail bool
|
||||||
|
expectError string
|
||||||
|
}{
|
||||||
|
"none": {
|
||||||
|
cb: func(tCtx TContext) {},
|
||||||
|
expectNoFail: true,
|
||||||
|
expectError: normalErr.Error(),
|
||||||
|
},
|
||||||
|
"Error": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
tCtx.Error("some error")
|
||||||
|
},
|
||||||
|
expectError: "some error",
|
||||||
|
},
|
||||||
|
"Errorf": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
tCtx.Errorf("some %s", "error")
|
||||||
|
},
|
||||||
|
expectError: "some error",
|
||||||
|
},
|
||||||
|
"Fatal": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
tCtx.Fatal("some error")
|
||||||
|
tCtx.Error("another error")
|
||||||
|
},
|
||||||
|
expectError: "some error",
|
||||||
|
},
|
||||||
|
"Fatalf": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
tCtx.Fatalf("some %s", "error")
|
||||||
|
tCtx.Error("another error")
|
||||||
|
},
|
||||||
|
expectError: "some error",
|
||||||
|
},
|
||||||
|
"Fail": {
|
||||||
|
cb: func(tCtx TContext) {
	tCtx.Fail()
	tCtx.Error("another error")
},
expectError: "another error",
|
||||||
|
},
|
||||||
|
"FailNow": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
tCtx.FailNow()
|
||||||
|
tCtx.Error("another error")
|
||||||
|
},
|
||||||
|
expectError: errFailedWithNoExplanation.Error(),
|
||||||
|
},
|
||||||
|
"many": {
|
||||||
|
cb: func(tCtx TContext) {
|
||||||
|
tCtx.Error("first error")
|
||||||
|
tCtx.Error("second error")
|
||||||
|
},
|
||||||
|
expectError: `first error
|
||||||
|
second error`,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(name, func(t *testing.T) {
|
||||||
|
tCtx := Init(t)
|
||||||
|
err := normalErr
|
||||||
|
tCtx, finalize := WithError(tCtx, &err)
|
||||||
|
func() {
|
||||||
|
defer finalize()
|
||||||
|
tc.cb(tCtx)
|
||||||
|
}()
|
||||||
|
|
||||||
|
assert.Equal(t, !tc.expectNoFail, tCtx.Failed(), "Failed()")
|
||||||
|
if tc.expectError == "" {
|
||||||
|
assert.NoError(t, err)
|
||||||
|
} else if assert.NotNil(t, err) {
|
||||||
|
assert.Equal(t, tc.expectError, err.Error())
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
20
test/utils/ktesting/examples/gomega/doc.go
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// The tests will fail and therefore are excluded from normal "make test" via
|
||||||
|
// the "example" build tag. To run the tests and check the output, use "go test
|
||||||
|
// -tags example ."
|
||||||
|
package gomega
|
44
test/utils/ktesting/examples/gomega/example_test.go
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
//go:build example
|
||||||
|
// +build example
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package gomega
|
||||||
|
|
||||||
|
// The tests below will fail and therefore are excluded from
|
||||||
|
// normal "make test" via the "example" build tag. To run
|
||||||
|
// the tests and check the output, use "go test -tags example ."
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/gomega"
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGomega(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
|
||||||
|
gomega.NewWithT(tCtx).Eventually(tCtx, func(ctx context.Context) int {
|
||||||
|
// TODO: tCtx = ktesting.WithContext(tCtx, ctx)
|
||||||
|
// Or some dedicated tCtx.Eventually?
|
||||||
|
|
||||||
|
return 42
|
||||||
|
}).WithPolling(time.Second).Should(gomega.Equal(1))
|
||||||
|
}
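
// Editor's sketch (not part of this PR): one way the TODO above could be
// resolved with the WithContext helper that this PR adds in
// test/utils/ktesting/tcontext.go, so that code inside the polled callback
// uses the callback's (potentially shorter-lived) context.
func TestGomegaWithContext(t *testing.T) {
	tCtx := ktesting.Init(t)

	gomega.NewWithT(tCtx).Eventually(tCtx, func(ctx context.Context) int {
		tCtx := ktesting.WithContext(tCtx, ctx)
		tCtx.Logger().V(5).Info("polling")
		return 42
	}).WithPolling(time.Second).Should(gomega.Equal(42))
}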
|
20
test/utils/ktesting/examples/logging/doc.go
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// The tests will fail and therefore are excluded from normal "make test" via
|
||||||
|
// the "example" build tag. To run the tests and check the output, use "go test
|
||||||
|
// -tags example ."
|
||||||
|
package logging
|
74
test/utils/ktesting/examples/logging/example_test.go
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
//go:build example
|
||||||
|
// +build example
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logging
|
||||||
|
|
||||||
|
// The tests below will fail and therefore are excluded from
|
||||||
|
// normal "make test" via the "example" build tag. To run
|
||||||
|
// the tests and check the output, use "go test -tags example ."
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestError(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
tCtx.Error("some", "thing")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestErrorf(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
tCtx.Errorf("some %s", "thing")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFatal(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
tCtx.Fatal("some", "thing")
|
||||||
|
tCtx.Log("not reached")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFatalf(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
tCtx.Fatalf("some %s", "thing")
|
||||||
|
tCtx.Log("not reached")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInfo(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
tCtx.Log("hello via Log")
|
||||||
|
tCtx.Logger().Info("hello via Info")
|
||||||
|
tCtx.Error("some", "thing")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithStep(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
bake(ktesting.WithStep(tCtx, "bake cake"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func bake(tCtx ktesting.TContext) {
|
||||||
|
heatOven(ktesting.WithStep(tCtx, "set heat for baking"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func heatOven(tCtx ktesting.TContext) {
|
||||||
|
tCtx.Log("Log()")
|
||||||
|
tCtx.Logger().Info("Logger().Info()")
|
||||||
|
tCtx.Fatal("oven not found")
|
||||||
|
}
|
20
test/utils/ktesting/examples/with_ktesting/doc.go
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// The tests will fail and therefore are excluded from normal "make test" via
|
||||||
|
// the "example" build tag. To run the tests and check the output, use "go test
|
||||||
|
// -tags example ."
|
||||||
|
package withktesting
|
52
test/utils/ktesting/examples/with_ktesting/example_test.go
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
//go:build example
|
||||||
|
// +build example
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package withktesting
|
||||||
|
|
||||||
|
// The tests below will fail and therefore are excluded from
|
||||||
|
// normal "make test" via the "example" build tag. To run
|
||||||
|
// the tests and check the output, use "go test -tags example ."
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTimeout(t *testing.T) {
|
||||||
|
tCtx := ktesting.Init(t)
|
||||||
|
tmp := t.TempDir()
|
||||||
|
tCtx.Logf("Using %q as temporary directory.", tmp)
|
||||||
|
tCtx.Cleanup(func() {
|
||||||
|
t.Log("Cleaning up...")
|
||||||
|
})
|
||||||
|
if deadline, ok := t.Deadline(); ok {
|
||||||
|
t.Logf("Will fail shortly before the test suite deadline at %s.", deadline)
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-time.After(1000 * time.Hour):
|
||||||
|
// This should not be reached.
|
||||||
|
tCtx.Log("Huh?! I shouldn't be that old.")
|
||||||
|
case <-tCtx.Done():
|
||||||
|
// But this will before the test suite timeout.
|
||||||
|
tCtx.Errorf("need to stop: %v", context.Cause(tCtx))
|
||||||
|
}
|
||||||
|
}
|
20
test/utils/ktesting/examples/without_ktesting/doc.go
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// The tests will fail and therefore are excluded from normal "make test" via
|
||||||
|
// the "example" build tag. To run the tests and check the output, use "go test
|
||||||
|
// -tags example ."
|
||||||
|
package withoutktesting
|
@@ -0,0 +1,40 @@
|
|||||||
|
//go:build example
|
||||||
|
// +build example
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package withoutktesting
|
||||||
|
|
||||||
|
// The tests below will fail and therefore are excluded from
|
||||||
|
// normal "make test" via the "example" build tag. To run
|
||||||
|
// the tests and check the output, use "go test -tags example ."
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTimeout(t *testing.T) {
|
||||||
|
tmp := t.TempDir()
|
||||||
|
t.Logf("Using %q as temporary directory.", tmp)
|
||||||
|
t.Cleanup(func() {
|
||||||
|
t.Log("Cleaning up...")
|
||||||
|
})
|
||||||
|
// This will not complete anytime soon...
|
||||||
|
t.Log("Please kill me.")
|
||||||
|
<-time.After(1000 * time.Hour)
|
||||||
|
}
|
30
test/utils/ktesting/initoption/initoption.go
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package initoption
|
||||||
|
|
||||||
|
import "k8s.io/kubernetes/test/utils/ktesting/internal"
|
||||||
|
|
||||||
|
// InitOption is a functional option for Init and InitCtx.
|
||||||
|
type InitOption func(c *internal.InitConfig)
|
||||||
|
|
||||||
|
// PerTestOutput controls whether a per-test logger gets
|
||||||
|
// set up by Init. Has no effect in InitCtx.
|
||||||
|
func PerTestOutput(enabled bool) InitOption {
|
||||||
|
return func(c *internal.InitConfig) {
|
||||||
|
c.PerTestOutput = enabled
|
||||||
|
}
|
||||||
|
}
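
// Editor's sketch (not part of this PR): how a caller would use this option.
// The benchmark motivation is an assumption, not something stated by the PR.
//
//	func BenchmarkScheduling(b *testing.B) {
//		// Skip the per-test log output redirection.
//		tCtx := ktesting.Init(b, initoption.PerTestOutput(false))
//		_ = tCtx
//	}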
|
21
test/utils/ktesting/internal/config.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
type InitConfig struct {
|
||||||
|
PerTestOutput bool
|
||||||
|
}
|
76
test/utils/ktesting/klogcontext.go
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var timeNow = time.Now // Can be stubbed out for testing.
|
||||||
|
|
||||||
|
// withKlogHeader creates a TB where the same "I<date> <time>]" prefix
|
||||||
|
// gets added to all log output, just as in the klog test logger.
|
||||||
|
// This is used internally when constructing a TContext for unit testing.
|
||||||
|
func withKlogHeader(tb TB) TB {
|
||||||
|
return klogTB{
|
||||||
|
TB: tb,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type klogTB struct {
|
||||||
|
TB
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k klogTB) Log(args ...any) {
|
||||||
|
k.Helper()
|
||||||
|
k.TB.Log(header() + strings.TrimSpace(fmt.Sprintln(args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k klogTB) Logf(format string, args ...any) {
|
||||||
|
k.Helper()
|
||||||
|
k.TB.Log(header() + strings.TrimSpace(fmt.Sprintf(format, args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k klogTB) Error(args ...any) {
|
||||||
|
k.Helper()
|
||||||
|
k.TB.Error(header() + strings.TrimSpace(fmt.Sprintln(args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k klogTB) Errorf(format string, args ...any) {
|
||||||
|
k.Helper()
|
||||||
|
k.TB.Error(header() + strings.TrimSpace(fmt.Sprintf(format, args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k klogTB) Fatal(args ...any) {
|
||||||
|
k.Helper()
|
||||||
|
k.TB.Fatal(header() + strings.TrimSpace(fmt.Sprintln(args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k klogTB) Fatalf(format string, args ...any) {
|
||||||
|
k.Helper()
|
||||||
|
k.TB.Fatal(header() + strings.TrimSpace(fmt.Sprintf(format, args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func header() string {
|
||||||
|
now := timeNow()
|
||||||
|
_, month, day := now.Date()
|
||||||
|
hour, minute, second := now.Clock()
|
||||||
|
return fmt.Sprintf("I%02d%02d %02d:%02d:%02d.%06d] ",
|
||||||
|
month, day, hour, minute, second, now.Nanosecond()/1000)
|
||||||
|
}
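
// Editor's note (not part of this PR): with the format string above, output
// written through this wrapper gets the same kind of prefix as klog's test
// logger, for example:
//
//	I0102 15:04:05.123456] some message
//
// timeNow exists so that a unit test can stub the clock; a hypothetical check
// could look like:
//
//	timeNow = func() time.Time {
//		return time.Date(2024, time.January, 2, 15, 4, 5, 123456000, time.UTC)
//	}
//	fmt.Print(header()) // prints "I0102 15:04:05.123456] "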
|
@@ -14,21 +14,18 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
+// Package ktesting is a wrapper around k8s.io/klog/v2/ktesting. It provides
+// those (and only those) functions that test code in Kubernetes should use,
+// plus better dumping of complex datatypes. It adds the klog command line
+// flags and increases the default verbosity to 5.
 package ktesting
 
 import (
 	"context"
 	"flag"
 	"fmt"
+	"testing"
 
-	_ "k8s.io/component-base/logs/testinit"
 	"k8s.io/klog/v2"
-	"k8s.io/klog/v2/ktesting"
-	"k8s.io/kubernetes/test/utils/format"
+
+	// Initialize command line parameters.
+	_ "k8s.io/component-base/logs/testinit"
 )
 
 func init() {
@@ -44,30 +41,14 @@ func SetDefaultVerbosity(v int) {
 	_ = f.Value.Set(fmt.Sprintf("%d", v))
 }
 
-// NewTestContext is a wrapper around ktesting.NewTestContext with settings
-// specific to Kubernetes.
-func NewTestContext(tl ktesting.TL) (klog.Logger, context.Context) {
-	config := ktesting.NewConfig(
-		ktesting.AnyToString(func(v interface{}) string {
-			return format.Object(v, 1)
-		}),
-		ktesting.VerbosityFlagName("v"),
-		ktesting.VModuleFlagName("vmodule"),
-	)
-
-	// Copy klog settings instead of making the ktesting logger
-	// configurable directly.
-	var fs flag.FlagSet
-	config.AddFlags(&fs)
-	for _, name := range []string{"v", "vmodule"} {
-		from := flag.CommandLine.Lookup(name)
-		to := fs.Lookup(name)
-		if err := to.Value.Set(from.Value.String()); err != nil {
-			panic(err)
-		}
-	}
-
-	logger := ktesting.NewLogger(tl, config)
-	ctx := klog.NewContext(context.Background(), logger)
-	return logger, ctx
+// NewTestContext is a drop-in replacement for ktesting.NewTestContext
+// which returns a more versatile context.
+//
+// The type of that context is still context.Context because replacing
+// it with TContext breaks tests which use `WithCancel`.
+//
+// TODO(pohly): change all of that code together with changing the return type.
+func NewTestContext(tb testing.TB) (klog.Logger, context.Context) {
+	tCtx := Init(tb)
+	return tCtx.Logger(), tCtx
 }
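
// Editor's note (not part of this PR): call sites that already pass a
// *testing.T keep working unchanged, because *testing.T satisfies both the old
// ktesting.TL parameter and the new testing.TB parameter:
//
//	func TestSomething(t *testing.T) {
//		logger, ctx := ktesting.NewTestContext(t)
//		logger.Info("hello")
//		_ = ctx
//	}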
139
test/utils/ktesting/signals.go
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
interruptCtx context.Context
|
||||||
|
|
||||||
|
defaultProgressReporter = new(progressReporter)
|
||||||
|
)
|
||||||
|
|
||||||
|
const ginkgoSpecContextKey = "GINKGO_SPEC_CONTEXT"
|
||||||
|
|
||||||
|
type ginkgoReporter interface {
|
||||||
|
AttachProgressReporter(reporter func() string) func()
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Setting up signals is intentionally done in an init function because
|
||||||
|
// then importing ktesting in a unit or integration test is sufficient
|
||||||
|
// to activate the signal behavior.
|
||||||
|
signalCtx, _ := signal.NotifyContext(context.Background(), os.Interrupt)
|
||||||
|
cancelCtx, cancel := context.WithCancelCause(context.Background())
|
||||||
|
go func() {
|
||||||
|
<-signalCtx.Done()
|
||||||
|
cancel(errors.New("received interrupt signal"))
|
||||||
|
}()
|
||||||
|
|
||||||
|
// This reimplements the contract between Ginkgo and Gomega for progress reporting.
|
||||||
|
// When using Ginkgo contexts, Ginkgo will implement it. This here is for "go test".
|
||||||
|
//
|
||||||
|
// nolint:staticcheck // It complains about using a plain string. This can only be fixed
|
||||||
|
// by Ginkgo and Gomega formalizing this interface and define a type (somewhere...
|
||||||
|
// probably cannot be in either Ginkgo or Gomega).
|
||||||
|
interruptCtx = context.WithValue(cancelCtx, ginkgoSpecContextKey, defaultProgressReporter)
|
||||||
|
|
||||||
|
signalChannel := make(chan os.Signal, 1)
|
||||||
|
// progressSignals will be empty on Windows.
|
||||||
|
if len(progressSignals) > 0 {
|
||||||
|
signal.Notify(signalChannel, progressSignals...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// os.Stderr gets redirected by "go test". "go test -v" has to be
|
||||||
|
// used to see the output while a test runs.
|
||||||
|
go defaultProgressReporter.run(interruptCtx, os.Stderr, signalChannel)
|
||||||
|
}
|
||||||
|
|
||||||
|
type progressReporter struct {
|
||||||
|
mutex sync.Mutex
|
||||||
|
reporterCounter int64
|
||||||
|
reporters map[int64]func() string
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ ginkgoReporter = &progressReporter{}
|
||||||
|
|
||||||
|
// AttachProgressReporter implements Gomega's contextWithAttachProgressReporter.
|
||||||
|
func (p *progressReporter) AttachProgressReporter(reporter func() string) func() {
|
||||||
|
p.mutex.Lock()
|
||||||
|
defer p.mutex.Unlock()
|
||||||
|
|
||||||
|
// TODO (?): identify the caller and record that for dumpProgress.
|
||||||
|
p.reporterCounter++
|
||||||
|
id := p.reporterCounter
|
||||||
|
if p.reporters == nil {
|
||||||
|
p.reporters = make(map[int64]func() string)
|
||||||
|
}
|
||||||
|
p.reporters[id] = reporter
|
||||||
|
return func() {
|
||||||
|
p.detachProgressReporter(id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *progressReporter) detachProgressReporter(id int64) {
|
||||||
|
p.mutex.Lock()
|
||||||
|
defer p.mutex.Unlock()
|
||||||
|
|
||||||
|
delete(p.reporters, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *progressReporter) run(ctx context.Context, out io.Writer, progressSignalChannel chan os.Signal) {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-progressSignalChannel:
|
||||||
|
p.dumpProgress(out)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dumpProgress is less useful than the Ginkgo progress report. We can't fix
// the fact that we don't know which tests are currently running and instead have to
|
||||||
|
// rely on "go test -v" for that.
|
||||||
|
//
|
||||||
|
// But perhaps dumping goroutines and their callstacks is useful anyway? TODO:
|
||||||
|
// look at how Ginkgo does it and replicate some of it.
|
||||||
|
func (p *progressReporter) dumpProgress(out io.Writer) {
|
||||||
|
p.mutex.Lock()
|
||||||
|
defer p.mutex.Unlock()
|
||||||
|
|
||||||
|
var buffer strings.Builder
|
||||||
|
buffer.WriteString("You requested a progress report.\n")
|
||||||
|
if len(p.reporters) == 0 {
|
||||||
|
buffer.WriteString("Currently there is no information about test progress available.\n")
|
||||||
|
}
|
||||||
|
for _, reporter := range p.reporters {
|
||||||
|
report := reporter()
|
||||||
|
buffer.WriteRune('\n')
|
||||||
|
buffer.WriteString(report)
|
||||||
|
if !strings.HasSuffix(report, "\n") {
|
||||||
|
buffer.WriteRune('\n')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _ = out.Write([]byte(buffer.String()))
|
||||||
|
}
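
// Editor's sketch (not part of this PR): long-running helper code could make
// itself visible in the SIGUSR1 progress report by attaching a reporter
// through the same informal contract that is implemented above. attachProgress
// is a hypothetical helper, not part of this change.
func attachProgress(ctx context.Context, describe func() string) func() {
	reporter, ok := ctx.Value(ginkgoSpecContextKey).(ginkgoReporter)
	if !ok {
		// A plain context (e.g. context.Background()) has no reporter attached.
		return func() {}
	}
	// The returned function detaches the reporter again.
	return reporter.AttachProgressReporter(describe)
}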
|
31
test/utils/ktesting/signals_non_win.go
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
//go:build !windows
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// If there ever is a non-Windows build platform that doesn't
|
||||||
|
// have SIGUSR1, then we need to add another exception for it.
|
||||||
|
progressSignals = []os.Signal{syscall.SIGUSR1}
|
||||||
|
)
|
28
test/utils/ktesting/signals_win.go
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
//go:build windows
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
progressSignals = []os.Signal{}
|
||||||
|
)
|
101
test/utils/ktesting/stepcontext.go
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2024 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithStep creates a context where a prefix is added to all errors and log
|
||||||
|
// messages, similar to how errors are wrapped. This can be nested, leaving a
|
||||||
|
// trail of "bread crumbs" that help figure out where in a test some problem
|
||||||
|
// occurred or why some log output gets written:
|
||||||
|
//
|
||||||
|
// ERROR: bake cake: set heat for baking: oven not found
|
||||||
|
//
|
||||||
|
// The string should describe the operation that is about to happen ("starting
|
||||||
|
// the controller", "list items") or what is being operated on ("HTTP server").
|
||||||
|
// Multiple different prefixes get concatenated with a colon.
|
||||||
|
func WithStep(tCtx TContext, what string) TContext {
|
||||||
|
sCtx := &stepContext{
|
||||||
|
TContext: tCtx,
|
||||||
|
what: what,
|
||||||
|
}
|
||||||
|
return WithLogger(sCtx, klog.LoggerWithName(sCtx.Logger(), what))
|
||||||
|
}
|
||||||
|
|
||||||
|
type stepContext struct {
|
||||||
|
TContext
|
||||||
|
what string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sCtx *stepContext) Log(args ...any) {
|
||||||
|
sCtx.Helper()
|
||||||
|
sCtx.TContext.Log(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintln(args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sCtx *stepContext) Logf(format string, args ...any) {
|
||||||
|
sCtx.Helper()
|
||||||
|
sCtx.TContext.Log(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintf(format, args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sCtx *stepContext) Error(args ...any) {
|
||||||
|
sCtx.Helper()
|
||||||
|
sCtx.TContext.Error(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintln(args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sCtx *stepContext) Errorf(format string, args ...any) {
|
||||||
|
sCtx.Helper()
|
||||||
|
sCtx.TContext.Error(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintf(format, args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sCtx *stepContext) Fatal(args ...any) {
|
||||||
|
sCtx.Helper()
|
||||||
|
sCtx.TContext.Fatal(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintln(args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sCtx *stepContext) Fatalf(format string, args ...any) {
|
||||||
|
sCtx.Helper()
|
||||||
|
sCtx.TContext.Fatal(sCtx.what + ": " + strings.TrimSpace(fmt.Sprintf(format, args...)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value intercepts a lookup of the special Ginkgo spec context key and wraps
// the attached progress reporter so that progress reports also carry the step prefix.
|
||||||
|
func (sCtx *stepContext) Value(key any) any {
|
||||||
|
if s, ok := key.(string); ok && s == ginkgoSpecContextKey {
|
||||||
|
if reporter, ok := sCtx.TContext.Value(key).(ginkgoReporter); ok {
|
||||||
|
return ginkgoReporter(&stepReporter{reporter: reporter, what: sCtx.what})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sCtx.TContext.Value(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
type stepReporter struct {
|
||||||
|
reporter ginkgoReporter
|
||||||
|
what string
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ ginkgoReporter = &stepReporter{}
|
||||||
|
|
||||||
|
func (s *stepReporter) AttachProgressReporter(reporter func() string) func() {
|
||||||
|
return s.reporter.AttachProgressReporter(func() string {
|
||||||
|
report := reporter()
|
||||||
|
return s.what + ": " + report
|
||||||
|
})
|
||||||
|
}
|
466
test/utils/ktesting/tcontext.go
Normal file
@@ -0,0 +1,466 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2023 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ktesting
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/gomega"
|
||||||
|
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||||
|
"k8s.io/client-go/dynamic"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
|
"k8s.io/client-go/restmapper"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
"k8s.io/klog/v2/ktesting"
|
||||||
|
"k8s.io/kubernetes/test/utils/format"
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting/initoption"
|
||||||
|
"k8s.io/kubernetes/test/utils/ktesting/internal"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CleanupGracePeriod is the time that a [TContext] gets canceled before the
|
||||||
|
// deadline of its underlying test suite (usually determined via "go test
|
||||||
|
// -timeout"). This gives the running test(s) time to fail with an informative
|
||||||
|
// timeout error. After that, all cleanup callbacks then have the remaining
|
||||||
|
// time to complete before the test binary is killed.
|
||||||
|
//
|
||||||
|
// For this to work, each blocking call in a test must respect the
|
||||||
|
// cancellation of the [TContext].
|
||||||
|
//
|
||||||
|
// When using Ginkgo to manage the test suite and running tests, the
|
||||||
|
// CleanupGracePeriod is ignored because Ginkgo itself manages timeouts.
|
||||||
|
const CleanupGracePeriod = 5 * time.Second
|
||||||
|
|
||||||
|
// TContext combines [context.Context], [TB] and some additional
|
||||||
|
// methods. Log output is associated with the current test. Errors ([Error],
|
||||||
|
// [Errorf]) are recorded with "ERROR" as prefix, fatal errors ([Fatal],
|
||||||
|
// [Fatalf]) with "FATAL ERROR".
|
||||||
|
//
|
||||||
|
// TContext provides features offered by Ginkgo also when using normal Go [testing]:
|
||||||
|
// - The context contains a deadline that expires soon enough before
|
||||||
|
// the overall timeout that cleanup code can still run.
|
||||||
|
// - Cleanup callbacks can get their own, separate contexts when
|
||||||
|
// registered via [CleanupCtx].
|
||||||
|
// - CTRL-C aborts, prints a progress report, and then cleans up
|
||||||
|
// before terminating.
|
||||||
|
// - SIGUSR1 prints a progress report without aborting.
|
||||||
|
//
|
||||||
|
// Progress reporting is more informative when doing polling with
|
||||||
|
// [gomega.Eventually] and [gomega.Consistently]. Without that, it
|
||||||
|
// can only report which tests are active.
|
||||||
|
type TContext interface {
|
||||||
|
context.Context
|
||||||
|
TB
|
||||||
|
|
||||||
|
// Cancel can be invoked to cancel the context before the test is completed.
|
||||||
|
// Tests which use the context to control goroutines and then wait for
|
||||||
|
// termination of those goroutines must call Cancel to avoid a deadlock.
|
||||||
|
//
|
||||||
|
// The cause, if non-empty, is turned into an error which is equivalent
|
||||||
|
// to context.Canceled. context.Cause will return that error for the
|
||||||
|
// context.
|
||||||
|
Cancel(cause string)
|
||||||
|
|
||||||
|
// Cleanup registers a callback that will get invoked when the test
|
||||||
|
// has finished. Callbacks get invoked in first-in-first-out order.
|
||||||
|
//
|
||||||
|
// Beware of context cancellation. The following cleanup code
|
||||||
|
// will use a canceled context, which is not desirable:
|
||||||
|
//
|
||||||
|
// tCtx.Cleanup(func() { /* do something with tCtx */ })
|
||||||
|
// tCtx.Cancel()
|
||||||
|
//
|
||||||
|
// A safer way to run cleanup code is:
|
||||||
|
//
|
||||||
|
// tCtx.CleanupCtx(func (tCtx ktesting.TContext) { /* do something with cleanup tCtx */ })
|
||||||
|
Cleanup(func())
|
||||||
|
|
||||||
|
// CleanupCtx is an alternative for Cleanup. The callback is passed a
|
||||||
|
// new TContext with the same logger and clients as the one CleanupCtx
|
||||||
|
// was invoked for.
|
||||||
|
CleanupCtx(func(TContext))
|
||||||
|
|
||||||
|
// Expect wraps [gomega.Expect] such that a failure will be reported via
|
||||||
|
// [TContext.Fatal]. As with [gomega.Expect], additional values
|
||||||
|
// may get passed. Those values then all must be nil for the assertion
|
||||||
|
// to pass. This can be used with functions which return a value
|
||||||
|
// plus error:
|
||||||
|
//
|
||||||
|
// myAmazingThing := func() (int, error) { ... }
|
||||||
|
// tCtx.Expect(myAmazingThing()).Should(gomega.Equal(1))
|
||||||
|
Expect(actual interface{}, extra ...interface{}) gomega.Assertion
|
||||||
|
|
||||||
|
// ExpectNoError asserts that no error has occurred.
|
||||||
|
//
|
||||||
|
// As in [gomega], the optional explanation can be:
|
||||||
|
// - a [fmt.Sprintf] format string plus its argument
|
||||||
|
// - a function returning a string, which will be called
|
||||||
|
// lazily to construct the explanation if needed
|
||||||
|
//
|
||||||
|
// If an explanation is provided, then it replaces the default "Unexpected
|
||||||
|
// error" in the failure message. It's combined with additional details by
|
||||||
|
// adding a colon at the end, as when wrapping an error. Therefore it should
|
||||||
|
// not end with a punctuation mark or line break.
|
||||||
|
//
|
||||||
|
// Using ExpectNoError instead of the corresponding Gomega or testify
|
||||||
|
// assertions has the advantage that the failure message is short (good for
|
||||||
|
// aggregation in https://go.k8s.io/triage) with more details captured in the
|
||||||
|
// test log output (good when investigating one particular failure).
|
||||||
|
ExpectNoError(err error, explain ...interface{})
|
||||||
|
|
||||||
|
// Logger returns a logger for the current test. This is a shortcut
|
||||||
|
// for calling klog.FromContext.
|
||||||
|
//
|
||||||
|
// Output emitted via this logger and the TB interface (like Logf)
|
||||||
|
// is formatted consistently. The TB interface generates a single
|
||||||
|
// message string, while Logger enables structured logging and can
|
||||||
|
// be passed down into code which expects a logger.
|
||||||
|
//
|
||||||
|
// To skip intermediate helper functions during stack unwinding,
|
||||||
|
// TB.Helper can be called in those functions.
|
||||||
|
Logger() klog.Logger
|
||||||
|
|
||||||
|
// TB returns the underlying TB. This can be used to "break the glass"
|
||||||
|
// and cast back into a testing.T or TB. Calling TB is necessary
|
||||||
|
// because TContext wraps the underlying TB.
|
||||||
|
TB() TB
|
||||||
|
|
||||||
|
// RESTConfig returns a config for a rest client with the UserAgent set
|
||||||
|
// to include the current test name or nil if not available. Several
|
||||||
|
// typed clients using this config are available through [Client],
|
||||||
|
// [Dynamic], [APIExtensions].
|
||||||
|
RESTConfig() *rest.Config
|
||||||
|
|
||||||
|
RESTMapper() *restmapper.DeferredDiscoveryRESTMapper
|
||||||
|
Client() clientset.Interface
|
||||||
|
Dynamic() dynamic.Interface
|
||||||
|
APIExtensions() apiextensions.Interface
|
||||||
|
|
||||||
|
// The following methods must be implemented by every implementation
|
||||||
|
// of TContext to ensure that the leaf TContext is used, not some
|
||||||
|
// embedded TContext:
|
||||||
|
// - CleanupCtx
|
||||||
|
// - Expect
|
||||||
|
// - ExpectNoError
|
||||||
|
// - Logger
|
||||||
|
//
|
||||||
|
// Usually these methods would be stand-alone functions with a TContext
|
||||||
|
// parameter. Offering them as methods simplifies the test code.
|
||||||
|
}
|
||||||
|
|
||||||
|
// TB is the interface common to [testing.T], [testing.B], [testing.F] and
|
||||||
|
// [github.com/onsi/ginkgo/v2]. In contrast to [testing.TB], it can be
|
||||||
|
// implemented also outside of the testing package.
|
||||||
|
type TB interface {
|
||||||
|
Cleanup(func())
|
||||||
|
Error(args ...any)
|
||||||
|
Errorf(format string, args ...any)
|
||||||
|
Fail()
|
||||||
|
FailNow()
|
||||||
|
Failed() bool
|
||||||
|
Fatal(args ...any)
|
||||||
|
Fatalf(format string, args ...any)
|
||||||
|
Helper()
|
||||||
|
Log(args ...any)
|
||||||
|
Logf(format string, args ...any)
|
||||||
|
Name() string
|
||||||
|
Setenv(key, value string)
|
||||||
|
Skip(args ...any)
|
||||||
|
SkipNow()
|
||||||
|
Skipf(format string, args ...any)
|
||||||
|
Skipped() bool
|
||||||
|
TempDir() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContextTB adds support for cleanup callbacks with explicit context
|
||||||
|
// parameter. This is used when integrating with Ginkgo: then CleanupCtx
|
||||||
|
// gets implemented via ginkgo.DeferCleanup.
|
||||||
|
type ContextTB interface {
|
||||||
|
TB
|
||||||
|
CleanupCtx(func(ctx context.Context))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init can be called in a unit or integration test to create
|
||||||
|
// a test context which:
|
||||||
|
// - has a per-test logger with verbosity derived from the -v command line flag
|
||||||
|
// - gets canceled when the test finishes (via [TB.Cleanup])
|
||||||
|
//
|
||||||
|
// Note that the test context supports the interfaces of [TB] and
|
||||||
|
// [context.Context] and thus can be used like one of those where needed.
|
||||||
|
// It also has additional methods for retrieving the logger and canceling
|
||||||
|
// the context early, which can be useful in tests which want to wait
|
||||||
|
// for goroutines to terminate after cancellation.
|
||||||
|
//
|
||||||
|
// If the [TB] implementation also implements [ContextTB], then
|
||||||
|
// [TContext.CleanupCtx] uses [ContextTB.CleanupCtx] and uses
|
||||||
|
// the context passed into that callback. This can be used to let
|
||||||
|
// Ginkgo create a fresh context for cleanup code.
|
||||||
|
//
|
||||||
|
// Can be called more than once per test to get different contexts with
|
||||||
|
// independent cancellation. The default behavior described above can be
|
||||||
|
// modified via optional functional options defined in [initoption].
|
||||||
|
func Init(tb TB, opts ...InitOption) TContext {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
c := internal.InitConfig{
|
||||||
|
PerTestOutput: true,
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(&c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't need a Deadline implementation, testing.B doesn't have it.
|
||||||
|
// But if we have one, we'll use it to set a timeout shortly before
|
||||||
|
// the deadline. This needs to come before we wrap tb.
|
||||||
|
deadlineTB, deadlineOK := tb.(interface {
|
||||||
|
Deadline() (time.Time, bool)
|
||||||
|
})
|
||||||
|
|
||||||
|
ctx := interruptCtx
|
||||||
|
if c.PerTestOutput {
|
||||||
|
config := ktesting.NewConfig(
|
||||||
|
ktesting.AnyToString(func(v interface{}) string {
|
||||||
|
return format.Object(v, 1)
|
||||||
|
}),
|
||||||
|
ktesting.VerbosityFlagName("v"),
|
||||||
|
ktesting.VModuleFlagName("vmodule"),
|
||||||
|
)
|
||||||
|
|
||||||
|
// Copy klog settings instead of making the ktesting logger
|
||||||
|
// configurable directly.
|
||||||
|
var fs flag.FlagSet
|
||||||
|
config.AddFlags(&fs)
|
||||||
|
for _, name := range []string{"v", "vmodule"} {
|
||||||
|
from := flag.CommandLine.Lookup(name)
|
||||||
|
to := fs.Lookup(name)
|
||||||
|
if err := to.Value.Set(from.Value.String()); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure consistent logging: this klog.Logger writes to tb, adding the
|
||||||
|
// date/time header, and our own wrapper emulates that behavior for
|
||||||
|
// Log/Logf/...
|
||||||
|
logger := ktesting.NewLogger(tb, config)
|
||||||
|
ctx = klog.NewContext(interruptCtx, logger)
|
||||||
|
|
||||||
|
tb = withKlogHeader(tb)
|
||||||
|
}
|
||||||
|
|
||||||
|
if deadlineOK {
|
||||||
|
if deadline, ok := deadlineTB.Deadline(); ok {
|
||||||
|
timeLeft := time.Until(deadline)
|
||||||
|
timeLeft -= CleanupGracePeriod
|
||||||
|
ctx, cancel := withTimeout(ctx, tb, timeLeft, fmt.Sprintf("test suite deadline (%s) is close, need to clean up before the %s cleanup grace period", deadline.Truncate(time.Second), CleanupGracePeriod))
|
||||||
|
tCtx := tContext{
|
||||||
|
Context: ctx,
|
||||||
|
testingTB: testingTB{TB: tb},
|
||||||
|
cancel: cancel,
|
||||||
|
}
|
||||||
|
return tCtx
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return WithCancel(InitCtx(ctx, tb))
|
||||||
|
}
|
||||||
|
|
||||||
|
type InitOption = initoption.InitOption
|
||||||
|
|
||||||
|
// InitCtx is a variant of [Init] which uses an already existing context and
|
||||||
|
// whatever logger and timeouts are stored there.
|
||||||
|
// Functional options are part of the API, but currently
|
||||||
|
// there are none which have an effect.
|
||||||
|
func InitCtx(ctx context.Context, tb TB, _ ...InitOption) TContext {
|
||||||
|
tCtx := tContext{
|
||||||
|
Context: ctx,
|
||||||
|
testingTB: testingTB{TB: tb},
|
||||||
|
}
|
||||||
|
return tCtx
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTB constructs a new TContext with a different TB instance.
|
||||||
|
// This can be used to set up some of the context, in particular
|
||||||
|
// clients, in the root test and then run sub-tests:
|
||||||
|
//
|
||||||
|
// func TestSomething(t *testing.T) {
|
||||||
|
// tCtx := ktesting.Init(t)
|
||||||
|
// ...
|
||||||
|
// tCtx = ktesting.WithRESTConfig(tCtx, config)
|
||||||
|
//
|
||||||
|
// t.Run("sub", func (t *testing.T) {
|
||||||
|
// tCtx := ktesting.WithTB(tCtx, t)
|
||||||
|
// ...
|
||||||
|
// })
|
||||||
|
//
|
||||||
|
// WithTB sets up cancellation for the sub-test.
|
||||||
|
func WithTB(parentCtx TContext, tb TB) TContext {
|
||||||
|
tCtx := InitCtx(parentCtx, tb)
|
||||||
|
tCtx = WithCancel(tCtx)
|
||||||
|
tCtx = WithClients(tCtx,
|
||||||
|
parentCtx.RESTConfig(),
|
||||||
|
parentCtx.RESTMapper(),
|
||||||
|
parentCtx.Client(),
|
||||||
|
parentCtx.Dynamic(),
|
||||||
|
parentCtx.APIExtensions(),
|
||||||
|
)
|
||||||
|
return tCtx
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContext constructs a new TContext with a different Context instance.
|
||||||
|
// This can be used in callbacks which receive a Context, for example
|
||||||
|
// from Gomega:
|
||||||
|
//
|
||||||
|
// gomega.Eventually(tCtx, func(ctx context.Context) {
|
||||||
|
// tCtx := ktesting.WithContext(tCtx, ctx)
|
||||||
|
// ...
|
||||||
|
//
|
||||||
|
// This is important because the Context in the callback could have
|
||||||
|
// a different deadline than in the parent TContext.
|
||||||
|
func WithContext(parentCtx TContext, ctx context.Context) TContext {
|
||||||
|
tCtx := InitCtx(ctx, parentCtx.TB())
|
||||||
|
tCtx = WithClients(tCtx,
|
||||||
|
parentCtx.RESTConfig(),
|
||||||
|
parentCtx.RESTMapper(),
|
||||||
|
parentCtx.Client(),
|
||||||
|
parentCtx.Dynamic(),
|
||||||
|
parentCtx.APIExtensions(),
|
||||||
|
)
|
||||||
|
return tCtx
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithValue wraps context.WithValue such that the result is again a TContext.
|
||||||
|
func WithValue(parentCtx TContext, key, val any) TContext {
|
||||||
|
ctx := context.WithValue(parentCtx, key, val)
|
||||||
|
return WithContext(parentCtx, ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
type tContext struct {
|
||||||
|
context.Context
|
||||||
|
testingTB
|
||||||
|
cancel func(cause string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// testingTB is needed to avoid a name conflict
|
||||||
|
// between field and method in tContext.
|
||||||
|
type testingTB struct {
|
||||||
|
TB
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Cancel(cause string) {
|
||||||
|
if tCtx.cancel != nil {
|
||||||
|
tCtx.cancel(cause)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) CleanupCtx(cb func(TContext)) {
|
||||||
|
tCtx.Helper()
|
||||||
|
cleanupCtx(tCtx, cb)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Expect(actual interface{}, extra ...interface{}) gomega.Assertion {
|
||||||
|
tCtx.Helper()
|
||||||
|
return expect(tCtx, actual, extra...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) ExpectNoError(err error, explain ...interface{}) {
|
||||||
|
tCtx.Helper()
|
||||||
|
expectNoError(tCtx, err, explain...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanupCtx(tCtx TContext, cb func(TContext)) {
|
||||||
|
tCtx.Helper()
|
||||||
|
|
||||||
|
if tb, ok := tCtx.TB().(ContextTB); ok {
|
||||||
|
// Use context from base TB (most likely Ginkgo).
|
||||||
|
tb.CleanupCtx(func(ctx context.Context) {
|
||||||
|
tCtx := WithContext(tCtx, ctx)
|
||||||
|
cb(tCtx)
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
tCtx.Cleanup(func() {
|
||||||
|
// Use new context. This is the code path for "go test". The
|
||||||
|
// context then has *no* deadline. In the code path above for
|
||||||
|
// Ginkgo, Ginkgo is more sophisticated and also applies
|
||||||
|
// timeouts to cleanup calls which accept a context.
|
||||||
|
childCtx := WithContext(tCtx, context.WithoutCancel(tCtx))
|
||||||
|
cb(childCtx)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Logger() klog.Logger {
|
||||||
|
return klog.FromContext(tCtx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Error(args ...any) {
|
||||||
|
tCtx.Helper()
|
||||||
|
args = append([]any{"ERROR:"}, args...)
|
||||||
|
tCtx.testingTB.Error(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Errorf(format string, args ...any) {
|
||||||
|
tCtx.Helper()
|
||||||
|
error := fmt.Sprintf(format, args...)
|
||||||
|
error = "ERROR: " + error
|
||||||
|
tCtx.testingTB.Error(error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Fatal(args ...any) {
|
||||||
|
tCtx.Helper()
|
||||||
|
args = append([]any{"FATAL ERROR:"}, args...)
|
||||||
|
tCtx.testingTB.Fatal(args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Fatalf(format string, args ...any) {
|
||||||
|
tCtx.Helper()
|
||||||
|
error := fmt.Sprintf(format, args...)
|
||||||
|
error = "FATAL ERROR: " + error
|
||||||
|
tCtx.testingTB.Fatal(error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) TB() TB {
|
||||||
|
// Might have to unwrap twice, depending on how
|
||||||
|
// this tContext was constructed.
|
||||||
|
tb := tCtx.testingTB.TB
|
||||||
|
if k, ok := tb.(klogTB); ok {
|
||||||
|
return k.TB
|
||||||
|
}
|
||||||
|
return tb
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) RESTConfig() *rest.Config {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) RESTMapper() *restmapper.DeferredDiscoveryRESTMapper {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Client() clientset.Interface {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) Dynamic() dynamic.Interface {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tCtx tContext) APIExtensions() apiextensions.Interface {
|
||||||
|
return nil
|
||||||
|
}
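
// Editor's sketch (not part of this PR): typical use of the API above from a
// test in another package. doSomethingWithCluster is a hypothetical helper and
// the client wiring is assumed to happen elsewhere (e.g. via WithRESTConfig).
//
//	func TestFeature(t *testing.T) {
//		tCtx := ktesting.Init(t)
//
//		// Cleanup code gets a context which is not canceled yet.
//		tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
//			tCtx.Logger().Info("cleaning up")
//		})
//
//		obj, err := doSomethingWithCluster(tCtx, tCtx.Client())
//		tCtx.ExpectNoError(err, "doSomethingWithCluster")
//		tCtx.Expect(obj).ShouldNot(gomega.BeNil())
//	}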
|
83
test/utils/ktesting/tcontext_test.go
Normal file
@@ -0,0 +1,83 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ktesting_test

import (
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"

	"k8s.io/client-go/rest"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestCancelManual(t *testing.T) {
	tCtx := ktesting.Init(t)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Blocks until tCtx.Cancel is called below.
		<-tCtx.Done()
	}()
	tCtx.Cancel("manually canceled")
	wg.Wait()
}

func TestCancelAutomatic(t *testing.T) {
	var wg sync.WaitGroup
	// This callback gets registered first and thus
	// gets invoked last.
	t.Cleanup(wg.Wait)
	tCtx := ktesting.Init(t)
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Blocks until the context gets canceled automatically.
		<-tCtx.Done()
	}()
}

func TestCancelCtx(t *testing.T) {
	tCtx := ktesting.Init(t)
	var discardLogger klog.Logger
	tCtx = ktesting.WithLogger(tCtx, discardLogger)
	tCtx = ktesting.WithRESTConfig(tCtx, new(rest.Config))
	baseCtx := tCtx

	tCtx.Cleanup(func() {
		if tCtx.Err() == nil {
			t.Error("context should be canceled but isn't")
		}
	})
	tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
		if tCtx.Err() != nil {
			t.Errorf("context should not be canceled but is: %v", tCtx.Err())
		}
		assert.Equal(t, baseCtx.Logger(), tCtx.Logger(), "Logger()")
		assert.Equal(t, baseCtx.RESTConfig(), tCtx.RESTConfig(), "RESTConfig()")
		assert.Equal(t, baseCtx.RESTMapper(), tCtx.RESTMapper(), "RESTMapper()")
		assert.Equal(t, baseCtx.Client(), tCtx.Client(), "Client()")
		assert.Equal(t, baseCtx.Dynamic(), tCtx.Dynamic(), "Dynamic()")
		assert.Equal(t, baseCtx.APIExtensions(), tCtx.APIExtensions(), "APIExtensions()")
	})

	// Cancel, then let testing.T invoke test cleanup.
	tCtx.Cancel("test is complete")
}

123
test/utils/ktesting/withcontext.go
Normal file
@@ -0,0 +1,123 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ktesting

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	"k8s.io/klog/v2"
)

// WithCancel sets up cancellation in a [TContext.Cleanup] callback and
// constructs a new TContext where [TContext.Cancel] cancels only the new
// context.
func WithCancel(tCtx TContext) TContext {
	ctx, cancel := context.WithCancelCause(tCtx)
	tCtx.Cleanup(func() {
		cancel(cleanupErr(tCtx.Name()))
	})

	return withContext{
		TContext: tCtx,
		Context:  ctx,
		cancel: func(cause string) {
			var cancelCause error
			if cause != "" {
				cancelCause = canceledError(cause)
			}
			cancel(cancelCause)
		},
	}
}

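A minimal WithCancel sketch, with a hypothetical test name and the same imports as the earlier sketch plus "sync": canceling the derived context does not cancel the parent, matching the doc comment above.

func TestWithCancelSketch(t *testing.T) {
	tCtx := ktesting.Init(t)
	stepCtx := ktesting.WithCancel(tCtx)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Blocks until stepCtx.Cancel is called below.
		<-stepCtx.Done()
	}()

	stepCtx.Cancel("step finished")
	wg.Wait()
	if tCtx.Err() != nil {
		tCtx.Fatal("canceling the derived context must not cancel the parent")
	}
}
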
// WithTimeout sets up new context with a timeout. Canceling the timeout gets
// registered in a [TContext.Cleanup] callback. [TContext.Cancel] cancels only
// the new context. The cause is used as reason why the context is canceled
// once the timeout is reached. It may be empty, in which case the usual
// "context canceled" error is used.
func WithTimeout(tCtx TContext, timeout time.Duration, timeoutCause string) TContext {
	tCtx.Helper()
	ctx, cancel := withTimeout(tCtx, tCtx.TB(), timeout, timeoutCause)

	return withContext{
		TContext: tCtx,
		Context:  ctx,
		cancel:   cancel,
	}
}

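A minimal WithTimeout sketch, with a hypothetical test name and additionally assuming "time" is imported: bound one step of a test to a fixed duration and fail if it expires.

func TestWithTimeoutSketch(t *testing.T) {
	tCtx := ktesting.Init(t)
	stepCtx := ktesting.WithTimeout(tCtx, 5*time.Second, "waiting for the step took too long")

	select {
	case <-stepCtx.Done():
		tCtx.Fatalf("step did not finish in time: %v", stepCtx.Err())
	case <-time.After(10 * time.Millisecond):
		// The simulated step finished well before the timeout.
	}
}
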
// WithLogger constructs a new context with a different logger.
func WithLogger(tCtx TContext, logger klog.Logger) TContext {
	ctx := klog.NewContext(tCtx, logger)

	return withContext{
		TContext: tCtx,
		Context:  ctx,
		cancel:   tCtx.Cancel,
	}
}

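A minimal WithLogger sketch, with a hypothetical test name: attach extra key/value pairs to the log output of one sub-step via klog.LoggerWithValues.

func TestWithLoggerSketch(t *testing.T) {
	tCtx := ktesting.Init(t)
	logger := klog.LoggerWithValues(tCtx.Logger(), "step", "setup")
	stepCtx := ktesting.WithLogger(tCtx, logger)
	// All output through stepCtx.Logger() now carries step="setup".
	stepCtx.Logger().Info("starting")
}
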
// withContext combines some TContext with a new [context.Context] derived
// from it. Because both provide the [context.Context] interface, methods must
// be defined which pick the newer one.
type withContext struct {
	TContext
	context.Context

	cancel func(cause string)
}

func (wCtx withContext) Cancel(cause string) {
	wCtx.cancel(cause)
}

func (wCtx withContext) CleanupCtx(cb func(TContext)) {
	wCtx.Helper()
	cleanupCtx(wCtx, cb)
}

func (wCtx withContext) Expect(actual interface{}, extra ...interface{}) gomega.Assertion {
	wCtx.Helper()
	return expect(wCtx, actual, extra...)
}

func (wCtx withContext) ExpectNoError(err error, explain ...interface{}) {
	wCtx.Helper()
	expectNoError(wCtx, err, explain...)
}

func (wCtx withContext) Logger() klog.Logger {
	return klog.FromContext(wCtx)
}

func (wCtx withContext) Deadline() (time.Time, bool) {
	return wCtx.Context.Deadline()
}

func (wCtx withContext) Done() <-chan struct{} {
	return wCtx.Context.Done()
}

func (wCtx withContext) Err() error {
	return wCtx.Context.Err()
}

func (wCtx withContext) Value(key any) any {
	return wCtx.Context.Value(key)
}
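The withContext doc comment notes that both embedded fields provide the [context.Context] methods, which is why Deadline, Done, Err, and Value are spelled out above. A minimal standalone sketch of the underlying Go rule, independent of this package; uncommenting the marked line fails to compile.

package main

import "context"

// tContextLike stands in for TContext: an interface embedding context.Context.
type tContextLike interface {
	context.Context
}

// withBoth embeds two fields that each provide Done, Err, Deadline, and Value.
type withBoth struct {
	tContextLike
	context.Context
}

func main() {
	var w withBoth
	_ = w
	// _ = w.Done() // compile error: ambiguous selector w.Done
	// Defining the methods explicitly, as withContext does, resolves
	// the ambiguity in favor of the newer context.
}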