diff --git a/test/integration/scheduler_perf/config/dra/resourceclaimtemplate.yaml b/test/integration/scheduler_perf/config/dra/resourceclaimtemplate.yaml index d3da6879e76..29048726caf 100644 --- a/test/integration/scheduler_perf/config/dra/resourceclaimtemplate.yaml +++ b/test/integration/scheduler_perf/config/dra/resourceclaimtemplate.yaml @@ -1,4 +1,4 @@ -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClaimTemplate metadata: name: test-claim-template diff --git a/test/integration/scheduler_perf/config/dra/resourceclass.yaml b/test/integration/scheduler_perf/config/dra/resourceclass.yaml index d24aa82392f..8ba0fcb3a2a 100644 --- a/test/integration/scheduler_perf/config/dra/resourceclass.yaml +++ b/test/integration/scheduler_perf/config/dra/resourceclass.yaml @@ -1,4 +1,4 @@ -apiVersion: resource.k8s.io/v1alpha1 +apiVersion: resource.k8s.io/v1alpha2 kind: ResourceClass metadata: name: test-class diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml index 8e4d735baa1..c3f96fb58d2 100644 --- a/test/integration/scheduler_perf/config/performance-config.yaml +++ b/test/integration/scheduler_perf/config/performance-config.yaml @@ -729,16 +729,16 @@ driverName: test-driver.cdi.k8s.io nodes: scheduler-perf-dra-* maxClaimsPerNodeParam: $maxClaimsPerNode - - opcode: createResourceClass + - opcode: createAny templatePath: config/dra/resourceclass.yaml - - opcode: createResourceClaimTemplate + - opcode: createAny templatePath: config/dra/resourceclaimtemplate.yaml namespace: init - opcode: createPods namespace: init countParam: $initPods podTemplatePath: config/dra/pod-with-claim-template.yaml - - opcode: createResourceClaimTemplate + - opcode: createAny templatePath: config/dra/resourceclaimtemplate.yaml namespace: test - opcode: createPods @@ -799,24 +799,24 @@ driverName: another-test-driver.cdi.k8s.io nodes: scheduler-perf-dra-* maxClaimsPerNodeParam: $maxClaimsPerNode - - opcode: createResourceClass + - opcode: createAny templatePath: config/dra/resourceclass.yaml - - opcode: createResourceClass + - opcode: createAny templatePath: config/dra/another-resourceclass.yaml - - opcode: createResourceClaimTemplate + - opcode: createAny templatePath: config/dra/resourceclaimtemplate.yaml namespace: init - - opcode: createResourceClaimTemplate + - opcode: createAny templatePath: config/dra/another-resourceclaimtemplate.yaml namespace: init - opcode: createPods namespace: init countParam: $initPods podTemplatePath: config/dra/pod-with-many-claim-templates.yaml - - opcode: createResourceClaimTemplate + - opcode: createAny templatePath: config/dra/resourceclaimtemplate.yaml namespace: test - - opcode: createResourceClaimTemplate + - opcode: createAny templatePath: config/dra/another-resourceclaimtemplate.yaml namespace: test - opcode: createPods diff --git a/test/integration/scheduler_perf/create.go b/test/integration/scheduler_perf/create.go index a2fcd296542..ddc0e350e75 100644 --- a/test/integration/scheduler_perf/create.go +++ b/test/integration/scheduler_perf/create.go @@ -19,17 +19,22 @@ package benchmark import ( "context" "fmt" - "testing" + "time" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/restmapper" + "k8s.io/klog/v2" + 
"k8s.io/kubernetes/test/utils/ktesting" ) -// createOp defines an op where some object gets created from a template. -// Everything specific for that object (create call, op code, names) gets -// provided through a type. -type createOp[T interface{}, P createOpType[T]] struct { - // Must match createOpType.Opcode(). +// createAny defines an op where some object gets created from a YAML file. +// The nameset can be specified. +type createAny struct { + // Must match createAnyOpcode. Opcode operationCode // Namespace the object should be created in. Must be empty for cluster-scoped objects. Namespace string @@ -37,53 +42,85 @@ type createOp[T interface{}, P createOpType[T]] struct { TemplatePath string } -func (cro *createOp[T, P]) isValid(allowParameterization bool) error { - var p P - if cro.Opcode != p.Opcode() { - return fmt.Errorf("invalid opcode %q; expected %q", cro.Opcode, p.Opcode()) +var _ runnableOp = &createAny{} + +func (c *createAny) isValid(allowParameterization bool) error { + if c.Opcode != createAnyOpcode { + return fmt.Errorf("invalid opcode %q; expected %q", c.Opcode, createAnyOpcode) } - if p.Namespaced() && cro.Namespace == "" { - return fmt.Errorf("Namespace must be set") - } - if !p.Namespaced() && cro.Namespace != "" { - return fmt.Errorf("Namespace must not be set") - } - if cro.TemplatePath == "" { + if c.TemplatePath == "" { return fmt.Errorf("TemplatePath must be set") } + // The namespace can only be checked during later because we don't know yet + // whether the object is namespaced or cluster-scoped. return nil } -func (cro *createOp[T, P]) collectsMetrics() bool { +func (c *createAny) collectsMetrics() bool { return false } -func (cro *createOp[T, P]) patchParams(w *workload) (realOp, error) { - return cro, cro.isValid(false) +func (c *createAny) patchParams(w *workload) (realOp, error) { + return c, c.isValid(false) } -func (cro *createOp[T, P]) requiredNamespaces() []string { - if cro.Namespace == "" { +func (c *createAny) requiredNamespaces() []string { + if c.Namespace == "" { return nil } - return []string{cro.Namespace} + return []string{c.Namespace} } -func (cro *createOp[T, P]) run(ctx context.Context, tb testing.TB, client clientset.Interface) { - var obj *T - var p P - if err := getSpecFromFile(&cro.TemplatePath, &obj); err != nil { - tb.Fatalf("parsing %s %q: %v", p.Name(), cro.TemplatePath, err) +func (c *createAny) run(tCtx ktesting.TContext) { + var obj *unstructured.Unstructured + if err := getSpecFromFile(&c.TemplatePath, &obj); err != nil { + tCtx.Fatalf("%s: parsing failed: %v", c.TemplatePath, err) } - if _, err := p.CreateCall(client, cro.Namespace)(ctx, obj, metav1.CreateOptions{}); err != nil { - tb.Fatalf("create %s: %v", p.Name(), err) - } -} -// createOpType provides type-specific values for the generic createOp. -type createOpType[T interface{}] interface { - Opcode() operationCode - Name() string - Namespaced() bool - CreateCall(client clientset.Interface, namespace string) func(context.Context, *T, metav1.CreateOptions) (*T, error) + // Not caching the discovery result isn't very efficient, but good enough when + // createAny isn't done often. 
+ discoveryCache := memory.NewMemCacheClient(tCtx.Client().Discovery()) + restMapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryCache) + gv, err := schema.ParseGroupVersion(obj.GetAPIVersion()) + if err != nil { + tCtx.Fatalf("%s: extract group+version from object %q: %v", c.TemplatePath, klog.KObj(obj), err) + } + gk := schema.GroupKind{Group: gv.Group, Kind: obj.GetKind()} + + create := func() error { + mapping, err := restMapper.RESTMapping(gk, gv.Version) + if err != nil { + // Cached mapping might be stale, refresh on next try. + restMapper.Reset() + return fmt.Errorf("map %q to resource: %v", gk, err) + } + resourceClient := tCtx.Dynamic().Resource(mapping.Resource) + + if c.Namespace != "" { + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + return fmt.Errorf("namespace %q set for %q, but %q has scope %q", c.Namespace, c.TemplatePath, gk, mapping.Scope.Name()) + } + _, err = resourceClient.Namespace(c.Namespace).Create(tCtx, obj, metav1.CreateOptions{}) + } else { + if mapping.Scope.Name() != meta.RESTScopeNameRoot { + return fmt.Errorf("namespace not set for %q, but %q has scope %q", c.TemplatePath, gk, mapping.Scope.Name()) + } + _, err = resourceClient.Create(tCtx, obj, metav1.CreateOptions{}) + } + return err + } + // Retry, some errors (like CRD just created and type not ready for use yet) are temporary. + ctx, cancel := context.WithTimeout(tCtx, 20*time.Second) + defer cancel() + for { + err := create() + if err == nil { + return + } + select { + case <-ctx.Done(): + tCtx.Fatalf("%s: timed out (%q) while creating %q, last error was: %v", c.TemplatePath, context.Cause(ctx), klog.KObj(obj), err) + case <-time.After(time.Second): + } + } } diff --git a/test/integration/scheduler_perf/dra.go b/test/integration/scheduler_perf/dra.go index eef0b2c1abd..3e263ace1f7 100644 --- a/test/integration/scheduler_perf/dra.go +++ b/test/integration/scheduler_perf/dra.go @@ -21,14 +21,13 @@ import ( "fmt" "path/filepath" "sync" - "testing" resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" draapp "k8s.io/kubernetes/test/e2e/dra/test-driver/app" + "k8s.io/kubernetes/test/utils/ktesting" ) // createResourceClaimsOp defines an op where resource claims are created. 
@@ -82,18 +81,18 @@ func (op *createResourceClaimsOp) requiredNamespaces() []string { return []string{op.Namespace} } -func (op *createResourceClaimsOp) run(ctx context.Context, tb testing.TB, clientset clientset.Interface) { - tb.Logf("creating %d claims in namespace %q", op.Count, op.Namespace) +func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) { + tCtx.Logf("creating %d claims in namespace %q", op.Count, op.Namespace) var claimTemplate *resourcev1alpha2.ResourceClaim if err := getSpecFromFile(&op.TemplatePath, &claimTemplate); err != nil { - tb.Fatalf("parsing ResourceClaim %q: %v", op.TemplatePath, err) + tCtx.Fatalf("parsing ResourceClaim %q: %v", op.TemplatePath, err) } var createErr error var mutex sync.Mutex create := func(i int) { err := func() error { - if _, err := clientset.ResourceV1alpha2().ResourceClaims(op.Namespace).Create(ctx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil { + if _, err := tCtx.Client().ResourceV1alpha2().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil { return fmt.Errorf("create claim: %v", err) } return nil @@ -109,34 +108,12 @@ func (op *createResourceClaimsOp) run(ctx context.Context, tb testing.TB, client if workers > 30 { workers = 30 } - workqueue.ParallelizeUntil(ctx, workers, op.Count, create) + workqueue.ParallelizeUntil(tCtx, workers, op.Count, create) if createErr != nil { - tb.Fatal(createErr.Error()) + tCtx.Fatal(createErr.Error()) } } -// createResourceClassOpType customizes createOp for creating a ResourceClass. -type createResourceClassOpType struct{} - -func (c createResourceClassOpType) Opcode() operationCode { return createResourceClassOpcode } -func (c createResourceClassOpType) Name() string { return "ResourceClass" } -func (c createResourceClassOpType) Namespaced() bool { return false } -func (c createResourceClassOpType) CreateCall(client clientset.Interface, namespace string) func(context.Context, *resourcev1alpha2.ResourceClass, metav1.CreateOptions) (*resourcev1alpha2.ResourceClass, error) { - return client.ResourceV1alpha2().ResourceClasses().Create -} - -// createResourceClassOpType customizes createOp for creating a ResourceClaim. -type createResourceClaimTemplateOpType struct{} - -func (c createResourceClaimTemplateOpType) Opcode() operationCode { - return createResourceClaimTemplateOpcode -} -func (c createResourceClaimTemplateOpType) Name() string { return "ResourceClaimTemplate" } -func (c createResourceClaimTemplateOpType) Namespaced() bool { return true } -func (c createResourceClaimTemplateOpType) CreateCall(client clientset.Interface, namespace string) func(context.Context, *resourcev1alpha2.ResourceClaimTemplate, metav1.CreateOptions) (*resourcev1alpha2.ResourceClaimTemplate, error) { - return client.ResourceV1alpha2().ResourceClaimTemplates(namespace).Create -} - // createResourceDriverOp defines an op where resource claims are created. type createResourceDriverOp struct { // Must be createResourceDriverOpcode. 
@@ -186,8 +163,8 @@ func (op *createResourceDriverOp) patchParams(w *workload) (realOp, error) { func (op *createResourceDriverOp) requiredNamespaces() []string { return nil } -func (op *createResourceDriverOp) run(ctx context.Context, tb testing.TB, clientset clientset.Interface) { - tb.Logf("creating resource driver %q for nodes matching %q", op.DriverName, op.Nodes) +func (op *createResourceDriverOp) run(tCtx ktesting.TContext) { + tCtx.Logf("creating resource driver %q for nodes matching %q", op.DriverName, op.Nodes) // Start the controller side of the DRA test driver such that it simulates // per-node resources. @@ -197,22 +174,22 @@ func (op *createResourceDriverOp) run(ctx context.Context, tb testing.TB, client MaxAllocations: op.MaxClaimsPerNode, } - nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + nodes, err := tCtx.Client().CoreV1().Nodes().List(tCtx, metav1.ListOptions{}) if err != nil { - tb.Fatalf("list nodes: %v", err) + tCtx.Fatalf("list nodes: %v", err) } for _, node := range nodes.Items { match, err := filepath.Match(op.Nodes, node.Name) if err != nil { - tb.Fatalf("matching glob pattern %q against node name %q: %v", op.Nodes, node.Name, err) + tCtx.Fatalf("matching glob pattern %q against node name %q: %v", op.Nodes, node.Name, err) } if match { resources.Nodes = append(resources.Nodes, node.Name) } } - controller := draapp.NewController(clientset, resources) - ctx, cancel := context.WithCancel(ctx) + controller := draapp.NewController(tCtx.Client(), resources) + ctx, cancel := context.WithCancel(tCtx) var wg sync.WaitGroup wg.Add(1) go func() { @@ -220,11 +197,11 @@ func (op *createResourceDriverOp) run(ctx context.Context, tb testing.TB, client ctx := klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), op.DriverName)) controller.Run(ctx, 5 /* workers */) }() - tb.Cleanup(func() { - tb.Logf("stopping resource driver %q", op.DriverName) + tCtx.Cleanup(func() { + tCtx.Logf("stopping resource driver %q", op.DriverName) // We must cancel before waiting. 
cancel() wg.Wait() - tb.Logf("stopped resource driver %q", op.DriverName) + tCtx.Logf("stopped resource driver %q", op.DriverName) }) } diff --git a/test/integration/scheduler_perf/scheduler_perf.go b/test/integration/scheduler_perf/scheduler_perf.go index 2673a3e1a74..366756c502d 100644 --- a/test/integration/scheduler_perf/scheduler_perf.go +++ b/test/integration/scheduler_perf/scheduler_perf.go @@ -33,7 +33,6 @@ import ( "github.com/google/go-cmp/cmp" v1 "k8s.io/api/core/v1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -59,23 +58,23 @@ import ( "k8s.io/kubernetes/test/integration/framework" testutils "k8s.io/kubernetes/test/utils" "k8s.io/kubernetes/test/utils/ktesting" + "k8s.io/kubernetes/test/utils/ktesting/initoption" "sigs.k8s.io/yaml" ) type operationCode string const ( - createNodesOpcode operationCode = "createNodes" - createNamespacesOpcode operationCode = "createNamespaces" - createPodsOpcode operationCode = "createPods" - createPodSetsOpcode operationCode = "createPodSets" - createResourceClaimsOpcode operationCode = "createResourceClaims" - createResourceClaimTemplateOpcode operationCode = "createResourceClaimTemplate" - createResourceClassOpcode operationCode = "createResourceClass" - createResourceDriverOpcode operationCode = "createResourceDriver" - churnOpcode operationCode = "churn" - barrierOpcode operationCode = "barrier" - sleepOpcode operationCode = "sleep" + createAnyOpcode operationCode = "createAny" + createNodesOpcode operationCode = "createNodes" + createNamespacesOpcode operationCode = "createNamespaces" + createPodsOpcode operationCode = "createPods" + createPodSetsOpcode operationCode = "createPodSets" + createResourceClaimsOpcode operationCode = "createResourceClaims" + createResourceDriverOpcode operationCode = "createResourceDriver" + churnOpcode operationCode = "churn" + barrierOpcode operationCode = "barrier" + sleepOpcode operationCode = "sleep" ) const ( @@ -238,13 +237,12 @@ type op struct { // which op we're decoding at runtime. func (op *op) UnmarshalJSON(b []byte) error { possibleOps := []realOp{ + &createAny{}, &createNodesOp{}, &createNamespacesOp{}, &createPodsOp{}, &createPodSetsOp{}, &createResourceClaimsOp{}, - &createOp[resourcev1alpha2.ResourceClaimTemplate, createResourceClaimTemplateOpType]{}, - &createOp[resourcev1alpha2.ResourceClass, createResourceClassOpType]{}, &createResourceDriverOp{}, &churnOp{}, &barrierOp{}, @@ -293,7 +291,7 @@ type runnableOp interface { // before running the operation. requiredNamespaces() []string // run executes the steps provided by the operation. - run(context.Context, testing.TB, clientset.Interface) + run(ktesting.TContext) } func isValidParameterizable(val string) bool { @@ -674,6 +672,7 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr if !enabled(*perfSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) { b.Skipf("disabled by label filter %q", *perfSchedulingLabelFilter) } + tCtx := ktesting.Init(b, initoption.PerTestOutput(*useTestingLog)) // Ensure that there are no leaked // goroutines. They could influence @@ -684,28 +683,19 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr // quit *before* restoring klog settings. framework.GoleakCheck(b) - ctx := context.Background() - - if *useTestingLog { - // In addition to redirection klog - // output, also enable contextual - // logging. 
- _, ctx = ktesting.NewTestContext(b) - } - // Now that we are ready to run, start // etcd. framework.StartEtcd(b, output) // 30 minutes should be plenty enough even for the 5000-node tests. - ctx, cancel := context.WithTimeout(ctx, 30*time.Minute) - b.Cleanup(cancel) + timeout := 30 * time.Minute + tCtx = ktesting.WithTimeout(tCtx, timeout, fmt.Sprintf("timed out after the %s per-test timeout", timeout)) for feature, flag := range tc.FeatureGates { defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)() } - informerFactory, client, dyncClient := setupClusterForWorkload(ctx, b, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry) - results := runWorkload(ctx, b, tc, w, informerFactory, client, dyncClient, false) + informerFactory, tCtx := setupClusterForWorkload(tCtx, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry) + results := runWorkload(tCtx, tc, w, informerFactory, false) dataItems.DataItems = append(dataItems.DataItems, results...) if len(results) > 0 { @@ -771,7 +761,7 @@ func loadSchedulerConfig(file string) (*config.KubeSchedulerConfiguration, error return nil, fmt.Errorf("couldn't decode as KubeSchedulerConfiguration, got %s: ", gvk) } -func unrollWorkloadTemplate(tb testing.TB, wt []op, w *workload) []op { +func unrollWorkloadTemplate(tb ktesting.TB, wt []op, w *workload) []op { var unrolled []op for opIndex, o := range wt { realOp, err := o.realOp.patchParams(w) @@ -794,23 +784,23 @@ func unrollWorkloadTemplate(tb testing.TB, wt []op, w *workload) []op { return unrolled } -func setupClusterForWorkload(ctx context.Context, tb testing.TB, configPath string, featureGates map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) { +func setupClusterForWorkload(tCtx ktesting.TContext, configPath string, featureGates map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, ktesting.TContext) { var cfg *config.KubeSchedulerConfiguration var err error if configPath != "" { cfg, err = loadSchedulerConfig(configPath) if err != nil { - tb.Fatalf("error loading scheduler config file: %v", err) + tCtx.Fatalf("error loading scheduler config file: %v", err) } if err = validation.ValidateKubeSchedulerConfiguration(cfg); err != nil { - tb.Fatalf("validate scheduler config file failed: %v", err) + tCtx.Fatalf("validate scheduler config file failed: %v", err) } } - return mustSetupCluster(ctx, tb, cfg, featureGates, outOfTreePluginRegistry) + return mustSetupCluster(tCtx, cfg, featureGates, outOfTreePluginRegistry) } -func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, client clientset.Interface, dynClient dynamic.Interface, cleanup bool) []DataItem { - b, benchmarking := tb.(*testing.B) +func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, cleanup bool) []DataItem { + b, benchmarking := tCtx.TB().(*testing.B) if benchmarking { start := time.Now() b.Cleanup(func() { @@ -839,10 +829,10 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, podInformer := informerFactory.Core().V1().Pods() // Everything else started by this function gets stopped before it returns. 
- ctx, cancel := context.WithCancel(ctx) + tCtx = ktesting.WithCancel(tCtx) var wg sync.WaitGroup defer wg.Wait() - defer cancel() + defer tCtx.Cancel("workload is done") var mu sync.Mutex var dataItems []DataItem @@ -853,48 +843,48 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, if cleanup { // This must run before controllers get shut down. - defer cleanupWorkload(ctx, tb, tc, client, numPodsScheduledPerNamespace) + defer cleanupWorkload(tCtx, tc, numPodsScheduledPerNamespace) } - for opIndex, op := range unrollWorkloadTemplate(tb, tc.WorkloadTemplate, w) { + for opIndex, op := range unrollWorkloadTemplate(tCtx, tc.WorkloadTemplate, w) { realOp, err := op.realOp.patchParams(w) if err != nil { - tb.Fatalf("op %d: %v", opIndex, err) + tCtx.Fatalf("op %d: %v", opIndex, err) } select { - case <-ctx.Done(): - tb.Fatalf("op %d: %v", opIndex, ctx.Err()) + case <-tCtx.Done(): + tCtx.Fatalf("op %d: %v", opIndex, context.Cause(tCtx)) default: } switch concreteOp := realOp.(type) { case *createNodesOp: - nodePreparer, err := getNodePreparer(fmt.Sprintf("node-%d-", opIndex), concreteOp, client) + nodePreparer, err := getNodePreparer(fmt.Sprintf("node-%d-", opIndex), concreteOp, tCtx.Client()) if err != nil { - tb.Fatalf("op %d: %v", opIndex, err) + tCtx.Fatalf("op %d: %v", opIndex, err) } - if err := nodePreparer.PrepareNodes(ctx, nextNodeIndex); err != nil { - tb.Fatalf("op %d: %v", opIndex, err) + if err := nodePreparer.PrepareNodes(tCtx, nextNodeIndex); err != nil { + tCtx.Fatalf("op %d: %v", opIndex, err) } if cleanup { defer func() { - if err := nodePreparer.CleanupNodes(ctx); err != nil { - tb.Fatalf("failed to clean up nodes, error: %v", err) + if err := nodePreparer.CleanupNodes(tCtx); err != nil { + tCtx.Fatalf("failed to clean up nodes, error: %v", err) } }() } nextNodeIndex += concreteOp.Count case *createNamespacesOp: - nsPreparer, err := newNamespacePreparer(concreteOp, client, tb) + nsPreparer, err := newNamespacePreparer(tCtx, concreteOp) if err != nil { - tb.Fatalf("op %d: %v", opIndex, err) + tCtx.Fatalf("op %d: %v", opIndex, err) } - if err := nsPreparer.prepare(ctx); err != nil { - err2 := nsPreparer.cleanup(ctx) + if err := nsPreparer.prepare(tCtx); err != nil { + err2 := nsPreparer.cleanup(tCtx) if err2 != nil { err = fmt.Errorf("prepare: %v; cleanup: %v", err, err2) } - tb.Fatalf("op %d: %v", opIndex, err) + tCtx.Fatalf("op %d: %v", opIndex, err) } for _, n := range nsPreparer.namespaces() { if _, ok := numPodsScheduledPerNamespace[n]; ok { @@ -911,7 +901,7 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, if concreteOp.Namespace != nil { namespace = *concreteOp.Namespace } - createNamespaceIfNotPresent(ctx, tb, client, namespace, &numPodsScheduledPerNamespace) + createNamespaceIfNotPresent(tCtx, namespace, &numPodsScheduledPerNamespace) if concreteOp.PodTemplatePath == nil { concreteOp.PodTemplatePath = tc.DefaultPodTemplatePath } @@ -919,18 +909,17 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, // This needs a separate context and wait group because // the code below needs to be sure that the goroutines // are stopped. 
- var collectorCtx context.Context
- var collectorCancel func()
+ var collectorCtx ktesting.TContext
var collectorWG sync.WaitGroup
defer collectorWG.Wait()
if concreteOp.CollectMetrics {
- collectorCtx, collectorCancel = context.WithCancel(ctx)
- defer collectorCancel()
- name := tb.Name()
+ collectorCtx = ktesting.WithCancel(tCtx)
+ defer collectorCtx.Cancel("cleaning up")
+ name := tCtx.Name()
// The first part is the same for each work load, therefore we can strip it.
name = name[strings.Index(name, "/")+1:]
- collectors = getTestDataCollectors(tb, podInformer, fmt.Sprintf("%s/%s", name, namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
+ collectors = getTestDataCollectors(collectorCtx, podInformer, fmt.Sprintf("%s/%s", name, namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
for _, collector := range collectors {
// Need loop-local variable for function below.
collector := collector
@@ -941,23 +930,23 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
}()
}
}
- if err := createPods(ctx, tb, namespace, concreteOp, client); err != nil {
- tb.Fatalf("op %d: %v", opIndex, err)
+ if err := createPods(tCtx, namespace, concreteOp); err != nil {
+ tCtx.Fatalf("op %d: %v", opIndex, err)
}
if concreteOp.SkipWaitToCompletion {
// Only record those namespaces that may potentially require barriers
// in the future.
numPodsScheduledPerNamespace[namespace] += concreteOp.Count
} else {
- if err := waitUntilPodsScheduledInNamespace(ctx, tb, podInformer, namespace, concreteOp.Count); err != nil {
- tb.Fatalf("op %d: error in waiting for pods to get scheduled: %v", opIndex, err)
+ if err := waitUntilPodsScheduledInNamespace(tCtx, podInformer, namespace, concreteOp.Count); err != nil {
+ tCtx.Fatalf("op %d: error in waiting for pods to get scheduled: %v", opIndex, err)
}
}
if concreteOp.CollectMetrics {
// CollectMetrics and SkipWaitToCompletion can never be true at the
// same time, so if we're here, it means that all pods have been
// scheduled.
- collectorCancel()
+ collectorCtx.Cancel("collecting metrics, collector must stop first")
collectorWG.Wait()
mu.Lock()
for _, collector := range collectors {
@@ -980,11 +969,11 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload,
} else {
namespace = fmt.Sprintf("namespace-%d", opIndex)
}
- restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cacheddiscovery.NewMemCacheClient(client.Discovery()))
+ restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cacheddiscovery.NewMemCacheClient(tCtx.Client().Discovery()))
// Ensure the namespace exists.
nsObj := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - if _, err := client.CoreV1().Namespaces().Create(ctx, nsObj, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { - tb.Fatalf("op %d: unable to create namespace %v: %v", opIndex, namespace, err) + if _, err := tCtx.Client().CoreV1().Namespaces().Create(tCtx, nsObj, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { + tCtx.Fatalf("op %d: unable to create namespace %v: %v", opIndex, namespace, err) } var churnFns []func(name string) string @@ -992,31 +981,31 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, for i, path := range concreteOp.TemplatePaths { unstructuredObj, gvk, err := getUnstructuredFromFile(path) if err != nil { - tb.Fatalf("op %d: unable to parse the %v-th template path: %v", opIndex, i, err) + tCtx.Fatalf("op %d: unable to parse the %v-th template path: %v", opIndex, i, err) } // Obtain GVR. mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) if err != nil { - tb.Fatalf("op %d: unable to find GVR for %v: %v", opIndex, gvk, err) + tCtx.Fatalf("op %d: unable to find GVR for %v: %v", opIndex, gvk, err) } gvr := mapping.Resource // Distinguish cluster-scoped with namespaced API objects. var dynRes dynamic.ResourceInterface if mapping.Scope.Name() == meta.RESTScopeNameNamespace { - dynRes = dynClient.Resource(gvr).Namespace(namespace) + dynRes = tCtx.Dynamic().Resource(gvr).Namespace(namespace) } else { - dynRes = dynClient.Resource(gvr) + dynRes = tCtx.Dynamic().Resource(gvr) } churnFns = append(churnFns, func(name string) string { if name != "" { - if err := dynRes.Delete(ctx, name, metav1.DeleteOptions{}); err != nil { - tb.Errorf("op %d: unable to delete %v: %v", opIndex, name, err) + if err := dynRes.Delete(tCtx, name, metav1.DeleteOptions{}); err != nil { + tCtx.Errorf("op %d: unable to delete %v: %v", opIndex, name, err) } return "" } - live, err := dynRes.Create(ctx, unstructuredObj, metav1.CreateOptions{}) + live, err := dynRes.Create(tCtx, unstructuredObj, metav1.CreateOptions{}) if err != nil { return "" } @@ -1047,7 +1036,7 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, churnFns[i]("") } count++ - case <-ctx.Done(): + case <-tCtx.Done(): return } } @@ -1070,7 +1059,7 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, retVals[i][count%concreteOp.Number] = churnFns[i](retVals[i][count%concreteOp.Number]) } count++ - case <-ctx.Done(): + case <-tCtx.Done(): return } } @@ -1080,11 +1069,11 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, case *barrierOp: for _, namespace := range concreteOp.Namespaces { if _, ok := numPodsScheduledPerNamespace[namespace]; !ok { - tb.Fatalf("op %d: unknown namespace %s", opIndex, namespace) + tCtx.Fatalf("op %d: unknown namespace %s", opIndex, namespace) } } - if err := waitUntilPodsScheduled(ctx, tb, podInformer, concreteOp.Namespaces, numPodsScheduledPerNamespace); err != nil { - tb.Fatalf("op %d: %v", opIndex, err) + if err := waitUntilPodsScheduled(tCtx, podInformer, concreteOp.Namespaces, numPodsScheduledPerNamespace); err != nil { + tCtx.Fatalf("op %d: %v", opIndex, err) } // At the end of the barrier, we can be sure that there are no pods // pending scheduling in the namespaces that we just blocked on. 
@@ -1098,25 +1087,25 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, case *sleepOp: select { - case <-ctx.Done(): + case <-tCtx.Done(): case <-time.After(concreteOp.Duration): } default: runable, ok := concreteOp.(runnableOp) if !ok { - tb.Fatalf("op %d: invalid op %v", opIndex, concreteOp) + tCtx.Fatalf("op %d: invalid op %v", opIndex, concreteOp) } for _, namespace := range runable.requiredNamespaces() { - createNamespaceIfNotPresent(ctx, tb, client, namespace, &numPodsScheduledPerNamespace) + createNamespaceIfNotPresent(tCtx, namespace, &numPodsScheduledPerNamespace) } - runable.run(ctx, tb, client) + runable.run(tCtx) } } // check unused params and inform users unusedParams := w.unusedParams() if len(unusedParams) != 0 { - tb.Fatalf("the parameters %v are defined on workload %s, but unused.\nPlease make sure there are no typos.", unusedParams, w.Name) + tCtx.Fatalf("the parameters %v are defined on workload %s, but unused.\nPlease make sure there are no typos.", unusedParams, w.Name) } // Some tests have unschedulable pods. Do not add an implicit barrier at the @@ -1133,17 +1122,17 @@ func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, // // Calling cleanupWorkload can be skipped if it is known that the next workload // will run with a fresh etcd instance. -func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client clientset.Interface, numPodsScheduledPerNamespace map[string]int) { +func cleanupWorkload(tCtx ktesting.TContext, tc *testCase, numPodsScheduledPerNamespace map[string]int) { deleteNow := *metav1.NewDeleteOptions(0) for namespace := range numPodsScheduledPerNamespace { // Pods have to be deleted explicitly, with no grace period. Normally // kubelet will set the DeletionGracePeriodSeconds to zero when it's okay // to remove a deleted pod, but we don't run kubelet... - if err := client.CoreV1().Pods(namespace).DeleteCollection(ctx, deleteNow, metav1.ListOptions{}); err != nil { - tb.Fatalf("failed to delete pods in namespace %q: %v", namespace, err) + if err := tCtx.Client().CoreV1().Pods(namespace).DeleteCollection(tCtx, deleteNow, metav1.ListOptions{}); err != nil { + tCtx.Fatalf("failed to delete pods in namespace %q: %v", namespace, err) } - if err := client.CoreV1().Namespaces().Delete(ctx, namespace, deleteNow); err != nil { - tb.Fatalf("Deleting Namespace %q in numPodsScheduledPerNamespace: %v", namespace, err) + if err := tCtx.Client().CoreV1().Namespaces().Delete(tCtx, namespace, deleteNow); err != nil { + tCtx.Fatalf("Deleting Namespace %q in numPodsScheduledPerNamespace: %v", namespace, err) } } @@ -1151,8 +1140,8 @@ func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client cl // actually removing a namespace can take some time (garbage collecting // other generated object like secrets, etc.) and we don't want to // start the next workloads while that cleanup is still going on. - if err := wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) { - namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + if err := wait.PollUntilContextTimeout(tCtx, time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) { + namespaces, err := tCtx.Client().CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) if err != nil { return false, err } @@ -1165,33 +1154,33 @@ func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client cl // All namespaces gone. 
return true, nil }); err != nil { - tb.Fatalf("failed while waiting for namespace removal: %v", err) + tCtx.Fatalf("failed while waiting for namespace removal: %v", err) } } -func createNamespaceIfNotPresent(ctx context.Context, tb testing.TB, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) { +func createNamespaceIfNotPresent(tCtx ktesting.TContext, namespace string, podsPerNamespace *map[string]int) { if _, ok := (*podsPerNamespace)[namespace]; !ok { // The namespace has not created yet. // So, create that and register it. - _, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) + _, err := tCtx.Client().CoreV1().Namespaces().Create(tCtx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) if err != nil { - tb.Fatalf("failed to create namespace for Pod: %v", namespace) + tCtx.Fatalf("failed to create namespace for Pod: %v", namespace) } (*podsPerNamespace)[namespace] = 0 } } type testDataCollector interface { - run(ctx context.Context) + run(tCtx ktesting.TContext) collect() []DataItem } -func getTestDataCollectors(tb testing.TB, podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector { +func getTestDataCollectors(tCtx ktesting.TContext, podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector { if mcc == nil { mcc = &defaultMetricsCollectorConfig } return []testDataCollector{ - newThroughputCollector(tb, podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin), + newThroughputCollector(tCtx, podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin), newMetricsCollector(mcc, map[string]string{"Name": name}), } } @@ -1224,25 +1213,25 @@ func getNodePreparer(prefix string, cno *createNodesOp, clientset clientset.Inte ), nil } -func createPods(ctx context.Context, tb testing.TB, namespace string, cpo *createPodsOp, clientset clientset.Interface) error { +func createPods(tCtx ktesting.TContext, namespace string, cpo *createPodsOp) error { strategy, err := getPodStrategy(cpo) if err != nil { return err } - tb.Logf("creating %d pods in namespace %q", cpo.Count, namespace) + tCtx.Logf("creating %d pods in namespace %q", cpo.Count, namespace) config := testutils.NewTestPodCreatorConfig() config.AddStrategy(namespace, cpo.Count, strategy) - podCreator := testutils.NewTestPodCreator(clientset, config) - return podCreator.CreatePods(ctx) + podCreator := testutils.NewTestPodCreator(tCtx.Client(), config) + return podCreator.CreatePods(tCtx) } // waitUntilPodsScheduledInNamespace blocks until all pods in the given // namespace are scheduled. Times out after 10 minutes because even at the // lowest observed QPS of ~10 pods/sec, a 5000-node test should complete. 
-func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podInformer coreinformers.PodInformer, namespace string, wantCount int) error { +func waitUntilPodsScheduledInNamespace(tCtx ktesting.TContext, podInformer coreinformers.PodInformer, namespace string, wantCount int) error { var pendingPod *v1.Pod - err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) { + err := wait.PollUntilContextTimeout(tCtx, 1*time.Second, 10*time.Minute, true, func(ctx context.Context) (bool, error) { select { case <-ctx.Done(): return true, ctx.Err() @@ -1253,10 +1242,10 @@ func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podIn return false, err } if len(scheduled) >= wantCount { - tb.Logf("scheduling succeed") + tCtx.Logf("scheduling succeed") return true, nil } - tb.Logf("namespace: %s, pods: want %d, got %d", namespace, wantCount, len(scheduled)) + tCtx.Logf("namespace: %s, pods: want %d, got %d", namespace, wantCount, len(scheduled)) if len(unscheduled) > 0 { pendingPod = unscheduled[0] } else { @@ -1273,7 +1262,7 @@ func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podIn // waitUntilPodsScheduled blocks until the all pods in the given namespaces are // scheduled. -func waitUntilPodsScheduled(ctx context.Context, tb testing.TB, podInformer coreinformers.PodInformer, namespaces []string, numPodsScheduledPerNamespace map[string]int) error { +func waitUntilPodsScheduled(tCtx ktesting.TContext, podInformer coreinformers.PodInformer, namespaces []string, numPodsScheduledPerNamespace map[string]int) error { // If unspecified, default to all known namespaces. if len(namespaces) == 0 { for namespace := range numPodsScheduledPerNamespace { @@ -1282,15 +1271,15 @@ func waitUntilPodsScheduled(ctx context.Context, tb testing.TB, podInformer core } for _, namespace := range namespaces { select { - case <-ctx.Done(): - return ctx.Err() + case <-tCtx.Done(): + return context.Cause(tCtx) default: } wantCount, ok := numPodsScheduledPerNamespace[namespace] if !ok { return fmt.Errorf("unknown namespace %s", namespace) } - if err := waitUntilPodsScheduledInNamespace(ctx, tb, podInformer, namespace, wantCount); err != nil { + if err := waitUntilPodsScheduledInNamespace(tCtx, podInformer, namespace, wantCount); err != nil { return fmt.Errorf("error waiting for pods in namespace %q: %w", namespace, err) } } @@ -1440,14 +1429,12 @@ func getCustomVolumeFactory(pvTemplate *v1.PersistentVolume) func(id int) *v1.Pe // namespacePreparer holds configuration information for the test namespace preparer. type namespacePreparer struct { - client clientset.Interface count int prefix string spec *v1.Namespace - tb testing.TB } -func newNamespacePreparer(cno *createNamespacesOp, clientset clientset.Interface, tb testing.TB) (*namespacePreparer, error) { +func newNamespacePreparer(tCtx ktesting.TContext, cno *createNamespacesOp) (*namespacePreparer, error) { ns := &v1.Namespace{} if cno.NamespaceTemplatePath != nil { if err := getSpecFromFile(cno.NamespaceTemplatePath, ns); err != nil { @@ -1456,11 +1443,9 @@ func newNamespacePreparer(cno *createNamespacesOp, clientset clientset.Interface } return &namespacePreparer{ - client: clientset, count: cno.Count, prefix: cno.Prefix, spec: ns, - tb: tb, }, nil } @@ -1474,17 +1459,17 @@ func (p *namespacePreparer) namespaces() []string { } // prepare creates the namespaces. 
-func (p *namespacePreparer) prepare(ctx context.Context) error { +func (p *namespacePreparer) prepare(tCtx ktesting.TContext) error { base := &v1.Namespace{} if p.spec != nil { base = p.spec } - p.tb.Logf("Making %d namespaces with prefix %q and template %v", p.count, p.prefix, *base) + tCtx.Logf("Making %d namespaces with prefix %q and template %v", p.count, p.prefix, *base) for i := 0; i < p.count; i++ { n := base.DeepCopy() n.Name = fmt.Sprintf("%s-%d", p.prefix, i) if err := testutils.RetryWithExponentialBackOff(func() (bool, error) { - _, err := p.client.CoreV1().Namespaces().Create(ctx, n, metav1.CreateOptions{}) + _, err := tCtx.Client().CoreV1().Namespaces().Create(tCtx, n, metav1.CreateOptions{}) return err == nil || apierrors.IsAlreadyExists(err), nil }); err != nil { return err @@ -1494,12 +1479,12 @@ func (p *namespacePreparer) prepare(ctx context.Context) error { } // cleanup deletes existing test namespaces. -func (p *namespacePreparer) cleanup(ctx context.Context) error { +func (p *namespacePreparer) cleanup(tCtx ktesting.TContext) error { var errRet error for i := 0; i < p.count; i++ { n := fmt.Sprintf("%s-%d", p.prefix, i) - if err := p.client.CoreV1().Namespaces().Delete(ctx, n, metav1.DeleteOptions{}); err != nil { - p.tb.Errorf("Deleting Namespace: %v", err) + if err := tCtx.Client().CoreV1().Namespaces().Delete(tCtx, n, metav1.DeleteOptions{}); err != nil { + tCtx.Errorf("Deleting Namespace: %v", err) errRet = err } } diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go index e28ec5307a5..dacc201f8e4 100644 --- a/test/integration/scheduler_perf/scheduler_test.go +++ b/test/integration/scheduler_perf/scheduler_test.go @@ -17,7 +17,6 @@ limitations under the License. package benchmark import ( - "context" "testing" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -70,16 +69,15 @@ func TestScheduling(t *testing.T) { for _, config := range configs { // Not a sub test because we don't have a good name for it. func() { - _, ctx := ktesting.NewTestContext(t) + tCtx := ktesting.Init(t) + // No timeout here because the `go test -timeout` will ensure that // the test doesn't get stuck forever. - ctx, cancel := context.WithCancel(ctx) - defer cancel() for feature, flag := range config.featureGates { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, feature, flag)() } - informerFactory, client, dynClient := setupClusterForWorkload(ctx, t, config.schedulerConfigPath, config.featureGates, nil) + informerFactory, tCtx := setupClusterForWorkload(tCtx, config.schedulerConfigPath, config.featureGates, nil) for _, tc := range testCases { if !config.equals(tc) { @@ -93,8 +91,8 @@ func TestScheduling(t *testing.T) { if !enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) 
{ t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter) } - _, ctx := ktesting.NewTestContext(t) - runWorkload(ctx, t, tc, w, informerFactory, client, dynClient, true) + tCtx := ktesting.WithTB(tCtx, t) + runWorkload(tCtx, tc, w, informerFactory, true) }) } }) diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go index 5f3c4a3f9df..5a359d151f1 100644 --- a/test/integration/scheduler_perf/util.go +++ b/test/integration/scheduler_perf/util.go @@ -18,7 +18,6 @@ package benchmark import ( "bytes" - "context" "encoding/json" "flag" "fmt" @@ -26,26 +25,22 @@ import ( "os" "path" "sort" - "testing" + "strings" "time" v1 "k8s.io/api/core/v1" - resourcev1alpha2 "k8s.io/api/resource/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/dynamic" "k8s.io/client-go/informers" coreinformers "k8s.io/client-go/informers/core/v1" - clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" - cliflag "k8s.io/component-base/cli/flag" "k8s.io/component-base/featuregate" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/metrics/testutil" "k8s.io/klog/v2" kubeschedulerconfigv1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/apis/config" kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" @@ -53,6 +48,7 @@ import ( "k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/util" testutils "k8s.io/kubernetes/test/utils" + "k8s.io/kubernetes/test/utils/ktesting" ) const ( @@ -83,33 +79,34 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) { // remove resources after finished. // Notes on rate limiter: // - client rate limit is set to 5000. -func mustSetupCluster(ctx context.Context, tb testing.TB, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) { +func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, ktesting.TContext) { // Run API server with minimimal logging by default. Can be raised with -v. framework.MinVerbosity = 0 - _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, tb, framework.TestServerSetup{ - ModifyServerRunOptions: func(opts *options.ServerRunOptions) { - // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. - opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority"} - - // Enable DRA API group. - if enabledFeatures[features.DynamicResourceAllocation] { - opts.APIEnablement.RuntimeConfig = cliflag.ConfigurationMap{ - resourcev1alpha2.SchemeGroupVersion.String(): "true", - } - } - }, - }) - tb.Cleanup(tearDownFn) + // No alpha APIs (overrides api/all=true in https://github.com/kubernetes/kubernetes/blob/d647d19f6aef811bace300eec96a67644ff303d4/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/testing/testserver.go#L136), + // except for DRA API group when needed. 
+ runtimeConfig := []string{"api/alpha=false"} + if enabledFeatures[features.DynamicResourceAllocation] { + runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha2=true") + } + customFlags := []string{ + // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. + "--disable-admission-plugins=ServiceAccount,TaintNodesByCondition,Priority", + "--runtime-config=" + strings.Join(runtimeConfig, ","), + } + server, err := apiservertesting.StartTestServer(tCtx, apiservertesting.NewDefaultTestServerOptions(), customFlags, framework.SharedEtcd()) + if err != nil { + tCtx.Fatalf("start apiserver: %v", err) + } + tCtx.Cleanup(server.TearDownFn) // Cleanup will be in reverse order: first the clients get cancelled, - // then the apiserver is torn down. - ctx, cancel := context.WithCancel(ctx) - tb.Cleanup(cancel) + // then the apiserver is torn down via the automatic cancelation of + // tCtx. // TODO: client connection configuration, such as QPS or Burst is configurable in theory, this could be derived from the `config`, need to // support this when there is any testcase that depends on such configuration. - cfg := restclient.CopyConfig(kubeConfig) + cfg := restclient.CopyConfig(server.ClientConfig) cfg.QPS = 5000.0 cfg.Burst = 5000 @@ -118,34 +115,33 @@ func mustSetupCluster(ctx context.Context, tb testing.TB, config *config.KubeSch var err error config, err = newDefaultComponentConfig() if err != nil { - tb.Fatalf("Error creating default component config: %v", err) + tCtx.Fatalf("Error creating default component config: %v", err) } } - client := clientset.NewForConfigOrDie(cfg) - dynClient := dynamic.NewForConfigOrDie(cfg) + tCtx = ktesting.WithRESTConfig(tCtx, cfg) // Not all config options will be effective but only those mostly related with scheduler performance will // be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`. - _, informerFactory := util.StartScheduler(ctx, client, cfg, config, outOfTreePluginRegistry) - util.StartFakePVController(ctx, client, informerFactory) - runGC := util.CreateGCController(ctx, tb, *cfg, informerFactory) - runNS := util.CreateNamespaceController(ctx, tb, *cfg, informerFactory) + _, informerFactory := util.StartScheduler(tCtx, tCtx.Client(), cfg, config, outOfTreePluginRegistry) + util.StartFakePVController(tCtx, tCtx.Client(), informerFactory) + runGC := util.CreateGCController(tCtx, tCtx, *cfg, informerFactory) + runNS := util.CreateNamespaceController(tCtx, tCtx, *cfg, informerFactory) runResourceClaimController := func() {} if enabledFeatures[features.DynamicResourceAllocation] { // Testing of DRA with inline resource claims depends on this // controller for creating and removing ResourceClaims. - runResourceClaimController = util.CreateResourceClaimController(ctx, tb, client, informerFactory) + runResourceClaimController = util.CreateResourceClaimController(tCtx, tCtx, tCtx.Client(), informerFactory) } - informerFactory.Start(ctx.Done()) - informerFactory.WaitForCacheSync(ctx.Done()) + informerFactory.Start(tCtx.Done()) + informerFactory.WaitForCacheSync(tCtx.Done()) go runGC() go runNS() go runResourceClaimController() - return informerFactory, client, dynClient + return informerFactory, tCtx } // Returns the list of scheduled and unscheduled pods in the specified namespaces. 
@@ -268,7 +264,7 @@ func newMetricsCollector(config *metricsCollectorConfig, labels map[string]strin } } -func (*metricsCollector) run(ctx context.Context) { +func (*metricsCollector) run(tCtx ktesting.TContext) { // metricCollector doesn't need to start before the tests, so nothing to do here. } @@ -342,7 +338,6 @@ func collectHistogramVec(metric string, labels map[string]string, lvMap map[stri } type throughputCollector struct { - tb testing.TB podInformer coreinformers.PodInformer schedulingThroughputs []float64 labels map[string]string @@ -350,9 +345,8 @@ type throughputCollector struct { errorMargin float64 } -func newThroughputCollector(tb testing.TB, podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector { +func newThroughputCollector(tb ktesting.TB, podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector { return &throughputCollector{ - tb: tb, podInformer: podInformer, labels: labels, namespaces: namespaces, @@ -360,7 +354,7 @@ func newThroughputCollector(tb testing.TB, podInformer coreinformers.PodInformer } } -func (tc *throughputCollector) run(ctx context.Context) { +func (tc *throughputCollector) run(tCtx ktesting.TContext) { podsScheduled, _, err := getScheduledPods(tc.podInformer, tc.namespaces...) if err != nil { klog.Fatalf("%v", err) @@ -374,7 +368,7 @@ func (tc *throughputCollector) run(ctx context.Context) { for { select { - case <-ctx.Done(): + case <-tCtx.Done(): return case <-ticker.C: now := time.Now() @@ -419,7 +413,7 @@ func (tc *throughputCollector) run(ctx context.Context) { errorMargin := (duration - expectedDuration).Seconds() / expectedDuration.Seconds() * 100 if tc.errorMargin > 0 && math.Abs(errorMargin) > tc.errorMargin { // This might affect the result, report it. - tc.tb.Errorf("ERROR: Expected throuput collector to sample at regular time intervals. The %d most recent intervals took %s instead of %s, a difference of %0.1f%%.", skipped+1, duration, expectedDuration, errorMargin) + tCtx.Errorf("ERROR: Expected throuput collector to sample at regular time intervals. The %d most recent intervals took %s instead of %s, a difference of %0.1f%%.", skipped+1, duration, expectedDuration, errorMargin) } // To keep percentiles accurate, we have to record multiple samples with the same diff --git a/test/integration/util/util.go b/test/integration/util/util.go index 13e34f4cfb2..c555b637db0 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -128,7 +128,7 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf return sched, informerFactory } -func CreateResourceClaimController(ctx context.Context, tb testing.TB, clientSet clientset.Interface, informerFactory informers.SharedInformerFactory) func() { +func CreateResourceClaimController(ctx context.Context, tb ktesting.TB, clientSet clientset.Interface, informerFactory informers.SharedInformerFactory) func() { podInformer := informerFactory.Core().V1().Pods() schedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts() claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims() @@ -190,7 +190,7 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface, i // CreateGCController creates a garbage controller and returns a run function // for it. The informer factory needs to be started before invoking that // function. 
-func CreateGCController(ctx context.Context, tb testing.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() { +func CreateGCController(ctx context.Context, tb ktesting.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() { restclient.AddUserAgent(&restConfig, "gc-controller") clientSet := clientset.NewForConfigOrDie(&restConfig) metadataClient, err := metadata.NewForConfig(&restConfig) @@ -227,7 +227,7 @@ func CreateGCController(ctx context.Context, tb testing.TB, restConfig restclien // CreateNamespaceController creates a namespace controller and returns a run // function for it. The informer factory needs to be started before invoking // that function. -func CreateNamespaceController(ctx context.Context, tb testing.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() { +func CreateNamespaceController(ctx context.Context, tb ktesting.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() { restclient.AddUserAgent(&restConfig, "namespace-controller") clientSet := clientset.NewForConfigOrDie(&restConfig) metadataClient, err := metadata.NewForConfig(&restConfig) diff --git a/test/utils/ktesting/assert.go b/test/utils/ktesting/assert.go new file mode 100644 index 00000000000..cf767231823 --- /dev/null +++ b/test/utils/ktesting/assert.go @@ -0,0 +1,183 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ktesting + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/onsi/gomega" + "github.com/onsi/gomega/format" +) + +// FailureError is an error where the error string is meant to be passed to +// [TContext.Fatal] directly, i.e. adding some prefix like "unexpected error" is not +// necessary. It is also not necessary to dump the error struct. +type FailureError struct { + Msg string + FullStackTrace string +} + +func (f FailureError) Error() string { + return f.Msg +} + +func (f FailureError) Backtrace() string { + return f.FullStackTrace +} + +func (f FailureError) Is(target error) bool { + return target == ErrFailure +} + +// ErrFailure is an empty error that can be wrapped to indicate that an error +// is a FailureError. It can also be used to test for a FailureError:. +// +// return fmt.Errorf("some problem%w", ErrFailure) +// ... +// err := someOperation() +// if errors.Is(err, ErrFailure) { +// ... +// } +var ErrFailure error = FailureError{} + +func expect(tCtx TContext, actual interface{}, extra ...interface{}) gomega.Assertion { + tCtx.Helper() + return gomega.NewWithT(tCtx).Expect(actual, extra...) 
+}
+
+func expectNoError(tCtx TContext, err error, explain ...interface{}) {
+ tCtx.Helper()
+
+ if err == nil {
+ // No error, nothing to report.
+ return
+ }
+
+ description := buildDescription(explain...)
+
+ var failure FailureError
+ if errors.As(err, &failure) {
+ if backtrace := failure.Backtrace(); backtrace != "" {
+ if description != "" {
+ tCtx.Log(description)
+ }
+ tCtx.Logf("Failed at:\n %s", strings.ReplaceAll(backtrace, "\n", "\n "))
+ }
+ if description != "" {
+ tCtx.Fatalf("%s: %s", description, err.Error())
+ }
+ tCtx.Fatal(err.Error())
+ }
+
+ if description == "" {
+ description = "Unexpected error"
+ }
+ tCtx.Logf("%s:\n%s", description, format.Object(err, 1))
+ tCtx.Fatalf("%s: %v", description, err.Error())
+}
+
+func buildDescription(explain ...interface{}) string {
+ switch len(explain) {
+ case 0:
+ return ""
+ case 1:
+ if describe, ok := explain[0].(func() string); ok {
+ return describe()
+ }
+ }
+ return fmt.Sprintf(explain[0].(string), explain[1:]...)
+}
+
+// Eventually wraps [gomega.Eventually] such that a failure will be reported via
+// TContext.Fatal.
+//
+// In contrast to [gomega.Eventually], the parameter is strongly typed. It must
+// accept a TContext as first argument and return one value, the one which is
+// then checked with the matcher.
+//
+// In contrast to direct usage of [gomega.Eventually], making additional
+// assertions inside the callback is okay as long as they use the TContext that
+// is passed in. For example, errors can be checked with ExpectNoError:
+//
+// cb := func(tCtx ktesting.TContext) int {
+// value, err := doSomething(...)
+// ktesting.ExpectNoError(tCtx, err, "something failed")
+// return value
+// }
+// tCtx.Eventually(cb).Should(gomega.Equal(42), "should be the answer to everything")
+//
+// If there is no value, then an error can be returned:
+//
+// cb := func(tCtx ktesting.TContext) error {
+// err := doSomething(...)
+// return err
+// }
+// tCtx.Eventually(cb).Should(gomega.Succeed(), "foobar should succeed")
+//
+// The default Gomega poll interval and timeout are used. Setting a specific
+// timeout may be useful:
+//
+// tCtx.Eventually(cb).Timeout(5 * time.Second).Should(gomega.Succeed(), "foobar should succeed")
+//
+// Canceling the context in the callback only affects code in the callback. The
+// context passed to Eventually is not getting canceled. To abort polling
+// immediately because the expected condition is known to not be reached
+// anymore, use [gomega.StopTrying]:
+//
+// cb := func(tCtx ktesting.TContext) int {
+// value, err := doSomething(...)
+// if errors.Is(err, SomeFinalErr) {
+// gomega.StopTrying("permanent failure").Wrap(err).Now()
+// }
+// ktesting.ExpectNoError(tCtx, err, "something failed")
+// return value
+// }
+// tCtx.Eventually(cb).Should(gomega.Equal(42), "should be the answer to everything")
+//
+// To poll again after some specific timeout, use [gomega.TryAgainAfter]. This is
+// particularly useful in [Consistently] to ignore some intermittent error.
+//
+// cb := func(tCtx ktesting.TContext) int {
+// value, err := doSomething(...)
+//	    var intermittentErr SomeIntermittentError
+//	    if errors.As(err, &intermittentErr) {
+//	        gomega.TryAgainAfter(intermittentErr.RetryPeriod).Wrap(err).Now()
+//	    }
+//	    ktesting.ExpectNoError(tCtx, err, "something failed")
+//	    return value
+//	}
+//	ktesting.Eventually(tCtx, cb).Should(gomega.Equal(42), "should be the answer to everything")
+func Eventually[T any](tCtx TContext, cb func(TContext) T) gomega.AsyncAssertion {
+	tCtx.Helper()
+	return gomega.NewWithT(tCtx).Eventually(tCtx, func(ctx context.Context) (val T, err error) {
+		tCtx := WithContext(tCtx, ctx)
+		tCtx, finalize := WithError(tCtx, &err)
+		defer finalize()
+		tCtx = WithCancel(tCtx)
+		return cb(tCtx), nil
+	})
+}
+
+// Consistently wraps [gomega.Consistently] the same way as [Eventually] wraps
+// [gomega.Eventually].
+func Consistently[T any](tCtx TContext, cb func(TContext) T) gomega.AsyncAssertion {
+	tCtx.Helper()
+	return gomega.NewWithT(tCtx).Consistently(tCtx, func(ctx context.Context) (val T, err error) {
+		tCtx := WithContext(tCtx, ctx)
+		tCtx, finalize := WithError(tCtx, &err)
+		defer finalize()
+		return cb(tCtx), nil
+	})
+}
diff --git a/test/utils/ktesting/assert_test.go b/test/utils/ktesting/assert_test.go
new file mode 100644
index 00000000000..5099eeb38d1
--- /dev/null
+++ b/test/utils/ktesting/assert_test.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ktesting
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"testing"
+	"time"
+
+	"github.com/onsi/gomega"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestAsync(t *testing.T) {
+	for name, tc := range map[string]struct {
+		cb             func(TContext)
+		expectNoFail   bool
+		expectError    string
+		expectDuration time.Duration
+	}{
+		"eventually-timeout": {
+			cb: func(tCtx TContext) {
+				Eventually(tCtx, func(tCtx TContext) int {
+					// Canceling here is a nop.
+					tCtx.Cancel("testing")
+					return 0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1))
+			},
+			expectDuration: time.Second,
+			expectError: `Timed out after x.y s.
+Expected
+    <int>: 0
+to equal
+    <int>: 1`,
+		},
+		"eventually-final": {
+			cb: func(tCtx TContext) {
+				Eventually(tCtx, func(tCtx TContext) float64 {
+					gomega.StopTrying("final error").Now()
+					return 0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: 0,
+			expectError: `Told to stop trying after x.y s.
+final error`,
+		},
+		"eventually-error": {
+			cb: func(tCtx TContext) {
+				Eventually(tCtx, func(tCtx TContext) float64 {
+					tCtx.Fatal("some error")
+					return 0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: time.Second,
+			expectError: `Timed out after x.y s.
+The function passed to Eventually returned the following error:
+    <*errors.joinError | 0xXXXX>:
+    some error
+    {
+        errs: [
+            <*errors.errorString | 0xXXXX>{s: "some error"},
+        ],
+    }`,
+		},
+		"eventually-success": {
+			cb: func(tCtx TContext) {
+				Eventually(tCtx, func(tCtx TContext) float64 {
+					return 1.0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: 0,
+			expectNoFail:   true,
+			expectError:    ``,
+		},
+		"eventually-retry": {
+			cb: func(tCtx TContext) {
+				Eventually(tCtx, func(tCtx TContext) float64 {
+					gomega.TryAgainAfter(time.Millisecond).Now()
+					return 0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: time.Second,
+			expectError: `Timed out after x.y s.
+told to try again after 1ms`,
+		},
+		"consistently-timeout": {
+			cb: func(tCtx TContext) {
+				Consistently(tCtx, func(tCtx TContext) float64 {
+					// Canceling here is a nop.
+					tCtx.Cancel("testing")
+					return 0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: 0,
+			expectError: `Failed after x.y s.
+Expected
+    <float64>: 0
+to equal
+    <float64>: 1`,
+		},
+		"consistently-final": {
+			cb: func(tCtx TContext) {
+				Consistently(tCtx, func(tCtx TContext) float64 {
+					gomega.StopTrying("final error").Now()
+					tCtx.FailNow()
+					return 0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: 0,
+			expectError: `Told to stop trying after x.y s.
+final error`,
+		},
+		"consistently-error": {
+			cb: func(tCtx TContext) {
+				Consistently(tCtx, func(tCtx TContext) float64 {
+					tCtx.Fatal("some error")
+					return 0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: 0,
+			expectError: `Failed after x.y s.
+The function passed to Consistently returned the following error:
+    <*errors.joinError | 0xXXXX>:
+    some error
+    {
+        errs: [
+            <*errors.errorString | 0xXXXX>{s: "some error"},
+        ],
+    }`,
+		},
+		"consistently-success": {
+			cb: func(tCtx TContext) {
+				Consistently(tCtx, func(tCtx TContext) float64 {
+					return 1.0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: time.Second,
+			expectNoFail:   true,
+			expectError:    ``,
+		},
+		"consistently-retry": {
+			cb: func(tCtx TContext) {
+				Consistently(tCtx, func(tCtx TContext) float64 {
+					gomega.TryAgainAfter(time.Millisecond).Wrap(errors.New("intermittent error")).Now()
+					return 0
+				}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
+			},
+			expectDuration: time.Second,
+			expectError: `Timed out while waiting on TryAgainAfter after x.y s.
+told to try again after 1ms: intermittent error`, + }, + } { + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + tCtx := Init(t) + var err error + tCtx, finalize := WithError(tCtx, &err) + start := time.Now() + func() { + defer finalize() + tc.cb(tCtx) + }() + duration := time.Since(start) + assert.InDelta(t, tc.expectDuration.Seconds(), duration.Seconds(), 0.1, fmt.Sprintf("callback invocation duration %s", duration)) + assert.Equal(t, !tc.expectNoFail, tCtx.Failed(), "Failed()") + if tc.expectError == "" { + assert.NoError(t, err) + } else if assert.NotNil(t, err) { + t.Logf("Result:\n%s", err.Error()) + errMsg := err.Error() + errMsg = regexp.MustCompile(`[[:digit:]]+\.[[:digit:]]+s`).ReplaceAllString(errMsg, "x.y s") + errMsg = regexp.MustCompile(`0x[[:xdigit:]]+`).ReplaceAllString(errMsg, "0xXXXX") + assert.Equal(t, tc.expectError, errMsg) + } + }) + } +} diff --git a/test/utils/ktesting/clientcontext.go b/test/utils/ktesting/clientcontext.go new file mode 100644 index 00000000000..d07cbccf14a --- /dev/null +++ b/test/utils/ktesting/clientcontext.go @@ -0,0 +1,114 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ktesting + +import ( + "fmt" + + "github.com/onsi/gomega" + apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/dynamic" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/klog/v2" +) + +// WithRESTConfig initializes all client-go clients with new clients +// created for the config. The current test name gets included in the UserAgent. +func WithRESTConfig(tCtx TContext, cfg *rest.Config) TContext { + cfg = rest.CopyConfig(cfg) + cfg.UserAgent = fmt.Sprintf("%s -- %s", rest.DefaultKubernetesUserAgent(), tCtx.Name()) + + cCtx := clientContext{ + TContext: tCtx, + restConfig: cfg, + client: clientset.NewForConfigOrDie(cfg), + dynamic: dynamic.NewForConfigOrDie(cfg), + apiextensions: apiextensions.NewForConfigOrDie(cfg), + } + + cachedDiscovery := memory.NewMemCacheClient(cCtx.client.Discovery()) + cCtx.restMapper = restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscovery) + + return &cCtx +} + +// WithClients uses an existing config and clients. 
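+//
+// A minimal usage sketch, not part of this change: cfg, mapper, client, dyn,
+// and apiext are placeholders for values created elsewhere (for example by a
+// test framework), and the List call only illustrates how the stored clients
+// are retrieved again:
+//
+//	tCtx := ktesting.Init(t)
+//	tCtx = ktesting.WithClients(tCtx, cfg, mapper, client, dyn, apiext)
+//	pods, err := tCtx.Client().CoreV1().Pods("default").List(tCtx, metav1.ListOptions{})
+//	tCtx.ExpectNoError(err, "list pods")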
+func WithClients(tCtx TContext, cfg *rest.Config, mapper *restmapper.DeferredDiscoveryRESTMapper, client clientset.Interface, dynamic dynamic.Interface, apiextensions apiextensions.Interface) TContext { + return clientContext{ + TContext: tCtx, + restConfig: cfg, + restMapper: mapper, + client: client, + dynamic: dynamic, + apiextensions: apiextensions, + } +} + +type clientContext struct { + TContext + + restConfig *rest.Config + restMapper *restmapper.DeferredDiscoveryRESTMapper + client clientset.Interface + dynamic dynamic.Interface + apiextensions apiextensions.Interface +} + +func (cCtx clientContext) CleanupCtx(cb func(TContext)) { + cCtx.Helper() + cleanupCtx(cCtx, cb) +} + +func (cCtx clientContext) Expect(actual interface{}, extra ...interface{}) gomega.Assertion { + cCtx.Helper() + return expect(cCtx, actual, extra...) +} + +func (cCtx clientContext) ExpectNoError(err error, explain ...interface{}) { + cCtx.Helper() + expectNoError(cCtx, err, explain...) +} + +func (cCtx clientContext) Logger() klog.Logger { + return klog.FromContext(cCtx) +} + +func (cCtx clientContext) RESTConfig() *rest.Config { + if cCtx.restConfig == nil { + return nil + } + return rest.CopyConfig(cCtx.restConfig) +} + +func (cCtx clientContext) RESTMapper() *restmapper.DeferredDiscoveryRESTMapper { + return cCtx.restMapper +} + +func (cCtx clientContext) Client() clientset.Interface { + return cCtx.client +} + +func (cCtx clientContext) Dynamic() dynamic.Interface { + return cCtx.dynamic +} + +func (cCtx clientContext) APIExtensions() apiextensions.Interface { + return cCtx.apiextensions +} diff --git a/test/utils/ktesting/contexthelper.go b/test/utils/ktesting/contexthelper.go new file mode 100644 index 00000000000..28d2b245934 --- /dev/null +++ b/test/utils/ktesting/contexthelper.go @@ -0,0 +1,91 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ktesting + +import ( + "context" + "fmt" + "time" +) + +// cleanupErr creates a cause when canceling a context because the test has completed. +// It is a context.Canceled error. +func cleanupErr(testName string) error { + return canceledError(fmt.Sprintf("test %s is cleaning up", testName)) +} + +type canceledError string + +func (c canceledError) Error() string { return string(c) } + +func (c canceledError) Is(target error) bool { + return target == context.Canceled +} + +// withTimeout corresponds to [context.WithTimeout]. In contrast to +// [context.WithTimeout], it automatically cancels during test cleanup, provides +// the given cause when the deadline is reached, and its cancel function +// requires a cause. +func withTimeout(ctx context.Context, tb TB, timeout time.Duration, timeoutCause string) (context.Context, func(cause string)) { + tb.Helper() + + now := time.Now() + + cancelCtx, cancel := context.WithCancelCause(ctx) + after := time.NewTimer(timeout) + stopCtx, stop := context.WithCancel(ctx) // Only used internally, doesn't need a cause. 
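+
+	// The cleanup callback below cancels with the cleanup cause once the test
+	// finishes and stops the watchdog goroutine; until then, the goroutine
+	// waits for the timer and, if it fires first, cancels with the timeout cause.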
+ tb.Cleanup(func() { + cancel(cleanupErr(tb.Name())) + stop() + }) + go func() { + select { + case <-stopCtx.Done(): + after.Stop() + // No need to set a cause here. The cause or error of + // the parent context will be used. + case <-after.C: + cancel(canceledError(timeoutCause)) + } + }() + + // Determine which deadline is sooner: ours or that of our parent. + deadline := now.Add(timeout) + if parentDeadline, ok := ctx.Deadline(); ok { + if deadline.After(parentDeadline) { + deadline = parentDeadline + } + } + + // We always have a deadline. + return deadlineContext{Context: cancelCtx, deadline: deadline}, func(cause string) { + var cancelCause error + if cause != "" { + cancelCause = canceledError(cause) + } + cancel(cancelCause) + } +} + +type deadlineContext struct { + context.Context + deadline time.Time +} + +func (d deadlineContext) Deadline() (time.Time, bool) { + return d.deadline, true +} diff --git a/test/utils/ktesting/contexthelper_test.go b/test/utils/ktesting/contexthelper_test.go new file mode 100644 index 00000000000..548f99a975f --- /dev/null +++ b/test/utils/ktesting/contexthelper_test.go @@ -0,0 +1,126 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ktesting + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestCleanupErr(t *testing.T) { + actual := cleanupErr(t.Name()) + if !errors.Is(actual, context.Canceled) { + t.Errorf("cleanupErr %T should be a %T", actual, context.Canceled) + } +} + +func TestCause(t *testing.T) { + timeoutCause := canceledError("I timed out") + parentCause := errors.New("parent canceled") + + t.Parallel() + for name, tt := range map[string]struct { + parentCtx context.Context + timeout time.Duration + sleep time.Duration + cancelCause string + expectErr, expectCause error + expectDeadline time.Duration + }{ + "nothing": { + parentCtx: context.Background(), + timeout: 5 * time.Millisecond, + sleep: time.Millisecond, + }, + "timeout": { + parentCtx: context.Background(), + timeout: time.Millisecond, + sleep: 5 * time.Millisecond, + expectErr: context.Canceled, + expectCause: canceledError(timeoutCause), + }, + "parent-canceled": { + parentCtx: func() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx + }(), + timeout: time.Millisecond, + sleep: 5 * time.Millisecond, + expectErr: context.Canceled, + expectCause: context.Canceled, + }, + "parent-cause": { + parentCtx: func() context.Context { + ctx, cancel := context.WithCancelCause(context.Background()) + cancel(parentCause) + return ctx + }(), + timeout: time.Millisecond, + sleep: 5 * time.Millisecond, + expectErr: context.Canceled, + expectCause: parentCause, + }, + "deadline-no-parent": { + parentCtx: context.Background(), + timeout: time.Minute, + expectDeadline: time.Minute, + }, + "deadline-parent": { + parentCtx: func() context.Context { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + t.Cleanup(cancel) + return ctx + }(), 
+ timeout: 2 * time.Minute, + expectDeadline: time.Minute, + }, + "deadline-child": { + parentCtx: func() context.Context { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + t.Cleanup(cancel) + return ctx + }(), + timeout: time.Minute, + expectDeadline: time.Minute, + }, + } { + tt := tt + t.Run(name, func(t *testing.T) { + ctx, cancel := withTimeout(tt.parentCtx, t, tt.timeout, timeoutCause.Error()) + if tt.cancelCause != "" { + cancel(tt.cancelCause) + } + if tt.expectDeadline != 0 { + actualDeadline, ok := ctx.Deadline() + if assert.True(t, ok, "should have had a deadline") { + assert.InDelta(t, time.Until(actualDeadline), tt.expectDeadline, float64(time.Second), "remaining time till Deadline()") + } + } + time.Sleep(tt.sleep) + actualErr := ctx.Err() + actualCause := context.Cause(ctx) + assert.Equal(t, tt.expectErr, actualErr, "ctx.Err()") + assert.Equal(t, tt.expectCause, actualCause, "context.Cause()") + + }) + } +} diff --git a/test/utils/ktesting/doc.go b/test/utils/ktesting/doc.go new file mode 100644 index 00000000000..3207f311bc8 --- /dev/null +++ b/test/utils/ktesting/doc.go @@ -0,0 +1,32 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ktesting is a wrapper around k8s.io/klog/v2/ktesting. In contrast +// to the klog package, this one is opinionated and tailored towards testing +// Kubernetes. +// +// Importing it +// - adds the -v command line flag +// - enables better dumping of complex datatypes +// - sets the default verbosity to 5 (can be changed with [SetDefaultVerbosity]) +// +// It also adds additional APIs and types for unit and integration tests +// which are too experimental for klog and/or are unrelated to logging. +// The ktesting package itself takes care of managing a test context +// with deadlines, timeouts, cancellation, and some common attributes +// as first-class members of the API. Sub-packages have additional APIs +// for propagating values via the context, implemented via [WithValue]. +package ktesting diff --git a/test/utils/ktesting/errorcontext.go b/test/utils/ktesting/errorcontext.go new file mode 100644 index 00000000000..a963b199e85 --- /dev/null +++ b/test/utils/ktesting/errorcontext.go @@ -0,0 +1,153 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package ktesting
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"k8s.io/klog/v2"
+)
+
+// WithError creates a context where test failures are collected and stored in
+// the provided error instance when the caller is done. Use it like this:
+//
+//	func doSomething(tCtx ktesting.TContext) (finalErr error) {
+//	    tCtx, finalize := WithError(tCtx, &finalErr)
+//	    defer finalize()
+//	    ...
+//	    tCtx.Fatal("some failure")
+//	}
+//
+// Any error already stored in the variable will get overwritten by finalize if
+// there were test failures, otherwise the variable is left unchanged.
+// If there were multiple test errors, then the error will wrap all of
+// them with errors.Join.
+//
+// Test failures are not propagated to the parent context.
+func WithError(tCtx TContext, err *error) (TContext, func()) {
+	eCtx := &errorContext{
+		TContext: tCtx,
+	}
+
+	return eCtx, func() {
+		// Recover has to be called in the deferred function. When called inside
+		// a function called by a deferred function (like finalize below), it
+		// returns nil.
+		if e := recover(); e != nil {
+			if _, ok := e.(fatalWithError); !ok {
+				// Not our own panic, pass it on instead of setting the error.
+				panic(e)
+			}
+		}
+
+		eCtx.finalize(err)
+	}
+}
+
+type errorContext struct {
+	TContext
+
+	mutex  sync.Mutex
+	errors []error
+	failed bool
+}
+
+func (eCtx *errorContext) finalize(err *error) {
+	eCtx.mutex.Lock()
+	defer eCtx.mutex.Unlock()
+
+	if !eCtx.failed {
+		return
+	}
+
+	errs := eCtx.errors
+	if len(errs) == 0 {
+		errs = []error{errFailedWithNoExplanation}
+	}
+	*err = errors.Join(errs...)
+}
+
+func (eCtx *errorContext) Error(args ...any) {
+	eCtx.mutex.Lock()
+	defer eCtx.mutex.Unlock()
+
+	// Gomega adds a leading newline in https://github.com/onsi/gomega/blob/f804ac6ada8d36164ecae0513295de8affce1245/internal/gomega.go#L37
+	// Let's strip that at start and end because ktesting will make errors
+	// stand out more with the "ERROR" prefix, so there's no need for additional
+	// line breaks.
+	eCtx.errors = append(eCtx.errors, errors.New(strings.TrimSpace(fmt.Sprintln(args...))))
+	eCtx.failed = true
+}
+
+func (eCtx *errorContext) Errorf(format string, args ...any) {
+	eCtx.mutex.Lock()
+	defer eCtx.mutex.Unlock()
+
+	eCtx.errors = append(eCtx.errors, errors.New(strings.TrimSpace(fmt.Sprintf(format, args...))))
+	eCtx.failed = true
+}
+
+func (eCtx *errorContext) Fail() {
+	eCtx.mutex.Lock()
+	defer eCtx.mutex.Unlock()
+
+	eCtx.failed = true
+}
+
+func (eCtx *errorContext) FailNow() {
+	eCtx.Helper()
+	eCtx.Fail()
+	panic(failed)
+}
+
+func (eCtx *errorContext) Failed() bool {
+	eCtx.mutex.Lock()
+	defer eCtx.mutex.Unlock()
+
+	return eCtx.failed
+}
+
+func (eCtx *errorContext) Fatal(args ...any) {
+	eCtx.Error(args...)
+	eCtx.FailNow()
+}
+
+func (eCtx *errorContext) Fatalf(format string, args ...any) {
+	eCtx.Errorf(format, args...)
+	eCtx.FailNow()
+}
+
+func (eCtx *errorContext) CleanupCtx(cb func(TContext)) {
+	eCtx.Helper()
+	cleanupCtx(eCtx, cb)
+}
+
+func (eCtx *errorContext) Logger() klog.Logger {
+	return klog.FromContext(eCtx)
+}
+
+// fatalWithError is the internal type that should never get propagated up. The
+// only case where that can happen is when the developer forgot to call
+// finalize via defer. The string explains that, in case developers get to
+// see it.
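+//
+// A sketch of the intended round trip (illustration only; parentCtx and err
+// are placeholders):
+//
+//	tCtx, finalize := WithError(parentCtx, &err)
+//	defer finalize()           // recovers the panic below and stores the failure in err
+//	tCtx.Fatal("some failure") // records the error, then panics with `failed`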
+type fatalWithError string + +const failed = fatalWithError("WithError TContext encountered a fatal error, but the finalize function was not called via defer as it should have been.") + +var errFailedWithNoExplanation = errors.New("WithError context was marked as failed without recording an error") diff --git a/test/utils/ktesting/errorcontext_test.go b/test/utils/ktesting/errorcontext_test.go new file mode 100644 index 00000000000..24492d9353d --- /dev/null +++ b/test/utils/ktesting/errorcontext_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ktesting + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWithError(t *testing.T) { + t.Run("panic", func(t *testing.T) { + assert.Panics(t, func() { + tCtx := Init(t) + var err error + _, finalize := WithError(tCtx, &err) + defer finalize() + + panic("pass me through") + }) + }) + + normalErr := errors.New("normal error") + + for name, tc := range map[string]struct { + cb func(TContext) + expectNoFail bool + expectError string + }{ + "none": { + cb: func(tCtx TContext) {}, + expectNoFail: true, + expectError: normalErr.Error(), + }, + "Error": { + cb: func(tCtx TContext) { + tCtx.Error("some error") + }, + expectError: "some error", + }, + "Errorf": { + cb: func(tCtx TContext) { + tCtx.Errorf("some %s", "error") + }, + expectError: "some error", + }, + "Fatal": { + cb: func(tCtx TContext) { + tCtx.Fatal("some error") + tCtx.Error("another error") + }, + expectError: "some error", + }, + "Fatalf": { + cb: func(tCtx TContext) { + tCtx.Fatalf("some %s", "error") + tCtx.Error("another error") + }, + expectError: "some error", + }, + "Fail": { + cb: func(tCtx TContext) { + tCtx.Fatalf("some %s", "error") + tCtx.Error("another error") + }, + expectError: "some error", + }, + "FailNow": { + cb: func(tCtx TContext) { + tCtx.FailNow() + tCtx.Error("another error") + }, + expectError: errFailedWithNoExplanation.Error(), + }, + "many": { + cb: func(tCtx TContext) { + tCtx.Error("first error") + tCtx.Error("second error") + }, + expectError: `first error +second error`, + }, + } { + t.Run(name, func(t *testing.T) { + tCtx := Init(t) + err := normalErr + tCtx, finalize := WithError(tCtx, &err) + func() { + defer finalize() + tc.cb(tCtx) + }() + + assert.Equal(t, !tc.expectNoFail, tCtx.Failed(), "Failed()") + if tc.expectError == "" { + assert.NoError(t, err) + } else if assert.NotNil(t, err) { + assert.Equal(t, tc.expectError, err.Error()) + } + }) + } +} diff --git a/test/utils/ktesting/examples/gomega/doc.go b/test/utils/ktesting/examples/gomega/doc.go new file mode 100644 index 00000000000..76312bb4eb0 --- /dev/null +++ b/test/utils/ktesting/examples/gomega/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The tests will fail and therefore are excluded from normal "make test" via +// the "example" build tag. To run the tests and check the output, use "go test +// -tags example ." +package gomega diff --git a/test/utils/ktesting/examples/gomega/example_test.go b/test/utils/ktesting/examples/gomega/example_test.go new file mode 100644 index 00000000000..48150766f86 --- /dev/null +++ b/test/utils/ktesting/examples/gomega/example_test.go @@ -0,0 +1,44 @@ +//go:build example +// +build example + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gomega + +// The tests below will fail and therefore are excluded from +// normal "make test" via the "example" build tag. To run +// the tests and check the output, use "go test -tags example ." + +import ( + "context" + "testing" + "time" + + "github.com/onsi/gomega" + "k8s.io/kubernetes/test/utils/ktesting" +) + +func TestGomega(t *testing.T) { + tCtx := ktesting.Init(t) + + gomega.NewWithT(tCtx).Eventually(tCtx, func(ctx context.Context) int { + // TODO: tCtx = ktesting.WithContext(tCtx, ctx) + // Or some dedicated tCtx.Eventually? + + return 42 + }).WithPolling(time.Second).Should(gomega.Equal(1)) +} diff --git a/test/utils/ktesting/examples/logging/doc.go b/test/utils/ktesting/examples/logging/doc.go new file mode 100644 index 00000000000..67085e2d5f0 --- /dev/null +++ b/test/utils/ktesting/examples/logging/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The tests will fail and therefore are excluded from normal "make test" via +// the "example" build tag. To run the tests and check the output, use "go test +// -tags example ." +package logging diff --git a/test/utils/ktesting/examples/logging/example_test.go b/test/utils/ktesting/examples/logging/example_test.go new file mode 100644 index 00000000000..6202838e56e --- /dev/null +++ b/test/utils/ktesting/examples/logging/example_test.go @@ -0,0 +1,74 @@ +//go:build example +// +build example + +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logging + +// The tests below will fail and therefore are excluded from +// normal "make test" via the "example" build tag. To run +// the tests and check the output, use "go test -tags example ." + +import ( + "testing" + + "k8s.io/kubernetes/test/utils/ktesting" +) + +func TestError(t *testing.T) { + tCtx := ktesting.Init(t) + tCtx.Error("some", "thing") +} + +func TestErrorf(t *testing.T) { + tCtx := ktesting.Init(t) + tCtx.Errorf("some %s", "thing") +} + +func TestFatal(t *testing.T) { + tCtx := ktesting.Init(t) + tCtx.Fatal("some", "thing") + tCtx.Log("not reached") +} + +func TestFatalf(t *testing.T) { + tCtx := ktesting.Init(t) + tCtx.Fatalf("some %s", "thing") + tCtx.Log("not reached") +} + +func TestInfo(t *testing.T) { + tCtx := ktesting.Init(t) + tCtx.Log("hello via Log") + tCtx.Logger().Info("hello via Info") + tCtx.Error("some", "thing") +} + +func TestWithStep(t *testing.T) { + tCtx := ktesting.Init(t) + bake(ktesting.WithStep(tCtx, "bake cake")) +} + +func bake(tCtx ktesting.TContext) { + heatOven(ktesting.WithStep(tCtx, "set heat for baking")) +} + +func heatOven(tCtx ktesting.TContext) { + tCtx.Log("Log()") + tCtx.Logger().Info("Logger().Info()") + tCtx.Fatal("oven not found") +} diff --git a/test/utils/ktesting/examples/with_ktesting/doc.go b/test/utils/ktesting/examples/with_ktesting/doc.go new file mode 100644 index 00000000000..42b2a6b42fb --- /dev/null +++ b/test/utils/ktesting/examples/with_ktesting/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The tests will fail and therefore are excluded from normal "make test" via +// the "example" build tag. To run the tests and check the output, use "go test +// -tags example ." +package withktesting diff --git a/test/utils/ktesting/examples/with_ktesting/example_test.go b/test/utils/ktesting/examples/with_ktesting/example_test.go new file mode 100644 index 00000000000..576f310c34c --- /dev/null +++ b/test/utils/ktesting/examples/with_ktesting/example_test.go @@ -0,0 +1,52 @@ +//go:build example +// +build example + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package withktesting + +// The tests below will fail and therefore are excluded from +// normal "make test" via the "example" build tag. To run +// the tests and check the output, use "go test -tags example ." + +import ( + "context" + "testing" + "time" + + "k8s.io/kubernetes/test/utils/ktesting" +) + +func TestTimeout(t *testing.T) { + tCtx := ktesting.Init(t) + tmp := t.TempDir() + tCtx.Logf("Using %q as temporary directory.", tmp) + tCtx.Cleanup(func() { + t.Log("Cleaning up...") + }) + if deadline, ok := t.Deadline(); ok { + t.Logf("Will fail shortly before the test suite deadline at %s.", deadline) + } + select { + case <-time.After(1000 * time.Hour): + // This should not be reached. + tCtx.Log("Huh?! I shouldn't be that old.") + case <-tCtx.Done(): + // But this will before the test suite timeout. + tCtx.Errorf("need to stop: %v", context.Cause(tCtx)) + } +} diff --git a/test/utils/ktesting/examples/without_ktesting/doc.go b/test/utils/ktesting/examples/without_ktesting/doc.go new file mode 100644 index 00000000000..9b02e894aa3 --- /dev/null +++ b/test/utils/ktesting/examples/without_ktesting/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The tests will fail and therefore are excluded from normal "make test" via +// the "example" build tag. To run the tests and check the output, use "go test +// -tags example ." +package withoutktesting diff --git a/test/utils/ktesting/examples/without_ktesting/example_test.go b/test/utils/ktesting/examples/without_ktesting/example_test.go new file mode 100644 index 00000000000..d0bc37eac3b --- /dev/null +++ b/test/utils/ktesting/examples/without_ktesting/example_test.go @@ -0,0 +1,40 @@ +//go:build example +// +build example + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package withoutktesting + +// The tests below will fail and therefore are excluded from +// normal "make test" via the "example" build tag. To run +// the tests and check the output, use "go test -tags example ." 
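+//
+// In contrast to the with_ktesting example, the test below has no context that
+// gets canceled ahead of the test suite deadline, so it blocks until the
+// "go test" timeout aborts the whole test binary.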
+ +import ( + "testing" + "time" +) + +func TestTimeout(t *testing.T) { + tmp := t.TempDir() + t.Logf("Using %q as temporary directory.", tmp) + t.Cleanup(func() { + t.Log("Cleaning up...") + }) + // This will not complete anytime soon... + t.Log("Please kill me.") + <-time.After(1000 * time.Hour) +} diff --git a/test/utils/ktesting/initoption/initoption.go b/test/utils/ktesting/initoption/initoption.go new file mode 100644 index 00000000000..cdac2be3950 --- /dev/null +++ b/test/utils/ktesting/initoption/initoption.go @@ -0,0 +1,30 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package initoption + +import "k8s.io/kubernetes/test/utils/ktesting/internal" + +// InitOption is a functional option for Init and InitCtx. +type InitOption func(c *internal.InitConfig) + +// PerTestOutput controls whether a per-test logger gets +// set up by Init. Has no effect in InitCtx. +func PerTestOutput(enabled bool) InitOption { + return func(c *internal.InitConfig) { + c.PerTestOutput = enabled + } +} diff --git a/test/utils/ktesting/internal/config.go b/test/utils/ktesting/internal/config.go new file mode 100644 index 00000000000..22f7560aedf --- /dev/null +++ b/test/utils/ktesting/internal/config.go @@ -0,0 +1,21 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +type InitConfig struct { + PerTestOutput bool +} diff --git a/test/utils/ktesting/klogcontext.go b/test/utils/ktesting/klogcontext.go new file mode 100644 index 00000000000..08cd8e7e6be --- /dev/null +++ b/test/utils/ktesting/klogcontext.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ktesting + +import ( + "fmt" + "strings" + "time" +) + +var timeNow = time.Now // Can be stubbed out for testing. + +// withKlogHeader creates a TB where the same "I