apiserver + controllers: enhance context support

27a68aee3a introduced context support for events. Creating an event
broadcaster with a context makes tests more resilient against leaking
goroutines, because the context gets canceled at the end of a test, and it
enables per-test output via ktesting.

The context could simply be passed to the constructor, but a cleaner solution
is to enhance context support in the apiserver and then pass the context into
the controller's run method. This change ripples up the call stack to all
places that start an apiserver.
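
A minimal sketch of the resulting test pattern (runController is a
hypothetical stand-in; ktesting.Init and the TContext type appear in the
diffs below):

    package example

    import (
        "context"
        "testing"

        "k8s.io/kubernetes/test/utils/ktesting"
    )

    // runController stands in for a controller's run method that blocks
    // until its context is canceled (hypothetical, for illustration).
    func runController(ctx context.Context) {
        <-ctx.Done()
    }

    func TestExample(t *testing.T) {
        // tCtx implements both context.Context and testing.TB: it is
        // canceled when the test ends, so the goroutine below cannot
        // leak past the test, and its log output is per-test.
        tCtx := ktesting.Init(t)
        go runController(tCtx)
    }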
Author: Patrick Ohly
Date: 2023-12-01 09:00:59 +01:00
Parent: 591855966c
Commit: b92273a760
25 changed files with 197 additions and 122 deletions

---

@@ -94,7 +94,7 @@ func TestSecretsShouldBeTransformed(t *testing.T) {
if err != nil {
t.Fatalf("Failed to create test secret, error: %v", err)
}
- test.runResource(test.logger, tt.unSealFunc, tt.transformerPrefix, "", "v1", "secrets", test.secret.Name, test.secret.Namespace)
+ test.runResource(test.TContext, tt.unSealFunc, tt.transformerPrefix, "", "v1", "secrets", test.secret.Name, test.secret.Namespace)
test.cleanUp()
}
}

---

@@ -50,6 +50,7 @@ import (
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/etcd"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/pointer"
"sigs.k8s.io/yaml"
)
@@ -75,7 +76,7 @@ const (
type unSealSecret func(ctx context.Context, cipherText []byte, dataCtx value.Context, config apiserverv1.ProviderConfiguration) ([]byte, error)
type transformTest struct {
- logger kubeapiservertesting.Logger
+ ktesting.TContext
storageConfig *storagebackend.Config
configDir string
transformerConfig string
@@ -85,12 +86,13 @@ type transformTest struct {
secret *corev1.Secret
}
- func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML string, reload bool, configDir string, storageConfig *storagebackend.Config) (*transformTest, error) {
+ func newTransformTest(tb testing.TB, transformerConfigYAML string, reload bool, configDir string, storageConfig *storagebackend.Config) (*transformTest, error) {
+ tCtx := ktesting.Init(tb)
if storageConfig == nil {
storageConfig = framework.SharedEtcd()
}
e := transformTest{
- logger: l,
+ TContext: tCtx,
transformerConfig: transformerConfigYAML,
storageConfig: storageConfig,
}
@@ -113,7 +115,7 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin
return nil, fmt.Errorf("failed to read config file: %w", err)
}
- if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(l, nil, e.getEncryptionOptions(reload), e.storageConfig); err != nil {
+ if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(tb, nil, e.getEncryptionOptions(reload), e.storageConfig); err != nil {
e.cleanUp()
return nil, fmt.Errorf("failed to start KubeAPI server: %w", err)
}
@@ -131,11 +133,11 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin
if transformerConfigYAML != "" && reload {
// when reloading is enabled, this healthz endpoint is always present
- mustBeHealthy(l, "/kms-providers", "ok", e.kubeAPIServer.ClientConfig)
- mustNotHaveLivez(l, "/kms-providers", "404 page not found", e.kubeAPIServer.ClientConfig)
+ mustBeHealthy(tCtx, "/kms-providers", "ok", e.kubeAPIServer.ClientConfig)
+ mustNotHaveLivez(tCtx, "/kms-providers", "404 page not found", e.kubeAPIServer.ClientConfig)
// excluding healthz endpoints even if they do not exist should work
mustBeHealthy(l, "", `warn: some health checks cannot be excluded: no matches for "kms-provider-0","kms-provider-1","kms-provider-2","kms-provider-3"`,
mustBeHealthy(tCtx, "", `warn: some health checks cannot be excluded: no matches for "kms-provider-0","kms-provider-1","kms-provider-2","kms-provider-3"`,
e.kubeAPIServer.ClientConfig, "kms-provider-0", "kms-provider-1", "kms-provider-2", "kms-provider-3")
}
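
The health-check helpers now take the test context in place of the logger:
because ktesting.TContext embeds testing.TB and context.Context, a single
value supplies request cancellation, logging, and failure reporting. A
sketch of what such a helper could look like (the real signatures of
mustBeHealthy/mustNotHaveLivez are not shown in this diff, so the shape
below is an assumption):

    package example

    import (
        "strings"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
        "k8s.io/kubernetes/test/utils/ktesting"
    )

    // Hypothetical helper shape: tCtx is both the request context and the
    // test handle used for failing the test.
    func mustBeHealthy(tCtx ktesting.TContext, checkName, want string, config *rest.Config, excludes ...string) {
        tCtx.Helper()
        req := kubernetes.NewForConfigOrDie(config).CoreV1().RESTClient().
            Get().AbsPath("/healthz" + checkName)
        for _, exclude := range excludes {
            req = req.Param("exclude", exclude)
        }
        body, err := req.Do(tCtx).Raw() // canceled automatically when the test ends
        if err != nil && len(body) == 0 {
            tCtx.Fatalf("health check %q failed: %v", checkName, err)
        }
        if !strings.Contains(string(body), want) {
            tCtx.Fatalf("health check %q: got %q, want substring %q", checkName, string(body), want)
        }
    }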
@@ -530,7 +532,7 @@ func (e *transformTest) writeRawRecordToETCD(path string, data []byte) (*clientv
}
func (e *transformTest) printMetrics() error {
e.logger.Logf("Transformation Metrics:")
e.Logf("Transformation Metrics:")
metrics, err := legacyregistry.DefaultGatherer.Gather()
if err != nil {
return fmt.Errorf("failed to gather metrics: %s", err)
@@ -538,9 +540,9 @@ func (e *transformTest) printMetrics() error {
for _, mf := range metrics {
if strings.HasPrefix(*mf.Name, metricsPrefix) {
e.logger.Logf("%s", *mf.Name)
e.Logf("%s", *mf.Name)
for _, metric := range mf.GetMetric() {
e.logger.Logf("%v", metric)
e.Logf("%v", metric)
}
}
}
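
The logger field disappears because transformTest now embeds
ktesting.TContext: the testing.TB methods are promoted, so e.Logf replaces
e.logger.Logf, and the struct can be handed to anything that wants a
context.Context (as in test.runResource(test.TContext, ...) above). In
sketch form:

    package example

    import "k8s.io/kubernetes/test/utils/ktesting"

    // Minimal sketch of the new struct shape; other fields elided.
    type transformTest struct {
        ktesting.TContext // promotes Logf/Fatalf (testing.TB) and Done/Err (context.Context)
    }

    func (e *transformTest) printBanner() {
        e.Logf("Transformation Metrics:") // no separate logger needed
    }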

---

@@ -49,6 +49,7 @@ import (
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/kubernetes/test/utils/ktesting"
netutils "k8s.io/utils/net"
// install all APIs
@@ -64,6 +65,8 @@ AwEHoUQDQgAEH6cuzP8XuD5wal6wf9M6xDljTOPLX2i8uIp/C/ASqiIGUeeKQtX0
// StartRealAPIServerOrDie starts an API server that is appropriate for use in tests that require one of every resource
func StartRealAPIServerOrDie(t *testing.T, configFuncs ...func(*options.ServerRunOptions)) *APIServer {
+ tCtx := ktesting.Init(t)
certDir, err := os.MkdirTemp("", t.Name())
if err != nil {
t.Fatal(err)
@@ -148,7 +151,6 @@ func StartRealAPIServerOrDie(t *testing.T, configFuncs ...func(*options.ServerRu
kubeClient := clientset.NewForConfigOrDie(kubeClientConfig)
- stopCh := make(chan struct{})
errCh := make(chan error)
go func() {
// Catch panics that occur in this go routine so we get a comprehensible failure
@@ -164,7 +166,7 @@ func StartRealAPIServerOrDie(t *testing.T, configFuncs ...func(*options.ServerRu
errCh <- err
return
}
- if err := prepared.Run(stopCh); err != nil {
+ if err := prepared.Run(tCtx); err != nil {
errCh <- err
t.Error(err)
return
@@ -215,9 +217,9 @@ func StartRealAPIServerOrDie(t *testing.T, configFuncs ...func(*options.ServerRu
}
cleanup := func() {
- // Closing stopCh is stopping apiserver and cleaning up
+ // Cancel stopping apiserver and cleaning up
// after itself, including shutting down its storage layer.
- close(stopCh)
+ tCtx.Cancel("cleaning up")
// If the apiserver was started, let's wait for it to
// shutdown clearly.
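
The shutdown signal changes from closing a bare channel to canceling the
ktesting context, which also records a human-readable cause. A before/after
sketch of just that mechanism (assuming, per ktesting's design, that Init
additionally cancels on test cleanup):

    package example

    import (
        "testing"

        "k8s.io/kubernetes/test/utils/ktesting"
    )

    func startForTest(t *testing.T) (cleanup func()) {
        // Before: a bare channel; closing it was the only signal, and it
        // carried no reason.
        //   stopCh := make(chan struct{})
        //   return func() { close(stopCh) }

        // After: the test context is the signal; Cancel attaches a cause,
        // and ktesting.Init cancels anyway when the test finishes.
        tCtx := ktesting.Init(t)
        return func() { tCtx.Cancel("cleaning up") }
    }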

---

@@ -62,9 +62,6 @@ func TestAPIServiceWaitOnStart(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
t.Cleanup(cancel)
- stopCh := make(chan struct{})
- defer close(stopCh)
etcdConfig := framework.SharedEtcd()
etcd3Client, _, err := integration.GetEtcdClients(etcdConfig.Transport)
@@ -235,9 +232,6 @@ func TestAggregatedAPIServer(t *testing.T) {
// makes the kube-apiserver very responsive. it's normally a minute
dynamiccertificates.FileRefreshDuration = 1 * time.Second
- stopCh := make(chan struct{})
- defer close(stopCh)
// we need the wardle port information first to set up the service resolver
listener, wardlePort, err := genericapiserveroptions.CreateListener("tcp", "127.0.0.1:0", net.ListenConfig{})
if err != nil {
@@ -291,7 +285,7 @@ func TestAggregatedAPIServer(t *testing.T) {
}
o.RecommendedOptions.SecureServing.Listener = listener
o.RecommendedOptions.SecureServing.BindAddress = netutils.ParseIPSloppy("127.0.0.1")
- wardleCmd := sampleserver.NewCommandStartWardleServer(o, stopCh)
+ wardleCmd := sampleserver.NewCommandStartWardleServer(ctx, o)
wardleCmd.SetArgs([]string{
"--authentication-kubeconfig", wardleToKASKubeConfigFile,
"--authorization-kubeconfig", wardleToKASKubeConfigFile,

---

@@ -176,7 +176,7 @@ func StartTestServer(ctx context.Context, t testing.TB, setup TestServerSetup) (
errCh = make(chan error)
go func() {
defer close(errCh)
- if err := kubeAPIServer.ControlPlane.GenericAPIServer.PrepareRun().Run(ctx.Done()); err != nil {
+ if err := kubeAPIServer.ControlPlane.GenericAPIServer.PrepareRun().RunWithContext(ctx); err != nil {
errCh <- err
}
}()
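
The last hunk swaps Run(ctx.Done()) for RunWithContext(ctx). A context's
Done channel is exactly a stop channel, which is why the migration is
mechanical; a minimal sketch of the relationship (not the apiserver's
actual implementation):

    package example

    import "context"

    type server struct{}

    // Old style: run until the stop channel is closed.
    func (s server) Run(stopCh <-chan struct{}) error {
        <-stopCh
        return nil
    }

    // New style: a context-aware entry point can delegate to the old one,
    // because ctx.Done() is a ready-made stop channel.
    func (s server) RunWithContext(ctx context.Context) error {
        return s.Run(ctx.Done())
    }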