Merge pull request #50995 from enj/enj/i/etcd_storage_flakes/49423
Automatic merge from submit-queue (batch tested with PRs 50381, 51307, 49645, 50995, 51523)

Address TestEtcdStoragePath flakes

- Wait for the master to be healthy
- Wait longer for the master to start
- Fail gracefully if starting the master panics

Signed-off-by: Monis Khan <mkhan@redhat.com>

```release-note
NONE
```

Fixes #49423

@kubernetes/sig-api-machinery-pr-reviews
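For context on the first bullet: the old check treated a successful discovery call as proof the master was ready, while the new code polls the server's /healthz endpoint until it returns 200 OK. A rough standalone sketch of that wait pattern (plain net/http against a hypothetical local address, rather than the test's privileged REST client) might look like:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForHealthz polls url once per second for up to a minute and succeeds
// only once the endpoint answers with 200 OK, mirroring the
// wait.PollImmediate(time.Second, time.Minute, ...) loop in the diff below.
func waitForHealthz(url string) error {
	return wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		resp, err := http.Get(url)
		if err != nil {
			// The server may not be listening yet; keep polling.
			return false, nil
		}
		defer resp.Body.Close()
		return resp.StatusCode == http.StatusOK, nil
	})
}

func main() {
	// Hypothetical address; the real test builds a privileged REST client
	// from the generated kubeconfig and hits /healthz through it.
	if err := waitForHealthz("http://127.0.0.1:8080/healthz"); err != nil {
		fmt.Println("master never became healthy:", err)
	}
}
```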
commit 1e663006fa
@@ -43,7 +43,6 @@ import (
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	"k8s.io/apiserver/pkg/storage/storagebackend"
 	clientset "k8s.io/client-go/kubernetes"
-	kclient "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -655,6 +654,13 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV
 	storageConfigValue := atomic.Value{}
 
 	go func() {
+		// Catch panics that occur in this go routine so we get a comprehensible failure
+		defer func() {
+			if err := recover(); err != nil {
+				t.Errorf("Unexpected panic trying to start API master: %#v", err)
+			}
+		}()
+
 		for {
 			kubeAPIServerOptions := options.NewServerRunOptions()
 			kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
@@ -695,26 +701,35 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV
 				t.Log(err)
 			}
 
-			time.Sleep(100 * time.Millisecond)
+			time.Sleep(time.Second)
 		}
 	}()
 
-	if err := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
+	if err := wait.PollImmediate(time.Second, time.Minute, func() (done bool, err error) {
 		obj := kubeClientConfigValue.Load()
 		if obj == nil {
 			return false, nil
 		}
 		kubeClientConfig := kubeClientConfigValue.Load().(*restclient.Config)
-		kubeClient, err := kclient.NewForConfig(kubeClientConfig)
+		// make a copy so we can mutate it to set GroupVersion and NegotiatedSerializer
+		cfg := *kubeClientConfig
+		cfg.ContentConfig.GroupVersion = &schema.GroupVersion{}
+		cfg.ContentConfig.NegotiatedSerializer = kapi.Codecs
+		privilegedClient, err := restclient.RESTClientFor(&cfg)
 		if err != nil {
 			// this happens because we race the API server start
 			t.Log(err)
 			return false, nil
 		}
-		if _, err := kubeClient.Discovery().ServerVersion(); err != nil {
+		// wait for the server to be healthy
+		result := privilegedClient.Get().AbsPath("/healthz").Do()
+		if errResult := result.Error(); errResult != nil {
+			t.Log(errResult)
 			return false, nil
 		}
-		return true, nil
+		var status int
+		result.StatusCode(&status)
+		return status == http.StatusOK, nil
 	}); err != nil {
 		t.Fatal(err)
 	}
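The "fail gracefully" bullet matters because a panic that escapes a goroutine aborts the whole test binary with no t.Errorf output; recovering inside the goroutine turns it into an ordinary, readable test failure. A minimal sketch of that pattern, with a hypothetical startMaster stand-in for the real server start-up:

```go
package etcdflake_test

import "testing"

// startMaster is a hypothetical stand-in for the real API server start-up,
// which can panic while etcd, ports, or certs are still racing into place.
func startMaster() {
	panic("etcd not reachable yet")
}

// TestStartMasterPanicIsContained shows the recover pattern from the diff
// above: a panic inside the start-up goroutine is reported through the
// testing.T instead of crashing the entire test binary.
func TestStartMasterPanicIsContained(t *testing.T) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		// Catch panics in this goroutine so we get a comprehensible failure.
		defer func() {
			if r := recover(); r != nil {
				t.Logf("recovered panic from master start: %#v", r)
			}
		}()
		startMaster()
	}()
	<-done
}
```

The real test reports the recovered value with t.Errorf so the run fails; t.Logf is used here only to show the panic being contained.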