Merge pull request #113529 from enj/enj/i/kms_single_healthz
kms: add wiring to support automatic encryption config reload
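
This change threads a new boolean through the integration-test harness: every existing `newTransformTest` call site gains a trailing `false` to preserve current behavior, while the new `TestKMSHealthzWithReload` passes `true`, which turns on the `--encryption-provider-config-automatic-reload` kube-apiserver flag and switches the KMS healthz checks from one positional check per provider to a single aggregated `/healthz/kms-providers` check. A minimal sketch of the pattern the new test exercises (the helper names and check name are taken from the diff below; the encryption config literal is elided):

	// Sketch only: drive the automatic-reload path end to end.
	test, err := newTransformTest(t, encryptionConfig, true) // true enables automatic reload
	if err != nil {
		t.Fatalf("failed to start kube-apiserver, error: %v", err)
	}
	defer test.cleanUp()

	// With reload enabled, a single healthz check covers every configured KMS plugin.
	mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig)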
@@ -95,7 +95,7 @@ resources:
        secret: c2VjcmV0IGlzIHNlY3VyZQ==
 `
 
-	test, err := newTransformTest(t, encryptionConfig)
+	test, err := newTransformTest(t, encryptionConfig, false)
 	if err != nil {
 		t.Fatalf("failed to start Kube API Server with encryptionConfig\n %s, error: %v", encryptionConfig, err)
 	}
@@ -26,19 +26,16 @@ import (
 	"encoding/base64"
 	"encoding/binary"
 	"fmt"
-	"net/http"
 	"strings"
 	"testing"
 	"time"
 
 	"golang.org/x/crypto/cryptobyte"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/storage/value"
 	aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"
 	mock "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/testing/v1beta1"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
 	kmsapi "k8s.io/kms/apis/v1beta1"
 )
@@ -131,7 +128,7 @@ resources:
 	}
 	defer pluginMock.CleanUp()
 
-	test, err := newTransformTest(t, encryptionConfig)
+	test, err := newTransformTest(t, encryptionConfig, false)
 	if err != nil {
 		t.Fatalf("failed to start KUBE API Server with encryptionConfig\n %s, error: %v", encryptionConfig, err)
 	}
@@ -320,7 +317,7 @@ resources:
 		t.Fatalf("Failed to start KMS Plugin #2: err: %v", err)
 	}
 
-	test, err := newTransformTest(t, encryptionConfig)
+	test, err := newTransformTest(t, encryptionConfig, false)
 	if err != nil {
 		t.Fatalf("Failed to start kube-apiserver, error: %v", err)
 	}
@@ -331,66 +328,103 @@ resources:
 
 	// Stage 1 - Since all kms-plugins are guaranteed to be up, healthz checks for:
 	// healthz/kms-provider-0 and /healthz/kms-provider-1 should be OK.
-	mustBeHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig)
-	mustBeHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig)
+	mustBeHealthy(t, "/kms-provider-0", "ok", test.kubeAPIServer.ClientConfig)
+	mustBeHealthy(t, "/kms-provider-1", "ok", test.kubeAPIServer.ClientConfig)
 
 	// Stage 2 - kms-plugin for provider-1 is down. Therefore, expect the health check for provider-1
 	// to fail, but provider-2 should still be OK
 	pluginMock1.EnterFailedState()
-	mustBeUnHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig)
-	mustBeHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig)
+	mustBeUnHealthy(t, "/kms-provider-0",
+		"internal server error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled",
+		test.kubeAPIServer.ClientConfig)
+	mustBeHealthy(t, "/kms-provider-1", "ok", test.kubeAPIServer.ClientConfig)
 	pluginMock1.ExitFailedState()
 
 	// Stage 3 - kms-plugin for provider-1 is now up. Therefore, expect the health check for provider-1
 	// to succeed now, but provider-2 is now down.
-	// Need to sleep since health check chases responses for 3 seconds.
 	pluginMock2.EnterFailedState()
-	mustBeHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig)
-	mustBeUnHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig)
+	mustBeHealthy(t, "/kms-provider-0", "ok", test.kubeAPIServer.ClientConfig)
+	mustBeUnHealthy(t, "/kms-provider-1",
+		"internal server error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled",
+		test.kubeAPIServer.ClientConfig)
+	pluginMock2.ExitFailedState()
+
+	// Stage 4 - All kms-plugins are once again up,
+	// the healthz check should be OK.
+	mustBeHealthy(t, "/kms-provider-0", "ok", test.kubeAPIServer.ClientConfig)
+	mustBeHealthy(t, "/kms-provider-1", "ok", test.kubeAPIServer.ClientConfig)
 }
 
-func mustBeHealthy(t *testing.T, checkName string, clientConfig *rest.Config) {
-	t.Helper()
-	var restErr error
-	pollErr := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-		status, err := getHealthz(checkName, clientConfig)
-		restErr = err
-		if err != nil {
-			return false, err
-		}
-		return status == http.StatusOK, nil
-	})
-
-	if pollErr == wait.ErrWaitTimeout {
-		t.Fatalf("failed to get the expected healthz status of OK for check: %s, error: %v, debug inner error: %v", checkName, pollErr, restErr)
-	}
-}
-
-func mustBeUnHealthy(t *testing.T, checkName string, clientConfig *rest.Config) {
-	t.Helper()
-	var restErr error
-	pollErr := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-		status, err := getHealthz(checkName, clientConfig)
-		restErr = err
-		if err != nil {
-			return false, err
-		}
-		return status != http.StatusOK, nil
-	})
-
-	if pollErr == wait.ErrWaitTimeout {
-		t.Fatalf("failed to get the expected healthz status of !OK for check: %s, error: %v, debug inner error: %v", checkName, pollErr, restErr)
-	}
-}
-
-func getHealthz(checkName string, clientConfig *rest.Config) (int, error) {
-	client, err := kubernetes.NewForConfig(clientConfig)
-	if err != nil {
-		return 0, fmt.Errorf("failed to create a client: %v", err)
-	}
-
-	result := client.CoreV1().RESTClient().Get().AbsPath(fmt.Sprintf("/healthz/%v", checkName)).Do(context.TODO())
-	status := 0
-	result.StatusCode(&status)
-	return status, nil
-}
+func TestKMSHealthzWithReload(t *testing.T) {
+	encryptionConfig := `
+kind: EncryptionConfiguration
+apiVersion: apiserver.config.k8s.io/v1
+resources:
+  - resources:
+    - secrets
+    providers:
+    - kms:
+       name: provider-1
+       endpoint: unix:///@kms-provider-1.sock
+    - kms:
+       name: provider-2
+       endpoint: unix:///@kms-provider-2.sock
+`
+
+	pluginMock1, err := mock.NewBase64Plugin("@kms-provider-1.sock")
+	if err != nil {
+		t.Fatalf("failed to create mock of KMS Plugin #1: %v", err)
+	}
+
+	if err := pluginMock1.Start(); err != nil {
+		t.Fatalf("Failed to start kms-plugin, err: %v", err)
+	}
+	defer pluginMock1.CleanUp()
+	if err := mock.WaitForBase64PluginToBeUp(pluginMock1); err != nil {
+		t.Fatalf("Failed to start plugin #1, err: %v", err)
+	}
+
+	pluginMock2, err := mock.NewBase64Plugin("@kms-provider-2.sock")
+	if err != nil {
+		t.Fatalf("Failed to create mock of KMS Plugin #2: err: %v", err)
+	}
+	if err := pluginMock2.Start(); err != nil {
+		t.Fatalf("Failed to start kms-plugin, err: %v", err)
+	}
+	defer pluginMock2.CleanUp()
+	if err := mock.WaitForBase64PluginToBeUp(pluginMock2); err != nil {
+		t.Fatalf("Failed to start KMS Plugin #2: err: %v", err)
+	}
+
+	test, err := newTransformTest(t, encryptionConfig, true)
+	if err != nil {
+		t.Fatalf("Failed to start kube-apiserver, error: %v", err)
+	}
+	defer test.cleanUp()
+
+	// Name of the healthz check is always "kms-provider-0" and it covers all kms plugins.
+
+	// Stage 1 - Since all kms-plugins are guaranteed to be up,
+	// the healthz check should be OK.
+	mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig)
+
+	// Stage 2 - kms-plugin for provider-1 is down. Therefore, expect the healthz check
+	// to fail and report that provider-1 is down
+	pluginMock1.EnterFailedState()
+	mustBeUnHealthy(t, "/kms-providers",
+		"internal server error: kms-provider-0: rpc error: code = FailedPrecondition desc = failed precondition - key disabled",
+		test.kubeAPIServer.ClientConfig)
+	pluginMock1.ExitFailedState()
+
+	// Stage 3 - kms-plugin for provider-1 is now up. Therefore, expect the health check for provider-1
+	// to succeed now, but provider-2 is now down.
+	pluginMock2.EnterFailedState()
+	mustBeUnHealthy(t, "/kms-providers",
+		"internal server error: kms-provider-1: rpc error: code = FailedPrecondition desc = failed precondition - key disabled",
+		test.kubeAPIServer.ClientConfig)
+	pluginMock2.ExitFailedState()
+
+	// Stage 4 - All kms-plugins are once again up,
+	// the healthz check should be OK.
+	mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig)
+}
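
The observable difference between the two modes is the set of healthz endpoints the tests poll; restated compactly in Go (check names copied from the assertions above):

	// Without automatic reload: one positional check per configured provider,
	// named "kms-provider-" + index, regardless of the provider's name field.
	perProvider := []string{"/healthz/kms-provider-0", "/healthz/kms-provider-1"}

	// With automatic reload: one aggregated check covering all KMS plugins;
	// failure messages identify the broken provider by the same positional name.
	aggregated := "/healthz/kms-providers"
	_, _ = perProvider, aggregated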
@@ -140,7 +140,7 @@ resources:
 	}
 	defer pluginMock.CleanUp()
 
-	test, err := newTransformTest(t, encryptionConfig)
+	test, err := newTransformTest(t, encryptionConfig, false)
 	if err != nil {
 		t.Fatalf("failed to start KUBE API Server with encryptionConfig\n %s, error: %v", encryptionConfig, err)
 	}
@@ -253,33 +253,46 @@ resources:
 		t.Fatalf("Failed to start KMS Plugin #2: err: %v", err)
 	}
 
-	test, err := newTransformTest(t, encryptionConfig)
+	test, err := newTransformTest(t, encryptionConfig, false)
 	if err != nil {
 		t.Fatalf("Failed to start kube-apiserver, error: %v", err)
 	}
 	defer test.cleanUp()
 
-	// Name of the healthz check is calculated based on a constant "kms-provider-" + position of the
-	// provider in the config.
+	// Name of the healthz check is always "kms-provider-0" and it covers all kms plugins.
 
-	// Stage 1 - Since all kms-plugins are guaranteed to be up, healthz checks for:
-	// healthz/kms-provider-0 and /healthz/kms-provider-1 should be OK.
-	mustBeHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig)
-	mustBeHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig)
+	// Stage 1 - Since all kms-plugins are guaranteed to be up,
+	// the healthz check should be OK.
+	mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig)
 
-	// Stage 2 - kms-plugin for provider-1 is down. Therefore, expect the health check for provider-1
-	// to fail, but provider-2 should still be OK
+	// Stage 2 - kms-plugin for provider-1 is down. Therefore, expect the healthz check
+	// to fail and report that provider-1 is down
 	pluginMock1.EnterFailedState()
-	mustBeUnHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig)
-	mustBeHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig)
+	mustBeUnHealthy(t, "/kms-providers",
+		"internal server error: kms-provider-0: rpc error: code = FailedPrecondition desc = failed precondition - key disabled",
+		test.kubeAPIServer.ClientConfig)
 	pluginMock1.ExitFailedState()
 
 	// Stage 3 - kms-plugin for provider-1 is now up. Therefore, expect the health check for provider-1
 	// to succeed now, but provider-2 is now down.
-	// Need to sleep since health check chases responses for 3 seconds.
 	pluginMock2.EnterFailedState()
-	mustBeHealthy(t, "kms-provider-0", test.kubeAPIServer.ClientConfig)
-	mustBeUnHealthy(t, "kms-provider-1", test.kubeAPIServer.ClientConfig)
+	mustBeUnHealthy(t, "/kms-providers",
+		"internal server error: kms-provider-1: rpc error: code = FailedPrecondition desc = failed precondition - key disabled",
+		test.kubeAPIServer.ClientConfig)
+	pluginMock2.ExitFailedState()
+
+	// Stage 4 - All kms-plugins are once again up,
+	// the healthz check should be OK.
+	mustBeHealthy(t, "/kms-providers", "ok", test.kubeAPIServer.ClientConfig)
+
+	// Stage 5 - All kms-plugins are unhealthy at the same time and we can observe both failures.
+	pluginMock1.EnterFailedState()
+	pluginMock2.EnterFailedState()
+	mustBeUnHealthy(t, "/kms-providers",
+		"internal server error: "+
+			"[kms-provider-0: failed to perform status section of the healthz check for KMS Provider provider-1, error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled,"+
+			" kms-provider-1: failed to perform status section of the healthz check for KMS Provider provider-2, error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled]",
+		test.kubeAPIServer.ClientConfig)
 }
 
 func TestKMSv2SingleService(t *testing.T) {
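
Stage 5 above is new with this change: when several plugins fail simultaneously, the single aggregated check reports every failure in one bracketed, comma-separated list. A compact restatement (message text abbreviated from the assertion above; the assembly shown is illustrative, not the server's actual code):

	// Sketch: shape of the aggregated failure body asserted in Stage 5.
	failures := []string{
		"kms-provider-0: ... error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled",
		"kms-provider-1: ... error: rpc error: code = FailedPrecondition desc = failed precondition - key disabled",
	}
	msg := "internal server error: [" + strings.Join(failures, ", ") + "]"
	_ = msg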
@@ -328,7 +341,7 @@ resources:
 	}
 	t.Cleanup(pluginMock.CleanUp)
 
-	test, err := newTransformTest(t, encryptionConfig)
+	test, err := newTransformTest(t, encryptionConfig, false)
 	if err != nil {
 		t.Fatalf("failed to start KUBE API Server with encryptionConfig\n %s, error: %v", encryptionConfig, err)
 	}
@@ -85,7 +85,7 @@ func TestSecretsShouldBeTransformed(t *testing.T) {
 		// TODO: add secretbox
 	}
 	for _, tt := range testCases {
-		test, err := newTransformTest(t, tt.transformerConfigContent)
+		test, err := newTransformTest(t, tt.transformerConfigContent, false)
 		if err != nil {
 			test.cleanUp()
 			t.Errorf("failed to setup test for envelop %s, error was %v", tt.transformerPrefix, err)
@@ -120,7 +120,7 @@ func BenchmarkAESCBCEnvelopeWrite(b *testing.B) {
 
 func runBenchmark(b *testing.B, transformerConfig string) {
 	b.StopTimer()
-	test, err := newTransformTest(b, transformerConfig)
+	test, err := newTransformTest(b, transformerConfig, false)
 	defer test.cleanUp()
 	if err != nil {
 		b.Fatalf("failed to setup benchmark for config %s, error was %v", transformerConfig, err)
@@ -26,26 +26,28 @@ import (
 	"strconv"
 	"strings"
 	"testing"
-
-	"k8s.io/klog/v2"
 	"time"
 
 	clientv3 "go.etcd.io/etcd/client/v3"
-	"k8s.io/component-base/metrics/legacyregistry"
-	"sigs.k8s.io/yaml"
 
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/wait"
 	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
 	"k8s.io/apiserver/pkg/storage/storagebackend"
 	"k8s.io/apiserver/pkg/storage/value"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	"k8s.io/component-base/metrics/legacyregistry"
+	"k8s.io/klog/v2"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	"k8s.io/kubernetes/test/integration"
 	"k8s.io/kubernetes/test/integration/etcd"
 	"k8s.io/kubernetes/test/integration/framework"
+	"sigs.k8s.io/yaml"
 )
 
 const (
@@ -78,7 +80,7 @@ type transformTest struct {
 	secret *corev1.Secret
 }
 
-func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML string) (*transformTest, error) {
+func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML string, reload bool) (*transformTest, error) {
 	e := transformTest{
 		logger:            l,
 		transformerConfig: transformerConfigYAML,
@@ -92,7 +94,7 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin
 		}
 	}
 
-	if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(l, nil, e.getEncryptionOptions(), e.storageConfig); err != nil {
+	if e.kubeAPIServer, err = kubeapiservertesting.StartTestServer(l, nil, e.getEncryptionOptions(reload), e.storageConfig); err != nil {
 		return nil, fmt.Errorf("failed to start KubeAPI server: %v", err)
 	}
 	klog.Infof("Started kube-apiserver %v", e.kubeAPIServer.ClientConfig.Host)
@@ -105,6 +107,15 @@ func newTransformTest(l kubeapiservertesting.Logger, transformerConfigYAML strin
 		return nil, err
 	}
 
+	if transformerConfigYAML != "" && reload {
+		// when reloading is enabled, this healthz endpoint is always present
+		mustBeHealthy(l, "/kms-providers", "ok", e.kubeAPIServer.ClientConfig)
+
+		// excluding healthz endpoints even if they do not exist should work
+		mustBeHealthy(l, "", `warn: some health checks cannot be excluded: no matches for "kms-provider-0","kms-provider-1","kms-provider-2","kms-provider-3"`,
+			e.kubeAPIServer.ClientConfig, "kms-provider-0", "kms-provider-1", "kms-provider-2", "kms-provider-3")
+	}
+
 	return &e, nil
 }
 
@@ -228,10 +239,11 @@ func (e *transformTest) getRawSecretFromETCD() ([]byte, error) {
 	return etcdResponse.Kvs[0].Value, nil
 }
 
-func (e *transformTest) getEncryptionOptions() []string {
+func (e *transformTest) getEncryptionOptions(reload bool) []string {
 	if e.transformerConfig != "" {
 		return []string{
 			"--encryption-provider-config", path.Join(e.configDir, encryptionConfigFileName),
+			fmt.Sprintf("--encryption-provider-config-automatic-reload=%v", reload),
 			"--disable-admission-plugins", "ServiceAccount"}
 	}
 
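
With reload=true, the option slice assembled above amounts to the following kube-apiserver arguments (the config path is whatever path.Join(e.configDir, encryptionConfigFileName) resolves to; shown here with an illustrative value):

	// Sketch: result of getEncryptionOptions(true) for a non-empty transformer config.
	args := []string{
		"--encryption-provider-config", "/tmp/test-config-dir/encryption.conf", // illustrative path
		"--encryption-provider-config-automatic-reload=true",
		"--disable-admission-plugins", "ServiceAccount",
	}
	_ = args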
@@ -401,3 +413,59 @@ func (e *transformTest) printMetrics() error {
 
 	return nil
 }
+
+func mustBeHealthy(t kubeapiservertesting.Logger, checkName, wantBodyContains string, clientConfig *rest.Config, excludes ...string) {
+	t.Helper()
+	var restErr error
+	pollErr := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
+		body, ok, err := getHealthz(checkName, clientConfig, excludes...)
+		restErr = err
+		if err != nil {
+			return false, err
+		}
+		done := ok && strings.Contains(body, wantBodyContains)
+		if !done {
+			t.Logf("expected server check %q to be healthy with message %q but it is not: %s", checkName, wantBodyContains, body)
+		}
+		return done, nil
+	})
+
+	if pollErr == wait.ErrWaitTimeout {
+		t.Fatalf("failed to get the expected healthz status of OK for check: %s, error: %v, debug inner error: %v", checkName, pollErr, restErr)
+	}
+}
+
+func mustBeUnHealthy(t kubeapiservertesting.Logger, checkName, wantBodyContains string, clientConfig *rest.Config, excludes ...string) {
+	t.Helper()
+	var restErr error
+	pollErr := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
+		body, ok, err := getHealthz(checkName, clientConfig, excludes...)
+		restErr = err
+		if err != nil {
+			return false, err
+		}
+		done := !ok && strings.Contains(body, wantBodyContains)
+		if !done {
+			t.Logf("expected server check %q to be unhealthy with message %q but it is not: %s", checkName, wantBodyContains, body)
+		}
+		return done, nil
+	})
+
+	if pollErr == wait.ErrWaitTimeout {
+		t.Fatalf("failed to get the expected healthz status of !OK for check: %s, error: %v, debug inner error: %v", checkName, pollErr, restErr)
+	}
+}
+
+func getHealthz(checkName string, clientConfig *rest.Config, excludes ...string) (string, bool, error) {
+	client, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		return "", false, fmt.Errorf("failed to create a client: %v", err)
+	}
+
+	req := client.CoreV1().RESTClient().Get().AbsPath(fmt.Sprintf("/healthz%v", checkName)).Param("verbose", "true")
+	for _, exclude := range excludes {
+		req.Param("exclude", exclude)
+	}
+	body, err := req.DoRaw(context.TODO()) // we can still have a response body during an error case
+	return string(body), err == nil, nil
+}
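
For reference, getHealthz above issues a verbose healthz request and forwards optional exclude parameters. A hedged sketch of the same request without client-go, assuming imports fmt, io, net/http, and net/url (the server address and TLS handling are assumptions; the path and query parameters match the helper):

	// Sketch only: equivalent healthz query using plain net/http.
	// Assumes an apiserver reachable at addr without client certificates;
	// the real helper goes through an authenticated *rest.Config instead.
	func checkHealthz(addr, checkName string, excludes ...string) (string, bool, error) {
		u := fmt.Sprintf("%s/healthz%s?verbose=true", addr, checkName)
		for _, e := range excludes {
			u += "&exclude=" + url.QueryEscape(e)
		}
		resp, err := http.Get(u)
		if err != nil {
			return "", false, err
		}
		defer resp.Body.Close()
		body, err := io.ReadAll(resp.Body) // the body is useful even on non-200 responses
		if err != nil {
			return "", false, err
		}
		return string(body), resp.StatusCode == http.StatusOK, nil
	}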