kubernetes/test/e2e/network/netpol/test_helper.go
Antonin Bas 2e282e8e02 Update Netpol e2e tests to use framework CreateNamespace
The main purpose of this change is to update the e2e Netpol tests to use
the standard CreateNamespace function from the Framework. Before this
change, a custom Namespace creation function was used, with the
following consequences:

* Pod security admission settings had to be enforced locally (not using
  the centralized mechanism)
* the custom function was brittle, not waiting for default Namespace
  ServiceAccount creation, causing tests to fail in some infrastructures
* tests were not benefiting from standard framework capabilities:
  Namespace name generation, automatic Namespace deletion, etc.

As part of this change, we also do the following:

* clearly decouple responsibilities between the Model, which defines the
  K8s objects to be created, and the KubeManager, which has access to
  runtime information (actual Namespace names after their creation by
  the framework, Service IPs, etc.); see the sketch after this list
* simplify / clean up tests and remove as much unneeded logic / functions
  as possible for easier long-term maintenance
* remove the useFixedNamespaces compile-time constant switch, which
  aimed at re-using existing K8s resources across test cases. The
  reasons: a) it is currently broken as setting it to true causes most
  tests to panic on the master branch, b) it is not a good idea to have
  some switch like this which changes the behavior of the tests and is
  never exercised in CI, c) it cannot possibly work as different test
  cases have different Model requirements (e.g., the protocols list can
  differ) and hence different K8s resource requirements.
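
To make the decoupling bullet concrete, here is a minimal sketch of how a
test case could wire the two pieces together. It is an illustration only,
not the literal code added by this commit: it assumes it lives in package
netpol with the usual framework, ginkgo and core-v1 imports, and the names
NewModel, newKubeManager and initializeCluster are hypothetical stand-ins
for whatever helpers the package actually provides (only Model, Ports,
Protocols and kubeManager appear verbatim in this file).

    // Sketch only; names marked "hypothetical" may not match the real helpers.
    var _ = ginkgo.Describe("Netpol example", func() {
    	f := framework.NewDefaultFramework("netpol")

    	ginkgo.It("wires the Model and the kubeManager together", func() {
    		// Model: purely declarative description of namespaces, pods,
    		// ports and protocols to be created.
    		model := NewModel([]string{"x", "y", "z"}, []string{"a", "b", "c"},
    			[]int32{80, 81}, []v1.Protocol{v1.ProtocolTCP}) // hypothetical signature

    		// kubeManager: runtime side. It has the framework create the
    		// Namespaces (generated names, centralized Pod security admission
    		// labels, automatic deletion) and records the resulting names/IPs.
    		k8s := newKubeManager(f)            // hypothetical constructor
    		err := k8s.initializeCluster(model) // hypothetical: creates K8s objects from the model
    		framework.ExpectNoError(err, "initialize cluster from model")
    	})
    })

The point of the split is that the Model never needs to know the generated
Namespace names or Service IPs; only the KubeManager does, which is what
lets the tests lean on the framework's name generation and automatic
Namespace deletion.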

For #108298

Signed-off-by: Antonin Bas <abas@vmware.com>
2022-08-10 11:38:26 -07:00

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package netpol

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	"sigs.k8s.io/yaml"
)

const (
	waitInterval = 1 * time.Second
	waitTimeout  = 30 * time.Second
)

// prettyPrint a networkPolicy
func prettyPrint(policy *networkingv1.NetworkPolicy) string {
	raw, err := yaml.Marshal(policy)
	framework.ExpectNoError(err, "marshal network policy to yaml")
	return string(raw)
}

// CreatePolicy creates a policy in the given namespace
func CreatePolicy(k8s *kubeManager, policy *networkingv1.NetworkPolicy, namespace string) {
	if isVerbose {
		framework.Logf("****************************************************************")
		framework.Logf("Network Policy creating %s/%s \n%s", namespace, policy.Name, prettyPrint(policy))
		framework.Logf("****************************************************************")
	}
	_, err := k8s.createNetworkPolicy(namespace, policy)
	framework.ExpectNoError(err, "Unable to create netpol %s/%s", namespace, policy.Name)
}

// UpdatePolicy updates a networkpolicy
func UpdatePolicy(k8s *kubeManager, policy *networkingv1.NetworkPolicy, namespace string) {
	if isVerbose {
		framework.Logf("****************************************************************")
		framework.Logf("Network Policy updating %s/%s \n%s", namespace, policy.Name, prettyPrint(policy))
		framework.Logf("****************************************************************")
	}
	_, err := k8s.updateNetworkPolicy(namespace, policy)
	framework.ExpectNoError(err, "Unable to update netpol %s/%s", namespace, policy.Name)
}

// waitForHTTPServers waits for all webservers to be up, on all protocols sent in the input, and then validates them using the same probe logic as the rest of the suite.
func waitForHTTPServers(k *kubeManager, model *Model) error {
	const maxTries = 10
	framework.Logf("waiting for HTTP servers (ports 80 and/or 81) to become ready")
	testCases := map[string]*TestCase{}
	for _, port := range model.Ports {
		// Protocols is provided as input so that we can skip udp polling for windows
		for _, protocol := range model.Protocols {
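			// fromPort is only used to build a human-readable description of the
			// test case; the probe itself is driven by ToPort and Protocol.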
			fromPort := 81
			desc := fmt.Sprintf("%d->%d,%s", fromPort, port, protocol)
			testCases[desc] = &TestCase{ToPort: int(port), Protocol: protocol}
		}
	}
	notReady := map[string]bool{}
	for caseName := range testCases {
		notReady[caseName] = true
	}

	for i := 0; i < maxTries; i++ {
		for caseName, testCase := range testCases {
			if notReady[caseName] {
				reachability := NewReachability(k.AllPodStrings(), true)
				testCase.Reachability = reachability
				ProbePodToPodConnectivity(k, k.AllPods(), k.DNSDomain(), testCase)
				_, wrong, _, _ := reachability.Summary(ignoreLoopback)
				if wrong == 0 {
					framework.Logf("server %s is ready", caseName)
					delete(notReady, caseName)
				} else {
					framework.Logf("server %s is not ready", caseName)
				}
			}
		}
		if len(notReady) == 0 {
			return nil
		}
		time.Sleep(waitInterval)
	}
	return fmt.Errorf("after %d tries, %d HTTP servers are not ready", maxTries, len(notReady))
}

// ValidateOrFail validates connectivity
func ValidateOrFail(k8s *kubeManager, testCase *TestCase) {
	ginkgo.By("Validating reachability matrix...")

	// 1st try
	ginkgo.By("Validating reachability matrix... (FIRST TRY)")
	ProbePodToPodConnectivity(k8s, k8s.AllPods(), k8s.DNSDomain(), testCase)
	// 2nd try, in case first one failed
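	// "wrong" is the number of probe results that do not match the expected reachability matrix.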
	if _, wrong, _, _ := testCase.Reachability.Summary(ignoreLoopback); wrong != 0 {
		framework.Logf("failed first probe %d wrong results ... retrying (SECOND TRY)", wrong)
		ProbePodToPodConnectivity(k8s, k8s.AllPods(), k8s.DNSDomain(), testCase)
	}

	// at this point we know if we passed or failed, print final matrix and pass/fail the test.
	if _, wrong, _, _ := testCase.Reachability.Summary(ignoreLoopback); wrong != 0 {
		testCase.Reachability.PrintSummary(true, true, true)
		framework.Failf("Had %d wrong results in reachability matrix", wrong)
	}
	if isVerbose {
		testCase.Reachability.PrintSummary(true, true, true)
	}
	framework.Logf("VALIDATION SUCCESSFUL")
}

// AddNamespaceLabel adds a new label to a namespace
func AddNamespaceLabel(k8s *kubeManager, name string, key string, val string) {
	ns, err := k8s.getNamespace(name)
	framework.ExpectNoError(err, "Unable to get namespace %s", name)
	ns.Labels[key] = val
	_, err = k8s.clientSet.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
	framework.ExpectNoError(err, "Unable to update namespace %s", name)
}

// DeleteNamespaceLabel deletes a label from a namespace (if present)
func DeleteNamespaceLabel(k8s *kubeManager, name string, key string) {
	ns, err := k8s.getNamespace(name)
	framework.ExpectNoError(err, "Unable to get namespace %s", name)
	if _, ok := ns.Labels[key]; !ok {
		// nothing to do if the label is not present
		return
	}
	delete(ns.Labels, key)
	_, err = k8s.clientSet.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
	framework.ExpectNoError(err, "Unable to update namespace %s", name)
}

// AddPodLabels adds new labels to a running pod
func AddPodLabels(k8s *kubeManager, namespace string, name string, newPodLabels map[string]string) {
	kubePod, err := k8s.clientSet.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Unable to get pod %s/%s", namespace, name)
	if kubePod.Labels == nil {
		kubePod.Labels = map[string]string{}
	}
	for key, val := range newPodLabels {
		kubePod.Labels[key] = val
	}
	_, err = k8s.clientSet.CoreV1().Pods(namespace).Update(context.TODO(), kubePod, metav1.UpdateOptions{})
	framework.ExpectNoError(err, "Unable to add pod %s/%s labels", namespace, name)

	err = wait.PollImmediate(waitInterval, waitTimeout, func() (done bool, err error) {
		waitForPod, err := k8s.getPod(namespace, name)
		if err != nil {
			return false, err
		}
		for key, expected := range newPodLabels {
			if actual, ok := waitForPod.Labels[key]; !ok || (expected != actual) {
				return false, nil
			}
		}
		return true, nil
	})
	framework.ExpectNoError(err, "Unable to wait for pod %s/%s to update labels", namespace, name)
}

// ResetPodLabels resets a pod's labels to only the pod-name label
func ResetPodLabels(k8s *kubeManager, namespace string, name string) {
	kubePod, err := k8s.clientSet.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Unable to get pod %s/%s", namespace, name)
	labels := map[string]string{
		podNameLabelKey(): name,
	}
	kubePod.Labels = labels
	_, err = k8s.clientSet.CoreV1().Pods(namespace).Update(context.TODO(), kubePod, metav1.UpdateOptions{})
	framework.ExpectNoError(err, "Unable to reset pod %s/%s labels", namespace, name)

	err = wait.PollImmediate(waitInterval, waitTimeout, func() (done bool, err error) {
		waitForPod, err := k8s.getPod(namespace, name)
		if err != nil {
			return false, nil
		}
		for key, expected := range labels {
			if actual, ok := waitForPod.Labels[key]; !ok || (expected != actual) {
				return false, nil
			}
		}
		return true, nil
	})
	framework.ExpectNoError(err, "Unable to wait for pod %s/%s to update labels", namespace, name)
}