
This is the result of automatically editing source files like this:

  go install golang.org/x/tools/cmd/goimports@latest
  find ./test/e2e* -name "*.go" | xargs env PATH=$GOPATH/bin:$PATH ./e2e-framework-sed.sh

with e2e-framework-sed.sh containing this:

  sed -i \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainer(/e2epod.ExecCommandInContainer(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecCommandInContainerWithFullOutput(/e2epod.ExecCommandInContainerWithFullOutput(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInContainer(/e2epod.ExecShellInContainer(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPod(/e2epod.ExecShellInPod(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecShellInPodWithFullOutput(/e2epod.ExecShellInPodWithFullOutput(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.ExecWithOptions(/e2epod.ExecWithOptions(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.MatchContainerOutput(/e2eoutput.MatchContainerOutput(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClient(/e2epod.NewPodClient(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.PodClientNS(/e2epod.PodClientNS(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutput(/e2eoutput.TestContainerOutput(\1, /" \
      -e "s/\(f\|fr\|\w\w*\.[fF]\w*\)\.TestContainerOutputRegexp(/e2eoutput.TestContainerOutputRegexp(\1, /" \
      -e "s/framework.AddOrUpdateLabelOnNode\b/e2enode.AddOrUpdateLabelOnNode/" \
      -e "s/framework.AllNodes\b/e2edebug.AllNodes/" \
      -e "s/framework.AllNodesReady\b/e2enode.AllNodesReady/" \
      -e "s/framework.ContainerResourceGatherer\b/e2edebug.ContainerResourceGatherer/" \
      -e "s/framework.ContainerResourceUsage\b/e2edebug.ContainerResourceUsage/" \
      -e "s/framework.CreateEmptyFileOnPod\b/e2eoutput.CreateEmptyFileOnPod/" \
      -e "s/framework.DefaultPodDeletionTimeout\b/e2epod.DefaultPodDeletionTimeout/" \
      -e "s/framework.DumpAllNamespaceInfo\b/e2edebug.DumpAllNamespaceInfo/" \
      -e "s/framework.DumpDebugInfo\b/e2eoutput.DumpDebugInfo/" \
      -e "s/framework.DumpNodeDebugInfo\b/e2edebug.DumpNodeDebugInfo/" \
      -e "s/framework.EtcdUpgrade\b/e2eproviders.EtcdUpgrade/" \
      -e "s/framework.EventsLister\b/e2edebug.EventsLister/" \
      -e "s/framework.ExecOptions\b/e2epod.ExecOptions/" \
      -e "s/framework.ExpectNodeHasLabel\b/e2enode.ExpectNodeHasLabel/" \
      -e "s/framework.ExpectNodeHasTaint\b/e2enode.ExpectNodeHasTaint/" \
      -e "s/framework.GCEUpgradeScript\b/e2eproviders.GCEUpgradeScript/" \
      -e "s/framework.ImagePrePullList\b/e2epod.ImagePrePullList/" \
      -e "s/framework.KubectlBuilder\b/e2ekubectl.KubectlBuilder/" \
      -e "s/framework.LocationParamGKE\b/e2eproviders.LocationParamGKE/" \
      -e "s/framework.LogSizeDataTimeseries\b/e2edebug.LogSizeDataTimeseries/" \
      -e "s/framework.LogSizeGatherer\b/e2edebug.LogSizeGatherer/" \
      -e "s/framework.LogsSizeData\b/e2edebug.LogsSizeData/" \
      -e "s/framework.LogsSizeDataSummary\b/e2edebug.LogsSizeDataSummary/" \
      -e "s/framework.LogsSizeVerifier\b/e2edebug.LogsSizeVerifier/" \
      -e "s/framework.LookForStringInLog\b/e2eoutput.LookForStringInLog/" \
      -e "s/framework.LookForStringInPodExec\b/e2eoutput.LookForStringInPodExec/" \
      -e "s/framework.LookForStringInPodExecToContainer\b/e2eoutput.LookForStringInPodExecToContainer/" \
      -e "s/framework.MasterAndDNSNodes\b/e2edebug.MasterAndDNSNodes/" \
      -e "s/framework.MasterNodes\b/e2edebug.MasterNodes/" \
      -e "s/framework.MasterUpgradeGKE\b/e2eproviders.MasterUpgradeGKE/" \
      -e "s/framework.NewKubectlCommand\b/e2ekubectl.NewKubectlCommand/" \
      -e "s/framework.NewLogsVerifier\b/e2edebug.NewLogsVerifier/" \
      -e "s/framework.NewNodeKiller\b/e2enode.NewNodeKiller/" \
      -e "s/framework.NewResourceUsageGatherer\b/e2edebug.NewResourceUsageGatherer/" \
      -e "s/framework.NodeHasTaint\b/e2enode.NodeHasTaint/" \
      -e "s/framework.NodeKiller\b/e2enode.NodeKiller/" \
      -e "s/framework.NodesSet\b/e2edebug.NodesSet/" \
      -e "s/framework.PodClient\b/e2epod.PodClient/" \
      -e "s/framework.RemoveLabelOffNode\b/e2enode.RemoveLabelOffNode/" \
      -e "s/framework.ResourceConstraint\b/e2edebug.ResourceConstraint/" \
      -e "s/framework.ResourceGathererOptions\b/e2edebug.ResourceGathererOptions/" \
      -e "s/framework.ResourceUsagePerContainer\b/e2edebug.ResourceUsagePerContainer/" \
      -e "s/framework.ResourceUsageSummary\b/e2edebug.ResourceUsageSummary/" \
      -e "s/framework.RunHostCmd\b/e2eoutput.RunHostCmd/" \
      -e "s/framework.RunHostCmdOrDie\b/e2eoutput.RunHostCmdOrDie/" \
      -e "s/framework.RunHostCmdWithFullOutput\b/e2eoutput.RunHostCmdWithFullOutput/" \
      -e "s/framework.RunHostCmdWithRetries\b/e2eoutput.RunHostCmdWithRetries/" \
      -e "s/framework.RunKubectl\b/e2ekubectl.RunKubectl/" \
      -e "s/framework.RunKubectlInput\b/e2ekubectl.RunKubectlInput/" \
      -e "s/framework.RunKubectlOrDie\b/e2ekubectl.RunKubectlOrDie/" \
      -e "s/framework.RunKubectlOrDieInput\b/e2ekubectl.RunKubectlOrDieInput/" \
      -e "s/framework.RunKubectlWithFullOutput\b/e2ekubectl.RunKubectlWithFullOutput/" \
      -e "s/framework.RunKubemciCmd\b/e2ekubectl.RunKubemciCmd/" \
      -e "s/framework.RunKubemciWithKubeconfig\b/e2ekubectl.RunKubemciWithKubeconfig/" \
      -e "s/framework.SingleContainerSummary\b/e2edebug.SingleContainerSummary/" \
      -e "s/framework.SingleLogSummary\b/e2edebug.SingleLogSummary/" \
      -e "s/framework.TimestampedSize\b/e2edebug.TimestampedSize/" \
      -e "s/framework.WaitForAllNodesSchedulable\b/e2enode.WaitForAllNodesSchedulable/" \
      -e "s/framework.WaitForSSHTunnels\b/e2enode.WaitForSSHTunnels/" \
      -e "s/framework.WorkItem\b/e2edebug.WorkItem/" \
      "$@"

  for i in "$@"; do
      # Import all sub packages and let goimports figure out which of those
      # are redundant (= already imported) or not needed.
      sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"' "$i"
      sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"' "$i"
      sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2enode "k8s.io/kubernetes/test/e2e/framework/node"' "$i"
      sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"' "$i"
      sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2epod "k8s.io/kubernetes/test/e2e/framework/pod"' "$i"
      sed -i -e '/"k8s.io.kubernetes.test.e2e.framework"/a e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"' "$i"
      goimports -w "$i"
  done
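For illustration, this is the kind of call-site rewrite those rules perform (a hypothetical snippet, not taken from this commit; podName, containerName and ns are placeholder variables), with goimports then adding or pruning the corresponding imports:

  // before
  out := f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hostname")
  framework.RunKubectl(ns, "get", "pods")

  // after
  out := e2epod.ExecCommandInContainer(f, podName, containerName, "cat", "/etc/hostname")
  e2ekubectl.RunKubectl(ns, "get", "pods")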
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package network

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/network/common"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ = common.SIGDescribe("[Feature:Topology Hints]", func() {
	f := framework.NewDefaultFramework("topology-hints")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

	// filled in BeforeEach
	var c clientset.Interface

	ginkgo.BeforeEach(func() {
		c = f.ClientSet
		e2eskipper.SkipUnlessMultizone(c)
	})

	ginkgo.It("should distribute endpoints evenly", func() {
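		// Deploy serve-hostname on every node via a DaemonSet, expose it through a
		// Service with topology aware hints enabled, then verify that zone hints are
		// published and that client traffic stays within its own zone.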
		portNum := 9376
		thLabels := map[string]string{labelKey: clientLabelValue}
		img := imageutils.GetE2EImage(imageutils.Agnhost)
		ports := []v1.ContainerPort{{ContainerPort: int32(portNum)}}
		dsConf := e2edaemonset.NewDaemonSet("topology-serve-hostname", img, thLabels, nil, nil, ports, "serve-hostname")
		ds, err := c.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), dsConf, metav1.CreateOptions{})
		framework.ExpectNoError(err, "error creating DaemonSet")

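		// Setting the topology aware hints annotation to "Auto" asks the EndpointSlice
		// controller to populate per-zone hints for this Service.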
		svc := createServiceReportErr(c, f.Namespace.Name, &v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name: "topology-hints",
				Annotations: map[string]string{
					v1.AnnotationTopologyAwareHints: "Auto",
				},
			},
			Spec: v1.ServiceSpec{
				Selector:                 thLabels,
				PublishNotReadyAddresses: true,
				Ports: []v1.ServicePort{{
					Name:       "example",
					Port:       80,
					TargetPort: intstr.FromInt(portNum),
					Protocol:   v1.ProtocolTCP,
				}},
			},
		})

		err = wait.Poll(5*time.Second, framework.PodStartTimeout, func() (bool, error) {
			return e2edaemonset.CheckRunningOnAllNodes(f, ds)
		})
		framework.ExpectNoError(err, "timed out waiting for DaemonSets to be ready")

		// All Nodes should have same allocatable CPUs. If not, then skip the test.
		schedulableNodes := map[string]*v1.Node{}
		for _, nodeName := range e2edaemonset.SchedulableNodes(c, ds) {
			schedulableNodes[nodeName] = nil
		}

		nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err, "Error when listing all Nodes")
		var lastNodeCPU resource.Quantity
		firstNode := true
		for i := range nodeList.Items {
			node := nodeList.Items[i]
			if _, ok := schedulableNodes[node.Name]; !ok {
				continue
			}
			schedulableNodes[node.Name] = &node

			nodeCPU, found := node.Status.Allocatable[v1.ResourceCPU]
			if !found {
				framework.Failf("Error when getting allocatable CPU of Node '%s'", node.Name)
			}
			if firstNode {
				lastNodeCPU = nodeCPU
				firstNode = false
			} else if !nodeCPU.Equal(lastNodeCPU) {
				e2eskipper.Skipf("Expected Nodes to have equivalent allocatable CPUs, but Node '%s' is different from the previous one. %d not equals %d",
					node.Name, nodeCPU.Value(), lastNodeCPU.Value())
			}
		}

		framework.Logf("Waiting for %d endpoints to be tracked in EndpointSlices", len(schedulableNodes))

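		// The DaemonSet provides one backend pod per schedulable node, so wait until the
		// Service's EndpointSlices track at least that many endpoints before checking hints.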
		var finalSlices []discoveryv1.EndpointSlice
		err = wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
			slices, listErr := c.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svc.Name)})
			if listErr != nil {
				return false, listErr
			}

			numEndpoints := 0
			for _, slice := range slices.Items {
				numEndpoints += len(slice.Endpoints)
			}
			if len(schedulableNodes) > numEndpoints {
				framework.Logf("Expected %d endpoints, got %d", len(schedulableNodes), numEndpoints)
				return false, nil
			}

			finalSlices = slices.Items
			return true, nil
		})
		framework.ExpectNoError(err, "timed out waiting for EndpointSlices to be ready")

		ginkgo.By("having hints set for each endpoint")
		for _, slice := range finalSlices {
			for _, ep := range slice.Endpoints {
				if ep.Zone == nil {
					framework.Failf("Expected endpoint in %s to have zone: %v", slice.Name, ep)
				}
				if ep.Hints == nil || len(ep.Hints.ForZones) == 0 {
					framework.Failf("Expected endpoint in %s to have hints: %v", slice.Name, ep)
				}
				if len(ep.Hints.ForZones) > 1 {
					framework.Failf("Expected endpoint in %s to have exactly 1 zone hint, got %d: %v", slice.Name, len(ep.Hints.ForZones), ep)
				}
				if *ep.Zone != ep.Hints.ForZones[0].Name {
					framework.Failf("Expected endpoint in %s to have same zone hint, got %s: %v", slice.Name, *ep.Zone, ep)
				}
			}
		}

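		// Map each schedulable node to its topology zone and pick one node per zone to
		// host a client pod.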
		nodesByZone := map[string]string{}
		zonesWithNode := map[string]string{}
		for _, node := range schedulableNodes {
			if zone, ok := node.Labels[v1.LabelTopologyZone]; ok {
				nodesByZone[node.Name] = zone
				zonesWithNode[zone] = node.Name
			}
		}

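		// Map each backend pod to a zone via the node it runs on; serve-hostname replies
		// with the pod name, so this lets responses be attributed to a zone later.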
		podList, err := c.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
		framework.ExpectNoError(err)
		podsByZone := map[string]string{}
		for _, pod := range podList.Items {
			if zone, ok := nodesByZone[pod.Spec.NodeName]; ok {
				podsByZone[pod.Name] = zone
			}
		}

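		// From one node in each zone, curl the Service once per second and require five
		// consecutive responses from backends in the same zone.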
		ginkgo.By("keeping requests in the same zone")
		for fromZone, nodeName := range zonesWithNode {
			ginkgo.By("creating a client pod for probing the service from " + fromZone)
			podName := "curl-from-" + fromZone
			clientPod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, nil, "serve-hostname")
			nodeSelection := e2epod.NodeSelection{Name: nodeName}
			e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
			cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do sleep 1; echo "Date: $(date) Try: ${i}"; curl -q -s --connect-timeout 2 http://%s:80/ ; echo; done`, svc.Name)
			clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
			clientPod.Spec.Containers[0].Name = clientPod.Name
			e2epod.NewPodClient(f).CreateSync(clientPod)

			framework.Logf("Ensuring that requests from %s pod on %s node stay in %s zone", clientPod.Name, nodeName, fromZone)

			var logs string
			if pollErr := wait.Poll(5*time.Second, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
				var err error
				logs, err = e2epod.GetPodLogs(c, f.Namespace.Name, clientPod.Name, clientPod.Name)
				framework.ExpectNoError(err)
				framework.Logf("Pod client logs: %s", logs)

				logLines := strings.Split(logs, "\n")
				if len(logLines) < 6 {
					framework.Logf("only %d log lines, waiting for at least 6", len(logLines))
					return false, nil
				}

				consecutiveSameZone := 0

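				// Walk the log from the end: skip blank lines and the "Date: ..." markers;
				// every other line is a backend pod name echoed by serve-hostname, which
				// podsByZone resolves to a zone.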
				for i := len(logLines) - 1; i > 0; i-- {
					if logLines[i] == "" || strings.HasPrefix(logLines[i], "Date:") {
						continue
					}
					destZone, ok := podsByZone[logLines[i]]
					if !ok {
						framework.Logf("could not determine dest zone from log line: %s", logLines[i])
						return false, nil
					}
					if fromZone != destZone {
						framework.Logf("expected request from %s to stay in %s zone, delivered to %s zone", clientPod.Name, fromZone, destZone)
						return false, nil
					}
					consecutiveSameZone++
					if consecutiveSameZone >= 5 {
						return true, nil
					}
				}

				return false, nil
			}); pollErr != nil {
				framework.Failf("expected 5 consecutive requests from %s to stay in zone %s within %v, stdout: %v", clientPod.Name, fromZone, e2eservice.KubeProxyLagTimeout, logs)
			}
		}
	})
})