e2e framework: move resource gathering into framework/debug

This helps with getting rid of the ssh dependency. The same init package as
the one for dumping namespaces takes care of adding the functionality back to
framework instances.
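For illustration, here is a minimal sketch of the init-package pattern the message refers to, assuming a hook slice such as framework.NewFrameworkExtensions that NewFramework runs for each new instance (the hook name and body are illustrative, not taken from this diff):

// Package init registers debug functionality (such as resource
// gathering) into the e2e framework when imported for side effects.
package init

import (
    "k8s.io/kubernetes/test/e2e/framework"
)

func init() {
    // A blank import of this package is enough to run this registration,
    // which re-attaches resource gathering to every Framework instance.
    framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions,
        func(f *framework.Framework) {
            // Start the gatherer for this instance here and register a
            // ginkgo.AfterEach that stops it and records the summary.
        },
    )
}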
Patrick Ohly
2022-08-25 18:19:16 +02:00
parent f9bc4f837b
commit 70d0824f01
3 changed files with 77 additions and 67 deletions

test/e2e/framework/framework.go

@@ -99,12 +99,6 @@ type Framework struct {
     NamespaceDeletionTimeout         time.Duration
     NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level for namespaces to be applied.
 
-    gatherer *ContainerResourceGatherer
-    // Constraints that passed to a check which is executed after data is gathered to
-    // see if 99% of results are within acceptable bounds. It has to be injected in the test,
-    // as expectations vary greatly. Constraints are grouped by the container names.
-    AddonResourceConstraints map[string]ResourceConstraint
-
     // Flaky operation failures in an e2e test can be captured through this.
     flakeReport *FlakeReport
@@ -163,11 +157,10 @@ func NewDefaultFramework(baseName string) *Framework {
 // NewFramework creates a test framework.
 func NewFramework(baseName string, options Options, client clientset.Interface) *Framework {
     f := &Framework{
-        BaseName:                 baseName,
-        AddonResourceConstraints: make(map[string]ResourceConstraint),
-        Options:                  options,
-        ClientSet:                client,
-        Timeouts:                 NewTimeoutContextWithDefaults(),
+        BaseName:  baseName,
+        Options:   options,
+        ClientSet: client,
+        Timeouts:  NewTimeoutContextWithDefaults(),
     }
 
     // The order is important here: if the extension calls ginkgo.BeforeEach
@@ -256,32 +249,6 @@ func (f *Framework) BeforeEach() {
         f.UniqueName = fmt.Sprintf("%s-%08x", f.BaseName, rand.Int31())
     }
 
-    if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
-        var err error
-        var nodeMode NodesSet
-        switch TestContext.GatherKubeSystemResourceUsageData {
-        case "master":
-            nodeMode = MasterNodes
-        case "masteranddns":
-            nodeMode = MasterAndDNSNodes
-        default:
-            nodeMode = AllNodes
-        }
-
-        f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
-            InKubemark:                  ProviderIs("kubemark"),
-            Nodes:                       nodeMode,
-            ResourceDataGatheringPeriod: 60 * time.Second,
-            ProbeDuration:               15 * time.Second,
-            PrintVerboseLogs:            false,
-        }, nil)
-        if err != nil {
-            Logf("Error while creating NewResourceUsageGatherer: %v", err)
-        } else {
-            go f.gatherer.StartGatheringData()
-        }
-    }
-
     f.flakeReport = NewFlakeReport()
 }
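With this block gone, BeforeEach no longer starts the gatherer unconditionally; per the commit message, a suite that still wants the data opts back in with a side-effect import of the init package. A sketch (the exact import path is assumed here):

import (
    // Blank import for side effects: restores resource gathering (and
    // namespace dumping) for all framework instances in this suite.
    _ "k8s.io/kubernetes/test/e2e/framework/debug/init"
)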
@@ -393,13 +360,6 @@ func (f *Framework) AfterEach() {
         }
     }()
 
-    if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
-        ginkgo.By("Collecting resource usage data")
-        summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)
-        defer ExpectNoError(resourceViolationError)
-        f.TestSummaries = append(f.TestSummaries, summary)
-    }
-
     TestContext.CloudConfig.Provider.FrameworkAfterEach(f)
 
     // Report any flakes that were observed in the e2e test and reset.