e2e: bump ginkgo to v2.2.0

The new release adds support for intermediate progress reports.
Author: Patrick Ohly
Date: 2022-09-09 15:47:46 +02:00
parent 178f246bbc
commit 1e4edaf2fe
53 changed files with 1002 additions and 181 deletions
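For context, this is roughly how a spec can opt into the new reports once the bump lands. A hedged sketch (it assumes the PollProgressAfter/PollProgressInterval decorators defined below are re-exported from the ginkgo/v2 package like other decorators; the durations and the doSomethingSlow helper are illustrative):

	import (
		"time"

		. "github.com/onsi/ginkgo/v2"
	)

	var _ = It("waits for a slow operation", PollProgressAfter(30*time.Second), PollProgressInterval(10*time.Second), func() {
		// If this body runs longer than 30s, ginkgo emits a progress report,
		// then keeps emitting one every 10s until the node finishes.
		doSomethingSlow() // hypothetical helper
	})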

View File

@@ -279,7 +279,10 @@ func (g *group) run(specs Specs) {
}
for _, spec := range g.specs {
g.suite.selectiveLock.Lock()
g.suite.currentSpecReport = g.initialReportForSpec(spec)
g.suite.selectiveLock.Unlock()
g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
g.suite.reporter.WillRun(g.suite.currentSpecReport)
g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
@@ -318,7 +321,9 @@ func (g *group) run(specs Specs) {
if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
g.succeeded = false
}
g.suite.selectiveLock.Lock()
g.suite.currentSpecReport = types.SpecReport{}
g.suite.selectiveLock.Unlock()
}
}

View File

@@ -4,12 +4,10 @@ import (
"fmt"
"os"
"os/signal"
"runtime"
"sync"
"syscall"
"time"
"github.com/onsi/ginkgo/v2/formatter"
"github.com/onsi/ginkgo/v2/internal/parallel_support"
)
@@ -50,7 +48,7 @@ type InterruptHandlerInterface interface {
Status() InterruptStatus
SetInterruptPlaceholderMessage(string)
ClearInterruptPlaceholderMessage()
InterruptMessageWithStackTraces() string
InterruptMessage() (string, bool)
}
type InterruptHandler struct {
@@ -190,23 +188,9 @@ func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() {
handler.interruptPlaceholderMessage = ""
}
func (handler *InterruptHandler) InterruptMessageWithStackTraces() string {
func (handler *InterruptHandler) InterruptMessage() (string, bool) {
handler.lock.Lock()
out := fmt.Sprintf("%s\n\n", handler.interruptCause.String())
out := fmt.Sprintf("%s", handler.interruptCause.String())
defer handler.lock.Unlock()
if handler.interruptCause == InterruptCauseAbortByOtherProcess {
return out
}
out += "Here's a stack trace of all running goroutines:\n"
buf := make([]byte, 8192)
for {
n := runtime.Stack(buf, true)
if n < len(buf) {
buf = buf[:n]
break
}
buf = make([]byte, 2*len(buf))
}
out += formatter.Fi(1, "%s", string(buf))
return out
return out, handler.interruptCause != InterruptCauseAbortByOtherProcess
}
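The second return value replaces the dedicated stack-trace formatting: callers now get the interrupt message plus a flag saying whether a full progress report should accompany it (false when another process triggered the abort). A minimal sketch of the new call-site pattern, matching the runNode change further down in this diff:

	reason, includeProgressReport := suite.interruptHandler.InterruptMessage()
	failure.Message, failure.Location = reason, node.CodeLocation
	if includeProgressReport {
		failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput()
	}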

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"reflect"
"sort"
"time"
"sync"
@@ -48,6 +49,8 @@ type Node struct {
MarkedSuppressProgressReporting bool
FlakeAttempts int
Labels Labels
PollProgressAfter time.Duration
PollProgressInterval time.Duration
NodeIDWhereCleanupWasGenerated uint
}
@@ -71,6 +74,8 @@ type FlakeAttempts uint
type Offset uint
type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
type Labels []string
type PollProgressInterval time.Duration
type PollProgressAfter time.Duration
func UnionOfLabels(labels ...Labels) Labels {
out := Labels{}
@@ -123,6 +128,10 @@ func isDecoration(arg interface{}) bool {
return true
case t == reflect.TypeOf(Labels{}):
return true
case t == reflect.TypeOf(PollProgressInterval(0)):
return true
case t == reflect.TypeOf(PollProgressAfter(0)):
return true
case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
return true
default:
@@ -146,13 +155,16 @@ func isSliceOfDecorations(slice interface{}) bool {
func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
PollProgressAfter: -1,
PollProgressInterval: -1,
}
errors := []error{}
appendError := func(err error) {
if err != nil {
@@ -219,6 +231,16 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
}
case t == reflect.TypeOf(PollProgressAfter(0)):
node.PollProgressAfter = time.Duration(arg.(PollProgressAfter))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter"))
}
case t == reflect.TypeOf(PollProgressInterval(0)):
node.PollProgressInterval = time.Duration(arg.(PollProgressInterval))
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval"))
}
case t == reflect.TypeOf(Labels{}):
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
@@ -233,34 +255,60 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
case t.Kind() == reflect.Func:
if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
if node.ReportEachBody != nil {
if node.ReportEachBody == nil {
node.ReportEachBody = arg.(func(types.SpecReport))
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
} else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) {
if node.SynchronizedBeforeSuiteProc1Body == nil {
node.SynchronizedBeforeSuiteProc1Body = arg.(func() []byte)
} else if node.SynchronizedBeforeSuiteAllProcsBody == nil {
node.SynchronizedBeforeSuiteAllProcsBody = arg.(func([]byte))
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
} else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) {
if node.SynchronizedAfterSuiteAllProcsBody == nil {
node.SynchronizedAfterSuiteAllProcsBody = arg.(func())
} else if node.SynchronizedAfterSuiteProc1Body == nil {
node.SynchronizedAfterSuiteProc1Body = arg.(func())
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
} else if nodeType.Is(types.NodeTypeReportAfterSuite) {
if node.ReportAfterSuiteBody == nil {
node.ReportAfterSuiteBody = arg.(func(types.Report))
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
//we can trust that the function is valid because the compiler has our back here
node.ReportEachBody = arg.(func(types.SpecReport))
break
}
if node.Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done)))
if !isValid {
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if t.NumIn() == 0 {
node.Body = arg.(func())
} else {
deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation)
deprecatedAsyncBody := arg.(func(Done))
node.Body = func() { deprecatedAsyncBody(make(Done)) }
if node.Body != nil {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done)))
if !isValid {
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
trackedFunctionError = true
break
}
if t.NumIn() == 0 {
node.Body = arg.(func())
} else {
deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation)
deprecatedAsyncBody := arg.(func(Done))
node.Body = func() { deprecatedAsyncBody(make(Done)) }
}
}
default:
remainingArgs = append(remainingArgs, arg)
@@ -272,7 +320,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
}
if node.Body == nil && node.ReportEachBody == nil && !node.MarkedPending && !trackedFunctionError {
if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
}
for _, arg := range remainingArgs {
@@ -286,43 +334,15 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
return node, errors
}
func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeSynchronizedBeforeSuite,
SynchronizedBeforeSuiteProc1Body: proc1Body,
SynchronizedBeforeSuiteAllProcsBody: allProcsBody,
CodeLocation: codeLocation,
}, nil
}
func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeSynchronizedAfterSuite,
SynchronizedAfterSuiteAllProcsBody: allProcsBody,
SynchronizedAfterSuiteProc1Body: proc1Body,
CodeLocation: codeLocation,
}, nil
}
func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) {
return Node{
ID: UniqueNodeID(),
Text: text,
NodeType: types.NodeTypeReportAfterSuite,
ReportAfterSuiteBody: body,
CodeLocation: codeLocation,
}, nil
}
func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: types.NodeTypeCleanupInvalid,
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
ID: UniqueNodeID(),
NodeType: types.NodeTypeCleanupInvalid,
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
PollProgressAfter: -1,
PollProgressInterval: -1,
}
remainingArgs := []interface{}{}
for _, arg := range args {
@@ -331,6 +351,10 @@ func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{})
node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
case t == reflect.TypeOf(types.CodeLocation{}):
node.CodeLocation = arg.(types.CodeLocation)
case t == reflect.TypeOf(PollProgressAfter(0)):
node.PollProgressAfter = time.Duration(arg.(PollProgressAfter))
case t == reflect.TypeOf(PollProgressInterval(0)):
node.PollProgressInterval = time.Duration(arg.(PollProgressInterval))
default:
remainingArgs = append(remainingArgs, arg)
}
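Note the sentinel values: nodes default both fields to -1, meaning "inherit the suite-level configuration", so passing PollProgressAfter(0) explicitly disables polling for just that node (runNode below only starts the timer when the effective value is greater than zero). Illustrative, again assuming the decorators are re-exported from ginkgo/v2:

	// inherits the suite-wide poll-progress settings
	It("a", func() {})

	// opts this one node out of progress polling entirely
	It("b", PollProgressAfter(0), func() {})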

View File

@@ -143,7 +143,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
}
// Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is tring to log to stdout and stderr)
// Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is trying to log to stdout and stderr)
// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
interceptor.pipe = <-interceptor.pipeChannel

View File

@@ -28,7 +28,7 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil
// And then wrap the clone file descriptors in files.
// One benefit of this (that we don't use yet) is that we can actually write
// to these files to emit output to the console evne though we're intercepting output
// to these files to emit output to the console even though we're intercepting output
stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")

View File

@@ -49,6 +49,7 @@ type Client interface {
FetchNextCounter() (int, error)
PostAbort() error
ShouldAbort() bool
PostEmitProgressReport(report types.ProgressReport) error
Write(p []byte) (int, error)
}
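Each parallel process reports through this client to the server running in proc 1, which forwards the report to the aggregated reporter (handlers below). The flow, as wired up in suite.handleProgressSignal later in this diff (error handling elided in this sketch):

	report := suite.generateProgressReport(false)
	suite.reporter.EmitProgressReport(report) // local reporter
	if suite.isRunningInParallel() {
		// forward to the proc-1 server, which re-emits via its reporter
		_ = suite.client.PostEmitProgressReport(report)
	}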

View File

@@ -94,6 +94,10 @@ func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
return client.post("/suite-did-end", report)
}
func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error {
return client.post("/progress-report", report)
}
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
beforeSuiteState := BeforeSuiteState{
State: state,

View File

@@ -49,6 +49,7 @@ func (server *httpServer) Start() {
mux.HandleFunc("/did-run", server.didRun)
mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
mux.HandleFunc("/emit-output", server.emitOutput)
mux.HandleFunc("/progress-report", server.emitProgressReport)
//synchronization endpoints
mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
@@ -155,6 +156,14 @@ func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.R
server.handleError(server.handler.EmitOutput(output, &n), writer)
}
func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) {
var report types.ProgressReport
if !server.decode(writer, request, &report) {
return
}
server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
}
func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
var beforeSuiteState BeforeSuiteState
if !server.decode(writer, request, &beforeSuiteState) {

View File

@@ -72,6 +72,10 @@ func (client *rpcClient) Write(p []byte) (int, error) {
return n, err
}
func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
}
func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
beforeSuiteState := BeforeSuiteState{
State: state,

View File

@@ -108,6 +108,13 @@ func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
return err
}
func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error {
handler.lock.Lock()
defer handler.lock.Unlock()
handler.reporter.EmitProgressReport(report)
return nil
}
func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
handler.lock.Lock()
defer handler.lock.Unlock()

View File

@@ -0,0 +1,289 @@
package internal
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/onsi/ginkgo/v2/types"
)
var _SOURCE_CACHE = map[string][]string{}
type ProgressSignalRegistrar func(func()) context.CancelFunc
func RegisterForProgressSignal(handler func()) context.CancelFunc {
signalChannel := make(chan os.Signal, 1)
if len(PROGRESS_SIGNALS) > 0 {
signal.Notify(signalChannel, PROGRESS_SIGNALS...)
}
ctx, cancel := context.WithCancel(context.Background())
go func() {
for {
select {
case <-signalChannel:
handler()
case <-ctx.Done():
signal.Stop(signalChannel)
return
}
}
}()
return cancel
}
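// Example (illustrative): a runner registers a handler and cancels the
// registration once the suite finishes:
//
//	cancel := RegisterForProgressSignal(func() { /* emit a progress report */ })
//	defer cancel()
//
// On Unix-like platforms, sending one of PROGRESS_SIGNALS (e.g. SIGUSR1) to
// the test process fires the handler; on Windows the slice is empty and
// nothing is registered.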
type ProgressStepCursor struct {
Text string
CodeLocation types.CodeLocation
StartTime time.Time
}
func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep ProgressStepCursor, gwOutput string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
pr := types.ProgressReport{
ParallelProcess: report.ParallelProcess,
RunningInParallel: isRunningInParallel,
Time: time.Now(),
ContainerHierarchyTexts: report.ContainerHierarchyTexts,
LeafNodeText: report.LeafNodeText,
LeafNodeLocation: report.LeafNodeLocation,
SpecStartTime: report.StartTime,
CurrentNodeType: currentNode.NodeType,
CurrentNodeText: currentNode.Text,
CurrentNodeLocation: currentNode.CodeLocation,
CurrentNodeStartTime: currentNodeStartTime,
CurrentStepText: currentStep.Text,
CurrentStepLocation: currentStep.CodeLocation,
CurrentStepStartTime: currentStep.StartTime,
CapturedGinkgoWriterOutput: gwOutput,
GinkgoWriterOffset: len(gwOutput),
}
goroutines, err := extractRunningGoroutines()
if err != nil {
return pr, err
}
pr.Goroutines = goroutines
// now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest:
packagesOfInterest := map[string]bool{}
packageFromFilename := func(filename string) string {
return filepath.Dir(filename)
}
addPackageFor := func(filename string) {
if filename != "" {
packagesOfInterest[packageFromFilename(filename)] = true
}
}
isPackageOfInterest := func(filename string) bool {
stackPackage := packageFromFilename(filename)
for packageOfInterest := range packagesOfInterest {
if strings.HasPrefix(stackPackage, packageOfInterest) {
return true
}
}
return false
}
for _, location := range report.ContainerHierarchyLocations {
addPackageFor(location.FileName)
}
addPackageFor(report.LeafNodeLocation.FileName)
addPackageFor(currentNode.CodeLocation.FileName)
addPackageFor(currentStep.CodeLocation.FileName)
//First, we find the SpecGoroutine - this will be the goroutine that includes `runNode`
specGoRoutineIdx := -1
runNodeFunctionCallIdx := -1
OUTER:
for goroutineIdx, goroutine := range pr.Goroutines {
for functionCallIdx, functionCall := range goroutine.Stack {
if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") {
specGoRoutineIdx = goroutineIdx
runNodeFunctionCallIdx = functionCallIdx
break OUTER
}
}
}
//Now, we find the first non-Ginkgo function call
if specGoRoutineIdx > -1 {
for runNodeFunctionCallIdx >= 0 {
fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function
file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename
// these are all things that could potentially happen from within ginkgo
if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") {
runNodeFunctionCallIdx--
continue
}
//found it! let's add its package of interest
addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename)
break
}
}
ginkgoEntryPointIdx := -1
OUTER_GINKGO_ENTRY_POINT:
for goroutineIdx, goroutine := range pr.Goroutines {
for _, functionCall := range goroutine.Stack {
if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") {
ginkgoEntryPointIdx = goroutineIdx
break OUTER_GINKGO_ENTRY_POINT
}
}
}
// Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest`
// Any goroutines with highlighted lines end up in the HighlightGoRoutines
for goroutineIdx, goroutine := range pr.Goroutines {
if goroutineIdx == ginkgoEntryPointIdx {
continue
}
if goroutineIdx == specGoRoutineIdx {
pr.Goroutines[goroutineIdx].IsSpecGoroutine = true
}
for functionCallIdx, functionCall := range goroutine.Stack {
if isPackageOfInterest(functionCall.Filename) {
goroutine.Stack[functionCallIdx].Highlight = true
goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots)
}
}
}
if !includeAll {
goroutines := []types.Goroutine{pr.SpecGoroutine()}
goroutines = append(goroutines, pr.HighlightedGoroutines()...)
pr.Goroutines = goroutines
}
return pr, nil
}
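// extractRunningGoroutines parses the output of runtime.Stack(buf, true),
// which looks like (abridged):
//
//	goroutine 1 [running]:
//	main.main()
//		/home/user/main.go:10 +0x20
//
// Each "goroutine N [state]:" header opens a new types.Goroutine; the
// following line pairs become FunctionCall entries (function name, then
// "filename:line").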
func extractRunningGoroutines() ([]types.Goroutine, error) {
var stack []byte
for size := 64 * 1024; ; size *= 2 {
stack = make([]byte, size)
if n := runtime.Stack(stack, true); n < size {
stack = stack[:n]
break
}
}
r := bufio.NewReader(bytes.NewReader(stack))
out := []types.Goroutine{}
idx := -1
for {
line, err := r.ReadString('\n')
if err == io.EOF {
break
}
line = strings.TrimSuffix(line, "\n")
//skip blank lines
if line == "" {
continue
}
//parse headers for new goroutine frames
if strings.HasPrefix(line, "goroutine") {
out = append(out, types.Goroutine{})
idx = len(out) - 1
line = strings.TrimPrefix(line, "goroutine ")
line = strings.TrimSuffix(line, ":")
fields := strings.SplitN(line, " ", 2)
if len(fields) != 2 {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line))
}
out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1]))
}
out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]")
continue
}
//if we are here we must be at a function call entry in the stack
functionCall := types.FunctionCall{
Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by'
}
line, err = r.ReadString('\n')
line = strings.TrimSuffix(line, "\n")
if err == io.EOF {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
}
line = strings.TrimLeft(line, " \t")
fields := strings.SplitN(line, ":", 2)
if len(fields) != 2 {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename nad line number: %s", line))
}
functionCall.Filename = fields[0]
line = strings.Split(fields[1], " ")[0]
lineNumber, err := strconv.ParseInt(line, 10, 64)
functionCall.Line = int(lineNumber)
if err != nil {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
}
out[idx].Stack = append(out[idx].Stack, functionCall)
}
return out, nil
}
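// fetchSource returns up to 2*span+1 source lines centered on lineNumber
// (1-based), plus the index of the highlighted line within that window.
// For example, lineNumber=10 and span=2 yield lines 8..12 with highlight
// index 2; at the top of a file the window is clamped, so lineNumber=1 and
// span=2 yield lines 1..3 with highlight index 0.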
func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) {
if filename == "" {
return []string{}, 0
}
var lines []string
var ok bool
if lines, ok = _SOURCE_CACHE[filename]; !ok {
sourceRoots := []string{""}
sourceRoots = append(sourceRoots, configuredSourceRoots...)
var data []byte
var err error
var found bool
for _, root := range sourceRoots {
data, err = os.ReadFile(filepath.Join(root, filename))
if err == nil {
found = true
break
}
}
if !found {
return []string{}, 0
}
lines = strings.Split(string(data), "\n")
_SOURCE_CACHE[filename] = lines
}
startIndex := lineNumber - span - 1
endIndex := startIndex + span + span + 1
if startIndex < 0 {
startIndex = 0
}
if endIndex > len(lines) {
endIndex = len(lines)
}
highlightIndex := lineNumber - 1 - startIndex
return lines[startIndex:endIndex], highlightIndex
}

View File

@@ -0,0 +1,11 @@
//go:build freebsd || openbsd || netbsd || darwin || dragonfly
// +build freebsd openbsd netbsd darwin dragonfly
package internal
import (
"os"
"syscall"
)
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1}

View File

@@ -0,0 +1,11 @@
//go:build linux || solaris
// +build linux solaris
package internal
import (
"os"
"syscall"
)
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
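With these registrations an operator can request an on-demand progress report from a running suite, e.g. `kill -USR1 <pid>` on Linux, or Ctrl-T (which sends SIGINFO to the foreground process) on the BSDs and macOS. On Windows (next file) the slice is empty, so only decorator- and config-driven timer polling applies.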

View File

@@ -0,0 +1,8 @@
//go:build windows
// +build windows
package internal
import "os"
var PROGRESS_SIGNALS = []os.Signal{}

View File

@@ -36,20 +36,35 @@ type Suite struct {
interruptHandler interrupt_handler.InterruptHandlerInterface
config types.SuiteConfig
skipAll bool
report types.Report
currentSpecReport types.SpecReport
currentSpecReportUserAccessLock *sync.Mutex
currentNode Node
skipAll bool
report types.Report
currentSpecReport types.SpecReport
currentNode Node
currentNodeStartTime time.Time
progressStepCursor ProgressStepCursor
/*
We don't need to lock around all operations. Just those that *could* happen concurrently.
Suite, generally, only runs one node at a time - and so the possibility for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.
However, there are some operations that can happen concurrently:
- AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficient.
- generateProgressReport can be invoked at any point in time by an interrupt or a progress poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
*/
selectiveLock *sync.Mutex
client parallel_support.Client
}
func NewSuite() *Suite {
return &Suite{
tree: &TreeNode{},
phase: PhaseBuildTopLevel,
currentSpecReportUserAccessLock: &sync.Mutex{},
tree: &TreeNode{},
phase: PhaseBuildTopLevel,
selectiveLock: &sync.Mutex{},
}
}
@@ -66,7 +81,7 @@ func (suite *Suite) BuildTree() error {
return nil
}
func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) {
func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
if suite.phase != PhaseBuildTree {
panic("cannot run before building the tree = call suite.BuildTree() first")
}
@@ -83,8 +98,12 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string
suite.interruptHandler = interruptHandler
suite.config = suiteConfig
cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
cancelProgressHandler()
return success, hasProgrammaticFocus
}
@@ -211,12 +230,23 @@ func (suite *Suite) pushCleanupNode(node Node) error {
return nil
}
/*
Pushing and popping the Step Cursor stack
*/
func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) {
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
suite.progressStepCursor = cursor
}
/*
Spec Running methods - used during PhaseRun
*/
func (suite *Suite) CurrentSpecReport() types.SpecReport {
suite.currentSpecReportUserAccessLock.Lock()
defer suite.currentSpecReportUserAccessLock.Unlock()
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
report := suite.currentSpecReport
if suite.writer != nil {
report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
@@ -227,8 +257,8 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport {
}
func (suite *Suite) AddReportEntry(entry ReportEntry) error {
suite.currentSpecReportUserAccessLock.Lock()
defer suite.currentSpecReportUserAccessLock.Unlock()
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
if suite.phase != PhaseRun {
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
}
@@ -236,6 +266,37 @@ func (suite *Suite) AddReportEntry(entry ReportEntry) error {
return nil
}
func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport {
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
stepCursor := suite.progressStepCursor
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, suite.config.SourceRoots, fullReport)
if err != nil {
fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
}
return pr
}
func (suite *Suite) handleProgressSignal() {
report := suite.generateProgressReport(false)
suite.selectiveLock.Lock()
suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput())
suite.selectiveLock.Unlock()
suite.reporter.EmitProgressReport(report)
if suite.isRunningInParallel() {
err := suite.client.PostEmitProgressReport(report)
if err != nil {
fmt.Println(err.Error())
}
}
}
func (suite *Suite) isRunningInParallel() bool {
return suite.config.ParallelTotal > 1
}
@@ -344,11 +405,14 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
interruptStatus := suite.interruptHandler.Status()
beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: beforeSuiteNode.NodeType,
LeafNodeLocation: beforeSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel)
if suite.currentSpecReport.State.Is(types.SpecStateSkipped) {
@@ -362,11 +426,14 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) {
func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: afterSuiteNode.NodeType,
LeafNodeLocation: afterSuiteNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel)
suite.processCurrentSpecReport()
@@ -375,11 +442,14 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse()
if len(afterSuiteCleanup) > 0 {
for _, cleanupNode := range afterSuiteCleanup {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: cleanupNode.NodeType,
LeafNodeLocation: cleanupNode.CodeLocation,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel)
suite.processCurrentSpecReport()
@@ -389,12 +459,15 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) {
func (suite *Suite) runReportAfterSuite() {
for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) {
suite.selectiveLock.Lock()
suite.currentSpecReport = types.SpecReport{
LeafNodeType: node.NodeType,
LeafNodeLocation: node.CodeLocation,
LeafNodeText: node.Text,
ParallelProcess: suite.config.ParallelProcess,
}
suite.selectiveLock.Unlock()
suite.reporter.WillRun(suite.currentSpecReport)
suite.runReportAfterSuiteNode(node, suite.report)
suite.processCurrentSpecReport()
@@ -564,9 +637,16 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s
suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
}
suite.selectiveLock.Lock()
suite.currentNode = node
suite.currentNodeStartTime = time.Now()
suite.progressStepCursor = ProgressStepCursor{}
suite.selectiveLock.Unlock()
defer func() {
suite.selectiveLock.Lock()
suite.currentNode = Node{}
suite.currentNodeStartTime = time.Time{}
suite.selectiveLock.Unlock()
}()
if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting {
@@ -606,17 +686,44 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s
finished = true
}()
select {
case outcome := <-outcomeC:
failureFromRun := <-failureC
if outcome == types.SpecStatePassed {
return outcome, types.Failure{}
var emitProgressNow <-chan time.Time
var progressPoller *time.Timer
var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval
if node.PollProgressAfter >= 0 {
pollProgressAfter = node.PollProgressAfter
}
if node.PollProgressInterval >= 0 {
pollProgressInterval = node.PollProgressInterval
}
if pollProgressAfter > 0 {
progressPoller = time.NewTimer(pollProgressAfter)
emitProgressNow = progressPoller.C
defer progressPoller.Stop()
}
for {
select {
case outcome := <-outcomeC:
failureFromRun := <-failureC
if outcome == types.SpecStatePassed {
return outcome, types.Failure{}
}
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
return outcome, failure
case <-interruptChannel:
reason, includeProgressReport := suite.interruptHandler.InterruptMessage()
failure.Message, failure.Location = reason, node.CodeLocation
if includeProgressReport {
failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput()
}
return types.SpecStateInterrupted, failure
case <-emitProgressNow:
suite.handleProgressSignal()
if pollProgressInterval > 0 {
progressPoller.Reset(pollProgressInterval)
}
}
failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic
return outcome, failure
case <-interruptChannel:
failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation
return types.SpecStateInterrupted, failure
}
}
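Distilled, the polling added above follows the standard reset-after-fire timer pattern; a minimal standalone sketch (done and emitProgress are stand-ins, and pollProgressAfter is assumed to be positive, as the guard above requires):

	progressPoller := time.NewTimer(pollProgressAfter)
	defer progressPoller.Stop()
	for {
		select {
		case <-done:
			return
		case <-progressPoller.C:
			emitProgress()
			if pollProgressInterval > 0 {
				progressPoller.Reset(pollProgressInterval)
			} // with interval == 0 the timer never resets: exactly one report
		}
	}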