dependencies: ginkgo v2.15.0, gomega v1.31.0

The main reason for updating is support for reporting the cause of context
cancellation: Ginkgo now provides that information when it cancels a context,
and Gomega's polling code includes the cause when generating a failure message.
Patrick Ohly
2024-01-18 12:45:55 +01:00
parent 909faa3a9b
commit 18f0af1f00
51 changed files with 421 additions and 170 deletions
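The cancel-cause plumbing this update builds on is the context.WithCancelCause / context.Cause pair added in Go 1.20: with the older context.WithCancel API a poller only ever sees context.Canceled and cannot say why it gave up. The sketch below is illustrative only; pollUntil is a made-up stand-in for a Gomega-style poller (not Gomega's actual Eventually implementation), showing how a cause passed at cancellation time can end up in a failure message.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntil is a hypothetical stand-in for a Gomega-style poller: it retries
// the condition until it succeeds or the context is done, and includes the
// cancellation cause (if any) in the resulting error.
func pollUntil(ctx context.Context, condition func() bool) error {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			// context.Cause returns the error passed to the CancelCauseFunc,
			// or ctx.Err() when no explicit cause was recorded.
			return fmt.Errorf("polling stopped: %w", context.Cause(ctx))
		case <-ticker.C:
			if condition() {
				return nil
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel(errors.New("spec timeout occurred")) // the cause that context.Cause will report
	}()
	fmt.Println(pollUntil(ctx, func() bool { return false }))
	// prints: polling stopped: spec timeout occurred
}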

@@ -0,0 +1,7 @@
+//go:build wasm
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+	return &NoopOutputInterceptor{}
+}

@@ -0,0 +1,10 @@
+//go:build wasm
+
+package internal
+
+import (
+	"os"
+	"syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}

@@ -17,7 +17,7 @@ type specContext struct {
context.Context
*ProgressReporterManager
-cancel context.CancelFunc
+cancel context.CancelCauseFunc
suite *Suite
}
@@ -30,7 +30,7 @@ Note that while SpecContext is used to enforce deadlines by Ginkgo it is not con
This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
*/
func NewSpecContext(suite *Suite) *specContext {
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancelCause(context.Background())
sc := &specContext{
cancel: cancel,
suite: suite,

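The doc comment above explains why Ginkgo cancels the context manually rather than relying on context.WithDeadline: it wants to capture a progress report before the spec's goroutines unwind. The following is a minimal sketch of that ordering, assuming a made-up takeProgressReport helper and a simplified runner; it is not Ginkgo's actual runNode logic.

package main

import (
	"context"
	"fmt"
	"time"
)

// takeProgressReport is a hypothetical stand-in for Ginkgo's progress-report
// machinery; a real implementation would snapshot goroutine stacks.
func takeProgressReport() {
	fmt.Println("progress report captured")
}

// runWithTimeout mirrors the ordering described in the comment above: capture
// the progress report first, then cancel the context with an explanatory cause.
func runWithTimeout(body func(ctx context.Context), timeout time.Duration) {
	ctx, cancel := context.WithCancelCause(context.Background())
	defer cancel(fmt.Errorf("spec has finished"))

	done := make(chan struct{})
	go func() {
		defer close(done)
		body(ctx)
	}()

	select {
	case <-done:
	case <-time.After(timeout):
		takeProgressReport()                        // capture where the spec is stuck...
		cancel(fmt.Errorf("node timeout occurred")) // ...then tell it to stop
		<-done
	}
}

func main() {
	runWithTimeout(func(ctx context.Context) {
		<-ctx.Done()
		fmt.Println("spec stopped:", context.Cause(ctx))
	}, 20*time.Millisecond)
}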
@@ -79,7 +79,7 @@ func NewSuite() *Suite {
func (suite *Suite) Clone() (*Suite, error) {
if suite.phase != PhaseBuildTopLevel {
-return nil, fmt.Errorf("cnanot clone suite after tree has been built")
+return nil, fmt.Errorf("cannot clone suite after tree has been built")
}
return &Suite{
tree: &TreeNode{},
@@ -858,7 +858,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}
sc := NewSpecContext(suite)
-defer sc.cancel()
+defer sc.cancel(fmt.Errorf("spec has finished"))
suite.selectiveLock.Lock()
suite.currentSpecContext = sc
@@ -958,7 +958,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
// the spec is actually stuck
-sc.cancel()
+sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
//and now we wait for the grace period
gracePeriodChannel = time.After(gracePeriod)
case <-interruptStatus.Channel:
@@ -985,7 +985,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
}
progressReport = progressReport.WithoutOtherGoroutines()
-sc.cancel()
+sc.cancel(fmt.Errorf(interruptStatus.Message()))
if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
if interruptStatus.ShouldIncludeProgressReport() {