dependencies: ginkgo v2.15.0, gomega v1.31.0
The main reason for updating is support for reporting the cause of context cancellation: Ginkgo now provides that information when canceling a context, and Gomega's polling code includes it when generating a failure message.
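For background, this builds on the cancellation-cause API added to the standard library's context package in Go 1.20, which the diffs below adopt; a minimal sketch of the mechanism:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// WithCancelCause returns a CancelCauseFunc that records the error
	// explaining why the context was canceled.
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errors.New("node timeout occurred"))

	fmt.Println(ctx.Err())          // context.Canceled
	fmt.Println(context.Cause(ctx)) // node timeout occurred
}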
vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go (generated, vendored, new file, 7 lines added)
@@ -0,0 +1,7 @@
+//go:build wasm
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+	return &NoopOutputInterceptor{}
+}
vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go (generated, vendored, new file, 10 lines added)
@@ -0,0 +1,10 @@
+//go:build wasm
+
+package internal
+
+import (
+	"os"
+	"syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go (generated, vendored, 4 changes)
@@ -17,7 +17,7 @@ type specContext struct {
 	context.Context
 	*ProgressReporterManager
 
-	cancel context.CancelFunc
+	cancel context.CancelCauseFunc
 
 	suite *Suite
 }
@@ -30,7 +30,7 @@ Note that while SpecContext is used to enforce deadlines by Ginkgo it is not con
 This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
 */
 func NewSpecContext(suite *Suite) *specContext {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel := context.WithCancelCause(context.Background())
 	sc := &specContext{
 		cancel: cancel,
 		suite:  suite,
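The switch from context.CancelFunc to context.CancelCauseFunc is what makes the cause available to consumers via context.Cause. A minimal sketch of how polling code can use it (pollUntil is a hypothetical helper in the spirit of Gomega's polling, not taken from it):

package pollexample

import (
	"context"
	"fmt"
	"time"
)

// pollUntil repeatedly evaluates condition until it succeeds or the
// context is done, in which case it reports why the context was canceled
// rather than a bare "context canceled".
func pollUntil(ctx context.Context, condition func() bool) error {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			// context.Cause returns the error passed to the CancelCauseFunc,
			// e.g. "spec has finished" or "... timeout occurred".
			return fmt.Errorf("polling aborted: %w", context.Cause(ctx))
		case <-ticker.C:
			if condition() {
				return nil
			}
		}
	}
}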
vendor/github.com/onsi/ginkgo/v2/internal/suite.go (generated, vendored, 8 changes)
@@ -79,7 +79,7 @@ func NewSuite() *Suite {
 
 func (suite *Suite) Clone() (*Suite, error) {
 	if suite.phase != PhaseBuildTopLevel {
-		return nil, fmt.Errorf("cnanot clone suite after tree has been built")
+		return nil, fmt.Errorf("cannot clone suite after tree has been built")
 	}
 	return &Suite{
 		tree: &TreeNode{},
@@ -858,7 +858,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
 	}
 
 	sc := NewSpecContext(suite)
-	defer sc.cancel()
+	defer sc.cancel(fmt.Errorf("spec has finished"))
 
 	suite.selectiveLock.Lock()
 	suite.currentSpecContext = sc
@@ -958,7 +958,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
 
 	// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
 	// the spec is actually stuck
-	sc.cancel()
+	sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
 	//and now we wait for the grace period
 	gracePeriodChannel = time.After(gracePeriod)
 case <-interruptStatus.Channel:
@@ -985,7 +985,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
 	}
 
 	progressReport = progressReport.WithoutOtherGoroutines()
-	sc.cancel()
+	sc.cancel(fmt.Errorf(interruptStatus.Message()))
 
 	if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
 		if interruptStatus.ShouldIncludeProgressReport() {
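Taken together, the cause threads through to test output. A usage sketch, assuming Ginkgo v2's SpecContext and NodeTimeout decorator and Gomega's context-aware Eventually; resourceReady is a hypothetical stand-in:

package example_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func resourceReady() bool { return false } // hypothetical stand-in

var _ = It("waits for the resource", func(ctx SpecContext) {
	// When NodeTimeout fires, Ginkgo cancels ctx with a cause such as
	// "node timeout occurred"; Gomega's Eventually can then include that
	// cause in its failure message instead of a bare "context canceled".
	Eventually(ctx, func() bool {
		return resourceReady()
	}).Should(BeTrue())
}, NodeTimeout(time.Second))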