github.com/onsi/ginkgo v1.6.0
3 vendor/github.com/hpcloud/tail/.gitignore (generated, vendored, Normal file)
@@ -0,0 +1,3 @@
.test
.go
18 vendor/github.com/hpcloud/tail/.travis.yml (generated, vendored, Normal file)
@@ -0,0 +1,18 @@
language: go

script:
  - go test -race -v ./...

go:
  - 1.4
  - 1.5
  - 1.6
  - tip

matrix:
  allow_failures:
    - go: tip

install:
  - go get gopkg.in/fsnotify.v1
  - go get gopkg.in/tomb.v1
44 vendor/github.com/hpcloud/tail/BUILD (generated, vendored, Normal file)
@@ -0,0 +1,44 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "tail.go",
        "tail_posix.go",
        "tail_windows.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail",
    importpath = "github.com/hpcloud/tail",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/hpcloud/tail/ratelimiter:go_default_library",
        "//vendor/github.com/hpcloud/tail/util:go_default_library",
        "//vendor/github.com/hpcloud/tail/watch:go_default_library",
        "//vendor/gopkg.in/tomb.v1:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:windows": [
            "//vendor/github.com/hpcloud/tail/winfile:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//vendor/github.com/hpcloud/tail/ratelimiter:all-srcs",
        "//vendor/github.com/hpcloud/tail/util:all-srcs",
        "//vendor/github.com/hpcloud/tail/watch:all-srcs",
        "//vendor/github.com/hpcloud/tail/winfile:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
63 vendor/github.com/hpcloud/tail/CHANGES.md (generated, vendored, Normal file)
@@ -0,0 +1,63 @@
# API v1 (gopkg.in/hpcloud/tail.v1)

## April, 2016

* Migrated to godep, as depman is not longer supported
* Introduced golang vendoring feature
* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file

## July, 2015

* Fix inotify watcher leak; remove `Cleanup` (#51)

# API v0 (gopkg.in/hpcloud/tail.v0)

## June, 2015

* Don't return partial lines (PR #40)
* Use stable version of fsnotify (#46)

## July, 2014

* Fix tail for Windows (PR #36)

## May, 2014

* Improved rate limiting using leaky bucket (PR #29)
* Fix odd line splitting (PR #30)

## Apr, 2014

* LimitRate now discards read buffer (PR #28)
* allow reading of longer lines if MaxLineSize is unset (PR #24)
* updated deps.json to latest fsnotify (441bbc86b1)

## Feb, 2014

* added `Config.Logger` to suppress library logging

## Nov, 2013

* add Cleanup to remove leaky inotify watches (PR #20)

## Aug, 2013

* redesigned Location field (PR #12)
* add tail.Tell (PR #14)

## July, 2013

* Rate limiting (PR #10)

## May, 2013

* Detect file deletions/renames in polling file watcher (PR #1)
* Detect file truncation
* Fix potential race condition when reopening the file (issue 5)
* Fix potential blocking of `tail.Stop` (issue 4)
* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
* Support Follow=false

## Feb, 2013

* Initial open source release
19 vendor/github.com/hpcloud/tail/Dockerfile (generated, vendored, Normal file)
@@ -0,0 +1,19 @@
FROM golang

RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
ADD . $GOPATH/src/github.com/hpcloud/tail/

# expecting to fetch dependencies successfully.
RUN go get -v github.com/hpcloud/tail

# expecting to run the test successfully.
RUN go test -v github.com/hpcloud/tail

# expecting to install successfully
RUN go install -v github.com/hpcloud/tail
RUN go install -v github.com/hpcloud/tail/cmd/gotail

RUN $GOPATH/bin/gotail -h || true

ENV PATH $GOPATH/bin:$PATH
CMD ["gotail"]
21 vendor/github.com/hpcloud/tail/LICENSE.txt (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
# The MIT License (MIT)

# © Copyright 2015 Hewlett Packard Enterprise Development LP
Copyright (c) 2014 ActiveState

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
11 vendor/github.com/hpcloud/tail/Makefile (generated, vendored, Normal file)
@@ -0,0 +1,11 @@
default: test

test: *.go
	go test -v -race ./...

fmt:
	gofmt -w .

# Run the test in an isolated environment.
fulltest:
	docker build -t hpcloud/tail .
28 vendor/github.com/hpcloud/tail/README.md (generated, vendored, Normal file)
@@ -0,0 +1,28 @@
[](https://travis-ci.org/hpcloud/tail)
[](https://ci.appveyor.com/project/HelionCloudFoundry/tail)

# Go package for tail-ing files

A Go package striving to emulate the features of the BSD `tail` program.

```Go
t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
for line := range t.Lines {
    fmt.Println(line.Text)
}
```

See [API documentation](http://godoc.org/github.com/hpcloud/tail).

## Log rotation

Tail comes with full support for truncation/move detection as it is
designed to work with log rotation tools.

## Installing

    go get github.com/hpcloud/tail/...

## Windows support

This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.
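The log-rotation support described in the README maps onto the `Follow` and `ReOpen` fields of `tail.Config`, which this commit vendors in `tail.go` below. A minimal hedged sketch of that usage (the log path is a placeholder, not taken from this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hpcloud/tail"
)

func main() {
	// Follow keeps reading as the file grows; ReOpen re-attaches after the
	// file is rotated away and recreated (tail -F behaviour).
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		Follow: true,
		ReOpen: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
	// Err (from the embedded tomb) reports why the Lines channel closed.
	if err := t.Err(); err != nil {
		log.Fatal(err)
	}
}
```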
11 vendor/github.com/hpcloud/tail/appveyor.yml (generated, vendored, Normal file)
@@ -0,0 +1,11 @@
version: 0.{build}
skip_tags: true
cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
build_script:
  - SET GOPATH=c:\workspace
  - go test -v -race ./...
test: off
clone_folder: c:\workspace\src\github.com\hpcloud\tail
branches:
  only:
    - master
27 vendor/github.com/hpcloud/tail/ratelimiter/BUILD (generated, vendored, Normal file)
@@ -0,0 +1,27 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "leakybucket.go",
        "memory.go",
        "storage.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail/ratelimiter",
    importpath = "github.com/hpcloud/tail/ratelimiter",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
7 vendor/github.com/hpcloud/tail/ratelimiter/Licence (generated, vendored, Normal file)
@@ -0,0 +1,7 @@
Copyright (C) 2013 99designs

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
97 vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go (generated, vendored, Normal file)
@@ -0,0 +1,97 @@
|
||||
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
|
||||
package ratelimiter
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type LeakyBucket struct {
|
||||
Size uint16
|
||||
Fill float64
|
||||
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
|
||||
Lastupdate time.Time
|
||||
Now func() time.Time
|
||||
}
|
||||
|
||||
func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
|
||||
bucket := LeakyBucket{
|
||||
Size: size,
|
||||
Fill: 0,
|
||||
LeakInterval: leakInterval,
|
||||
Now: time.Now,
|
||||
Lastupdate: time.Now(),
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) updateFill() {
|
||||
now := b.Now()
|
||||
if b.Fill > 0 {
|
||||
elapsed := now.Sub(b.Lastupdate)
|
||||
|
||||
b.Fill -= float64(elapsed) / float64(b.LeakInterval)
|
||||
if b.Fill < 0 {
|
||||
b.Fill = 0
|
||||
}
|
||||
}
|
||||
b.Lastupdate = now
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) Pour(amount uint16) bool {
|
||||
b.updateFill()
|
||||
|
||||
var newfill float64 = b.Fill + float64(amount)
|
||||
|
||||
if newfill > float64(b.Size) {
|
||||
return false
|
||||
}
|
||||
|
||||
b.Fill = newfill
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// The time at which this bucket will be completely drained
|
||||
func (b *LeakyBucket) DrainedAt() time.Time {
|
||||
return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
|
||||
}
|
||||
|
||||
// The duration until this bucket is completely drained
|
||||
func (b *LeakyBucket) TimeToDrain() time.Duration {
|
||||
return b.DrainedAt().Sub(b.Now())
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
|
||||
return b.Now().Sub(b.Lastupdate)
|
||||
}
|
||||
|
||||
type LeakyBucketSer struct {
|
||||
Size uint16
|
||||
Fill float64
|
||||
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
|
||||
Lastupdate time.Time
|
||||
}
|
||||
|
||||
func (b *LeakyBucket) Serialise() *LeakyBucketSer {
|
||||
bucket := LeakyBucketSer{
|
||||
Size: b.Size,
|
||||
Fill: b.Fill,
|
||||
LeakInterval: b.LeakInterval,
|
||||
Lastupdate: b.Lastupdate,
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
||||
|
||||
func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
|
||||
bucket := LeakyBucket{
|
||||
Size: b.Size,
|
||||
Fill: b.Fill,
|
||||
LeakInterval: b.LeakInterval,
|
||||
Lastupdate: b.Lastupdate,
|
||||
Now: time.Now,
|
||||
}
|
||||
|
||||
return &bucket
|
||||
}
|
||||
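For orientation, the bucket defined in `leakybucket.go` above is driven by `Pour`: each event adds units, and they leak back out over `LeakInterval`. A small hedged sketch of that API (the capacity and interval values are arbitrary, not taken from this diff):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	// A bucket of capacity 16 that leaks one unit every 100ms.
	bucket := ratelimiter.NewLeakyBucket(16, 100*time.Millisecond)

	for i := 0; i < 20; i++ {
		if bucket.Pour(1) {
			fmt.Println("event", i, "accepted")
		} else {
			// Bucket is full: back off until it has drained.
			fmt.Println("rate limit hit; drains in", bucket.TimeToDrain())
			time.Sleep(bucket.TimeToDrain())
		}
	}
}
```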
58 vendor/github.com/hpcloud/tail/ratelimiter/memory.go (generated, vendored, Normal file)
@@ -0,0 +1,58 @@
|
||||
package ratelimiter
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
const GC_SIZE int = 100
|
||||
|
||||
type Memory struct {
|
||||
store map[string]LeakyBucket
|
||||
lastGCCollected time.Time
|
||||
}
|
||||
|
||||
func NewMemory() *Memory {
|
||||
m := new(Memory)
|
||||
m.store = make(map[string]LeakyBucket)
|
||||
m.lastGCCollected = time.Now()
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
|
||||
|
||||
bucket, ok := m.store[key]
|
||||
if !ok {
|
||||
return nil, errors.New("miss")
|
||||
}
|
||||
|
||||
return &bucket, nil
|
||||
}
|
||||
|
||||
func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
|
||||
|
||||
if len(m.store) > GC_SIZE {
|
||||
m.GarbageCollect()
|
||||
}
|
||||
|
||||
m.store[key] = bucket
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Memory) GarbageCollect() {
|
||||
now := time.Now()
|
||||
|
||||
// rate limit GC to once per minute
|
||||
if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() {
|
||||
|
||||
for key, bucket := range m.store {
|
||||
// if the bucket is drained, then GC
|
||||
if bucket.DrainedAt().Unix() > now.Unix() {
|
||||
delete(m.store, key)
|
||||
}
|
||||
}
|
||||
|
||||
m.lastGCCollected = now
|
||||
}
|
||||
}
|
||||
6 vendor/github.com/hpcloud/tail/ratelimiter/storage.go (generated, vendored, Normal file)
@@ -0,0 +1,6 @@
package ratelimiter

type Storage interface {
	GetBucketFor(string) (*LeakyBucket, error)
	SetBucketFor(string, LeakyBucket) error
}
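The `Storage` interface above is satisfied by the in-memory backend from `memory.go` in this same package. A brief illustrative sketch, not part of the vendored code (the key name is arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	// *Memory implements Storage (GetBucketFor / SetBucketFor).
	var store ratelimiter.Storage = ratelimiter.NewMemory()

	// Store a bucket under a key, then fetch it back through the interface.
	bucket := ratelimiter.NewLeakyBucket(8, time.Second)
	if err := store.SetBucketFor("client-42", *bucket); err != nil {
		panic(err)
	}

	got, err := store.GetBucketFor("client-42")
	if err != nil {
		panic(err) // GetBucketFor returns an error on a cache miss.
	}
	fmt.Println("bucket size:", got.Size)
}
```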
438 vendor/github.com/hpcloud/tail/tail.go (generated, vendored, Normal file)
@@ -0,0 +1,438 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package tail
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hpcloud/tail/ratelimiter"
|
||||
"github.com/hpcloud/tail/util"
|
||||
"github.com/hpcloud/tail/watch"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrStop = fmt.Errorf("tail should now stop")
|
||||
)
|
||||
|
||||
type Line struct {
|
||||
Text string
|
||||
Time time.Time
|
||||
Err error // Error from tail
|
||||
}
|
||||
|
||||
// NewLine returns a Line with present time.
|
||||
func NewLine(text string) *Line {
|
||||
return &Line{text, time.Now(), nil}
|
||||
}
|
||||
|
||||
// SeekInfo represents arguments to `os.Seek`
|
||||
type SeekInfo struct {
|
||||
Offset int64
|
||||
Whence int // os.SEEK_*
|
||||
}
|
||||
|
||||
type logger interface {
|
||||
Fatal(v ...interface{})
|
||||
Fatalf(format string, v ...interface{})
|
||||
Fatalln(v ...interface{})
|
||||
Panic(v ...interface{})
|
||||
Panicf(format string, v ...interface{})
|
||||
Panicln(v ...interface{})
|
||||
Print(v ...interface{})
|
||||
Printf(format string, v ...interface{})
|
||||
Println(v ...interface{})
|
||||
}
|
||||
|
||||
// Config is used to specify how a file must be tailed.
|
||||
type Config struct {
|
||||
// File-specifc
|
||||
Location *SeekInfo // Seek to this location before tailing
|
||||
ReOpen bool // Reopen recreated files (tail -F)
|
||||
MustExist bool // Fail early if the file does not exist
|
||||
Poll bool // Poll for file changes instead of using inotify
|
||||
Pipe bool // Is a named pipe (mkfifo)
|
||||
RateLimiter *ratelimiter.LeakyBucket
|
||||
|
||||
// Generic IO
|
||||
Follow bool // Continue looking for new lines (tail -f)
|
||||
MaxLineSize int // If non-zero, split longer lines into multiple lines
|
||||
|
||||
// Logger, when nil, is set to tail.DefaultLogger
|
||||
// To disable logging: set field to tail.DiscardingLogger
|
||||
Logger logger
|
||||
}
|
||||
|
||||
type Tail struct {
|
||||
Filename string
|
||||
Lines chan *Line
|
||||
Config
|
||||
|
||||
file *os.File
|
||||
reader *bufio.Reader
|
||||
|
||||
watcher watch.FileWatcher
|
||||
changes *watch.FileChanges
|
||||
|
||||
tomb.Tomb // provides: Done, Kill, Dying
|
||||
|
||||
lk sync.Mutex
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultLogger is used when Config.Logger == nil
|
||||
DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
// DiscardingLogger can be used to disable logging output
|
||||
DiscardingLogger = log.New(ioutil.Discard, "", 0)
|
||||
)
|
||||
|
||||
// TailFile begins tailing the file. Output stream is made available
|
||||
// via the `Tail.Lines` channel. To handle errors during tailing,
|
||||
// invoke the `Wait` or `Err` method after finishing reading from the
|
||||
// `Lines` channel.
|
||||
func TailFile(filename string, config Config) (*Tail, error) {
|
||||
if config.ReOpen && !config.Follow {
|
||||
util.Fatal("cannot set ReOpen without Follow.")
|
||||
}
|
||||
|
||||
t := &Tail{
|
||||
Filename: filename,
|
||||
Lines: make(chan *Line),
|
||||
Config: config,
|
||||
}
|
||||
|
||||
// when Logger was not specified in config, use default logger
|
||||
if t.Logger == nil {
|
||||
t.Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
}
|
||||
|
||||
if t.Poll {
|
||||
t.watcher = watch.NewPollingFileWatcher(filename)
|
||||
} else {
|
||||
t.watcher = watch.NewInotifyFileWatcher(filename)
|
||||
}
|
||||
|
||||
if t.MustExist {
|
||||
var err error
|
||||
t.file, err = OpenFile(t.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
go t.tailFileSync()
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Return the file's current position, like stdio's ftell().
|
||||
// But this value is not very accurate.
|
||||
// it may readed one line in the chan(tail.Lines),
|
||||
// so it may lost one line.
|
||||
func (tail *Tail) Tell() (offset int64, err error) {
|
||||
if tail.file == nil {
|
||||
return
|
||||
}
|
||||
offset, err = tail.file.Seek(0, os.SEEK_CUR)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
tail.lk.Lock()
|
||||
defer tail.lk.Unlock()
|
||||
if tail.reader == nil {
|
||||
return
|
||||
}
|
||||
|
||||
offset -= int64(tail.reader.Buffered())
|
||||
return
|
||||
}
|
||||
|
||||
// Stop stops the tailing activity.
|
||||
func (tail *Tail) Stop() error {
|
||||
tail.Kill(nil)
|
||||
return tail.Wait()
|
||||
}
|
||||
|
||||
// StopAtEOF stops tailing as soon as the end of the file is reached.
|
||||
func (tail *Tail) StopAtEOF() error {
|
||||
tail.Kill(errStopAtEOF)
|
||||
return tail.Wait()
|
||||
}
|
||||
|
||||
var errStopAtEOF = errors.New("tail: stop at eof")
|
||||
|
||||
func (tail *Tail) close() {
|
||||
close(tail.Lines)
|
||||
tail.closeFile()
|
||||
}
|
||||
|
||||
func (tail *Tail) closeFile() {
|
||||
if tail.file != nil {
|
||||
tail.file.Close()
|
||||
tail.file = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (tail *Tail) reopen() error {
|
||||
tail.closeFile()
|
||||
for {
|
||||
var err error
|
||||
tail.file, err = OpenFile(tail.Filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
|
||||
if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
|
||||
if err == tomb.ErrDying {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
|
||||
}
|
||||
break
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tail *Tail) readLine() (string, error) {
|
||||
tail.lk.Lock()
|
||||
line, err := tail.reader.ReadString('\n')
|
||||
tail.lk.Unlock()
|
||||
if err != nil {
|
||||
// Note ReadString "returns the data read before the error" in
|
||||
// case of an error, including EOF, so we return it as is. The
|
||||
// caller is expected to process it if err is EOF.
|
||||
return line, err
|
||||
}
|
||||
|
||||
line = strings.TrimRight(line, "\n")
|
||||
|
||||
return line, err
|
||||
}
|
||||
|
||||
func (tail *Tail) tailFileSync() {
|
||||
defer tail.Done()
|
||||
defer tail.close()
|
||||
|
||||
if !tail.MustExist {
|
||||
// deferred first open.
|
||||
err := tail.reopen()
|
||||
if err != nil {
|
||||
if err != tomb.ErrDying {
|
||||
tail.Kill(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Seek to requested location on first open of the file.
|
||||
if tail.Location != nil {
|
||||
_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
|
||||
tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
|
||||
if err != nil {
|
||||
tail.Killf("Seek error on %s: %s", tail.Filename, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
tail.openReader()
|
||||
|
||||
var offset int64 = 0
|
||||
var err error
|
||||
|
||||
// Read line by line.
|
||||
for {
|
||||
// do not seek in named pipes
|
||||
if !tail.Pipe {
|
||||
// grab the position in case we need to back up in the event of a half-line
|
||||
offset, err = tail.Tell()
|
||||
if err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
line, err := tail.readLine()
|
||||
|
||||
// Process `line` even if err is EOF.
|
||||
if err == nil {
|
||||
cooloff := !tail.sendLine(line)
|
||||
if cooloff {
|
||||
// Wait a second before seeking till the end of
|
||||
// file when rate limit is reached.
|
||||
msg := fmt.Sprintf(
|
||||
"Too much log activity; waiting a second " +
|
||||
"before resuming tailing")
|
||||
tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)}
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
case <-tail.Dying():
|
||||
return
|
||||
}
|
||||
if err := tail.seekEnd(); err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else if err == io.EOF {
|
||||
if !tail.Follow {
|
||||
if line != "" {
|
||||
tail.sendLine(line)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if tail.Follow && line != "" {
|
||||
// this has the potential to never return the last line if
|
||||
// it's not followed by a newline; seems a fair trade here
|
||||
err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
|
||||
if err != nil {
|
||||
tail.Kill(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// When EOF is reached, wait for more data to become
|
||||
// available. Wait strategy is based on the `tail.watcher`
|
||||
// implementation (inotify or polling).
|
||||
err := tail.waitForChanges()
|
||||
if err != nil {
|
||||
if err != ErrStop {
|
||||
tail.Kill(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// non-EOF error
|
||||
tail.Killf("Error reading %s: %s", tail.Filename, err)
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-tail.Dying():
|
||||
if tail.Err() == errStopAtEOF {
|
||||
continue
|
||||
}
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// waitForChanges waits until the file has been appended, deleted,
|
||||
// moved or truncated. When moved or deleted - the file will be
|
||||
// reopened if ReOpen is true. Truncated files are always reopened.
|
||||
func (tail *Tail) waitForChanges() error {
|
||||
if tail.changes == nil {
|
||||
pos, err := tail.file.Seek(0, os.SEEK_CUR)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-tail.changes.Modified:
|
||||
return nil
|
||||
case <-tail.changes.Deleted:
|
||||
tail.changes = nil
|
||||
if tail.ReOpen {
|
||||
// XXX: we must not log from a library.
|
||||
tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
|
||||
if err := tail.reopen(); err != nil {
|
||||
return err
|
||||
}
|
||||
tail.Logger.Printf("Successfully reopened %s", tail.Filename)
|
||||
tail.openReader()
|
||||
return nil
|
||||
} else {
|
||||
tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
|
||||
return ErrStop
|
||||
}
|
||||
case <-tail.changes.Truncated:
|
||||
// Always reopen truncated files (Follow is true)
|
||||
tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
|
||||
if err := tail.reopen(); err != nil {
|
||||
return err
|
||||
}
|
||||
tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
|
||||
tail.openReader()
|
||||
return nil
|
||||
case <-tail.Dying():
|
||||
return ErrStop
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (tail *Tail) openReader() {
|
||||
if tail.MaxLineSize > 0 {
|
||||
// add 2 to account for newline characters
|
||||
tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
|
||||
} else {
|
||||
tail.reader = bufio.NewReader(tail.file)
|
||||
}
|
||||
}
|
||||
|
||||
func (tail *Tail) seekEnd() error {
|
||||
return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
|
||||
}
|
||||
|
||||
func (tail *Tail) seekTo(pos SeekInfo) error {
|
||||
_, err := tail.file.Seek(pos.Offset, pos.Whence)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
|
||||
}
|
||||
// Reset the read buffer whenever the file is re-seek'ed
|
||||
tail.reader.Reset(tail.file)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendLine sends the line(s) to Lines channel, splitting longer lines
|
||||
// if necessary. Return false if rate limit is reached.
|
||||
func (tail *Tail) sendLine(line string) bool {
|
||||
now := time.Now()
|
||||
lines := []string{line}
|
||||
|
||||
// Split longer lines
|
||||
if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
|
||||
lines = util.PartitionString(line, tail.MaxLineSize)
|
||||
}
|
||||
|
||||
for _, line := range lines {
|
||||
tail.Lines <- &Line{line, now, nil}
|
||||
}
|
||||
|
||||
if tail.Config.RateLimiter != nil {
|
||||
ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
|
||||
if !ok {
|
||||
tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
|
||||
tail.Filename)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Cleanup removes inotify watches added by the tail package. This function is
|
||||
// meant to be invoked from a process's exit handler. Linux kernel may not
|
||||
// automatically remove inotify watches after the process exits.
|
||||
func (tail *Tail) Cleanup() {
|
||||
watch.Cleanup(tail.Filename)
|
||||
}
|
||||
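A hedged sketch of how the `Config` options defined in `tail.go` above combine, here seeking to the end of the file first via the `Location`/`SeekInfo` field so only new lines are reported. The file path is a placeholder and the flag combination is illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hpcloud/tail"
)

func main() {
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		// Start at the end of the file, like `tail -f` on an existing log.
		Location:  &tail.SeekInfo{Offset: 0, Whence: os.SEEK_END},
		Follow:    true,
		MustExist: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Cleanup drops the inotify watches once the process is done tailing.
	defer t.Cleanup()

	for line := range t.Lines {
		if line.Err != nil {
			log.Println("tail error:", line.Err)
			continue
		}
		fmt.Println(line.Text)
	}
}
```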
11 vendor/github.com/hpcloud/tail/tail_posix.go (generated, vendored, Normal file)
@@ -0,0 +1,11 @@
// +build linux darwin freebsd netbsd openbsd

package tail

import (
	"os"
)

func OpenFile(name string) (file *os.File, err error) {
	return os.Open(name)
}
12 vendor/github.com/hpcloud/tail/tail_windows.go (generated, vendored, Normal file)
@@ -0,0 +1,12 @@
// +build windows

package tail

import (
	"github.com/hpcloud/tail/winfile"
	"os"
)

func OpenFile(name string) (file *os.File, err error) {
	return winfile.OpenFile(name, os.O_RDONLY, 0)
}
23 vendor/github.com/hpcloud/tail/util/BUILD (generated, vendored, Normal file)
@@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["util.go"],
    importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail/util",
    importpath = "github.com/hpcloud/tail/util",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
48 vendor/github.com/hpcloud/tail/util/util.go (generated, vendored, Normal file)
@@ -0,0 +1,48 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
*log.Logger
|
||||
}
|
||||
|
||||
var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
|
||||
|
||||
// fatal is like panic except it displays only the current goroutine's stack.
|
||||
func Fatal(format string, v ...interface{}) {
|
||||
// https://github.com/hpcloud/log/blob/master/log.go#L45
|
||||
LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// partitionString partitions the string into chunks of given size,
|
||||
// with the last chunk of variable size.
|
||||
func PartitionString(s string, chunkSize int) []string {
|
||||
if chunkSize <= 0 {
|
||||
panic("invalid chunkSize")
|
||||
}
|
||||
length := len(s)
|
||||
chunks := 1 + length/chunkSize
|
||||
start := 0
|
||||
end := chunkSize
|
||||
parts := make([]string, 0, chunks)
|
||||
for {
|
||||
if end > length {
|
||||
end = length
|
||||
}
|
||||
parts = append(parts, s[start:end])
|
||||
if end == length {
|
||||
break
|
||||
}
|
||||
start, end = end, end+chunkSize
|
||||
}
|
||||
return parts
|
||||
}
|
||||
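`PartitionString` in `util.go` above is what `tail.go` uses to honour `MaxLineSize`. A quick hedged illustration of its behaviour, not part of the vendored code:

```go
package main

import (
	"fmt"

	"github.com/hpcloud/tail/util"
)

func main() {
	// Splits into chunks of at most 4 bytes; the last chunk may be shorter.
	parts := util.PartitionString("abcdefghij", 4)
	fmt.Println(parts) // [abcd efgh ij]
}
```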
34 vendor/github.com/hpcloud/tail/watch/BUILD (generated, vendored, Normal file)
@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "filechanges.go",
        "inotify.go",
        "inotify_tracker.go",
        "polling.go",
        "watch.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail/watch",
    importpath = "github.com/hpcloud/tail/watch",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/hpcloud/tail/util:go_default_library",
        "//vendor/gopkg.in/fsnotify.v1:go_default_library",
        "//vendor/gopkg.in/tomb.v1:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
36 vendor/github.com/hpcloud/tail/watch/filechanges.go (generated, vendored, Normal file)
@@ -0,0 +1,36 @@
package watch

type FileChanges struct {
	Modified  chan bool // Channel to get notified of modifications
	Truncated chan bool // Channel to get notified of truncations
	Deleted   chan bool // Channel to get notified of deletions/renames
}

func NewFileChanges() *FileChanges {
	return &FileChanges{
		make(chan bool), make(chan bool), make(chan bool)}
}

func (fc *FileChanges) NotifyModified() {
	sendOnlyIfEmpty(fc.Modified)
}

func (fc *FileChanges) NotifyTruncated() {
	sendOnlyIfEmpty(fc.Truncated)
}

func (fc *FileChanges) NotifyDeleted() {
	sendOnlyIfEmpty(fc.Deleted)
}

// sendOnlyIfEmpty sends on a bool channel only if the channel has no
// backlog to be read by other goroutines. This concurrency pattern
// can be used to notify other goroutines if and only if they are
// looking for it (i.e., subsequent notifications can be compressed
// into one).
func sendOnlyIfEmpty(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}
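The `sendOnlyIfEmpty` idiom above (a non-blocking send that collapses bursts of notifications into one wake-up) is a general Go pattern. A standalone hedged sketch of the same idiom outside this package; note it uses a channel with capacity 1 so the demo runs without a concurrent receiver, whereas the vendored code uses unbuffered channels with a live receiver:

```go
package main

import "fmt"

// notify performs a non-blocking send: if nobody is ready to receive and the
// buffer already holds a pending notification, the new one is dropped, so
// bursts of events collapse into a single wake-up.
func notify(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}

func main() {
	events := make(chan bool, 1)

	// Three rapid notifications collapse into one pending signal.
	notify(events)
	notify(events)
	notify(events)

	<-events
	fmt.Println("woken up once, backlog:", len(events)) // backlog: 0
}
```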
128 vendor/github.com/hpcloud/tail/watch/inotify.go (generated, vendored, Normal file)
@@ -0,0 +1,128 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
// InotifyFileWatcher uses inotify to monitor file changes.
|
||||
type InotifyFileWatcher struct {
|
||||
Filename string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
|
||||
fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
|
||||
return fw
|
||||
}
|
||||
|
||||
func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
|
||||
err := WatchCreate(fw.Filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer RemoveWatchCreate(fw.Filename)
|
||||
|
||||
// Do a real check now as the file might have been created before
|
||||
// calling `WatchFlags` above.
|
||||
if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
|
||||
// file exists, or stat returned an error.
|
||||
return err
|
||||
}
|
||||
|
||||
events := Events(fw.Filename)
|
||||
|
||||
for {
|
||||
select {
|
||||
case evt, ok := <-events:
|
||||
if !ok {
|
||||
return fmt.Errorf("inotify watcher has been closed")
|
||||
}
|
||||
evtName, err := filepath.Abs(evt.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fwFilename, err := filepath.Abs(fw.Filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if evtName == fwFilename {
|
||||
return nil
|
||||
}
|
||||
case <-t.Dying():
|
||||
return tomb.ErrDying
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
|
||||
err := Watch(fw.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changes := NewFileChanges()
|
||||
fw.Size = pos
|
||||
|
||||
go func() {
|
||||
defer RemoveWatch(fw.Filename)
|
||||
|
||||
events := Events(fw.Filename)
|
||||
|
||||
for {
|
||||
prevSize := fw.Size
|
||||
|
||||
var evt fsnotify.Event
|
||||
var ok bool
|
||||
|
||||
select {
|
||||
case evt, ok = <-events:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case <-t.Dying():
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case evt.Op&fsnotify.Remove == fsnotify.Remove:
|
||||
fallthrough
|
||||
|
||||
case evt.Op&fsnotify.Rename == fsnotify.Rename:
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
|
||||
case evt.Op&fsnotify.Write == fsnotify.Write:
|
||||
fi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
// XXX: report this error back to the user
|
||||
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
|
||||
}
|
||||
fw.Size = fi.Size()
|
||||
|
||||
if prevSize > 0 && prevSize > fw.Size {
|
||||
changes.NotifyTruncated()
|
||||
} else {
|
||||
changes.NotifyModified()
|
||||
}
|
||||
prevSize = fw.Size
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
260 vendor/github.com/hpcloud/tail/watch/inotify_tracker.go (generated, vendored, Normal file)
@@ -0,0 +1,260 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
)
|
||||
|
||||
type InotifyTracker struct {
|
||||
mux sync.Mutex
|
||||
watcher *fsnotify.Watcher
|
||||
chans map[string]chan fsnotify.Event
|
||||
done map[string]chan bool
|
||||
watchNums map[string]int
|
||||
watch chan *watchInfo
|
||||
remove chan *watchInfo
|
||||
error chan error
|
||||
}
|
||||
|
||||
type watchInfo struct {
|
||||
op fsnotify.Op
|
||||
fname string
|
||||
}
|
||||
|
||||
func (this *watchInfo) isCreate() bool {
|
||||
return this.op == fsnotify.Create
|
||||
}
|
||||
|
||||
var (
|
||||
// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
|
||||
shared *InotifyTracker
|
||||
|
||||
// these are used to ensure the shared InotifyTracker is run exactly once
|
||||
once = sync.Once{}
|
||||
goRun = func() {
|
||||
shared = &InotifyTracker{
|
||||
mux: sync.Mutex{},
|
||||
chans: make(map[string]chan fsnotify.Event),
|
||||
done: make(map[string]chan bool),
|
||||
watchNums: make(map[string]int),
|
||||
watch: make(chan *watchInfo),
|
||||
remove: make(chan *watchInfo),
|
||||
error: make(chan error),
|
||||
}
|
||||
go shared.run()
|
||||
}
|
||||
|
||||
logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
)
|
||||
|
||||
// Watch signals the run goroutine to begin watching the input filename
|
||||
func Watch(fname string) error {
|
||||
return watch(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// Watch create signals the run goroutine to begin watching the input filename
|
||||
// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate
|
||||
func WatchCreate(fname string) error {
|
||||
return watch(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func watch(winfo *watchInfo) error {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
winfo.fname = filepath.Clean(winfo.fname)
|
||||
shared.watch <- winfo
|
||||
return <-shared.error
|
||||
}
|
||||
|
||||
// RemoveWatch signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatch(fname string) {
|
||||
remove(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// RemoveWatch create signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatchCreate(fname string) {
|
||||
remove(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func remove(winfo *watchInfo) {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
winfo.fname = filepath.Clean(winfo.fname)
|
||||
shared.mux.Lock()
|
||||
done := shared.done[winfo.fname]
|
||||
if done != nil {
|
||||
delete(shared.done, winfo.fname)
|
||||
close(done)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
shared.watchNums[fname]--
|
||||
watchNum := shared.watchNums[fname]
|
||||
if watchNum == 0 {
|
||||
delete(shared.watchNums, fname)
|
||||
}
|
||||
shared.mux.Unlock()
|
||||
|
||||
// If we were the last ones to watch this file, unsubscribe from inotify.
|
||||
// This needs to happen after releasing the lock because fsnotify waits
|
||||
// synchronously for the kernel to acknowledge the removal of the watch
|
||||
// for this file, which causes us to deadlock if we still held the lock.
|
||||
if watchNum == 0 {
|
||||
shared.watcher.Remove(fname)
|
||||
}
|
||||
shared.remove <- winfo
|
||||
}
|
||||
|
||||
// Events returns a channel to which FileEvents corresponding to the input filename
|
||||
// will be sent. This channel will be closed when removeWatch is called on this
|
||||
// filename.
|
||||
func Events(fname string) <-chan fsnotify.Event {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
return shared.chans[fname]
|
||||
}
|
||||
|
||||
// Cleanup removes the watch for the input filename if necessary.
|
||||
func Cleanup(fname string) {
|
||||
RemoveWatch(fname)
|
||||
}
|
||||
|
||||
// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating
|
||||
// a new Watcher if the previous Watcher was closed.
|
||||
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
if shared.chans[winfo.fname] == nil {
|
||||
shared.chans[winfo.fname] = make(chan fsnotify.Event)
|
||||
shared.done[winfo.fname] = make(chan bool)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
|
||||
// already in inotify watch
|
||||
if shared.watchNums[fname] > 0 {
|
||||
shared.watchNums[fname]++
|
||||
if winfo.isCreate() {
|
||||
shared.watchNums[winfo.fname]++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err := shared.watcher.Add(fname)
|
||||
if err == nil {
|
||||
shared.watchNums[fname]++
|
||||
if winfo.isCreate() {
|
||||
shared.watchNums[winfo.fname]++
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
|
||||
// corresponding events channel.
|
||||
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
ch := shared.chans[winfo.fname]
|
||||
if ch == nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(shared.chans, winfo.fname)
|
||||
close(ch)
|
||||
|
||||
if !winfo.isCreate() {
|
||||
return
|
||||
}
|
||||
|
||||
shared.watchNums[winfo.fname]--
|
||||
if shared.watchNums[winfo.fname] == 0 {
|
||||
delete(shared.watchNums, winfo.fname)
|
||||
}
|
||||
}
|
||||
|
||||
// sendEvent sends the input event to the appropriate Tail.
|
||||
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
|
||||
name := filepath.Clean(event.Name)
|
||||
|
||||
shared.mux.Lock()
|
||||
ch := shared.chans[name]
|
||||
done := shared.done[name]
|
||||
shared.mux.Unlock()
|
||||
|
||||
if ch != nil && done != nil {
|
||||
select {
|
||||
case ch <- event:
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// run starts the goroutine in which the shared struct reads events from its
|
||||
// Watcher's Event channel and sends the events to the appropriate Tail.
|
||||
func (shared *InotifyTracker) run() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
util.Fatal("failed to create Watcher")
|
||||
}
|
||||
shared.watcher = watcher
|
||||
|
||||
for {
|
||||
select {
|
||||
case winfo := <-shared.watch:
|
||||
shared.error <- shared.addWatch(winfo)
|
||||
|
||||
case winfo := <-shared.remove:
|
||||
shared.removeWatch(winfo)
|
||||
|
||||
case event, open := <-shared.watcher.Events:
|
||||
if !open {
|
||||
return
|
||||
}
|
||||
shared.sendEvent(event)
|
||||
|
||||
case err, open := <-shared.watcher.Errors:
|
||||
if !open {
|
||||
return
|
||||
} else if err != nil {
|
||||
sysErr, ok := err.(*os.SyscallError)
|
||||
if !ok || sysErr.Err != syscall.EINTR {
|
||||
logger.Printf("Error in Watcher Error channel: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
118 vendor/github.com/hpcloud/tail/watch/polling.go (generated, vendored, Normal file)
@@ -0,0 +1,118 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
// PollingFileWatcher polls the file for changes.
|
||||
type PollingFileWatcher struct {
|
||||
Filename string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func NewPollingFileWatcher(filename string) *PollingFileWatcher {
|
||||
fw := &PollingFileWatcher{filename, 0}
|
||||
return fw
|
||||
}
|
||||
|
||||
var POLL_DURATION time.Duration
|
||||
|
||||
func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
|
||||
for {
|
||||
if _, err := os.Stat(fw.Filename); err == nil {
|
||||
return nil
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case <-time.After(POLL_DURATION):
|
||||
continue
|
||||
case <-t.Dying():
|
||||
return tomb.ErrDying
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
|
||||
origFi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changes := NewFileChanges()
|
||||
var prevModTime time.Time
|
||||
|
||||
// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
|
||||
// the fatal (below) with tomb's Kill.
|
||||
|
||||
fw.Size = pos
|
||||
|
||||
go func() {
|
||||
prevSize := fw.Size
|
||||
for {
|
||||
select {
|
||||
case <-t.Dying():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
time.Sleep(POLL_DURATION)
|
||||
fi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
// Windows cannot delete a file if a handle is still open (tail keeps one open)
|
||||
// so it gives access denied to anything trying to read it until all handles are released.
|
||||
if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
|
||||
// File does not exist (has been deleted).
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
|
||||
// XXX: report this error back to the user
|
||||
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
|
||||
}
|
||||
|
||||
// File got moved/renamed?
|
||||
if !os.SameFile(origFi, fi) {
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
|
||||
// File got truncated?
|
||||
fw.Size = fi.Size()
|
||||
if prevSize > 0 && prevSize > fw.Size {
|
||||
changes.NotifyTruncated()
|
||||
prevSize = fw.Size
|
||||
continue
|
||||
}
|
||||
// File got bigger?
|
||||
if prevSize > 0 && prevSize < fw.Size {
|
||||
changes.NotifyModified()
|
||||
prevSize = fw.Size
|
||||
continue
|
||||
}
|
||||
prevSize = fw.Size
|
||||
|
||||
// File was appended to (changed)?
|
||||
modTime := fi.ModTime()
|
||||
if modTime != prevModTime {
|
||||
prevModTime = modTime
|
||||
changes.NotifyModified()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
POLL_DURATION = 250 * time.Millisecond
|
||||
}
|
||||
20 vendor/github.com/hpcloud/tail/watch/watch.go (generated, vendored, Normal file)
@@ -0,0 +1,20 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import "gopkg.in/tomb.v1"

// FileWatcher monitors file-level events.
type FileWatcher interface {
	// BlockUntilExists blocks until the file comes into existence.
	BlockUntilExists(*tomb.Tomb) error

	// ChangeEvents reports on changes to a file, be it modification,
	// deletion, renames or truncations. Returned FileChanges group of
	// channels will be closed, thus become unusable, after a deletion
	// or truncation event.
	// In order to properly report truncations, ChangeEvents requires
	// the caller to pass their current offset in the file.
	ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
}
23 vendor/github.com/hpcloud/tail/winfile/BUILD (generated, vendored, Normal file)
@@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["winfile.go"],
    importmap = "k8s.io/kubernetes/vendor/github.com/hpcloud/tail/winfile",
    importpath = "github.com/hpcloud/tail/winfile",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
92 vendor/github.com/hpcloud/tail/winfile/winfile.go (generated, vendored, Normal file)
@@ -0,0 +1,92 @@
|
||||
// +build windows
|
||||
|
||||
package winfile
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// issue also described here
|
||||
//https://codereview.appspot.com/8203043/
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
|
||||
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
|
||||
if len(path) == 0 {
|
||||
return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
|
||||
}
|
||||
pathp, err := syscall.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return syscall.InvalidHandle, err
|
||||
}
|
||||
var access uint32
|
||||
switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
|
||||
case syscall.O_RDONLY:
|
||||
access = syscall.GENERIC_READ
|
||||
case syscall.O_WRONLY:
|
||||
access = syscall.GENERIC_WRITE
|
||||
case syscall.O_RDWR:
|
||||
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
|
||||
}
|
||||
if mode&syscall.O_CREAT != 0 {
|
||||
access |= syscall.GENERIC_WRITE
|
||||
}
|
||||
if mode&syscall.O_APPEND != 0 {
|
||||
access &^= syscall.GENERIC_WRITE
|
||||
access |= syscall.FILE_APPEND_DATA
|
||||
}
|
||||
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
|
||||
var sa *syscall.SecurityAttributes
|
||||
if mode&syscall.O_CLOEXEC == 0 {
|
||||
sa = makeInheritSa()
|
||||
}
|
||||
var createmode uint32
|
||||
switch {
|
||||
case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
|
||||
createmode = syscall.CREATE_NEW
|
||||
case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
|
||||
createmode = syscall.CREATE_ALWAYS
|
||||
case mode&syscall.O_CREAT == syscall.O_CREAT:
|
||||
createmode = syscall.OPEN_ALWAYS
|
||||
case mode&syscall.O_TRUNC == syscall.O_TRUNC:
|
||||
createmode = syscall.TRUNCATE_EXISTING
|
||||
default:
|
||||
createmode = syscall.OPEN_EXISTING
|
||||
}
|
||||
h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
|
||||
return h, e
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
|
||||
func makeInheritSa() *syscall.SecurityAttributes {
|
||||
var sa syscall.SecurityAttributes
|
||||
sa.Length = uint32(unsafe.Sizeof(sa))
|
||||
sa.InheritHandle = 1
|
||||
return &sa
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
|
||||
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
|
||||
r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return os.NewFile(uintptr(r), name), nil
|
||||
}
|
||||
|
||||
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
|
||||
func syscallMode(i os.FileMode) (o uint32) {
|
||||
o |= uint32(i.Perm())
|
||||
if i&os.ModeSetuid != 0 {
|
||||
o |= syscall.S_ISUID
|
||||
}
|
||||
if i&os.ModeSetgid != 0 {
|
||||
o |= syscall.S_ISGID
|
||||
}
|
||||
if i&os.ModeSticky != 0 {
|
||||
o |= syscall.S_ISVTX
|
||||
}
|
||||
// No mapping for Go's ModeTemporary (plan9 only).
|
||||
return
|
||||
}
|
||||
5 vendor/github.com/onsi/ginkgo/.gitignore (generated, vendored)
@@ -1,4 +1,7 @@
.DS_Store
TODO
tmp/**/*
*.coverprofile
.vscode
.idea/
*.log
10 vendor/github.com/onsi/ginkgo/.travis.yml (generated, vendored)
@@ -1,8 +1,10 @@
language: go
go:
  - 1.5
  - 1.6
  - 1.7
  - 1.6.x
  - 1.7.x
  - 1.8.x
  - 1.9.x
  - 1.10.x

install:
  - go get -v -t ./...
@@ -11,4 +13,4 @@ install:
  - go install github.com/onsi/ginkgo/ginkgo
  - export PATH=$PATH:$HOME/gopath/bin

script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
1 vendor/github.com/onsi/ginkgo/BUILD (generated, vendored)
@@ -16,6 +16,7 @@ go_library(
        "//vendor/github.com/onsi/ginkgo/internal/writer:go_default_library",
        "//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
        "//vendor/github.com/onsi/ginkgo/reporters/stenographer:go_default_library",
        "//vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable:go_default_library",
        "//vendor/github.com/onsi/ginkgo/types:go_default_library",
    ],
)
63 vendor/github.com/onsi/ginkgo/CHANGELOG.md generated vendored
@@ -1,10 +1,71 @@
## HEAD
## 1.6.0

### New Features
- add --debug flag to emit node output to files (#499) [39febac]

### Fixes
- fix: for `go vet` to pass [69338ec]
- docs: fix for contributing instructions [7004cb1]
- consolidate and streamline contribution docs (#494) [d848015]
- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
- all: gofmt [000d317]
- Increase eventually timeout to 30s [c73579c]
- Clarify asynchronous test behaviour [294d8f4]
- Travis badge should only show master [26d2143]

## 1.5.0 5/10/2018

### New Features
- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
- Re-add noisySkippings flag [652e15c]
- Allow coverage to be displayed for focused specs (#367) [11459a8]
- Handle -outputdir flag (#364) [228e3a8]
- Handle -coverprofile flag (#355) [43392d5]

### Fixes
- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their custom reporters. (#365) [8382b23]
- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
- Add an extra new line after reporting spec run completion for test2json [874520d]
- added name field to junit reported testsuite [ae61c63]
- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
- Replace GOPATH in Environment [4b883f0]

## 1.4.0 7/16/2017

- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]

## 1.3.0 3/28/2017

Improvements:

- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
- `Skip(message)` can be used to skip the current test.
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
- Support for retrying flaky tests with `--flakeAttempts`
- `ginkgo ./...` now recurses as you'd expect
- Added `Specify` a synonym for `It`
- Support colorise on Windows
- Broader support for various go compilation flags in the `ginkgo` CLI

Bug Fixes:
33 vendor/github.com/onsi/ginkgo/CONTRIBUTING.md generated vendored Normal file
@@ -0,0 +1,33 @@
# Contributing to Ginkgo

Your contributions to Ginkgo are essential for its long-term maintenance and improvement.

- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
- Ensure adequate test coverage:
    - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
    - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
- Update the documentation. Ginkgo uses `godoc` comments and documentation on the `gh-pages` branch.
  If relevant, please submit a docs PR to that branch alongside your code PR.

Thanks for supporting Ginkgo!

## Setup

Fork the repo, then:

```
go get github.com/onsi/ginkgo
go get github.com/onsi/gomega/...
cd $GOPATH/src/github.com/onsi/ginkgo
git remote add fork git@github.com:<NAME>/ginkgo.git

ginkgo -r -p # ensure tests are green
go vet ./... # ensure linter is happy
```

## Making the PR
- go to a new branch `git checkout -b my-feature`
- make your changes
- run tests and linter again (see above)
- `git push fork`
- open PR 🎉
36 vendor/github.com/onsi/ginkgo/README.md generated vendored
@@ -1,19 +1,19 @@



[![Build Status](https://travis-ci.org/onsi/ginkgo.svg)](https://travis-ci.org/onsi/ginkgo)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg)](https://travis-ci.org/onsi/ginkgo)

Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!

To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.

## Feature List

- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)

- Structure your BDD-style tests expressively:
    - Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
    - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
    - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
    - [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
    - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
    - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
    - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.

@@ -25,7 +25,7 @@ To discuss Ginkgo and get updates, join the [google group](https://groups.google

- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
    - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
    - `ginkgo -cover` runs your tests using Golang's code coverage tool
    - `ginkgo -cover` runs your tests using Go's code coverage tool
    - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
    - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
    - `ginkgo -r` runs all tests suites under the current directory

@@ -43,6 +43,8 @@ To discuss Ginkgo and get updates, join the [google group](https://groups.google

- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.

- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.

- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.

- A modular architecture that lets you easily:
@@ -53,18 +55,18 @@ To discuss Ginkgo and get updates, join the [google group](https://groups.google

Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)

## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework

Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)

## Set Me Up!

You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.

```bash

go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
go get github.com/onsi/gomega # fetches the matcher library
go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
go get -u github.com/onsi/gomega/... # fetches the matcher library

cd path/to/package/you/want/to/test

@@ -83,11 +85,11 @@ Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Go

With that said, it's great to know what your options are :)

### What Golang gives you out of the box
### What Go gives you out of the box

Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.

### Matcher libraries for Golang's XUnit style tests
### Matcher libraries for Go's XUnit style tests

A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:

@@ -98,7 +100,7 @@ You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomeg

### BDD style testing frameworks

There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
There are a handful of BDD-style testing frameworks written for Go. Here are a few:

- [Ginkgo](https://github.com/onsi/ginkgo) ;)
- [GoConvey](https://github.com/smartystreets/goconvey)
@@ -106,10 +108,14 @@ There are a handful of BDD-style testing frameworks written for Golang. Here ar
- [Mao](https://github.com/azer/mao)
- [Zen](https://github.com/pranavraja/zen)

Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.

Go explore!

## License

Ginkgo is MIT-Licensed

## Contributing

See [CONTRIBUTING.md](CONTRIBUTING.md)
14 vendor/github.com/onsi/ginkgo/RELEASING.md generated vendored Normal file
@@ -0,0 +1,14 @@
A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:

1. Ensure CHANGELOG.md is up to date.
    - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
    - Categorize the changes into
        - Breaking Changes (requires a major version)
        - New Features (minor version)
        - Fixes (fix version)
        - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
1. Update `VERSION` in `config/config.go`
1. Create a commit with the version number as the commit message (e.g. `v1.3.0`)
1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`)
1. Push the commit and tag to GitHub
1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
17 vendor/github.com/onsi/ginkgo/config/config.go generated vendored
@@ -20,7 +20,7 @@ import (
    "fmt"
)

const VERSION = "1.2.0"
const VERSION = "1.6.0"

type GinkgoConfigType struct {
    RandomSeed int64
@@ -34,6 +34,7 @@ type GinkgoConfigType struct {
    FlakeAttempts    int
    EmitSpecProgress bool
    DryRun           bool
    DebugParallel    bool

    ParallelNode  int
    ParallelTotal int
@@ -47,6 +48,7 @@ type DefaultReporterConfigType struct {
    NoColor           bool
    SlowSpecThreshold float64
    NoisyPendings     bool
    NoisySkippings    bool
    Succinct          bool
    Verbose           bool
    FullTrace         bool
@@ -64,7 +66,7 @@ func processPrefix(prefix string) string {
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
    prefix = processPrefix(prefix)
    flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
    flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.")
    flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
    flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
    flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
    flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
@@ -80,6 +82,8 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {

    flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")

    flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")

    if includeParallelFlags {
        flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
        flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
@@ -90,6 +94,7 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
    flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
    flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
    flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
    flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
    flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
    flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
    flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
@@ -139,6 +144,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
        result = append(result, fmt.Sprintf("--%sprogress", prefix))
    }

    if ginkgo.DebugParallel {
        result = append(result, fmt.Sprintf("--%sdebug", prefix))
    }

    if ginkgo.ParallelNode != 0 {
        result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
    }
@@ -171,6 +180,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
        result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
    }

    if !reporter.NoisySkippings {
        result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
    }

    if reporter.Verbose {
        result = append(result, fmt.Sprintf("--%sv", prefix))
    }
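For orientation, a small sketch (not from the commit) of how the flag registration above can be exercised. It assumes only what this hunk shows: the `Flags(flagSet, prefix, includeParallelFlags)` signature and the package-level `GinkgoConfig`/`DefaultReporterConfig` variables the flags are bound to.

```go
package main

import (
	"flag"
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	// Register Ginkgo's flags (including the new --debug and
	// --noisySkippings from the hunk above) on a standalone FlagSet.
	// An empty prefix keeps the flag names unprefixed.
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	config.Flags(fs, "", true)

	fs.Parse([]string{"--debug", "--noisySkippings=false"})

	fmt.Println(config.GinkgoConfig.DebugParallel)            // true
	fmt.Println(config.DefaultReporterConfig.NoisySkippings)  // false
}
```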
8 vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go generated vendored
@@ -43,10 +43,10 @@ func BuildBootstrapCommand() *Command {
var bootstrapText = `package {{.Package}}

import (
    "testing"

    {{.GinkgoImport}}
    {{.GomegaImport}}

    "testing"
)

func Test{{.FormattedName}}(t *testing.T) {
@@ -58,11 +58,11 @@ func Test{{.FormattedName}}(t *testing.T) {
var agoutiBootstrapText = `package {{.Package}}

import (
    "testing"

    {{.GinkgoImport}}
    {{.GomegaImport}}
    "github.com/sclevine/agouti"

    "testing"
)

func Test{{.FormattedName}}(t *testing.T) {
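For readers unfamiliar with the template above: a hypothetical example of the suite file `ginkgo bootstrap` generates, using the import grouping this hunk switches to (Ginkgo/Gomega first, then `testing`). The package and suite names are made up; the function body follows the standard bootstrap template.

```go
package books_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"
)

// TestBooks hands control to Ginkgo, which discovers and runs the specs
// registered in this package.
func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Books Suite")
}
```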
2 vendor/github.com/onsi/ginkgo/ginkgo/build_command.go generated vendored
@@ -46,7 +46,7 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {

    passed := true
    for _, suite := range suites {
        runner := testrunner.New(suite, 1, false, r.commandFlags.GoOpts, nil)
        runner := testrunner.New(suite, 1, false, 0, r.commandFlags.GoOpts, nil)
        fmt.Printf("Compiling %s...\n", suite.PackageName)

        path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
3 vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go generated vendored
@@ -3,8 +3,9 @@ package main
import (
    "flag"
    "fmt"
    "github.com/onsi/ginkgo/ginkgo/convert"
    "os"

    "github.com/onsi/ginkgo/ginkgo/convert"
)

func BuildConvertCommand() *Command {
12 vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go generated vendored
@@ -34,10 +34,10 @@ func BuildGenerateCommand() *Command {
var specText = `package {{.Package}}

import (
    {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}

    {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
    {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}

    {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
)

var _ = Describe("{{.Subject}}", func() {
@@ -45,15 +45,15 @@ var _ = Describe("{{.Subject}}", func() {
})
`

var agoutiSpecText = `package {{.Package}}_test
var agoutiSpecText = `package {{.Package}}

import (
    {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}

    {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
    {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
    . "github.com/sclevine/agouti/matchers"
    "github.com/sclevine/agouti"
    . "github.com/sclevine/agouti/matchers"

    {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}}
)

var _ = Describe("{{.Subject}}", func() {
4 vendor/github.com/onsi/ginkgo/ginkgo/main.go generated vendored
@@ -50,6 +50,10 @@ By default, when running multiple tests (with -r or a list of packages) Ginkgo w

    ginkgo -keepGoing

To fail if there are ginkgo tests in a directory but no test suite (missing `RunSpecs`)

    ginkgo -requireSuite

To monitor packages and rerun tests when changes occur:

    ginkgo watch <-r> </path/to/package>
3 vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go generated vendored
@@ -3,11 +3,12 @@ package main
import (
    "bufio"
    "flag"
    "github.com/onsi/ginkgo/ginkgo/nodot"
    "io/ioutil"
    "os"
    "path/filepath"
    "regexp"

    "github.com/onsi/ginkgo/ginkgo/nodot"
)

func BuildNodotCommand() *Command {
89 vendor/github.com/onsi/ginkgo/ginkgo/run_command.go generated vendored
@@ -5,8 +5,12 @@ import (
    "fmt"
    "math/rand"
    "os"
    "strings"
    "time"

    "io/ioutil"
    "path/filepath"

    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/ginkgo/interrupthandler"
    "github.com/onsi/ginkgo/ginkgo/testrunner"
@@ -71,7 +75,7 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {

    runners := []*testrunner.TestRunner{}
    for _, suite := range suites {
        runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.GoOpts, additionalArgs))
        runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Timeout, r.commandFlags.GoOpts, additionalArgs))
    }

    numSuites := 0
@@ -104,10 +108,25 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
        runner.CleanUp()
    }

    if r.isInCoverageMode() {
        if r.getOutputDir() != "" {
            // If coverprofile is set, combine coverages
            if r.getCoverprofile() != "" {
                if err := r.combineCoverprofiles(runners); err != nil {
                    fmt.Println(err.Error())
                    os.Exit(1)
                }
            } else {
                // Just move them
                r.moveCoverprofiles(runners)
            }
        }
    }

    fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))

    if runResult.Passed {
        if runResult.HasProgrammaticFocus {
        if runResult.HasProgrammaticFocus && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
            fmt.Printf("Test Suite Passed\n")
            fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
            os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
@@ -121,6 +140,70 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
    }
}

// Moves all generated profiles to specified directory
func (r *SpecRunner) moveCoverprofiles(runners []*testrunner.TestRunner) {
    for _, runner := range runners {
        _, filename := filepath.Split(runner.CoverageFile)
        err := os.Rename(runner.CoverageFile, filepath.Join(r.getOutputDir(), filename))

        if err != nil {
            fmt.Printf("Unable to move coverprofile %s, %v\n", runner.CoverageFile, err)
            return
        }
    }
}

// Combines all generated profiles in the specified directory
func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) error {

    path, _ := filepath.Abs(r.getOutputDir())
    if !fileExists(path) {
        return fmt.Errorf("Unable to create combined profile, outputdir does not exist: %s", r.getOutputDir())
    }

    fmt.Println("path is " + path)

    combined, err := os.OpenFile(filepath.Join(path, r.getCoverprofile()),
        os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)

    if err != nil {
        fmt.Printf("Unable to create combined profile, %v\n", err)
        return nil // non-fatal error
    }

    for _, runner := range runners {
        contents, err := ioutil.ReadFile(runner.CoverageFile)

        if err != nil {
            fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
            return nil // non-fatal error
        }

        _, err = combined.Write(contents)

        if err != nil {
            fmt.Printf("Unable to append to coverprofile, %v\n", err)
            return nil // non-fatal error
        }
    }

    fmt.Println("All profiles combined")
    return nil
}

func (r *SpecRunner) isInCoverageMode() bool {
    opts := r.commandFlags.GoOpts
    return *opts["cover"].(*bool) || *opts["coverpkg"].(*string) != "" || *opts["covermode"].(*string) != ""
}

func (r *SpecRunner) getCoverprofile() string {
    return *r.commandFlags.GoOpts["coverprofile"].(*string)
}

func (r *SpecRunner) getOutputDir() string {
    return *r.commandFlags.GoOpts["outputdir"].(*string)
}

func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
    if config.DefaultReporterConfig.Verbose {
        config.DefaultReporterConfig.Succinct = false
@@ -171,7 +254,7 @@ func orcMessage(iteration int) string {
        "Still good...",
        "I think your tests are fine....",
        "Yep, still passing",
        "Here we go again...",
        "Oh boy, here I go testin' again!",
        "Even the gophers are getting bored",
        "Did you try -race?",
        "Maybe you should stop now?",
9 vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go generated vendored
@@ -4,6 +4,8 @@ import (
    "flag"
    "runtime"

    "time"

    "github.com/onsi/ginkgo/config"
)

@@ -19,6 +21,7 @@ type RunWatchAndBuildCommandFlags struct {
    Notify bool
    AfterSuiteHook string
    AutoNodes bool
    Timeout time.Duration

    //only for run command
    KeepGoing bool
@@ -26,7 +29,8 @@ type RunWatchAndBuildCommandFlags struct {
    RandomizeSuites bool

    //only for watch command
    Depth int
    Depth       int
    WatchRegExp string

    FlagSet *flag.FlagSet
}
@@ -135,6 +139,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
    c.FlagSet.StringVar(c.stringSlot("memprofile"), "memprofile", "", "Write a memory profile to the specified file after all tests have passed.")
    c.FlagSet.IntVar(c.intSlot("memprofilerate"), "memprofilerate", 0, "Enable more precise (and expensive) memory profiles by setting runtime.MemProfileRate.")
    c.FlagSet.StringVar(c.stringSlot("outputdir"), "outputdir", "", "Place output files from profiling in the specified directory.")
    c.FlagSet.BoolVar(c.boolSlot("requireSuite"), "requireSuite", false, "Fail if there are ginkgo tests in a directory but no test suite (missing RunSpecs)")

    if mode == runMode || mode == watchMode {
        config.Flags(c.FlagSet, "", false)
@@ -146,6 +151,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
            c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
        }
        c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
        c.FlagSet.DurationVar(&(c.Timeout), "timeout", 24*time.Hour, "Suite fails if it does not complete within the specified timeout")
    }

    if mode == runMode {
@@ -156,5 +162,6 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {

    if mode == watchMode {
        c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
        c.FlagSet.StringVar(&(c.WatchRegExp), "watchRegExp", `\.go$`, "Files matching this regular expression will be watched for changes")
    }
}
1 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/BUILD generated vendored
@@ -15,6 +15,7 @@ go_library(
        "//vendor/github.com/onsi/ginkgo/ginkgo/testsuite:go_default_library",
        "//vendor/github.com/onsi/ginkgo/internal/remote:go_default_library",
        "//vendor/github.com/onsi/ginkgo/reporters/stenographer:go_default_library",
        "//vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable:go_default_library",
        "//vendor/github.com/onsi/ginkgo/types:go_default_library",
    ],
)
156 vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go generated vendored
@@ -8,7 +8,6 @@ import (
    "os"
    "os/exec"
    "path/filepath"
    "regexp"
    "strconv"
    "strings"
    "syscall"
@@ -18,6 +17,7 @@ import (
    "github.com/onsi/ginkgo/ginkgo/testsuite"
    "github.com/onsi/ginkgo/internal/remote"
    "github.com/onsi/ginkgo/reporters/stenographer"
    colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
    "github.com/onsi/ginkgo/types"
)

@@ -29,17 +29,23 @@ type TestRunner struct {

    numCPU         int
    parallelStream bool
    timeout        time.Duration
    goOpts         map[string]interface{}
    additionalArgs []string
    stderr         *bytes.Buffer

    CoverageFile string
}

func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, goOpts map[string]interface{}, additionalArgs []string) *TestRunner {
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner {
    runner := &TestRunner{
        Suite:          suite,
        numCPU:         numCPU,
        parallelStream: parallelStream,
        goOpts:         goOpts,
        additionalArgs: additionalArgs,
        timeout:        timeout,
        stderr:         new(bytes.Buffer),
    }

    if !suite.Precompiled {
@@ -60,10 +66,10 @@ func (t *TestRunner) Compile() error {
func (t *TestRunner) BuildArgs(path string) []string {
    args := []string{"test", "-c", "-i", "-o", path, t.Suite.Path}

    if *t.goOpts["covermode"].(*string) != "" {
        args = append(args, "-cover", fmt.Sprintf("-covermode=%s", *t.goOpts["covermode"].(*string)))
    if t.getCoverMode() != "" {
        args = append(args, "-cover", fmt.Sprintf("-covermode=%s", t.getCoverMode()))
    } else {
        if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" {
        if t.shouldCover() || t.getCoverPackage() != "" {
            args = append(args, "-cover", "-covermode=atomic")
        }
    }
@@ -136,13 +142,16 @@ func (t *TestRunner) CompileTo(path string) error {
    output, err := cmd.CombinedOutput()

    if err != nil {
        fixedOutput := fixCompilationOutput(string(output), t.Suite.Path)
        if len(output) > 0 {
            return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
            return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, output)
        }
        return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
    }

    if len(output) > 0 {
        fmt.Println(string(output))
    }

    if fileExists(path) == false {
        compiledFile := t.Suite.PackageName + ".test"
        if fileExists(compiledFile) {
@@ -215,38 +224,6 @@ func copyFile(src, dst string) error {
    return out.Chmod(mode)
}

/*
go test -c -i spits package.test out into the cwd. there's no way to change this.

to make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package.

unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd.

this makes it hard to reason about what failed, and also prevents iterm's Cmd+click from working.

fixCompilationOutput..... rewrites the output to fix the paths.

yeah......
*/
func fixCompilationOutput(output string, relToPath string) string {
    relToPath = filepath.Join(relToPath)
    re := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`)
    lines := strings.Split(output, "\n")
    for i, line := range lines {
        indices := re.FindStringSubmatchIndex(line)
        if len(indices) == 0 {
            continue
        }

        path := line[indices[2]:indices[3]]
        if filepath.Dir(path) != relToPath {
            path = filepath.Join(relToPath, path)
            lines[i] = path + line[indices[3]:]
        }
    }
    return strings.Join(lines, "\n")
}

func (t *TestRunner) Run() RunResult {
    if t.Suite.IsGinkgo {
        if t.numCPU > 1 {
@@ -324,7 +301,7 @@ func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {

    os.Stdout.Sync()

    if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" {
    if t.shouldCombineCoverprofiles() {
        t.combineCoverprofiles()
    }

@@ -337,7 +314,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
    writers := make([]*logWriter, t.numCPU)
    reports := make([]*bytes.Buffer, t.numCPU)

    stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1)
    stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
    aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)

    server, err := remote.NewServer(t.numCPU)
@@ -390,9 +367,8 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
    |                                                                 |
    |  Ginkgo timed out waiting for all parallel nodes to report back!  |
    |                                                                 |
    -------------------------------------------------------------------
    `)

    -------------------------------------------------------------------`)
    fmt.Println("\n", t.Suite.PackageName, "timed out. path:", t.Suite.Path)
    os.Stdout.Sync()

    for _, writer := range writers {
@@ -406,21 +382,40 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
        os.Stdout.Sync()
    }

    if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" {
    if t.shouldCombineCoverprofiles() {
        t.combineCoverprofiles()
    }

    return res
}

const CoverProfileSuffix = ".coverprofile"

func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
    args := []string{"--test.timeout=24h"}
    if *t.goOpts["cover"].(*bool) || *t.goOpts["coverpkg"].(*string) != "" || *t.goOpts["covermode"].(*string) != "" {
        coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
        if t.numCPU > 1 {
            coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
    args := []string{"--test.timeout=" + t.timeout.String()}

    coverProfile := t.getCoverProfile()

    if t.shouldCombineCoverprofiles() {

        testCoverProfile := "--test.coverprofile="

        coverageFile := ""
        // Set default name for coverage results
        if coverProfile == "" {
            coverageFile = t.Suite.PackageName + CoverProfileSuffix
        } else {
            coverageFile = coverProfile
        }
        args = append(args, coverprofile)

        testCoverProfile += coverageFile

        t.CoverageFile = filepath.Join(t.Suite.Path, coverageFile)

        if t.numCPU > 1 {
            testCoverProfile = fmt.Sprintf("%s.%d", testCoverProfile, node)
        }
        args = append(args, testCoverProfile)
    }

    args = append(args, ginkgoArgs...)
@@ -434,12 +429,36 @@ func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.
    cmd := exec.Command(path, args...)

    cmd.Dir = t.Suite.Path
    cmd.Stderr = stream
    cmd.Stderr = io.MultiWriter(stream, t.stderr)
    cmd.Stdout = stream

    return cmd
}

func (t *TestRunner) shouldCover() bool {
    return *t.goOpts["cover"].(*bool)
}

func (t *TestRunner) shouldRequireSuite() bool {
    return *t.goOpts["requireSuite"].(*bool)
}

func (t *TestRunner) getCoverProfile() string {
    return *t.goOpts["coverprofile"].(*string)
}

func (t *TestRunner) getCoverPackage() string {
    return *t.goOpts["coverpkg"].(*string)
}

func (t *TestRunner) getCoverMode() string {
    return *t.goOpts["covermode"].(*string)
}

func (t *TestRunner) shouldCombineCoverprofiles() bool {
    return t.shouldCover() || t.getCoverPackage() != "" || t.getCoverMode() != ""
}

func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
    var res RunResult

@@ -456,17 +475,34 @@ func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
    }

    cmd.Wait()

    exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
    res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
    res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)

    if strings.Contains(t.stderr.String(), "warning: no tests to run") {
        if t.shouldRequireSuite() {
            res.Passed = false
        }
        fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
    }

    return res
}

func (t *TestRunner) combineCoverprofiles() {
    profiles := []string{}

    coverProfile := t.getCoverProfile()

    for cpu := 1; cpu <= t.numCPU; cpu++ {
        coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu)
        var coverFile string
        if coverProfile == "" {
            coverFile = fmt.Sprintf("%s%s.%d", t.Suite.PackageName, CoverProfileSuffix, cpu)
        } else {
            coverFile = fmt.Sprintf("%s.%d", coverProfile, cpu)
        }

        coverFile = filepath.Join(t.Suite.Path, coverFile)
        coverProfile, err := ioutil.ReadFile(coverFile)
        os.Remove(coverFile)
@@ -502,5 +538,17 @@ func (t *TestRunner) combineCoverprofiles() {
        output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
    }
    finalOutput := strings.Join(output, "\n")
    ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666)

    finalFilename := ""

    if coverProfile != "" {
        finalFilename = coverProfile
    } else {
        finalFilename = fmt.Sprintf("%s%s", t.Suite.PackageName, CoverProfileSuffix)
    }

    coverageFilepath := filepath.Join(t.Suite.Path, finalFilename)
    ioutil.WriteFile(coverageFilepath, []byte(finalOutput), 0666)

    t.CoverageFile = coverageFilepath
}
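A simplified, standalone sketch (not the vendored code) of the coverprofile-merging idea used by `combineCoverprofiles` above: each parallel node writes the same coverage block descriptors, so the merged count for a block is the sum of each node's count, with the `mode:` header kept once. File names here are hypothetical.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// mergeCoverprofiles sums per-block hit counts across several profiles.
func mergeCoverprofiles(paths []string) (string, error) {
	mode := ""
	counts := map[string]int{}
	order := []string{}
	for _, path := range paths {
		data, err := ioutil.ReadFile(path)
		if err != nil {
			return "", err
		}
		for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
			if strings.HasPrefix(line, "mode:") {
				mode = line // keep the mode header only once
				continue
			}
			cut := strings.LastIndex(line, " ")
			if cut < 0 {
				continue
			}
			block := line[:cut]            // "file.go:start,end numStmts"
			n, _ := strconv.Atoi(line[cut+1:]) // trailing hit count
			if _, seen := counts[block]; !seen {
				order = append(order, block)
			}
			counts[block] += n
		}
	}
	out := []string{mode}
	for _, block := range order {
		out = append(out, fmt.Sprintf("%s %d", block, counts[block]))
	}
	return strings.Join(out, "\n") + "\n", nil
}

func main() {
	merged, err := mergeCoverprofiles([]string{"pkg.coverprofile.1", "pkg.coverprofile.2"})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(merged)
}
```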
8 vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go generated vendored
@@ -53,7 +53,7 @@ func SuitesInDir(dir string, recurse bool) []TestSuite {
    }

    files, _ := ioutil.ReadDir(dir)
    re := regexp.MustCompile(`_test\.go$`)
    re := regexp.MustCompile(`^[^._].*_test\.go$`)
    for _, file := range files {
        if !file.IsDir() && re.Match([]byte(file.Name())) {
            suites = append(suites, New(dir, files))
@@ -77,7 +77,11 @@ func relPath(dir string) string {
    dir, _ = filepath.Abs(dir)
    cwd, _ := os.Getwd()
    dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
    dir = "." + string(filepath.Separator) + dir

    if string(dir[0]) != "." {
        dir = "." + string(filepath.Separator) + dir
    }

    return dir
}
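A quick illustration of the tightened suite-detection pattern in the hunk above: only plain `_test.go` files match, while dot- and underscore-prefixed files (editor backups, generated scratch files) are skipped. The file names below are made up.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^[^._].*_test\.go$`)
	for _, name := range []string{"suite_test.go", ".hidden_test.go", "_scratch_test.go", "helper.go"} {
		// Only "suite_test.go" prints true.
		fmt.Println(name, re.MatchString(name))
	}
}
```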
31 vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go generated vendored
@@ -3,7 +3,9 @@ package main
import (
    "flag"
    "fmt"
    "io/ioutil"
    "os/exec"
    "strings"
)

func BuildUnfocusCommand() *Command {
@@ -26,13 +28,34 @@ func unfocusSpecs([]string, []string) {
    unfocus("Measure")
    unfocus("DescribeTable")
    unfocus("Entry")
    unfocus("Specify")
    unfocus("When")
}

func unfocus(component string) {
    fmt.Printf("Removing F%s...\n", component)
    cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".")
    out, _ := cmd.CombinedOutput()
    if string(out) != "" {
        println(string(out))
    files, err := ioutil.ReadDir(".")
    if err != nil {
        fmt.Println(err.Error())
        return
    }
    for _, f := range files {
        // Exclude "vendor" directory
        if f.IsDir() && f.Name() == "vendor" {
            continue
        }
        // Exclude non-go files in the current directory
        if !f.IsDir() && !strings.HasSuffix(f.Name(), ".go") {
            continue
        }
        // Recursively run `gofmt` otherwise
        cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", f.Name())
        out, err := cmd.CombinedOutput()
        if err != nil {
            fmt.Println(err.Error())
        }
        if string(out) != "" {
            fmt.Println(string(out))
        }
    }
}
1 vendor/github.com/onsi/ginkgo/ginkgo/version_command.go generated vendored
@@ -3,6 +3,7 @@ package main
import (
    "flag"
    "fmt"

    "github.com/onsi/ginkgo/config"
)
8 vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go generated vendored
@@ -3,6 +3,8 @@ package watch
import (
    "fmt"

    "regexp"

    "github.com/onsi/ginkgo/ginkgo/testsuite"
)

@@ -10,14 +12,16 @@ type SuiteErrors map[testsuite.TestSuite]error

type DeltaTracker struct {
    maxDepth      int
    watchRegExp   *regexp.Regexp
    suites        map[string]*Suite
    packageHashes *PackageHashes
}

func NewDeltaTracker(maxDepth int) *DeltaTracker {
func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
    return &DeltaTracker{
        maxDepth:      maxDepth,
        packageHashes: NewPackageHashes(),
        watchRegExp:   watchRegExp,
        packageHashes: NewPackageHashes(watchRegExp),
        suites:        map[string]*Suite{},
    }
}
15 vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go generated vendored
@@ -8,7 +8,6 @@ import (
    "time"
)

var goRegExp = regexp.MustCompile(`\.go$`)
var goTestRegExp = regexp.MustCompile(`_test\.go$`)

type PackageHash struct {
@@ -16,14 +15,16 @@ type PackageHash struct {
    TestModifiedTime time.Time
    Deleted bool

    path     string
    codeHash string
    testHash string
    path        string
    codeHash    string
    testHash    string
    watchRegExp *regexp.Regexp
}

func NewPackageHash(path string) *PackageHash {
func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash {
    p := &PackageHash{
        path: path,
        path:        path,
        watchRegExp: watchRegExp,
    }

    p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
@@ -82,7 +83,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
            continue
        }

        if goRegExp.Match([]byte(info.Name())) {
        if p.watchRegExp.Match([]byte(info.Name())) {
            codeHash += p.hashForFileInfo(info)
            if info.ModTime().After(codeModifiedTime) {
                codeModifiedTime = info.ModTime()
7 vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go generated vendored
@@ -2,19 +2,22 @@ package watch

import (
    "path/filepath"
    "regexp"
    "sync"
)

type PackageHashes struct {
    PackageHashes map[string]*PackageHash
    usedPaths     map[string]bool
    watchRegExp   *regexp.Regexp
    lock          *sync.Mutex
}

func NewPackageHashes() *PackageHashes {
func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes {
    return &PackageHashes{
        PackageHashes: map[string]*PackageHash{},
        usedPaths:     nil,
        watchRegExp:   watchRegExp,
        lock:          &sync.Mutex{},
    }
}
@@ -41,7 +44,7 @@ func (p *PackageHashes) Add(path string) *PackageHash {
    path, _ = filepath.Abs(path)
    _, ok := p.PackageHashes[path]
    if !ok {
        p.PackageHashes[path] = NewPackageHash(path)
        p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp)
    }

    if p.usedPaths != nil {
5 vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go generated vendored
@@ -3,6 +3,7 @@ package main
import (
    "flag"
    "fmt"
    "regexp"
    "time"

    "github.com/onsi/ginkgo/config"
@@ -58,7 +59,7 @@ func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalA
    runners := []*testrunner.TestRunner{}

    for _, suite := range suites {
        runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.GoOpts, additionalArgs))
        runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Timeout, w.commandFlags.GoOpts, additionalArgs))
    }

    return runners
@@ -72,7 +73,7 @@ func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
    }

    fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
    deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth)
    deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth, regexp.MustCompile(w.commandFlags.WatchRegExp))
    delta, errors := deltaTracker.Delta(suites)

    fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
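A hedged sketch of wiring the new `watchRegExp` parameter through `NewDeltaTracker`, using only the signatures visible in these hunks (`NewDeltaTracker(maxDepth, watchRegExp)`, `SuitesInDir(dir, recurse)`, `Delta(suites)`). Whether these vendored `ginkgo/ginkgo/...` packages are intended to be imported directly is an assumption; normally the regexp arrives via `ginkgo watch -watchRegExp=...`.

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/onsi/ginkgo/ginkgo/testsuite"
	"github.com/onsi/ginkgo/ginkgo/watch"
)

func main() {
	// Watch both .go and .json files, mirroring what
	// `ginkgo watch -watchRegExp='\.go$|\.json$'` would configure.
	tracker := watch.NewDeltaTracker(1, regexp.MustCompile(`\.go$|\.json$`))

	suites := testsuite.SuitesInDir(".", false)
	delta, errs := tracker.Delta(suites)
	fmt.Println(len(delta.NewSuites), "new suites;", len(errs), "suite errors")
}
```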
68 vendor/github.com/onsi/ginkgo/ginkgo_dsl.go generated vendored
@@ -29,6 +29,7 @@ import (
    "github.com/onsi/ginkgo/internal/writer"
    "github.com/onsi/ginkgo/reporters"
    "github.com/onsi/ginkgo/reporters/stenographer"
    colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
    "github.com/onsi/ginkgo/types"
)

@@ -149,7 +150,8 @@ type GinkgoTestDescription struct {
    FileName string
    LineNumber int

    Failed bool
    Failed   bool
    Duration time.Duration
}

//CurrentGinkgoTestDescripton returns information about the current running test.
@@ -169,6 +171,7 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription {
        FileName:   subjectCodeLocation.FileName,
        LineNumber: subjectCodeLocation.LineNumber,
        Failed:     summary.HasFailureState(),
        Duration:   summary.RunTime,
    }
}

@@ -202,7 +205,7 @@ func RunSpecs(t GinkgoTestingT, description string) bool {
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
//RunSpecs() with this method.
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
    specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
    specReporters = append(specReporters, buildDefaultReporter())
    return RunSpecsWithCustomReporters(t, description, specReporters)
}

@@ -216,7 +219,7 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor
        reporters[i] = reporter
    }
    passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
    if passed && hasFocusedTests {
    if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
        fmt.Println("PASS | FOCUSED")
        os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
    }
@@ -226,14 +229,18 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor
func buildDefaultReporter() Reporter {
    remoteReportingServer := config.GinkgoConfig.StreamHost
    if remoteReportingServer == "" {
        stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1)
        stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
        return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
    } else {
        return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
        debugFile := ""
        if config.GinkgoConfig.DebugParallel {
            debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
        }
        return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
    }
}

//Skip notifies Ginkgo that the current spec should be skipped.
//Skip notifies Ginkgo that the current spec was skipped.
func Skip(message string, callerSkip ...int) {
    skip := 0
    if len(callerSkip) > 0 {
@@ -275,9 +282,9 @@ func GinkgoRecover() {
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
//or method and, within that Describe, outline a number of Contexts and Whens.
func Describe(text string, body func()) bool {
    globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
    return true
@@ -304,9 +311,9 @@ func XDescribe(text string, body func()) bool {
//Context blocks allow you to organize your specs. A Context block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
//or method and, within that Describe, outline a number of Contexts and Whens.
func Context(text string, body func()) bool {
    globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
    return true
@@ -330,6 +337,35 @@ func XContext(text string, body func()) bool {
    return true
}

//When blocks allow you to organize your specs. A When block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts and Whens.
func When(text string, body func()) bool {
    globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
    return true
}

//You can focus the tests within a describe block using FWhen
func FWhen(text string, body func()) bool {
    globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
    return true
}

//You can mark the tests within a describe block as pending using PWhen
func PWhen(text string, body func()) bool {
    globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
    return true
}

//You can mark the tests within a describe block as pending using XWhen
func XWhen(text string, body func()) bool {
    globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
    return true
}

//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
//within an It block.
//
@@ -362,22 +398,26 @@ func XIt(text string, _ ...interface{}) bool {
//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
//which apply to It blocks.
func Specify(text string, body interface{}, timeout ...float64) bool {
    return It(text, body, timeout...)
    globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
    return true
}

//You can focus individual Specifys using FSpecify
func FSpecify(text string, body interface{}, timeout ...float64) bool {
    return FIt(text, body, timeout...)
    globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
    return true
}

//You can mark Specifys as pending using PSpecify
func PSpecify(text string, is ...interface{}) bool {
    return PIt(text, is...)
    globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
    return true
}

//You can mark Specifys as pending using XSpecify
func XSpecify(text string, is ...interface{}) bool {
    return XIt(text, is...)
    globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
    return true
}

//By allows you to better document large Its.
4
vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
generated
vendored
@@ -35,15 +35,15 @@ func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elaps
}

func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
    measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
    b.mu.Lock()
    measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
    defer b.mu.Unlock()
    measurement.Results = append(measurement.Results, value)
}

func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
    measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
    b.mu.Lock()
    measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
    defer b.mu.Unlock()
    measurement.Results = append(measurement.Results, value)
}

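The change above takes b.mu before the getMeasurement lookup instead of after it, so concurrent RecordValue calls can no longer race on the shared measurements map. A stripped-down sketch of that locking order, with invented names:

package sketch

import "sync"

type recorder struct {
    mu           sync.Mutex
    measurements map[string][]float64
}

func newRecorder() *recorder {
    return &recorder{measurements: map[string][]float64{}}
}

func (r *recorder) Record(name string, value float64) {
    r.mu.Lock()         // take the lock before the lookup-or-create...
    defer r.mu.Unlock() // ...so concurrent Record calls cannot race on the map
    r.measurements[name] = append(r.measurements[name], value)
}
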
3
vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
generated
vendored
3
vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
generated
vendored
@@ -1,9 +1,10 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ItNode struct {
|
||||
|
||||
3
vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
generated
vendored
3
vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
generated
vendored
@@ -1,9 +1,10 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type MeasureNode struct {
|
||||
|
||||
8
vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
generated
vendored
@@ -2,11 +2,12 @@ package leafnodes

import (
    "fmt"
    "reflect"
    "time"

    "github.com/onsi/ginkgo/internal/codelocation"
    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/types"
    "reflect"
    "time"
)

type runner struct {
@@ -86,6 +87,9 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure)
        finished = true
    }()

    // If this goroutine gets no CPU time before the select block,
    // the <-done case may complete even if the test took longer than the timeoutThreshold.
    // This can cause flaky behaviour, but we haven't seen it in the wild.
    select {
    case <-done:
    case <-time.After(r.timeoutThreshold):

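The new comment documents a benign race in the pattern below it: the spec body runs in its own goroutine and its completion is raced against a timeout. A self-contained sketch of the same pattern, with invented names:

package sketch

import "time"

// runWithTimeout runs body in a goroutine and reports whether the timeout
// fired first. If the goroutine executing the select is starved before it
// reaches the select, <-done may still win even though body exceeded the
// threshold -- the flakiness the comment above describes.
func runWithTimeout(body func(), threshold time.Duration) (timedOut bool) {
    done := make(chan struct{})
    go func() {
        body()
        close(done)
    }()
    select {
    case <-done:
        return false
    case <-time.After(threshold):
        return true
    }
}
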
3
vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
generated
vendored
3
vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
generated
vendored
@@ -1,9 +1,10 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"time"
|
||||
)
|
||||
|
||||
type SetupNode struct {
|
||||
|
||||
3
vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
generated
vendored
3
vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
generated
vendored
@@ -1,9 +1,10 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"time"
|
||||
)
|
||||
|
||||
type SuiteNode interface {
|
||||
|
||||
5
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
5
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
@@ -2,11 +2,12 @@ package leafnodes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type synchronizedAfterSuiteNode struct {
|
||||
|
||||
7
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
7
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
@@ -3,12 +3,13 @@ package leafnodes
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type synchronizedBeforeSuiteNode struct {
|
||||
@@ -109,8 +110,6 @@ func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecSt
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
return types.SpecStateFailed, failure("Shouldn't get here!")
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Passed() bool {
|
||||
|
||||
20
vendor/github.com/onsi/ginkgo/internal/remote/BUILD
generated
vendored
20
vendor/github.com/onsi/ginkgo/internal/remote/BUILD
generated
vendored
@@ -19,11 +19,31 @@ go_library(
|
||||
deps = [
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/internal/spec_iterator:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/internal/writer:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters/stenographer:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/types:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"//vendor/github.com/hpcloud/tail:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"//vendor/github.com/hpcloud/tail:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"//vendor/github.com/hpcloud/tail:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//vendor/github.com/hpcloud/tail:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"//vendor/github.com/hpcloud/tail:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"//vendor/github.com/hpcloud/tail:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"//vendor/github.com/hpcloud/tail:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
|
||||
2
vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
generated
vendored
@@ -207,7 +207,7 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
    case types.SpecStatePending:
        aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
    case types.SpecStateSkipped:
        aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
        aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
    case types.SpecStateTimedOut:
        aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
    case types.SpecStatePanicked:

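Together with the matching change in the default reporter further down, the effect is that a skipped spec is only announced verbosely when noisy skippings are enabled and succinct mode is off. A one-line helper expressing that decision (illustrative only, not part of the diff):

package sketch

// skippedIsSuccinct mirrors the new argument passed to AnnounceSkippedSpec:
// use the short form unless noisy skippings are on and succinct mode is off.
func skippedIsSuccinct(succinct, noisySkippings bool) bool {
    return succinct || !noisySkippings
}
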
61
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
61
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
@@ -3,8 +3,14 @@ package remote
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
@@ -30,14 +36,41 @@ type ForwardingReporter struct {
    serverHost string
    poster Poster
    outputInterceptor OutputInterceptor
    debugMode bool
    debugFile *os.File
    nestedReporter *reporters.DefaultReporter
}

func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
    return &ForwardingReporter{
func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
    reporter := &ForwardingReporter{
        serverHost: serverHost,
        poster: poster,
        outputInterceptor: outputInterceptor,
    }

    if debugFile != "" {
        var err error
        reporter.debugMode = true
        reporter.debugFile, err = os.Create(debugFile)
        if err != nil {
            fmt.Println(err.Error())
            os.Exit(1)
        }

        if !config.Verbose {
            //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
            ginkgoWriter.AndRedirectTo(reporter.debugFile)
        }
        outputInterceptor.StreamTo(reporter.debugFile) //This is not working

        stenographer := stenographer.New(false, true, reporter.debugFile)
        config.Succinct = false
        config.Verbose = true
        config.FullTrace = true
        reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
    }

    return reporter
}

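With a debug file configured, the forwarding reporter now also writes a verbose, full-trace report to that file through a nested DefaultReporter and, unless verbose mode is already on (in which case the GinkgoWriter already emits to stdout), asks the GinkgoWriter to copy its output there via the AndRedirectTo method added to internal/writer later in this diff. The tee itself is ordinary; a standard-library sketch of the same effect (file name invented):

package main

import (
    "bytes"
    "io"
    "os"
)

func main() {
    debugFile, err := os.Create("ginkgo-debug.log") // hypothetical file name
    if err != nil {
        panic(err)
    }
    defer debugFile.Close()

    var buffer bytes.Buffer
    // Buffer output (as the GinkgoWriter does) while also streaming a copy
    // to the debug file -- the behaviour AndRedirectTo bolts on.
    w := io.MultiWriter(&buffer, debugFile)
    io.WriteString(w, "spec output goes here\n")
}
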
|
||||
func (reporter *ForwardingReporter) post(path string, data interface{}) {
|
||||
@@ -56,6 +89,10 @@ func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigT
|
||||
}
|
||||
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecSuiteWillBegin", data)
|
||||
}
|
||||
|
||||
@@ -63,10 +100,18 @@ func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupS
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/BeforeSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecWillRun(specSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecWillRun", specSummary)
|
||||
}
|
||||
|
||||
@@ -74,6 +119,10 @@ func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSumma
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
specSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecDidComplete(specSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecDidComplete", specSummary)
|
||||
}
|
||||
|
||||
@@ -81,10 +130,18 @@ func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSu
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/AfterSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecSuiteDidEnd(summary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecSuiteDidEnd", summary)
|
||||
}
|
||||
|
||||
3
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
3
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
@@ -1,5 +1,7 @@
|
||||
package remote
|
||||
|
||||
import "os"
|
||||
|
||||
/*
|
||||
The OutputInterceptor is used by the ForwardingReporter to
|
||||
intercept and capture all stdin and stderr output during a test run.
|
||||
@@ -7,4 +9,5 @@ intercept and capture all stdin and stderr output during a test run.
|
||||
type OutputInterceptor interface {
|
||||
StartInterceptingOutput() error
|
||||
StopInterceptingAndReturnOutput() (string, error)
|
||||
StreamTo(*os.File)
|
||||
}
|
||||
|
||||
28
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
@@ -6,6 +6,8 @@ import (
    "errors"
    "io/ioutil"
    "os"

    "github.com/hpcloud/tail"
)

func NewOutputInterceptor() OutputInterceptor {
@@ -14,7 +16,10 @@ func NewOutputInterceptor() OutputInterceptor {

type outputInterceptor struct {
    redirectFile *os.File
    streamTarget *os.File
    intercepting bool
    tailer *tail.Tail
    doneTailing chan bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
@@ -37,6 +42,18 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error {
    syscallDup(int(interceptor.redirectFile.Fd()), 1)
    syscallDup(int(interceptor.redirectFile.Fd()), 2)

    if interceptor.streamTarget != nil {
        interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
        interceptor.doneTailing = make(chan bool)

        go func() {
            for line := range interceptor.tailer.Lines {
                interceptor.streamTarget.Write([]byte(line.Text + "\n"))
            }
            close(interceptor.doneTailing)
        }()
    }

    return nil
}

@@ -51,5 +68,16 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string,

    interceptor.intercepting = false

    if interceptor.streamTarget != nil {
        interceptor.tailer.Stop()
        interceptor.tailer.Cleanup()
        <-interceptor.doneTailing
        interceptor.streamTarget.Sync()
    }

    return string(output), err
}

func (interceptor *outputInterceptor) StreamTo(out *os.File) {
    interceptor.streamTarget = out
}

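This is where the newly vendored github.com/hpcloud/tail package is used: stdout and stderr are redirected onto the intercept file, and when a stream target is set the interceptor follows that file and copies each new line across. A condensed, self-contained sketch of the same follow-and-copy loop (function and variable names invented):

package sketch

import (
    "os"

    "github.com/hpcloud/tail"
)

// mirror follows src and appends every new line to dst, closing done once the
// tailer's Lines channel drains (i.e. after stop has been called).
func mirror(src string, dst *os.File) (stop func(), done chan bool, err error) {
    t, err := tail.TailFile(src, tail.Config{Follow: true})
    if err != nil {
        return nil, nil, err
    }
    done = make(chan bool)
    go func() {
        for line := range t.Lines {
            dst.Write([]byte(line.Text + "\n"))
        }
        close(done)
    }()
    stop = func() { t.Stop(); t.Cleanup() }
    return stop, done, nil
}
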
3
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
3
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
@@ -4,6 +4,7 @@ package remote
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
)
|
||||
|
||||
func NewOutputInterceptor() OutputInterceptor {
|
||||
@@ -31,3 +32,5 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string,
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StreamTo(*os.File) {}
|
||||
|
||||
2
vendor/github.com/onsi/ginkgo/internal/remote/server.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/internal/remote/server.go
generated
vendored
@@ -45,7 +45,7 @@ func NewServer(parallelTotal int) (*Server, error) {
|
||||
listener: listener,
|
||||
lock: &sync.Mutex{},
|
||||
alives: make([]func() bool, parallelTotal),
|
||||
beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
|
||||
beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
|
||||
parallelTotal: parallelTotal,
|
||||
}, nil
|
||||
}
|
||||
|
||||
2
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
generated
vendored
@@ -8,4 +8,4 @@ import "syscall"
|
||||
// use the nearly identical syscall.Dup3 instead
|
||||
func syscallDup(oldfd int, newfd int) (err error) {
|
||||
return syscall.Dup3(oldfd, newfd, 0)
|
||||
}
|
||||
}
|
||||
|
||||
2
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
generated
vendored
@@ -6,4 +6,4 @@ import "golang.org/x/sys/unix"
|
||||
|
||||
func syscallDup(oldfd int, newfd int) (err error) {
|
||||
return unix.Dup2(oldfd, newfd)
|
||||
}
|
||||
}
|
||||
|
||||
2
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
generated
vendored
@@ -8,4 +8,4 @@ import "syscall"
|
||||
|
||||
func syscallDup(oldfd int, newfd int) (err error) {
|
||||
return syscall.Dup2(oldfd, newfd)
|
||||
}
|
||||
}
|
||||
|
||||
71
vendor/github.com/onsi/ginkgo/internal/spec/spec.go
generated
vendored
71
vendor/github.com/onsi/ginkgo/internal/spec/spec.go
generated
vendored
@@ -5,6 +5,8 @@ import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
@@ -19,8 +21,11 @@ type Spec struct {
|
||||
|
||||
state types.SpecState
|
||||
runTime time.Duration
|
||||
startTime time.Time
|
||||
failure types.SpecFailure
|
||||
previousFailures bool
|
||||
|
||||
stateMutex *sync.Mutex
|
||||
}
|
||||
|
||||
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
|
||||
@@ -29,6 +34,7 @@ func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNod
|
||||
containers: containers,
|
||||
focused: subject.Flag() == types.FlagTypeFocused,
|
||||
announceProgress: announceProgress,
|
||||
stateMutex: &sync.Mutex{},
|
||||
}
|
||||
|
||||
spec.processFlag(subject.Flag())
|
||||
@@ -43,32 +49,32 @@ func (spec *Spec) processFlag(flag types.FlagType) {
|
||||
if flag == types.FlagTypeFocused {
|
||||
spec.focused = true
|
||||
} else if flag == types.FlagTypePending {
|
||||
spec.state = types.SpecStatePending
|
||||
spec.setState(types.SpecStatePending)
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) Skip() {
|
||||
spec.state = types.SpecStateSkipped
|
||||
spec.setState(types.SpecStateSkipped)
|
||||
}
|
||||
|
||||
func (spec *Spec) Failed() bool {
|
||||
return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
|
||||
return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
|
||||
}
|
||||
|
||||
func (spec *Spec) Passed() bool {
|
||||
return spec.state == types.SpecStatePassed
|
||||
return spec.getState() == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (spec *Spec) Flaked() bool {
|
||||
return spec.state == types.SpecStatePassed && spec.previousFailures
|
||||
return spec.getState() == types.SpecStatePassed && spec.previousFailures
|
||||
}
|
||||
|
||||
func (spec *Spec) Pending() bool {
|
||||
return spec.state == types.SpecStatePending
|
||||
return spec.getState() == types.SpecStatePending
|
||||
}
|
||||
|
||||
func (spec *Spec) Skipped() bool {
|
||||
return spec.state == types.SpecStateSkipped
|
||||
return spec.getState() == types.SpecStateSkipped
|
||||
}
|
||||
|
||||
func (spec *Spec) Focused() bool {
|
||||
@@ -91,13 +97,18 @@ func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
|
||||
componentTexts[len(spec.containers)] = spec.subject.Text()
|
||||
componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
|
||||
|
||||
runTime := spec.runTime
|
||||
if runTime == 0 && !spec.startTime.IsZero() {
|
||||
runTime = time.Since(spec.startTime)
|
||||
}
|
||||
|
||||
return &types.SpecSummary{
|
||||
IsMeasurement: spec.IsMeasurement(),
|
||||
NumberOfSamples: spec.subject.Samples(),
|
||||
ComponentTexts: componentTexts,
|
||||
ComponentCodeLocations: componentCodeLocations,
|
||||
State: spec.state,
|
||||
RunTime: spec.runTime,
|
||||
State: spec.getState(),
|
||||
RunTime: runTime,
|
||||
Failure: spec.failure,
|
||||
Measurements: spec.measurementsReport(),
|
||||
SuiteID: suiteID,
|
||||
@@ -114,26 +125,38 @@ func (spec *Spec) ConcatenatedString() string {
|
||||
}
|
||||
|
||||
func (spec *Spec) Run(writer io.Writer) {
|
||||
if spec.state == types.SpecStateFailed {
|
||||
if spec.getState() == types.SpecStateFailed {
|
||||
spec.previousFailures = true
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
spec.startTime = time.Now()
|
||||
defer func() {
|
||||
spec.runTime = time.Since(startTime)
|
||||
spec.runTime = time.Since(spec.startTime)
|
||||
}()
|
||||
|
||||
for sample := 0; sample < spec.subject.Samples(); sample++ {
|
||||
spec.runSample(sample, writer)
|
||||
|
||||
if spec.state != types.SpecStatePassed {
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) getState() types.SpecState {
    spec.stateMutex.Lock()
    defer spec.stateMutex.Unlock()
    return spec.state
}

func (spec *Spec) setState(state types.SpecState) {
    spec.stateMutex.Lock()
    defer spec.stateMutex.Unlock()
    spec.state = state
}

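getState and setState exist because spec state is no longer confined to one goroutine: the timeout handling in leafnodes/runner.go and the live summaries requested through CurrentSpecSummary can touch it while the spec is running. A minimal sketch of the guarded-access pattern this introduces, with invented names:

package main

import (
    "sync"
    "time"
)

type state int

type spec struct {
    mu    sync.Mutex
    state state
}

func (s *spec) setState(v state) { s.mu.Lock(); s.state = v; s.mu.Unlock() }
func (s *spec) getState() state  { s.mu.Lock(); defer s.mu.Unlock(); return s.state }

func main() {
    s := &spec{}
    go s.setState(1)                  // e.g. the timeout path marking the spec
    go func() { _ = s.getState() }()  // e.g. a reporter building a live summary
    time.Sleep(10 * time.Millisecond) // let both goroutines run
}
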
|
||||
func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||
spec.state = types.SpecStatePassed
|
||||
spec.setState(types.SpecStatePassed)
|
||||
spec.failure = types.SpecFailure{}
|
||||
innerMostContainerIndexToUnwind := -1
|
||||
|
||||
@@ -143,8 +166,8 @@ func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
|
||||
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
|
||||
afterEachState, afterEachFailure := afterEach.Run()
|
||||
if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
|
||||
spec.state = afterEachState
|
||||
if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
|
||||
spec.setState(afterEachState)
|
||||
spec.failure = afterEachFailure
|
||||
}
|
||||
}
|
||||
@@ -155,8 +178,10 @@ func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||
innerMostContainerIndexToUnwind = i
|
||||
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
|
||||
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
|
||||
spec.state, spec.failure = beforeEach.Run()
|
||||
if spec.state != types.SpecStatePassed {
|
||||
s, f := beforeEach.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -165,15 +190,19 @@ func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||
for _, container := range spec.containers {
|
||||
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
|
||||
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
|
||||
spec.state, spec.failure = justBeforeEach.Run()
|
||||
if spec.state != types.SpecStatePassed {
|
||||
s, f := justBeforeEach.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spec.announceSubject(writer, spec.subject)
|
||||
spec.state, spec.failure = spec.subject.Run()
|
||||
s, f := spec.subject.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
|
||||
|
||||
3
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
generated
vendored
3
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
generated
vendored
@@ -2,7 +2,6 @@ package spec_iterator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
@@ -31,7 +30,7 @@ func (s *ParallelIterator) Next() (*spec.Spec, error) {
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(fmt.Sprintf("unexpected status code %d", resp.StatusCode))
|
||||
return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var counter Counter
|
||||
|
||||
7
vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
generated
vendored
@@ -56,7 +56,9 @@ func (runner *SpecRunner) Run() bool {
    }

    runner.reportSuiteWillBegin()
    go runner.registerForInterrupts()
    signalRegistered := make(chan struct{})
    go runner.registerForInterrupts(signalRegistered)
    <-signalRegistered

    suitePassed := runner.runBeforeSuite()

@@ -213,9 +215,10 @@ func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
    return runner.runningSpec.Summary(runner.suiteID), true
}

func (runner *SpecRunner) registerForInterrupts() {
func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt, syscall.SIGTERM)
    close(signalRegistered)

    <-c
    signal.Stop(c)

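The handshake above makes Run block until the interrupt goroutine has actually installed its signal handler, so a SIGINT or SIGTERM delivered right after startup cannot slip past an unregistered handler. The same pattern in isolation, with invented names:

package sketch

import (
    "os"
    "os/signal"
    "syscall"
)

// watchInterrupts starts the interrupt handler and only returns once
// signal.Notify has actually run, so a signal delivered immediately after
// startup is not lost.
func watchInterrupts(onSignal func()) {
    registered := make(chan struct{})
    go func() {
        c := make(chan os.Signal, 1)
        signal.Notify(c, os.Interrupt, syscall.SIGTERM)
        close(registered) // handler installed; safe for the caller to proceed
        <-c
        signal.Stop(c)
        onSignal()
    }()
    <-registered
}
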
10
vendor/github.com/onsi/ginkgo/internal/suite/suite.go
generated
vendored
10
vendor/github.com/onsi/ginkgo/internal/suite/suite.go
generated
vendored
@@ -149,35 +149,35 @@ func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagT
|
||||
|
||||
func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
|
||||
suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
|
||||
suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
|
||||
suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
|
||||
suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
|
||||
suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
16
vendor/github.com/onsi/ginkgo/internal/writer/writer.go
generated
vendored
16
vendor/github.com/onsi/ginkgo/internal/writer/writer.go
generated
vendored
@@ -16,10 +16,11 @@ type WriterInterface interface {
|
||||
}
|
||||
|
||||
type Writer struct {
|
||||
buffer *bytes.Buffer
|
||||
outWriter io.Writer
|
||||
lock *sync.Mutex
|
||||
stream bool
|
||||
buffer *bytes.Buffer
|
||||
outWriter io.Writer
|
||||
lock *sync.Mutex
|
||||
stream bool
|
||||
redirector io.Writer
|
||||
}
|
||||
|
||||
func New(outWriter io.Writer) *Writer {
|
||||
@@ -31,6 +32,10 @@ func New(outWriter io.Writer) *Writer {
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) AndRedirectTo(writer io.Writer) {
|
||||
w.redirector = writer
|
||||
}
|
||||
|
||||
func (w *Writer) SetStream(stream bool) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
@@ -42,6 +47,9 @@ func (w *Writer) Write(b []byte) (n int, err error) {
|
||||
defer w.lock.Unlock()
|
||||
|
||||
n, err = w.buffer.Write(b)
|
||||
if w.redirector != nil {
|
||||
w.redirector.Write(b)
|
||||
}
|
||||
if w.stream {
|
||||
return w.outWriter.Write(b)
|
||||
}
|
||||
|
||||
2
vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
generated
vendored
2
vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
generated
vendored
@@ -66,7 +66,7 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary)
|
||||
case types.SpecStatePending:
|
||||
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
|
||||
case types.SpecStateSkipped:
|
||||
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
|
||||
case types.SpecStateTimedOut:
|
||||
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStatePanicked:
|
||||
|
||||
7
vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
generated
vendored
7
vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
generated
vendored
@@ -11,6 +11,7 @@ package reporters
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
@@ -21,8 +22,10 @@ import (
|
||||
type JUnitTestSuite struct {
|
||||
XMLName xml.Name `xml:"testsuite"`
|
||||
TestCases []JUnitTestCase `xml:"testcase"`
|
||||
Name string `xml:"name,attr"`
|
||||
Tests int `xml:"tests,attr"`
|
||||
Failures int `xml:"failures,attr"`
|
||||
Errors int `xml:"errors,attr"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
}
|
||||
|
||||
@@ -59,6 +62,7 @@ func NewJUnitReporter(filename string) *JUnitReporter {
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.suite = JUnitTestSuite{
|
||||
Name: summary.SuiteDescription,
|
||||
TestCases: []JUnitTestCase{},
|
||||
}
|
||||
reporter.testSuiteName = summary.SuiteDescription
|
||||
@@ -117,8 +121,9 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {

func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
    reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
    reporter.suite.Time = summary.RunTime.Seconds()
    reporter.suite.Time = math.Trunc(summary.RunTime.Seconds() * 1000 / 1000)
    reporter.suite.Failures = summary.NumberOfFailedSpecs
    reporter.suite.Errors = 0
    file, err := os.Create(reporter.filename)
    if err != nil {
        fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())

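One note on the new Time expression: since both the multiplication and the division sit inside math.Trunc, Trunc(seconds * 1000 / 1000) reduces to Trunc(seconds) and reports whole seconds. Assuming millisecond precision is the intent, the division has to move outside the call, as in this sketch:

package sketch

import (
    "math"
    "time"
)

// seconds3dp truncates a duration to three decimal places. Note the
// parentheses: Trunc(x*1000)/1000 keeps the milliseconds, whereas
// Trunc(x*1000/1000) collapses to Trunc(x).
func seconds3dp(d time.Duration) float64 {
    return math.Trunc(d.Seconds()*1000) / 1000
}
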
5
vendor/github.com/onsi/ginkgo/reporters/stenographer/BUILD
generated
vendored
5
vendor/github.com/onsi/ginkgo/reporters/stenographer/BUILD
generated
vendored
@@ -10,10 +10,7 @@ go_library(
|
||||
importmap = "k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/reporters/stenographer",
|
||||
importpath = "github.com/onsi/ginkgo/reporters/stenographer",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/types:go_default_library",
|
||||
],
|
||||
deps = ["//vendor/github.com/onsi/ginkgo/types:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
|
||||
7
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
7
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
@@ -12,7 +12,6 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
@@ -62,7 +61,7 @@ type Stenographer interface {
|
||||
SummarizeFailures(summaries []*types.SpecSummary)
|
||||
}
|
||||
|
||||
func New(color bool, enableFlakes bool) Stenographer {
|
||||
func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
|
||||
denoter := "•"
|
||||
if runtime.GOOS == "windows" {
|
||||
denoter = "+"
|
||||
@@ -72,7 +71,7 @@ func New(color bool, enableFlakes bool) Stenographer {
|
||||
denoter: denoter,
|
||||
cursorState: cursorStateTop,
|
||||
enableFlakes: enableFlakes,
|
||||
w: colorable.NewColorableStdout(),
|
||||
w: writer,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -178,7 +177,7 @@ func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSumm
|
||||
}
|
||||
|
||||
s.print(0,
|
||||
"%s -- %s | %s | %s | %s ",
|
||||
"%s -- %s | %s | %s | %s\n",
|
||||
status,
|
||||
s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
|
||||
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
|
||||
|
||||
5
vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
generated
vendored
5
vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
generated
vendored
@@ -10,10 +10,11 @@ package reporters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||