Mirror of https://github.com/DNSCrypt/dnscrypt-proxy.git (synced 2025-04-06 14:47:35 +03:00)
Add support for DoH over HTTP/3
parent 91388b148c
commit 5977de660b
429 changed files with 87237 additions and 7 deletions
20  vendor/github.com/onsi/ginkgo/LICENSE  (generated, vendored, normal file)
@@ -0,0 +1,20 @@
Copyright (c) 2013-2014 Onsi Fakhouri

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
232  vendor/github.com/onsi/ginkgo/config/config.go  (generated, vendored, normal file)
@@ -0,0 +1,232 @@
/*
Ginkgo accepts a number of configuration options.

These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)

You can also learn more via

	ginkgo help

or (I kid you not):

	go test -asdf
*/
package config

import (
	"flag"
	"time"

	"fmt"
)

const VERSION = "1.16.4"

type GinkgoConfigType struct {
	RandomSeed         int64
	RandomizeAllSpecs  bool
	RegexScansFilePath bool
	FocusStrings       []string
	SkipStrings        []string
	SkipMeasurements   bool
	FailOnPending      bool
	FailFast           bool
	FlakeAttempts      int
	EmitSpecProgress   bool
	DryRun             bool
	DebugParallel      bool

	ParallelNode  int
	ParallelTotal int
	SyncHost      string
	StreamHost    string
}

var GinkgoConfig = GinkgoConfigType{}

type DefaultReporterConfigType struct {
	NoColor           bool
	SlowSpecThreshold float64
	NoisyPendings     bool
	NoisySkippings    bool
	Succinct          bool
	Verbose           bool
	FullTrace         bool
	ReportPassed      bool
	ReportFile        string
}

var DefaultReporterConfig = DefaultReporterConfigType{}

func processPrefix(prefix string) string {
	if prefix != "" {
		prefix += "."
	}
	return prefix
}

type flagFunc func(string)

func (f flagFunc) String() string     { return "" }
func (f flagFunc) Set(s string) error { f(s); return nil }

func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
	prefix = processPrefix(prefix)
	flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
	flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
	flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
	flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
	flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")

	flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")

	flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.")
	flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.")

	flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")

	flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")

	flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")

	flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")

	if includeParallelFlags {
		flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
		flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
		flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
		flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
	}

	flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
	flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
	flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
	flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
	flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
	flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
	flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
	flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
	flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")

}

func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
	prefix = processPrefix(prefix)
	result := make([]string, 0)

	if ginkgo.RandomSeed > 0 {
		result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
	}

	if ginkgo.RandomizeAllSpecs {
		result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
	}

	if ginkgo.SkipMeasurements {
		result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
	}

	if ginkgo.FailOnPending {
		result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
	}

	if ginkgo.FailFast {
		result = append(result, fmt.Sprintf("--%sfailFast", prefix))
	}

	if ginkgo.DryRun {
		result = append(result, fmt.Sprintf("--%sdryRun", prefix))
	}

	for _, s := range ginkgo.FocusStrings {
		result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s))
	}

	for _, s := range ginkgo.SkipStrings {
		result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s))
	}

	if ginkgo.FlakeAttempts > 1 {
		result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
	}

	if ginkgo.EmitSpecProgress {
		result = append(result, fmt.Sprintf("--%sprogress", prefix))
	}

	if ginkgo.DebugParallel {
		result = append(result, fmt.Sprintf("--%sdebug", prefix))
	}

	if ginkgo.ParallelNode != 0 {
		result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
	}

	if ginkgo.ParallelTotal != 0 {
		result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
	}

	if ginkgo.StreamHost != "" {
		result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
	}

	if ginkgo.SyncHost != "" {
		result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
	}

	if ginkgo.RegexScansFilePath {
		result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
	}

	if reporter.NoColor {
		result = append(result, fmt.Sprintf("--%snoColor", prefix))
	}

	if reporter.SlowSpecThreshold > 0 {
		result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
	}

	if !reporter.NoisyPendings {
		result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
	}

	if !reporter.NoisySkippings {
		result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
	}

	if reporter.Verbose {
		result = append(result, fmt.Sprintf("--%sv", prefix))
	}

	if reporter.Succinct {
		result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
	}

	if reporter.FullTrace {
		result = append(result, fmt.Sprintf("--%strace", prefix))
	}

	if reporter.ReportPassed {
		result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
	}

	if reporter.ReportFile != "" {
		result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
	}

	return result
}

// flagFocus implements the -focus flag.
func flagFocus(arg string) {
	if arg != "" {
		GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg)
	}
}

// flagSkip implements the -skip flag.
func flagSkip(arg string) {
	if arg != "" {
		GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg)
	}
}
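The config package above is consumed through its package-level globals plus the Flags and BuildFlagArgs helpers. Below is a minimal sketch, not part of this commit, of how a caller might register the flags under a "ginkgo." prefix and round-trip the parsed values back into CLI arguments; the main package and the prefix choice are illustrative assumptions.

package main

import (
	"flag"
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	// Register every Ginkgo and default-reporter flag on the standard
	// flag set under a "ginkgo." prefix (parallel.* flags included).
	config.Flags(flag.CommandLine, "ginkgo", true)
	flag.Parse()

	// Round-trip the parsed configuration back into CLI arguments,
	// e.g. to forward them to a spawned test binary.
	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	fmt.Println(args)
}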
190  vendor/github.com/onsi/ginkgo/formatter/formatter.go  (generated, vendored, normal file)
@@ -0,0 +1,190 @@
package formatter

import (
	"fmt"
	"regexp"
	"strings"
)

const COLS = 80

type ColorMode uint8

const (
	ColorModeNone ColorMode = iota
	ColorModeTerminal
	ColorModePassthrough
)

var SingletonFormatter = New(ColorModeTerminal)

func F(format string, args ...interface{}) string {
	return SingletonFormatter.F(format, args...)
}

func Fi(indentation uint, format string, args ...interface{}) string {
	return SingletonFormatter.Fi(indentation, format, args...)
}

func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
	return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
}

type Formatter struct {
	ColorMode ColorMode
	colors    map[string]string
	styleRe   *regexp.Regexp
	preserveColorStylingTags bool
}

func NewWithNoColorBool(noColor bool) Formatter {
	if noColor {
		return New(ColorModeNone)
	}
	return New(ColorModeTerminal)
}

func New(colorMode ColorMode) Formatter {
	f := Formatter{
		ColorMode: colorMode,
		colors: map[string]string{
			"/":         "\x1b[0m",
			"bold":      "\x1b[1m",
			"underline": "\x1b[4m",

			"red":          "\x1b[38;5;9m",
			"orange":       "\x1b[38;5;214m",
			"coral":        "\x1b[38;5;204m",
			"magenta":      "\x1b[38;5;13m",
			"green":        "\x1b[38;5;10m",
			"dark-green":   "\x1b[38;5;28m",
			"yellow":       "\x1b[38;5;11m",
			"light-yellow": "\x1b[38;5;228m",
			"cyan":         "\x1b[38;5;14m",
			"gray":         "\x1b[38;5;243m",
			"light-gray":   "\x1b[38;5;246m",
			"blue":         "\x1b[38;5;12m",
		},
	}
	colors := []string{}
	for color := range f.colors {
		colors = append(colors, color)
	}
	f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}")
	return f
}

func (f Formatter) F(format string, args ...interface{}) string {
	return f.Fi(0, format, args...)
}

func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
	return f.Fiw(indentation, 0, format, args...)
}

func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
	out := fmt.Sprintf(f.style(format), args...)

	if indentation == 0 && maxWidth == 0 {
		return out
	}

	lines := strings.Split(out, "\n")

	if maxWidth != 0 {
		outLines := []string{}

		maxWidth = maxWidth - indentation*2
		for _, line := range lines {
			if f.length(line) <= maxWidth {
				outLines = append(outLines, line)
				continue
			}
			outWords := []string{}
			length := uint(0)
			words := strings.Split(line, " ")
			for _, word := range words {
				wordLength := f.length(word)
				if length+wordLength <= maxWidth {
					length += wordLength
					outWords = append(outWords, word)
					continue
				}
				outLines = append(outLines, strings.Join(outWords, " "))
				outWords = []string{word}
				length = wordLength
			}
			if len(outWords) > 0 {
				outLines = append(outLines, strings.Join(outWords, " "))
			}
		}

		lines = outLines
	}

	if indentation == 0 {
		return strings.Join(lines, "\n")
	}

	padding := strings.Repeat("  ", int(indentation))
	for i := range lines {
		if lines[i] != "" {
			lines[i] = padding + lines[i]
		}
	}

	return strings.Join(lines, "\n")
}

func (f Formatter) length(styled string) uint {
	n := uint(0)
	inStyle := false
	for _, b := range styled {
		if inStyle {
			if b == 'm' {
				inStyle = false
			}
			continue
		}
		if b == '\x1b' {
			inStyle = true
			continue
		}
		n += 1
	}
	return n
}

func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string {
	if len(elements) == 0 {
		return ""
	}
	n := len(cycle)
	out := ""
	for i, text := range elements {
		out += cycle[i%n] + text
		if i < len(elements)-1 {
			out += joiner
		}
	}
	out += "{{/}}"
	return f.style(out)
}

func (f Formatter) style(s string) string {
	switch f.ColorMode {
	case ColorModeNone:
		return f.styleRe.ReplaceAllString(s, "")
	case ColorModePassthrough:
		return s
	case ColorModeTerminal:
		return f.styleRe.ReplaceAllStringFunc(s, func(match string) string {
			if out, ok := f.colors[strings.Trim(match, "{}")]; ok {
				return out
			}
			return match
		})
	}

	return ""
}
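The formatter resolves {{color}}/{{/}} tags against the colors map above, either into ANSI escapes (terminal mode) or by stripping them (no-color mode), while Fiw adds indentation and word wrapping. A small illustrative sketch follows; it is not part of the diff, the tag names are taken from the map above, and everything else is assumed.

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/formatter"
)

func main() {
	// Package-level F uses the terminal-mode singleton: tags become ANSI codes.
	fmt.Println(formatter.F("{{green}}{{bold}}PASS{{/}} %d specs", 12))

	// A no-color formatter strips the tags instead; Fiw indents each line
	// and word-wraps the output to fit the requested width.
	plain := formatter.NewWithNoColorBool(true)
	fmt.Println(plain.Fiw(1, 40, "{{cyan}}%s{{/}}", "a longer message that will be wrapped to fit inside forty columns"))
}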
200  vendor/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go  (generated, vendored, normal file)
@@ -0,0 +1,200 @@
package main

import (
	"bytes"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"text/template"

	"go/build"

	sprig "github.com/go-task/slim-sprig"
	"github.com/onsi/ginkgo/ginkgo/nodot"
)

func BuildBootstrapCommand() *Command {
	var (
		agouti, noDot, internal bool
		customBootstrapFile     string
	)
	flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
	flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
	flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
	flagSet.BoolVar(&internal, "internal", false, "If set, generate will generate a test file that uses the regular package name")
	flagSet.StringVar(&customBootstrapFile, "template", "", "If specified, generate will use the contents of the file passed as the bootstrap template")

	return &Command{
		Name:         "bootstrap",
		FlagSet:      flagSet,
		UsageCommand: "ginkgo bootstrap <FLAGS>",
		Usage: []string{
			"Bootstrap a test suite for the current package",
			"Accepts the following flags:",
		},
		Command: func(args []string, additionalArgs []string) {
			generateBootstrap(agouti, noDot, internal, customBootstrapFile)
		},
	}
}

var bootstrapText = `package {{.Package}}

import (
	"testing"

	{{.GinkgoImport}}
	{{.GomegaImport}}
)

func Test{{.FormattedName}}(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "{{.FormattedName}} Suite")
}
`

var agoutiBootstrapText = `package {{.Package}}

import (
	"testing"

	{{.GinkgoImport}}
	{{.GomegaImport}}
	"github.com/sclevine/agouti"
)

func Test{{.FormattedName}}(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "{{.FormattedName}} Suite")
}

var agoutiDriver *agouti.WebDriver

var _ = BeforeSuite(func() {
	// Choose a WebDriver:

	agoutiDriver = agouti.PhantomJS()
	// agoutiDriver = agouti.Selenium()
	// agoutiDriver = agouti.ChromeDriver()

	Expect(agoutiDriver.Start()).To(Succeed())
})

var _ = AfterSuite(func() {
	Expect(agoutiDriver.Stop()).To(Succeed())
})
`

type bootstrapData struct {
	Package       string
	FormattedName string
	GinkgoImport  string
	GomegaImport  string
}

func getPackageAndFormattedName() (string, string, string) {
	path, err := os.Getwd()
	if err != nil {
		complainAndQuit("Could not get current working directory: \n" + err.Error())
	}

	dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
	dirName = strings.Replace(dirName, " ", "_", -1)

	pkg, err := build.ImportDir(path, 0)
	packageName := pkg.Name
	if err != nil {
		packageName = dirName
	}

	formattedName := prettifyPackageName(filepath.Base(path))
	return packageName, dirName, formattedName
}

func prettifyPackageName(name string) string {
	name = strings.Replace(name, "-", " ", -1)
	name = strings.Replace(name, "_", " ", -1)
	name = strings.Title(name)
	name = strings.Replace(name, " ", "", -1)
	return name
}

func determinePackageName(name string, internal bool) string {
	if internal {
		return name
	}

	return name + "_test"
}

func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

func generateBootstrap(agouti, noDot, internal bool, customBootstrapFile string) {
	packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
	data := bootstrapData{
		Package:       determinePackageName(packageName, internal),
		FormattedName: formattedName,
		GinkgoImport:  `. "github.com/onsi/ginkgo"`,
		GomegaImport:  `. "github.com/onsi/gomega"`,
	}

	if noDot {
		data.GinkgoImport = `"github.com/onsi/ginkgo"`
		data.GomegaImport = `"github.com/onsi/gomega"`
	}

	targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
	if fileExists(targetFile) {
		fmt.Printf("%s already exists.\n\n", targetFile)
		os.Exit(1)
	} else {
		fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
	}

	f, err := os.Create(targetFile)
	if err != nil {
		complainAndQuit("Could not create file: " + err.Error())
		panic(err.Error())
	}
	defer f.Close()

	var templateText string
	if customBootstrapFile != "" {
		tpl, err := ioutil.ReadFile(customBootstrapFile)
		if err != nil {
			panic(err.Error())
		}
		templateText = string(tpl)
	} else if agouti {
		templateText = agoutiBootstrapText
	} else {
		templateText = bootstrapText
	}

	bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Parse(templateText)
	if err != nil {
		panic(err.Error())
	}

	buf := &bytes.Buffer{}
	bootstrapTemplate.Execute(buf, data)

	if noDot {
		contents, err := nodot.ApplyNoDot(buf.Bytes())
		if err != nil {
			complainAndQuit("Failed to import nodot declarations: " + err.Error())
		}
		fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
		buf = bytes.NewBuffer(contents)
	}

	buf.WriteTo(f)

	goFmt(targetFile)
}
66  vendor/github.com/onsi/ginkgo/ginkgo/build_command.go  (generated, vendored, normal file)
@@ -0,0 +1,66 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"path/filepath"

	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
	"github.com/onsi/ginkgo/ginkgo/testrunner"
)

func BuildBuildCommand() *Command {
	commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
	interruptHandler := interrupthandler.NewInterruptHandler()
	builder := &SpecBuilder{
		commandFlags:     commandFlags,
		interruptHandler: interruptHandler,
	}

	return &Command{
		Name:         "build",
		FlagSet:      commandFlags.FlagSet,
		UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
		Usage: []string{
			"Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
			"Accepts the following flags:",
		},
		Command: builder.BuildSpecs,
	}
}

type SpecBuilder struct {
	commandFlags     *RunWatchAndBuildCommandFlags
	interruptHandler *interrupthandler.InterruptHandler
}

func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
	r.commandFlags.computeNodes()

	suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)

	if len(suites) == 0 {
		complainAndQuit("Found no test suites")
	}

	passed := true
	for _, suite := range suites {
		runner := testrunner.New(suite, 1, false, 0, r.commandFlags.GoOpts, nil)
		fmt.Printf("Compiling %s...\n", suite.PackageName)

		path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
		err := runner.CompileTo(path)
		if err != nil {
			fmt.Println(err.Error())
			passed = false
		} else {
			fmt.Printf(" compiled %s.test\n", suite.PackageName)
		}
	}

	if passed {
		os.Exit(0)
	}
	os.Exit(1)
}
123  vendor/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go  (generated, vendored, normal file)
@@ -0,0 +1,123 @@
package convert

import (
	"fmt"
	"go/ast"
	"strings"
	"unicode"
)

/*
 * Creates a func init() node
 */
func createVarUnderscoreBlock() *ast.ValueSpec {
	valueSpec := &ast.ValueSpec{}
	object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
	ident := &ast.Ident{Name: "_", Obj: object}
	valueSpec.Names = append(valueSpec.Names, ident)
	return valueSpec
}

/*
 * Creates a Describe("Testing with ginkgo", func() { }) node
 */
func createDescribeBlock() *ast.CallExpr {
	blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}

	fieldList := &ast.FieldList{}
	funcType := &ast.FuncType{Params: fieldList}
	funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
	basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
	describeIdent := &ast.Ident{Name: "Describe"}
	return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
}

/*
 * Convenience function to return the name of the *testing.T param
 * for a Test function that will be rewritten. This is useful because
 * we will want to replace the usage of this named *testing.T inside the
 * body of the function with a GinktoT.
 */
func namedTestingTArg(node *ast.FuncDecl) string {
	return node.Type.Params.List[0].Names[0].Name // *exhale*
}

/*
 * Convenience function to return the block statement node for a Describe statement
 */
func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
	var funcLit *ast.FuncLit
	var found = false

	for _, node := range desc.Args {
		switch node := node.(type) {
		case *ast.FuncLit:
			found = true
			funcLit = node
			break
		}
	}

	if !found {
		panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
	}

	return funcLit.Body
}

/* convenience function for creating an It("TestNameHere")
 * with all the body of the test function inside the anonymous
 * func passed to It()
 */
func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
	blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
	fieldList := &ast.FieldList{}
	funcType := &ast.FuncType{Params: fieldList}
	funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}

	testName := rewriteTestName(testFunc.Name.Name)
	basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
	itBlockIdent := &ast.Ident{Name: "It"}
	callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
	return &ast.ExprStmt{X: callExpr}
}

/*
 * rewrite test names to be human readable
 * eg: rewrites "TestSomethingAmazing" as "something amazing"
 */
func rewriteTestName(testName string) string {
	nameComponents := []string{}
	currentString := ""
	indexOfTest := strings.Index(testName, "Test")
	if indexOfTest != 0 {
		return testName
	}

	testName = strings.Replace(testName, "Test", "", 1)
	first, rest := testName[0], testName[1:]
	testName = string(unicode.ToLower(rune(first))) + rest

	for _, rune := range testName {
		if unicode.IsUpper(rune) {
			nameComponents = append(nameComponents, currentString)
			currentString = string(unicode.ToLower(rune))
		} else {
			currentString += string(rune)
		}
	}

	return strings.Join(append(nameComponents, currentString), " ")
}

func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
	return &ast.CallExpr{
		Lparen: ident.NamePos + 1,
		Rparen: ident.NamePos + 2,
		Fun:    &ast.Ident{Name: "GinkgoT"},
	}
}

func newGinkgoTInterface() *ast.Ident {
	return &ast.Ident{Name: "GinkgoTInterface"}
}
90  vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go  (generated, vendored, normal file)
@@ -0,0 +1,90 @@
package convert

import (
	"fmt"
	"go/ast"
)

/*
 * Given the root node of an AST, returns the node containing the
 * import statements for the file.
 */
func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
	for _, declaration := range rootNode.Decls {
		decl, ok := declaration.(*ast.GenDecl)
		if !ok || len(decl.Specs) == 0 {
			continue
		}

		_, ok = decl.Specs[0].(*ast.ImportSpec)
		if ok {
			imports = decl
			return
		}
	}

	err = fmt.Errorf("Could not find imports for root node:\n\t%#v\n", rootNode)
	return
}

/*
 * Removes "testing" import, if present
 */
func removeTestingImport(rootNode *ast.File) {
	importDecl, err := importsForRootNode(rootNode)
	if err != nil {
		panic(err.Error())
	}

	var index int
	for i, importSpec := range importDecl.Specs {
		importSpec := importSpec.(*ast.ImportSpec)
		if importSpec.Path.Value == "\"testing\"" {
			index = i
			break
		}
	}

	importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
}

/*
 * Adds import statements for onsi/ginkgo, if missing
 */
func addGinkgoImports(rootNode *ast.File) {
	importDecl, err := importsForRootNode(rootNode)
	if err != nil {
		panic(err.Error())
	}

	if len(importDecl.Specs) == 0 {
		// TODO: might need to create a import decl here
		panic("unimplemented : expected to find an imports block")
	}

	needsGinkgo := true
	for _, importSpec := range importDecl.Specs {
		importSpec, ok := importSpec.(*ast.ImportSpec)
		if !ok {
			continue
		}

		if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
			needsGinkgo = false
		}
	}

	if needsGinkgo {
		importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
	}
}

/*
 * convenience function to create an import statement
 */
func createImport(name, path string) *ast.ImportSpec {
	return &ast.ImportSpec{
		Name: &ast.Ident{Name: name},
		Path: &ast.BasicLit{Kind: 9, Value: path},
	}
}
128  vendor/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go  (generated, vendored, normal file)
@@ -0,0 +1,128 @@
package convert

import (
	"fmt"
	"go/build"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
)

/*
 * RewritePackage takes a name (eg: my-package/tools), finds its test files using
 * Go's build package, and then rewrites them. A ginkgo test suite file will
 * also be added for this package, and all of its child packages.
 */
func RewritePackage(packageName string) {
	pkg, err := packageWithName(packageName)
	if err != nil {
		panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
	}

	for _, filename := range findTestsInPackage(pkg) {
		rewriteTestsInFile(filename)
	}
}

/*
 * Given a package, findTestsInPackage reads the test files in the directory,
 * and then recurses on each child package, returning a slice of all test files
 * found in this process.
 */
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
	for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
		testfile, _ := filepath.Abs(filepath.Join(pkg.Dir, file))
		testfiles = append(testfiles, testfile)
	}

	dirFiles, err := ioutil.ReadDir(pkg.Dir)
	if err != nil {
		panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
	}

	re := regexp.MustCompile(`^[._]`)

	for _, file := range dirFiles {
		if !file.IsDir() {
			continue
		}

		if re.Match([]byte(file.Name())) {
			continue
		}

		packageName := filepath.Join(pkg.ImportPath, file.Name())
		subPackage, err := packageWithName(packageName)
		if err != nil {
			panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
		}

		testfiles = append(testfiles, findTestsInPackage(subPackage)...)
	}

	addGinkgoSuiteForPackage(pkg)
	goFmtPackage(pkg)
	return
}

/*
 * Shells out to `ginkgo bootstrap` to create a test suite file
 */
func addGinkgoSuiteForPackage(pkg *build.Package) {
	originalDir, err := os.Getwd()
	if err != nil {
		panic(err)
	}

	suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")

	_, err = os.Stat(suite_test_file)
	if err == nil {
		return // test file already exists, this should be a no-op
	}

	err = os.Chdir(pkg.Dir)
	if err != nil {
		panic(err)
	}

	output, err := exec.Command("ginkgo", "bootstrap").Output()

	if err != nil {
		panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
	}

	err = os.Chdir(originalDir)
	if err != nil {
		panic(err)
	}
}

/*
 * Shells out to `go fmt` to format the package
 */
func goFmtPackage(pkg *build.Package) {
	path, _ := filepath.Abs(pkg.ImportPath)
	output, err := exec.Command("go", "fmt", path).CombinedOutput()

	if err != nil {
		fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", path, output, err.Error())
	}
}

/*
 * Attempts to return a package with its test files already read.
 * The ImportMode arg to build.Import lets you specify if you want go to read the
 * buildable go files inside the package, but it fails if the package has no go files
 */
func packageWithName(name string) (pkg *build.Package, err error) {
	pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
	if err == nil {
		return
	}

	pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
	return
}
56  vendor/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go  (generated, vendored, normal file)
@@ -0,0 +1,56 @@
package convert

import (
	"go/ast"
	"regexp"
)

/*
 * Given a root node, walks its top level statements and returns
 * points to function nodes to rewrite as It statements.
 * These functions, according to Go testing convention, must be named
 * TestWithCamelCasedName and receive a single *testing.T argument.
 */
func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
	testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")

	ast.Inspect(rootNode, func(node ast.Node) bool {
		if node == nil {
			return false
		}

		switch node := node.(type) {
		case *ast.FuncDecl:
			matches := testNameRegexp.MatchString(node.Name.Name)

			if matches && receivesTestingT(node) {
				testsToRewrite = append(testsToRewrite, node)
			}
		}

		return true
	})

	return
}

/*
 * convenience function that looks at args to a function and determines if its
 * params include an argument of type *testing.T
 */
func receivesTestingT(node *ast.FuncDecl) bool {
	if len(node.Type.Params.List) != 1 {
		return false
	}

	base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
	if !ok {
		return false
	}

	intermediate := base.X.(*ast.SelectorExpr)
	isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
	isTestingT := intermediate.Sel.Name == "T"

	return isTestingPackage && isTestingT
}
162  vendor/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go  (generated, vendored, normal file)
@@ -0,0 +1,162 @@
package convert

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"io/ioutil"
	"os"
)

/*
 * Given a file path, rewrites any tests in the Ginkgo format.
 * First, we parse the AST, and update the imports declaration.
 * Then, we walk the first child elements in the file, returning tests to rewrite.
 * A top level init func is declared, with a single Describe func inside.
 * Then the test functions to rewrite are inserted as It statements inside the Describe.
 * Finally we walk the rest of the file, replacing other usages of *testing.T
 * Once that is complete, we write the AST back out again to its file.
 */
func rewriteTestsInFile(pathToFile string) {
	fileSet := token.NewFileSet()
	rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, parser.ParseComments)
	if err != nil {
		panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
	}

	addGinkgoImports(rootNode)
	removeTestingImport(rootNode)

	varUnderscoreBlock := createVarUnderscoreBlock()
	describeBlock := createDescribeBlock()
	varUnderscoreBlock.Values = []ast.Expr{describeBlock}

	for _, testFunc := range findTestFuncs(rootNode) {
		rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
	}

	underscoreDecl := &ast.GenDecl{
		Tok:    85, // gah, magick numbers are needed to make this work
		TokPos: 14, // this tricks Go into writing "var _ = Describe"
		Specs:  []ast.Spec{varUnderscoreBlock},
	}

	imports := rootNode.Decls[0]
	tail := rootNode.Decls[1:]
	rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
	rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
	walkNodesInRootNodeReplacingTestingT(rootNode)

	var buffer bytes.Buffer
	if err = format.Node(&buffer, fileSet, rootNode); err != nil {
		panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
	}

	fileInfo, err := os.Stat(pathToFile)

	if err != nil {
		panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
	}

	err = ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
}

/*
 * Given a test func named TestDoesSomethingNeat, rewrites it as
 * It("does something neat", func() { __test_body_here__ }) and adds it
 * to the Describe's list of statements
 */
func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
	var funcIndex int = -1
	for index, child := range rootNode.Decls {
		if child == testFunc {
			funcIndex = index
			break
		}
	}

	if funcIndex < 0 {
		panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
	}

	var block *ast.BlockStmt = blockStatementFromDescribe(describe)
	block.List = append(block.List, createItStatementForTestFunc(testFunc))
	replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))

	// remove the old test func from the root node's declarations
	rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
}

/*
 * walks nodes inside of a test func's statements and replaces the usage of
 * it's named *testing.T param with GinkgoT's
 */
func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
	ast.Inspect(statementsBlock, func(node ast.Node) bool {
		if node == nil {
			return false
		}

		keyValueExpr, ok := node.(*ast.KeyValueExpr)
		if ok {
			replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
			return true
		}

		funcLiteral, ok := node.(*ast.FuncLit)
		if ok {
			replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
			return true
		}

		callExpr, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		replaceTestingTsInArgsLists(callExpr, testingT)

		funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
		if ok {
			replaceTestingTsMethodCalls(funCall, testingT)
		}

		return true
	})
}

/*
 * rewrite t.Fail() or any other *testing.T method by replacing with T().Fail()
 * This function receives a selector expression (eg: t.Fail()) and
 * the name of the *testing.T param from the function declaration. Rewrites the
 * selector expression in place if the target was a *testing.T
 */
func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
	ident, ok := selectorExpr.X.(*ast.Ident)
	if !ok {
		return
	}

	if ident.Name == testingT {
		selectorExpr.X = newGinkgoTFromIdent(ident)
	}
}

/*
 * replaces usages of a named *testing.T param inside of a call expression
 * with a new GinkgoT object
 */
func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
	for index, arg := range callExpr.Args {
		ident, ok := arg.(*ast.Ident)
		if !ok {
			continue
		}

		if ident.Name == testingT {
			callExpr.Args[index] = newGinkgoTFromIdent(ident)
		}
	}
}
130  vendor/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go  (generated, vendored, normal file)
@@ -0,0 +1,130 @@
package convert

import (
	"go/ast"
)

/*
 * Rewrites any other top level funcs that receive a *testing.T param
 */
func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
	for _, decl := range declarations {
		decl, ok := decl.(*ast.FuncDecl)
		if !ok {
			continue
		}

		for _, param := range decl.Type.Params.List {
			starExpr, ok := param.Type.(*ast.StarExpr)
			if !ok {
				continue
			}

			selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
			if !ok {
				continue
			}

			xIdent, ok := selectorExpr.X.(*ast.Ident)
			if !ok || xIdent.Name != "testing" {
				continue
			}

			if selectorExpr.Sel.Name != "T" {
				continue
			}

			param.Type = newGinkgoTInterface()
		}
	}
}

/*
 * Walks all of the nodes in the file, replacing *testing.T in struct
 * and func literal nodes. eg:
 * type foo struct { *testing.T }
 * var bar = func(t *testing.T) { }
 */
func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
	ast.Inspect(rootNode, func(node ast.Node) bool {
		if node == nil {
			return false
		}

		switch node := node.(type) {
		case *ast.StructType:
			replaceTestingTsInStructType(node)
		case *ast.FuncLit:
			replaceTypeDeclTestingTsInFuncLiteral(node)
		}

		return true
	})
}

/*
 * replaces named *testing.T inside a composite literal
 */
func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
	ident, ok := kve.Value.(*ast.Ident)
	if !ok {
		return
	}

	if ident.Name == testingT {
		kve.Value = newGinkgoTFromIdent(ident)
	}
}

/*
 * replaces *testing.T params in a func literal with GinkgoT
 */
func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
	for _, arg := range functionLiteral.Type.Params.List {
		starExpr, ok := arg.Type.(*ast.StarExpr)
		if !ok {
			continue
		}

		selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
		if !ok {
			continue
		}

		target, ok := selectorExpr.X.(*ast.Ident)
		if !ok {
			continue
		}

		if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
			arg.Type = newGinkgoTInterface()
		}
	}
}

/*
 * Replaces *testing.T types inside of a struct declaration with a GinkgoT
 * eg: type foo struct { *testing.T }
 */
func replaceTestingTsInStructType(structType *ast.StructType) {
	for _, field := range structType.Fields.List {
		starExpr, ok := field.Type.(*ast.StarExpr)
		if !ok {
			continue
		}

		selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
		if !ok {
			continue
		}

		xIdent, ok := selectorExpr.X.(*ast.Ident)
		if !ok {
			continue
		}

		if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
			field.Type = newGinkgoTInterface()
		}
	}
}
51  vendor/github.com/onsi/ginkgo/ginkgo/convert_command.go  (generated, vendored, normal file)
@@ -0,0 +1,51 @@
package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/onsi/ginkgo/ginkgo/convert"
	colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
	"github.com/onsi/ginkgo/types"
)

func BuildConvertCommand() *Command {
	return &Command{
		Name:         "convert",
		FlagSet:      flag.NewFlagSet("convert", flag.ExitOnError),
		UsageCommand: "ginkgo convert /path/to/package",
		Usage: []string{
			"Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
		},
		Command: convertPackage,
	}
}

func convertPackage(args []string, additionalArgs []string) {
	deprecationTracker := types.NewDeprecationTracker()
	deprecationTracker.TrackDeprecation(types.Deprecations.Convert())
	fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())

	if len(args) != 1 {
		println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package"))
		os.Exit(1)
	}

	defer func() {
		err := recover()
		if err != nil {
			switch err := err.(type) {
			case error:
				println(err.Error())
			case string:
				println(err)
			default:
				println(fmt.Sprintf("unexpected error: %#v", err))
			}
			os.Exit(1)
		}
	}()

	convert.RewritePackage(args[0])
}
273  vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go  (generated, vendored, normal file)
|
@ -0,0 +1,273 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
sprig "github.com/go-task/slim-sprig"
|
||||
)
|
||||
|
||||
func BuildGenerateCommand() *Command {
|
||||
var (
|
||||
agouti, noDot, internal bool
|
||||
customTestFile string
|
||||
)
|
||||
flagSet := flag.NewFlagSet("generate", flag.ExitOnError)
|
||||
flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests")
|
||||
flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega")
|
||||
flagSet.BoolVar(&internal, "internal", false, "If set, generate will generate a test file that uses the regular package name")
|
||||
flagSet.StringVar(&customTestFile, "template", "", "If specified, generate will use the contents of the file passed as the test file template")
|
||||
|
||||
return &Command{
|
||||
Name: "generate",
|
||||
FlagSet: flagSet,
|
||||
UsageCommand: "ginkgo generate <filename(s)>",
|
||||
Usage: []string{
|
||||
"Generate a test file named filename_test.go",
|
||||
"If the optional <filenames> argument is omitted, a file named after the package in the current directory will be created.",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
generateSpec(args, agouti, noDot, internal, customTestFile)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var specText = `package {{.Package}}
|
||||
|
||||
import (
|
||||
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
|
||||
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
|
||||
|
||||
{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
|
||||
)
|
||||
|
||||
var _ = Describe("{{.Subject}}", func() {
|
||||
|
||||
})
|
||||
`
|
||||
|
||||
var agoutiSpecText = `package {{.Package}}
|
||||
|
||||
import (
|
||||
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
|
||||
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
|
||||
"github.com/sclevine/agouti"
|
||||
. "github.com/sclevine/agouti/matchers"
|
||||
|
||||
{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
|
||||
)
|
||||
|
||||
var _ = Describe("{{.Subject}}", func() {
|
||||
var page *agouti.Page
|
||||
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
page, err = agoutiDriver.NewPage()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
Expect(page.Destroy()).To(Succeed())
|
||||
})
|
||||
})
|
||||
`
|
||||
|
||||
type specData struct {
|
||||
Package string
|
||||
Subject string
|
||||
PackageImportPath string
|
||||
IncludeImports bool
|
||||
ImportPackage bool
|
||||
}
|
||||
|
||||
func generateSpec(args []string, agouti, noDot, internal bool, customTestFile string) {
|
||||
if len(args) == 0 {
|
||||
err := generateSpecForSubject("", agouti, noDot, internal, customTestFile)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
fmt.Println("")
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("")
|
||||
return
|
||||
}
|
||||
|
||||
var failed bool
|
||||
for _, arg := range args {
|
||||
err := generateSpecForSubject(arg, agouti, noDot, internal, customTestFile)
|
||||
if err != nil {
|
||||
failed = true
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
fmt.Println("")
|
||||
if failed {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func generateSpecForSubject(subject string, agouti, noDot, internal bool, customTestFile string) error {
|
||||
	packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
	if subject != "" {
		specFilePrefix = formatSubject(subject)
		formattedName = prettifyPackageName(specFilePrefix)
	}

	if internal {
		specFilePrefix = specFilePrefix + "_internal"
	}

	data := specData{
		Package:           determinePackageName(packageName, internal),
		Subject:           formattedName,
		PackageImportPath: getPackageImportPath(),
		IncludeImports:    !noDot,
		ImportPackage:     !internal,
	}

	targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
	if fileExists(targetFile) {
		return fmt.Errorf("%s already exists.", targetFile)
	} else {
		fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
	}

	f, err := os.Create(targetFile)
	if err != nil {
		return err
	}
	defer f.Close()

	var templateText string
	if customTestFile != "" {
		tpl, err := ioutil.ReadFile(customTestFile)
		if err != nil {
			panic(err.Error())
		}
		templateText = string(tpl)
	} else if agouti {
		templateText = agoutiSpecText
	} else {
		templateText = specText
	}

	specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Parse(templateText)
	if err != nil {
		return err
	}

	specTemplate.Execute(f, data)
	goFmt(targetFile)
	return nil
}

func formatSubject(name string) string {
	name = strings.Replace(name, "-", "_", -1)
	name = strings.Replace(name, " ", "_", -1)
	name = strings.Split(name, ".go")[0]
	name = strings.Split(name, "_test")[0]
	return name
}

// moduleName returns module name from go.mod from given module root directory
func moduleName(modRoot string) string {
	modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
	if err != nil {
		return ""
	}

	mod := make([]byte, 128)
	_, err = modFile.Read(mod)
	if err != nil {
		return ""
	}

	slashSlash := []byte("//")
	moduleStr := []byte("module")

	for len(mod) > 0 {
		line := mod
		mod = nil
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, mod = line[:i], line[i+1:]
		}
		if i := bytes.Index(line, slashSlash); i >= 0 {
			line = line[:i]
		}
		line = bytes.TrimSpace(line)
		if !bytes.HasPrefix(line, moduleStr) {
			continue
		}
		line = line[len(moduleStr):]
		n := len(line)
		line = bytes.TrimSpace(line)
		if len(line) == n || len(line) == 0 {
			continue
		}

		if line[0] == '"' || line[0] == '`' {
			p, err := strconv.Unquote(string(line))
			if err != nil {
				return "" // malformed quoted string or multiline module path
			}
			return p
		}

		return string(line)
	}

	return "" // missing module path
}

func findModuleRoot(dir string) (root string) {
	dir = filepath.Clean(dir)

	// Look for enclosing go.mod.
	for {
		if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
			return dir
		}
		d := filepath.Dir(dir)
		if d == dir {
			break
		}
		dir = d
	}
	return ""
}

func getPackageImportPath() string {
	workingDir, err := os.Getwd()
	if err != nil {
		panic(err.Error())
	}

	sep := string(filepath.Separator)

	// Try go.mod file first
	modRoot := findModuleRoot(workingDir)
	if modRoot != "" {
		modName := moduleName(modRoot)
		if modName != "" {
			cd := strings.Replace(workingDir, modRoot, "", -1)
			cd = strings.ReplaceAll(cd, sep, "/")
			return modName + cd
		}
	}

	// Fallback to GOPATH structure
	paths := strings.Split(workingDir, sep+"src"+sep)
	if len(paths) == 1 {
		fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
		return "UNKNOWN_PACKAGE_PATH"
	}
	return filepath.ToSlash(paths[len(paths)-1])
}
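For readers skimming this diff, here is a minimal sketch of what the module-aware branch of getPackageImportPath above computes. The module name and directories are hypothetical examples, not values taken from this commit.

	package main

	import (
		"path/filepath"
		"strings"
	)

	// Sketch only: mirrors the logic above with hard-coded inputs.
	// Assume go.mod at /home/dev/mymod declares `module example.com/mymod`
	// and the working directory is /home/dev/mymod/pkg/sub (both hypothetical).
	func exampleImportPath() string {
		modRoot := "/home/dev/mymod"    // what findModuleRoot would return
		modName := "example.com/mymod"  // what moduleName would parse from go.mod
		workingDir := "/home/dev/mymod/pkg/sub"

		cd := strings.Replace(workingDir, modRoot, "", -1)
		cd = strings.ReplaceAll(cd, string(filepath.Separator), "/")
		return modName + cd // "example.com/mymod/pkg/sub"
	}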
31
vendor/github.com/onsi/ginkgo/ginkgo/help_command.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
package main

import (
	"flag"
	"fmt"
)

func BuildHelpCommand() *Command {
	return &Command{
		Name:         "help",
		FlagSet:      flag.NewFlagSet("help", flag.ExitOnError),
		UsageCommand: "ginkgo help <COMMAND>",
		Usage: []string{
			"Print usage information. If a command is passed in, print usage information just for that command.",
		},
		Command: printHelp,
	}
}

func printHelp(args []string, additionalArgs []string) {
	if len(args) == 0 {
		usage()
	} else {
		command, found := commandMatching(args[0])
		if !found {
			complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
		}

		usageForCommand(command, true)
	}
}
52
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/interrupt_handler.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
package interrupthandler

import (
	"os"
	"os/signal"
	"sync"
	"syscall"
)

type InterruptHandler struct {
	interruptCount int
	lock           *sync.Mutex
	C              chan bool
}

func NewInterruptHandler() *InterruptHandler {
	h := &InterruptHandler{
		lock: &sync.Mutex{},
		C:    make(chan bool),
	}

	go h.handleInterrupt()
	SwallowSigQuit()

	return h
}

func (h *InterruptHandler) WasInterrupted() bool {
	h.lock.Lock()
	defer h.lock.Unlock()

	return h.interruptCount > 0
}

func (h *InterruptHandler) handleInterrupt() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	<-c
	signal.Stop(c)

	h.lock.Lock()
	h.interruptCount++
	if h.interruptCount == 1 {
		close(h.C)
	} else if h.interruptCount > 5 {
		os.Exit(1)
	}
	h.lock.Unlock()

	go h.handleInterrupt()
}
14
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_unix.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
// +build freebsd openbsd netbsd dragonfly darwin linux solaris

package interrupthandler

import (
	"os"
	"os/signal"
	"syscall"
)

func SwallowSigQuit() {
	c := make(chan os.Signal, 1024)
	signal.Notify(c, syscall.SIGQUIT)
}
7
vendor/github.com/onsi/ginkgo/ginkgo/interrupthandler/sigquit_swallower_windows.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
// +build windows

package interrupthandler

func SwallowSigQuit() {
	//noop
}
308
vendor/github.com/onsi/ginkgo/ginkgo/main.go
generated
vendored
Normal file
@@ -0,0 +1,308 @@
/*
The Ginkgo CLI

The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)

You can also learn more by running:

	ginkgo help

Here are some of the more commonly used commands:

To install:

	go install github.com/onsi/ginkgo/ginkgo

To run tests:

	ginkgo

To run tests in all subdirectories:

	ginkgo -r

To run tests in particular packages:

	ginkgo <flags> /path/to/package /path/to/another/package

To pass arguments/flags to your tests:

	ginkgo <flags> <packages> -- <pass-throughs>

To run tests in parallel

	ginkgo -p

this will automatically detect the optimal number of nodes to use. Alternatively, you can specify the number of nodes with:

	ginkgo -nodes=N

(note that you don't need to provide -p in this case).

By default the Ginkgo CLI will spin up a server that the individual test processes send test output to. The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
An alternative is to have the parallel nodes run and stream interleaved output back. This useful for debugging, particularly in contexts where tests hang/fail to start. To get this interleaved output:

	ginkgo -nodes=N -stream=true

On windows, the default value for stream is true.

By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails. To have Ginkgo run subsequent test suites instead you can:

	ginkgo -keepGoing

To fail if there are ginkgo tests in a directory but no test suite (missing `RunSpecs`)

	ginkgo -requireSuite

To monitor packages and rerun tests when changes occur:

	ginkgo watch <-r> </path/to/package>

passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
that depend on X are not rerun.

[OSX & Linux only] To receive (desktop) notifications when a test run completes:

	ginkgo -notify

this is particularly useful with `ginkgo watch`. Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`

Sometimes (to suss out race conditions/flakey tests, for example) you want to keep running a test suite until it fails. You can do this with:

	ginkgo -untilItFails

To bootstrap a test suite:

	ginkgo bootstrap

To generate a test file:

	ginkgo generate <test_file_name>

To bootstrap/generate test files without using "." imports:

	ginkgo bootstrap --nodot
	ginkgo generate --nodot

this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions. When you pull to the latest Ginkgo/Gomega you'll want to run

	ginkgo nodot

to refresh this list and pull in any new identifiers. In particular, this will pull in any new Gomega matchers that get added.

To convert an existing XUnit style test suite to a Ginkgo-style test suite:

	ginkgo convert .

To unfocus tests:

	ginkgo unfocus

or

	ginkgo blur

To compile a test suite:

	ginkgo build <path-to-package>

will output an executable file named `package.test`. This can be run directly or by invoking

	ginkgo <path-to-package.test>


To print an outline of Ginkgo specs and containers in a file:

	gingko outline <filename>

To print out Ginkgo's version:

	ginkgo version

To get more help:

	ginkgo help
*/
package main

import (
	"flag"
	"fmt"
	"os"
	"os/exec"
	"strings"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/ginkgo/testsuite"
)

const greenColor = "\x1b[32m"
const redColor = "\x1b[91m"
const defaultStyle = "\x1b[0m"
const lightGrayColor = "\x1b[37m"

type Command struct {
	Name                      string
	AltName                   string
	FlagSet                   *flag.FlagSet
	Usage                     []string
	UsageCommand              string
	Command                   func(args []string, additionalArgs []string)
	SuppressFlagDocumentation bool
	FlagDocSubstitute         []string
}

func (c *Command) Matches(name string) bool {
	return c.Name == name || (c.AltName != "" && c.AltName == name)
}

func (c *Command) Run(args []string, additionalArgs []string) {
	c.FlagSet.Usage = usage
	c.FlagSet.Parse(args)
	c.Command(c.FlagSet.Args(), additionalArgs)
}

var DefaultCommand *Command
var Commands []*Command

func init() {
	DefaultCommand = BuildRunCommand()
	Commands = append(Commands, BuildWatchCommand())
	Commands = append(Commands, BuildBuildCommand())
	Commands = append(Commands, BuildBootstrapCommand())
	Commands = append(Commands, BuildGenerateCommand())
	Commands = append(Commands, BuildNodotCommand())
	Commands = append(Commands, BuildConvertCommand())
	Commands = append(Commands, BuildUnfocusCommand())
	Commands = append(Commands, BuildVersionCommand())
	Commands = append(Commands, BuildHelpCommand())
	Commands = append(Commands, BuildOutlineCommand())
}

func main() {
	args := []string{}
	additionalArgs := []string{}

	foundDelimiter := false

	for _, arg := range os.Args[1:] {
		if !foundDelimiter {
			if arg == "--" {
				foundDelimiter = true
				continue
			}
		}

		if foundDelimiter {
			additionalArgs = append(additionalArgs, arg)
		} else {
			args = append(args, arg)
		}
	}

	if len(args) > 0 {
		commandToRun, found := commandMatching(args[0])
		if found {
			commandToRun.Run(args[1:], additionalArgs)
			return
		}
	}

	DefaultCommand.Run(args, additionalArgs)
}

func commandMatching(name string) (*Command, bool) {
	for _, command := range Commands {
		if command.Matches(name) {
			return command, true
		}
	}
	return nil, false
}

func usage() {
	fmt.Printf("Ginkgo Version %s\n\n", config.VERSION)
	usageForCommand(DefaultCommand, false)
	for _, command := range Commands {
		fmt.Printf("\n")
		usageForCommand(command, false)
	}
}

func usageForCommand(command *Command, longForm bool) {
	fmt.Printf("%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
	fmt.Printf("%s\n", strings.Join(command.Usage, "\n"))
	if command.SuppressFlagDocumentation && !longForm {
		fmt.Printf("%s\n", strings.Join(command.FlagDocSubstitute, "\n  "))
	} else {
		command.FlagSet.SetOutput(os.Stdout)
		command.FlagSet.PrintDefaults()
	}
}

func complainAndQuit(complaint string) {
	fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
	os.Exit(1)
}

func findSuites(args []string, recurseForAll bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
	suites := []testsuite.TestSuite{}

	if len(args) > 0 {
		for _, arg := range args {
			if allowPrecompiled {
				suite, err := testsuite.PrecompiledTestSuite(arg)
				if err == nil {
					suites = append(suites, suite)
					continue
				}
			}
			recurseForSuite := recurseForAll
			if strings.HasSuffix(arg, "/...") && arg != "/..." {
				arg = arg[:len(arg)-4]
				recurseForSuite = true
			}
			suites = append(suites, testsuite.SuitesInDir(arg, recurseForSuite)...)
		}
	} else {
		suites = testsuite.SuitesInDir(".", recurseForAll)
	}

	skippedPackages := []string{}
	if skipPackage != "" {
		skipFilters := strings.Split(skipPackage, ",")
		filteredSuites := []testsuite.TestSuite{}
		for _, suite := range suites {
			skip := false
			for _, skipFilter := range skipFilters {
				if strings.Contains(suite.Path, skipFilter) {
					skip = true
					break
				}
			}
			if skip {
				skippedPackages = append(skippedPackages, suite.Path)
			} else {
				filteredSuites = append(filteredSuites, suite)
			}
		}
		suites = filteredSuites
	}

	return suites, skippedPackages
}

func goFmt(path string) {
	out, err := exec.Command("go", "fmt", path).CombinedOutput()
	if err != nil {
		complainAndQuit("Could not fmt: " + err.Error() + "\n" + string(out))
	}
}

func pluralizedWord(singular, plural string, count int) string {
	if count == 1 {
		return singular
	}
	return plural
}
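The CLI doc comment above describes passing arguments through to the test binary with `--`. As a quick illustration of the splitting performed in main(), here is a minimal sketch with a hypothetical argument list; it is not part of this commit.

	package main

	import "fmt"

	// Sketch only: reproduces main()'s delimiter handling for a fixed argv.
	func exampleSplitArgs() {
		osArgs := []string{"ginkgo", "-r", "-p", "--", "-customFlag=3"} // hypothetical invocation

		args := []string{}
		additionalArgs := []string{}
		foundDelimiter := false
		for _, arg := range osArgs[1:] {
			if !foundDelimiter && arg == "--" {
				foundDelimiter = true
				continue
			}
			if foundDelimiter {
				additionalArgs = append(additionalArgs, arg)
			} else {
				args = append(args, arg)
			}
		}

		fmt.Println(args)           // [-r -p]         -> parsed by the ginkgo CLI
		fmt.Println(additionalArgs) // [-customFlag=3] -> passed through to the test binary
	}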
196
vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
generated
vendored
Normal file
196
vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
generated
vendored
Normal file
|
@ -0,0 +1,196 @@
|
|||
package nodot
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ApplyNoDot(data []byte) ([]byte, error) {
|
||||
sections, err := generateNodotSections()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, section := range sections {
|
||||
data = section.createOrUpdateIn(data)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
type nodotSection struct {
|
||||
name string
|
||||
pkg string
|
||||
declarations []string
|
||||
types []string
|
||||
}
|
||||
|
||||
func (s nodotSection) createOrUpdateIn(data []byte) []byte {
|
||||
renames := map[string]string{}
|
||||
|
||||
contents := string(data)
|
||||
|
||||
lines := strings.Split(contents, "\n")
|
||||
|
||||
comment := "// Declarations for " + s.name
|
||||
|
||||
newLines := []string{}
|
||||
for _, line := range lines {
|
||||
if line == comment {
|
||||
continue
|
||||
}
|
||||
|
||||
words := strings.Split(line, " ")
|
||||
lastWord := words[len(words)-1]
|
||||
|
||||
if s.containsDeclarationOrType(lastWord) {
|
||||
renames[lastWord] = words[1]
|
||||
continue
|
||||
}
|
||||
|
||||
newLines = append(newLines, line)
|
||||
}
|
||||
|
||||
if len(newLines[len(newLines)-1]) > 0 {
|
||||
newLines = append(newLines, "")
|
||||
}
|
||||
|
||||
newLines = append(newLines, comment)
|
||||
|
||||
for _, typ := range s.types {
|
||||
name, ok := renames[s.prefix(typ)]
|
||||
if !ok {
|
||||
name = typ
|
||||
}
|
||||
newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
|
||||
}
|
||||
|
||||
for _, decl := range s.declarations {
|
||||
name, ok := renames[s.prefix(decl)]
|
||||
if !ok {
|
||||
name = decl
|
||||
}
|
||||
newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
|
||||
}
|
||||
|
||||
newLines = append(newLines, "")
|
||||
|
||||
newContents := strings.Join(newLines, "\n")
|
||||
|
||||
return []byte(newContents)
|
||||
}
|
||||
|
||||
func (s nodotSection) prefix(declOrType string) string {
|
||||
return s.pkg + "." + declOrType
|
||||
}
|
||||
|
||||
func (s nodotSection) containsDeclarationOrType(word string) bool {
|
||||
for _, declaration := range s.declarations {
|
||||
if s.prefix(declaration) == word {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
for _, typ := range s.types {
|
||||
if s.prefix(typ) == word {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func generateNodotSections() ([]nodotSection, error) {
|
||||
sections := []nodotSection{}
|
||||
|
||||
declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Ginkgo DSL",
|
||||
pkg: "ginkgo",
|
||||
declarations: declarations,
|
||||
types: []string{"Done", "Benchmarker"},
|
||||
})
|
||||
|
||||
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Gomega DSL",
|
||||
pkg: "gomega",
|
||||
declarations: declarations,
|
||||
})
|
||||
|
||||
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Gomega Matchers",
|
||||
pkg: "gomega",
|
||||
declarations: declarations,
|
||||
})
|
||||
|
||||
return sections, nil
|
||||
}
|
||||
|
||||
func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
|
||||
pkg, err := build.Import(pkgPath, ".", 0)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
blacklistLookup := map[string]bool{}
|
||||
for _, declaration := range blacklist {
|
||||
blacklistLookup[declaration] = true
|
||||
}
|
||||
|
||||
filteredDeclarations := []string{}
|
||||
for _, declaration := range declarations {
|
||||
if blacklistLookup[declaration] {
|
||||
continue
|
||||
}
|
||||
filteredDeclarations = append(filteredDeclarations, declaration)
|
||||
}
|
||||
|
||||
return filteredDeclarations, nil
|
||||
}
|
||||
|
||||
func getExportedDeclarationsForFile(path string) ([]string, error) {
|
||||
fset := token.NewFileSet()
|
||||
tree, err := parser.ParseFile(fset, path, nil, 0)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
declarations := []string{}
|
||||
ast.FileExports(tree)
|
||||
for _, decl := range tree.Decls {
|
||||
switch x := decl.(type) {
|
||||
case *ast.GenDecl:
|
||||
switch s := x.Specs[0].(type) {
|
||||
case *ast.ValueSpec:
|
||||
declarations = append(declarations, s.Names[0].Name)
|
||||
}
|
||||
case *ast.FuncDecl:
|
||||
if x.Recv == nil {
|
||||
declarations = append(declarations, x.Name.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return declarations, nil
|
||||
}
|
77
vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
generated
vendored
Normal file
77
vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/nodot"
|
||||
)
|
||||
|
||||
func BuildNodotCommand() *Command {
|
||||
return &Command{
|
||||
Name: "nodot",
|
||||
FlagSet: flag.NewFlagSet("bootstrap", flag.ExitOnError),
|
||||
UsageCommand: "ginkgo nodot",
|
||||
Usage: []string{
|
||||
"Update the nodot declarations in your test suite",
|
||||
"Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
|
||||
"If you've renamed a declaration, that name will be honored and not overwritten.",
|
||||
},
|
||||
Command: updateNodot,
|
||||
}
|
||||
}
|
||||
|
||||
func updateNodot(args []string, additionalArgs []string) {
|
||||
suiteFile, perm := findSuiteFile()
|
||||
|
||||
data, err := ioutil.ReadFile(suiteFile)
|
||||
if err != nil {
|
||||
complainAndQuit("Failed to update nodot declarations: " + err.Error())
|
||||
}
|
||||
|
||||
content, err := nodot.ApplyNoDot(data)
|
||||
if err != nil {
|
||||
complainAndQuit("Failed to update nodot declarations: " + err.Error())
|
||||
}
|
||||
ioutil.WriteFile(suiteFile, content, perm)
|
||||
|
||||
goFmt(suiteFile)
|
||||
}
|
||||
|
||||
func findSuiteFile() (string, os.FileMode) {
|
||||
workingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||
}
|
||||
|
||||
files, err := ioutil.ReadDir(workingDir)
|
||||
if err != nil {
|
||||
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)
|
||||
|
||||
for _, file := range files {
|
||||
if file.IsDir() {
|
||||
continue
|
||||
}
|
||||
path := filepath.Join(workingDir, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if re.MatchReader(bufio.NewReader(f)) {
|
||||
return path, file.Mode()
|
||||
}
|
||||
}
|
||||
|
||||
complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that call's Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")
|
||||
|
||||
return "", 0
|
||||
}
|
141
vendor/github.com/onsi/ginkgo/ginkgo/notifications.go
generated
vendored
Normal file
141
vendor/github.com/onsi/ginkgo/ginkgo/notifications.go
generated
vendored
Normal file
|
@ -0,0 +1,141 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type Notifier struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
}
|
||||
|
||||
func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
|
||||
return &Notifier{
|
||||
commandFlags: commandFlags,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) VerifyNotificationsAreAvailable() {
|
||||
if n.commandFlags.Notify {
|
||||
onLinux := (runtime.GOOS == "linux")
|
||||
onOSX := (runtime.GOOS == "darwin")
|
||||
if onOSX {
|
||||
|
||||
_, err := exec.LookPath("terminal-notifier")
|
||||
if err != nil {
|
||||
fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
|
||||
|
||||
OSX:
|
||||
|
||||
To remedy this:
|
||||
|
||||
brew install terminal-notifier
|
||||
|
||||
To learn more about terminal-notifier:
|
||||
|
||||
https://github.com/alloy/terminal-notifier
|
||||
`)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
} else if onLinux {
|
||||
|
||||
_, err := exec.LookPath("notify-send")
|
||||
if err != nil {
|
||||
fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
|
||||
|
||||
Linux:
|
||||
|
||||
Download and install notify-send for your distribution
|
||||
`)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
|
||||
if suitePassed {
|
||||
n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
|
||||
} else {
|
||||
n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) SendNotification(title string, subtitle string) {
|
||||
|
||||
if n.commandFlags.Notify {
|
||||
onLinux := (runtime.GOOS == "linux")
|
||||
onOSX := (runtime.GOOS == "darwin")
|
||||
|
||||
if onOSX {
|
||||
|
||||
_, err := exec.LookPath("terminal-notifier")
|
||||
if err == nil {
|
||||
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
|
||||
terminal := os.Getenv("TERM_PROGRAM")
|
||||
if terminal == "iTerm.app" {
|
||||
args = append(args, "-activate", "com.googlecode.iterm2")
|
||||
} else if terminal == "Apple_Terminal" {
|
||||
args = append(args, "-activate", "com.apple.Terminal")
|
||||
}
|
||||
|
||||
exec.Command("terminal-notifier", args...).Run()
|
||||
}
|
||||
|
||||
} else if onLinux {
|
||||
|
||||
_, err := exec.LookPath("notify-send")
|
||||
if err == nil {
|
||||
args := []string{"-a", "ginkgo", title, subtitle}
|
||||
exec.Command("notify-send", args...).Run()
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {
|
||||
|
||||
command := n.commandFlags.AfterSuiteHook
|
||||
if command != "" {
|
||||
|
||||
// Allow for string replacement to pass input to the command
|
||||
passed := "[FAIL]"
|
||||
if suitePassed {
|
||||
passed = "[PASS]"
|
||||
}
|
||||
command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
|
||||
command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)
|
||||
|
||||
// Must break command into parts
|
||||
splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
|
||||
parts := splitArgs.FindAllString(command, -1)
|
||||
|
||||
output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
|
||||
if err != nil {
|
||||
fmt.Println("Post-suite command failed:")
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t%s\n", output)
|
||||
} else {
|
||||
fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
|
||||
}
|
||||
n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
|
||||
} else {
|
||||
fmt.Println("Post-suite command succeeded:")
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t%s\n", output)
|
||||
} else {
|
||||
fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
243
vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go
generated
vendored
Normal file
243
vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go
generated
vendored
Normal file
|
@ -0,0 +1,243 @@
|
|||
package outline
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
// undefinedTextAlt is used if the spec/container text cannot be derived
|
||||
undefinedTextAlt = "undefined"
|
||||
)
|
||||
|
||||
// ginkgoMetadata holds useful bits of information for every entry in the outline
|
||||
type ginkgoMetadata struct {
|
||||
// Name is the spec or container function name, e.g. `Describe` or `It`
|
||||
Name string `json:"name"`
|
||||
|
||||
// Text is the `text` argument passed to specs, and some containers
|
||||
Text string `json:"text"`
|
||||
|
||||
// Start is the position of first character of the spec or container block
|
||||
Start int `json:"start"`
|
||||
|
||||
// End is the position of first character immediately after the spec or container block
|
||||
End int `json:"end"`
|
||||
|
||||
Spec bool `json:"spec"`
|
||||
Focused bool `json:"focused"`
|
||||
Pending bool `json:"pending"`
|
||||
}
|
||||
|
||||
// ginkgoNode is used to construct the outline as a tree
|
||||
type ginkgoNode struct {
|
||||
ginkgoMetadata
|
||||
Nodes []*ginkgoNode `json:"nodes"`
|
||||
}
|
||||
|
||||
type walkFunc func(n *ginkgoNode)
|
||||
|
||||
func (n *ginkgoNode) PreOrder(f walkFunc) {
|
||||
f(n)
|
||||
for _, m := range n.Nodes {
|
||||
m.PreOrder(f)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *ginkgoNode) PostOrder(f walkFunc) {
|
||||
for _, m := range n.Nodes {
|
||||
m.PostOrder(f)
|
||||
}
|
||||
f(n)
|
||||
}
|
||||
|
||||
func (n *ginkgoNode) Walk(pre, post walkFunc) {
|
||||
pre(n)
|
||||
for _, m := range n.Nodes {
|
||||
m.Walk(pre, post)
|
||||
}
|
||||
post(n)
|
||||
}
|
||||
|
||||
// PropagateInheritedProperties propagates the Pending and Focused properties
|
||||
// through the subtree rooted at n.
|
||||
func (n *ginkgoNode) PropagateInheritedProperties() {
|
||||
n.PreOrder(func(thisNode *ginkgoNode) {
|
||||
for _, descendantNode := range thisNode.Nodes {
|
||||
if thisNode.Pending {
|
||||
descendantNode.Pending = true
|
||||
descendantNode.Focused = false
|
||||
}
|
||||
if thisNode.Focused && !descendantNode.Pending {
|
||||
descendantNode.Focused = true
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// BackpropagateUnfocus propagates the Focused property through the subtree
|
||||
// rooted at n. It applies the rule described in the Ginkgo docs:
|
||||
// > Nested programmatically focused specs follow a simple rule: if a
|
||||
// > leaf-node is marked focused, any of its ancestor nodes that are marked
|
||||
// > focus will be unfocused.
|
||||
func (n *ginkgoNode) BackpropagateUnfocus() {
|
||||
focusedSpecInSubtreeStack := []bool{}
|
||||
n.PostOrder(func(thisNode *ginkgoNode) {
|
||||
if thisNode.Spec {
|
||||
focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused)
|
||||
return
|
||||
}
|
||||
focusedSpecInSubtree := false
|
||||
for range thisNode.Nodes {
|
||||
focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1]
|
||||
focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1]
|
||||
}
|
||||
focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree)
|
||||
if focusedSpecInSubtree {
|
||||
thisNode.Focused = false
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) {
|
||||
switch ex := ce.Fun.(type) {
|
||||
case *ast.Ident:
|
||||
return "", ex.Name, true
|
||||
case *ast.SelectorExpr:
|
||||
pkgID, ok := ex.X.(*ast.Ident)
|
||||
if !ok {
|
||||
return "", "", false
|
||||
}
|
||||
// A package identifier is top-level, so Obj must be nil
|
||||
if pkgID.Obj != nil {
|
||||
return "", "", false
|
||||
}
|
||||
if ex.Sel == nil {
|
||||
return "", "", false
|
||||
}
|
||||
return pkgID.Name, ex.Sel.Name, true
|
||||
default:
|
||||
return "", "", false
|
||||
}
|
||||
}
|
||||
|
||||
// absoluteOffsetsForNode derives the absolute character offsets of the node start and
|
||||
// end positions.
|
||||
func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) {
|
||||
return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset
|
||||
}
|
||||
|
||||
// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree
|
||||
// corresponding to a Ginkgo container or spec.
|
||||
func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName, tablePackageName *string) (*ginkgoNode, bool) {
|
||||
packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
n := ginkgoNode{}
|
||||
n.Name = identName
|
||||
n.Start, n.End = absoluteOffsetsForNode(fset, ce)
|
||||
n.Nodes = make([]*ginkgoNode, 0)
|
||||
switch identName {
|
||||
case "It", "Measure", "Specify":
|
||||
n.Spec = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "Entry":
|
||||
n.Spec = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, tablePackageName != nil && *tablePackageName == packageName
|
||||
case "FIt", "FMeasure", "FSpecify":
|
||||
n.Spec = true
|
||||
n.Focused = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "FEntry":
|
||||
n.Spec = true
|
||||
n.Focused = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, tablePackageName != nil && *tablePackageName == packageName
|
||||
case "PIt", "PMeasure", "PSpecify", "XIt", "XMeasure", "XSpecify":
|
||||
n.Spec = true
|
||||
n.Pending = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "PEntry", "XEntry":
|
||||
n.Spec = true
|
||||
n.Pending = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, tablePackageName != nil && *tablePackageName == packageName
|
||||
case "Context", "Describe", "When":
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "DescribeTable":
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, tablePackageName != nil && *tablePackageName == packageName
|
||||
case "FContext", "FDescribe", "FWhen":
|
||||
n.Focused = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "FDescribeTable":
|
||||
n.Focused = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, tablePackageName != nil && *tablePackageName == packageName
|
||||
case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen":
|
||||
n.Pending = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "PDescribeTable", "XDescribeTable":
|
||||
n.Pending = true
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, tablePackageName != nil && *tablePackageName == packageName
|
||||
case "By":
|
||||
n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "AfterEach", "BeforeEach":
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "JustAfterEach", "JustBeforeEach":
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "AfterSuite", "BeforeSuite":
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
case "SynchronizedAfterSuite", "SynchronizedBeforeSuite":
|
||||
return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or
|
||||
// container. If it cannot derive it, it returns the alt text.
|
||||
func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string {
|
||||
text, defined := textFromCallExpr(ce)
|
||||
if !defined {
|
||||
return alt
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If
|
||||
// it cannot derive it, it returns false.
|
||||
func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
|
||||
if len(ce.Args) < 1 {
|
||||
return "", false
|
||||
}
|
||||
text, ok := ce.Args[0].(*ast.BasicLit)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
switch text.Kind {
|
||||
case token.CHAR, token.STRING:
|
||||
// For token.CHAR and token.STRING, Value is quoted
|
||||
unquoted, err := strconv.Unquote(text.Value)
|
||||
if err != nil {
|
||||
// If unquoting fails, just use the raw Value
|
||||
return text.Value, true
|
||||
}
|
||||
return unquoted, true
|
||||
default:
|
||||
return text.Value, true
|
||||
}
|
||||
}
|
65
vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go
generated
vendored
Normal file
65
vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go
generated
vendored
Normal file
|
@ -0,0 +1,65 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Most of the required functions were available in the
|
||||
// "golang.org/x/tools/go/ast/astutil" package, but not exported.
|
||||
// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go
|
||||
|
||||
package outline
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// packageNameForImport returns the package name for the package. If the package
|
||||
// is not imported, it returns nil. "Package name" refers to `pkgname` in the
|
||||
// call expression `pkgname.ExportedIdentifier`. Examples:
|
||||
// (import path not found) -> nil
|
||||
// "import example.com/pkg/foo" -> "foo"
|
||||
// "import fooalias example.com/pkg/foo" -> "fooalias"
|
||||
// "import . example.com/pkg/foo" -> ""
|
||||
func packageNameForImport(f *ast.File, path string) *string {
|
||||
spec := importSpec(f, path)
|
||||
if spec == nil {
|
||||
return nil
|
||||
}
|
||||
name := spec.Name.String()
|
||||
if name == "<nil>" {
|
||||
// If the package name is not explicitly specified,
|
||||
// make an educated guess. This is not guaranteed to be correct.
|
||||
lastSlash := strings.LastIndex(path, "/")
|
||||
if lastSlash == -1 {
|
||||
name = path
|
||||
} else {
|
||||
name = path[lastSlash+1:]
|
||||
}
|
||||
}
|
||||
if name == "." {
|
||||
name = ""
|
||||
}
|
||||
return &name
|
||||
}
|
||||
|
||||
// importSpec returns the import spec if f imports path,
|
||||
// or nil otherwise.
|
||||
func importSpec(f *ast.File, path string) *ast.ImportSpec {
|
||||
for _, s := range f.Imports {
|
||||
if importPath(s) == path {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// importPath returns the unquoted import path of s,
|
||||
// or "" if the path is not properly quoted.
|
||||
func importPath(s *ast.ImportSpec) string {
|
||||
t, err := strconv.Unquote(s.Path.Value)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return t
|
||||
}
|
107
vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go
generated
vendored
Normal file
107
vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go
generated
vendored
Normal file
|
@ -0,0 +1,107 @@
|
|||
package outline
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/inspector"
|
||||
)
|
||||
|
||||
const (
|
||||
// ginkgoImportPath is the well-known ginkgo import path
|
||||
ginkgoImportPath = "github.com/onsi/ginkgo"
|
||||
|
||||
// tableImportPath is the well-known table extension import path
|
||||
tableImportPath = "github.com/onsi/ginkgo/extensions/table"
|
||||
)
|
||||
|
||||
// FromASTFile returns an outline for a Ginkgo test source file
|
||||
func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
|
||||
ginkgoPackageName := packageNameForImport(src, ginkgoImportPath)
|
||||
tablePackageName := packageNameForImport(src, tableImportPath)
|
||||
if ginkgoPackageName == nil && tablePackageName == nil {
|
||||
return nil, fmt.Errorf("file does not import %q or %q", ginkgoImportPath, tableImportPath)
|
||||
}
|
||||
|
||||
root := ginkgoNode{}
|
||||
stack := []*ginkgoNode{&root}
|
||||
ispr := inspector.New([]*ast.File{src})
|
||||
ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool {
|
||||
if push {
|
||||
// Pre-order traversal
|
||||
ce, ok := node.(*ast.CallExpr)
|
||||
if !ok {
|
||||
// Because `Nodes` calls this function only when the node is an
|
||||
// ast.CallExpr, this should never happen
|
||||
panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End()))
|
||||
}
|
||||
gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName, tablePackageName)
|
||||
if !ok {
|
||||
// Node is not a Ginkgo spec or container, continue
|
||||
return true
|
||||
}
|
||||
parent := stack[len(stack)-1]
|
||||
parent.Nodes = append(parent.Nodes, gn)
|
||||
stack = append(stack, gn)
|
||||
return true
|
||||
}
|
||||
// Post-order traversal
|
||||
start, end := absoluteOffsetsForNode(fset, node)
|
||||
lastVisitedGinkgoNode := stack[len(stack)-1]
|
||||
if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End {
|
||||
// Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue
|
||||
return true
|
||||
}
|
||||
stack = stack[0 : len(stack)-1]
|
||||
return true
|
||||
})
|
||||
if len(root.Nodes) == 0 {
|
||||
return &outline{[]*ginkgoNode{}}, nil
|
||||
}
|
||||
|
||||
// Derive the final focused property for all nodes. This must be done
|
||||
// _before_ propagating the inherited focused property.
|
||||
root.BackpropagateUnfocus()
|
||||
// Now, propagate inherited properties, including focused and pending.
|
||||
root.PropagateInheritedProperties()
|
||||
|
||||
return &outline{root.Nodes}, nil
|
||||
}
|
||||
|
||||
type outline struct {
|
||||
Nodes []*ginkgoNode `json:"nodes"`
|
||||
}
|
||||
|
||||
func (o *outline) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(o.Nodes)
|
||||
}
|
||||
|
||||
// String returns a CSV-formatted outline. Spec or container are output in
|
||||
// depth-first order.
|
||||
func (o *outline) String() string {
|
||||
return o.StringIndent(0)
|
||||
}
|
||||
|
||||
// StringIndent returns a CSV-formated outline, but every line is indented by
|
||||
// one 'width' of spaces for every level of nesting.
|
||||
func (o *outline) StringIndent(width int) string {
|
||||
var b strings.Builder
|
||||
b.WriteString("Name,Text,Start,End,Spec,Focused,Pending\n")
|
||||
|
||||
currentIndent := 0
|
||||
pre := func(n *ginkgoNode) {
|
||||
b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
|
||||
b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending))
|
||||
currentIndent += width
|
||||
}
|
||||
post := func(n *ginkgoNode) {
|
||||
currentIndent -= width
|
||||
}
|
||||
for _, n := range o.Nodes {
|
||||
n.Walk(pre, post)
|
||||
}
|
||||
return b.String()
|
||||
}
|
95
vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go
generated
vendored
Normal file
95
vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/outline"
|
||||
)
|
||||
|
||||
const (
|
||||
// indentWidth is the width used by the 'indent' output
|
||||
indentWidth = 4
|
||||
// stdinAlias is a portable alias for stdin. This convention is used in
|
||||
// other CLIs, e.g., kubectl.
|
||||
stdinAlias = "-"
|
||||
usageCommand = "ginkgo outline <filename>"
|
||||
)
|
||||
|
||||
func BuildOutlineCommand() *Command {
|
||||
const defaultFormat = "csv"
|
||||
var format string
|
||||
flagSet := flag.NewFlagSet("outline", flag.ExitOnError)
|
||||
flagSet.StringVar(&format, "format", defaultFormat, "Format of outline. Accepted: 'csv', 'indent', 'json'")
|
||||
return &Command{
|
||||
Name: "outline",
|
||||
FlagSet: flagSet,
|
||||
UsageCommand: usageCommand,
|
||||
Usage: []string{
|
||||
"Create an outline of Ginkgo symbols for a file",
|
||||
"To read from stdin, use: `ginkgo outline -`",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
outlineFile(args, format)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func outlineFile(args []string, format string) {
|
||||
if len(args) != 1 {
|
||||
println(fmt.Sprintf("usage: %s", usageCommand))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
filename := args[0]
|
||||
var src *os.File
|
||||
if filename == stdinAlias {
|
||||
src = os.Stdin
|
||||
} else {
|
||||
var err error
|
||||
src, err = os.Open(filename)
|
||||
if err != nil {
|
||||
println(fmt.Sprintf("error opening file: %s", err))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
fset := token.NewFileSet()
|
||||
|
||||
parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
|
||||
if err != nil {
|
||||
println(fmt.Sprintf("error parsing source: %s", err))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
o, err := outline.FromASTFile(fset, parsedSrc)
|
||||
if err != nil {
|
||||
println(fmt.Sprintf("error creating outline: %s", err))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var oerr error
|
||||
switch format {
|
||||
case "csv":
|
||||
_, oerr = fmt.Print(o)
|
||||
case "indent":
|
||||
_, oerr = fmt.Print(o.StringIndent(indentWidth))
|
||||
case "json":
|
||||
b, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
println(fmt.Sprintf("error marshalling to json: %s", err))
|
||||
}
|
||||
_, oerr = fmt.Println(string(b))
|
||||
default:
|
||||
complainAndQuit(fmt.Sprintf("format %s not accepted", format))
|
||||
}
|
||||
if oerr != nil {
|
||||
println(fmt.Sprintf("error writing outline: %s", oerr))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
315
vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
generated
vendored
Normal file
315
vendor/github.com/onsi/ginkgo/ginkgo/run_command.go
generated
vendored
Normal file
|
@ -0,0 +1,315 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func BuildRunCommand() *Command {
|
||||
commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
|
||||
notifier := NewNotifier(commandFlags)
|
||||
interruptHandler := interrupthandler.NewInterruptHandler()
|
||||
runner := &SpecRunner{
|
||||
commandFlags: commandFlags,
|
||||
notifier: notifier,
|
||||
interruptHandler: interruptHandler,
|
||||
suiteRunner: NewSuiteRunner(notifier, interruptHandler),
|
||||
}
|
||||
|
||||
return &Command{
|
||||
Name: "",
|
||||
FlagSet: commandFlags.FlagSet,
|
||||
UsageCommand: "ginkgo <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||
Usage: []string{
|
||||
"Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank).",
|
||||
"Any arguments after -- will be passed to the test.",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: runner.RunSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
type SpecRunner struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
notifier *Notifier
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
suiteRunner *SuiteRunner
|
||||
}
|
||||
|
||||
func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
|
||||
r.commandFlags.computeNodes()
|
||||
r.notifier.VerifyNotificationsAreAvailable()
|
||||
|
||||
deprecationTracker := types.NewDeprecationTracker()
|
||||
|
||||
if r.commandFlags.ParallelStream && (runtime.GOOS != "windows") {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecation{
|
||||
Message: "--stream is deprecated and will be removed in Ginkgo 2.0",
|
||||
DocLink: "removed--stream",
|
||||
Version: "1.16.0",
|
||||
})
|
||||
}
|
||||
|
||||
if r.commandFlags.Notify {
|
||||
deprecationTracker.TrackDeprecation(types.Deprecation{
|
||||
Message: "--notify is deprecated and will be removed in Ginkgo 2.0",
|
||||
DocLink: "removed--notify",
|
||||
Version: "1.16.0",
|
||||
})
|
||||
}
|
||||
|
||||
if deprecationTracker.DidTrackDeprecations() {
|
||||
fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport())
|
||||
}
|
||||
|
||||
suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
|
||||
if len(skippedPackages) > 0 {
|
||||
fmt.Println("Will skip:")
|
||||
for _, skippedPackage := range skippedPackages {
|
||||
fmt.Println(" " + skippedPackage)
|
||||
}
|
||||
}
|
||||
|
||||
if len(skippedPackages) > 0 && len(suites) == 0 {
|
||||
fmt.Println("All tests skipped! Exiting...")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if len(suites) == 0 {
|
||||
complainAndQuit("Found no test suites")
|
||||
}
|
||||
|
||||
r.ComputeSuccinctMode(len(suites))
|
||||
|
||||
t := time.Now()
|
||||
|
||||
runners := []*testrunner.TestRunner{}
|
||||
for _, suite := range suites {
|
||||
runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Timeout, r.commandFlags.GoOpts, additionalArgs))
|
||||
}
|
||||
|
||||
numSuites := 0
|
||||
runResult := testrunner.PassingRunResult()
|
||||
if r.commandFlags.UntilItFails {
|
||||
iteration := 0
|
||||
for {
|
||||
r.UpdateSeed()
|
||||
randomizedRunners := r.randomizeOrder(runners)
|
||||
runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
|
||||
iteration++
|
||||
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
break
|
||||
}
|
||||
|
||||
if runResult.Passed {
|
||||
fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
|
||||
} else {
|
||||
fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
randomizedRunners := r.randomizeOrder(runners)
|
||||
runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
|
||||
}
|
||||
|
||||
for _, runner := range runners {
|
||||
runner.CleanUp()
|
||||
}
|
||||
|
||||
if r.isInCoverageMode() {
|
||||
if r.getOutputDir() != "" {
|
||||
// If coverprofile is set, combine coverages
|
||||
if r.getCoverprofile() != "" {
|
||||
if err := r.combineCoverprofiles(runners); err != nil {
|
||||
fmt.Println(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
// Just move them
|
||||
r.moveCoverprofiles(runners)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))
|
||||
|
||||
if runResult.Passed {
|
||||
if runResult.HasProgrammaticFocus && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
|
||||
fmt.Printf("Test Suite Passed\n")
|
||||
fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
|
||||
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||
} else {
|
||||
fmt.Printf("Test Suite Passed\n")
|
||||
os.Exit(0)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Test Suite Failed\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Moves all generated profiles to specified directory
|
||||
func (r *SpecRunner) moveCoverprofiles(runners []*testrunner.TestRunner) {
|
||||
for _, runner := range runners {
|
||||
_, filename := filepath.Split(runner.CoverageFile)
|
||||
err := os.Rename(runner.CoverageFile, filepath.Join(r.getOutputDir(), filename))
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Unable to move coverprofile %s, %v\n", runner.CoverageFile, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Combines all generated profiles in the specified directory
|
||||
func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) error {
|
||||
|
||||
path, _ := filepath.Abs(r.getOutputDir())
|
||||
if !fileExists(path) {
|
||||
return fmt.Errorf("Unable to create combined profile, outputdir does not exist: %s", r.getOutputDir())
|
||||
}
|
||||
|
||||
fmt.Println("path is " + path)
|
||||
|
||||
combined, err := os.OpenFile(
|
||||
filepath.Join(path, r.getCoverprofile()),
|
||||
os.O_WRONLY|os.O_CREATE,
|
||||
0666,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Unable to create combined profile, %v\n", err)
|
||||
return nil // non-fatal error
|
||||
}
|
||||
|
||||
modeRegex := regexp.MustCompile(`^mode: .*\n`)
|
||||
for index, runner := range runners {
|
||||
contents, err := ioutil.ReadFile(runner.CoverageFile)
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err)
|
||||
return nil // non-fatal error
|
||||
}
|
||||
|
||||
// remove the cover mode line from every file
|
||||
// except the first one
|
||||
if index > 0 {
|
||||
contents = modeRegex.ReplaceAll(contents, []byte{})
|
||||
}
|
||||
|
||||
_, err = combined.Write(contents)
|
||||
|
||||
// Add a newline to the end of every file if missing.
|
||||
if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
|
||||
_, err = combined.Write([]byte("\n"))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Unable to append to coverprofile, %v\n", err)
|
||||
return nil // non-fatal error
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("All profiles combined")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *SpecRunner) isInCoverageMode() bool {
|
||||
opts := r.commandFlags.GoOpts
|
||||
return *opts["cover"].(*bool) || *opts["coverpkg"].(*string) != "" || *opts["covermode"].(*string) != ""
|
||||
}
|
||||
|
||||
func (r *SpecRunner) getCoverprofile() string {
|
||||
return *r.commandFlags.GoOpts["coverprofile"].(*string)
|
||||
}
|
||||
|
||||
func (r *SpecRunner) getOutputDir() string {
|
||||
return *r.commandFlags.GoOpts["outputdir"].(*string)
|
||||
}
|
||||
|
||||
func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
|
||||
if config.DefaultReporterConfig.Verbose {
|
||||
config.DefaultReporterConfig.Succinct = false
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites == 1 {
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
|
||||
config.DefaultReporterConfig.Succinct = true
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SpecRunner) UpdateSeed() {
|
||||
if !r.commandFlags.wasSet("seed") {
|
||||
config.GinkgoConfig.RandomSeed = time.Now().Unix()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
|
||||
if !r.commandFlags.RandomizeSuites {
|
||||
return runners
|
||||
}
|
||||
|
||||
if len(runners) <= 1 {
|
||||
return runners
|
||||
}
|
||||
|
||||
randomizedRunners := make([]*testrunner.TestRunner, len(runners))
|
||||
randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
|
||||
permutation := randomizer.Perm(len(runners))
|
||||
for i, j := range permutation {
|
||||
randomizedRunners[i] = runners[j]
|
||||
}
|
||||
return randomizedRunners
|
||||
}
|
||||
|
||||
func orcMessage(iteration int) string {
|
||||
if iteration < 10 {
|
||||
return ""
|
||||
} else if iteration < 30 {
|
||||
return []string{
|
||||
"If at first you succeed...",
|
||||
"...try, try again.",
|
||||
"Looking good!",
|
||||
"Still good...",
|
||||
"I think your tests are fine....",
|
||||
"Yep, still passing",
|
||||
"Oh boy, here I go testin' again!",
|
||||
"Even the gophers are getting bored",
|
||||
"Did you try -race?",
|
||||
"Maybe you should stop now?",
|
||||
"I'm getting tired...",
|
||||
"What if I just made you a sandwich?",
|
||||
"Hit ^C, hit ^C, please hit ^C",
|
||||
"Make it stop. Please!",
|
||||
"Come on! Enough is enough!",
|
||||
"Dave, this conversation can serve no purpose anymore. Goodbye.",
|
||||
"Just what do you think you're doing, Dave? ",
|
||||
"I, Sisyphus",
|
||||
"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
|
||||
"I guess Einstein never tried to churn butter",
|
||||
}[iteration-10] + "\n"
|
||||
} else {
|
||||
return "No, seriously... you can probably stop now.\n"
|
||||
}
|
||||
}
|
169
vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
generated
vendored
Normal file
169
vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
generated
vendored
Normal file
|
@ -0,0 +1,169 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"runtime"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
)
|
||||
|
||||
type RunWatchAndBuildCommandFlags struct {
|
||||
Recurse bool
|
||||
SkipPackage string
|
||||
GoOpts map[string]interface{}
|
||||
|
||||
//for run and watch commands
|
||||
NumCPU int
|
||||
NumCompilers int
|
||||
ParallelStream bool
|
||||
Notify bool
|
||||
AfterSuiteHook string
|
||||
AutoNodes bool
|
||||
Timeout time.Duration
|
||||
|
||||
//only for run command
|
||||
KeepGoing bool
|
||||
UntilItFails bool
|
||||
RandomizeSuites bool
|
||||
|
||||
//only for watch command
|
||||
Depth int
|
||||
WatchRegExp string
|
||||
|
||||
FlagSet *flag.FlagSet
|
||||
}
|
||||
|
||||
const runMode = 1
|
||||
const watchMode = 2
|
||||
const buildMode = 3
|
||||
|
||||
func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(runMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(watchMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(buildMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
|
||||
wasSet := false
|
||||
c.FlagSet.Visit(func(f *flag.Flag) {
|
||||
if f.Name == flagName {
|
||||
wasSet = true
|
||||
}
|
||||
})
|
||||
|
||||
return wasSet
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) computeNodes() {
|
||||
if c.wasSet("nodes") {
|
||||
return
|
||||
}
|
||||
if c.AutoNodes {
|
||||
switch n := runtime.NumCPU(); {
|
||||
case n <= 4:
|
||||
c.NumCPU = n
|
||||
default:
|
||||
c.NumCPU = n - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) stringSlot(slot string) *string {
|
||||
var opt string
|
||||
c.GoOpts[slot] = &opt
|
||||
return &opt
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) boolSlot(slot string) *bool {
|
||||
var opt bool
|
||||
c.GoOpts[slot] = &opt
|
||||
return &opt
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) intSlot(slot string) *int {
|
||||
var opt int
|
||||
c.GoOpts[slot] = &opt
|
||||
return &opt
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
|
||||
c.GoOpts = make(map[string]interface{})
|
||||
|
||||
onWindows := (runtime.GOOS == "windows")
|
||||
|
||||
c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("race"), "race", false, "Run tests with race detection enabled.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("cover"), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory.")
|
||||
c.FlagSet.StringVar(c.stringSlot("coverpkg"), "coverpkg", "", "Run tests with coverage on the given external modules.")
|
||||
c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
|
||||
c.FlagSet.StringVar(c.stringSlot("tags"), "tags", "", "A list of build tags to consider satisfied during the build.")
|
||||
c.FlagSet.StringVar(c.stringSlot("gcflags"), "gcflags", "", "Arguments to pass on each go tool compile invocation.")
|
||||
c.FlagSet.StringVar(c.stringSlot("covermode"), "covermode", "", "Set the mode for coverage analysis.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("a"), "a", false, "Force rebuilding of packages that are already up-to-date.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("n"), "n", false, "Have `go test` print the commands but do not run them.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("msan"), "msan", false, "Enable interoperation with memory sanitizer.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("x"), "x", false, "Have `go test` print the commands.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("work"), "work", false, "Print the name of the temporary work directory and do not delete it when exiting.")
|
||||
c.FlagSet.StringVar(c.stringSlot("asmflags"), "asmflags", "", "Arguments to pass on each go tool asm invocation.")
|
||||
c.FlagSet.StringVar(c.stringSlot("buildmode"), "buildmode", "", "Build mode to use. See 'go help buildmode' for more.")
|
||||
c.FlagSet.StringVar(c.stringSlot("mod"), "mod", "", "Go module control. See 'go help modules' for more.")
|
||||
c.FlagSet.StringVar(c.stringSlot("compiler"), "compiler", "", "Name of compiler to use, as in runtime.Compiler (gccgo or gc).")
|
||||
c.FlagSet.StringVar(c.stringSlot("gccgoflags"), "gccgoflags", "", "Arguments to pass on each gccgo compiler/linker invocation.")
|
||||
c.FlagSet.StringVar(c.stringSlot("installsuffix"), "installsuffix", "", "A suffix to use in the name of the package installation directory.")
|
||||
c.FlagSet.StringVar(c.stringSlot("ldflags"), "ldflags", "", "Arguments to pass on each go tool link invocation.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("linkshared"), "linkshared", false, "Link against shared libraries previously created with -buildmode=shared.")
|
||||
c.FlagSet.StringVar(c.stringSlot("pkgdir"), "pkgdir", "", "install and load all packages from the given dir instead of the usual locations.")
|
||||
c.FlagSet.StringVar(c.stringSlot("toolexec"), "toolexec", "", "a program to use to invoke toolchain programs like vet and asm.")
|
||||
c.FlagSet.IntVar(c.intSlot("blockprofilerate"), "blockprofilerate", 1, "Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with the given value.")
|
||||
c.FlagSet.StringVar(c.stringSlot("coverprofile"), "coverprofile", "", "Write a coverage profile to the specified file after all tests have passed.")
|
||||
c.FlagSet.StringVar(c.stringSlot("cpuprofile"), "cpuprofile", "", "Write a CPU profile to the specified file before exiting.")
|
||||
c.FlagSet.StringVar(c.stringSlot("memprofile"), "memprofile", "", "Write a memory profile to the specified file after all tests have passed.")
|
||||
c.FlagSet.IntVar(c.intSlot("memprofilerate"), "memprofilerate", 0, "Enable more precise (and expensive) memory profiles by setting runtime.MemProfileRate.")
|
||||
c.FlagSet.StringVar(c.stringSlot("outputdir"), "outputdir", "", "Place output files from profiling in the specified directory.")
|
||||
c.FlagSet.BoolVar(c.boolSlot("requireSuite"), "requireSuite", false, "Fail if there are ginkgo tests in a directory but no test suite (missing RunSpecs)")
|
||||
c.FlagSet.StringVar(c.stringSlot("vet"), "vet", "", "Configure the invocation of 'go vet' to use the comma-separated list of vet checks. If list is 'off', 'go test' does not run 'go vet' at all.")
|
||||
|
||||
if mode == runMode || mode == watchMode {
|
||||
config.Flags(c.FlagSet, "", false)
|
||||
c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
|
||||
c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
|
||||
c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
|
||||
c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
|
||||
if !onWindows {
|
||||
c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
|
||||
}
|
||||
c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
|
||||
c.FlagSet.DurationVar(&(c.Timeout), "timeout", 24*time.Hour, "Suite fails if it does not complete within the specified timeout")
|
||||
}
|
||||
|
||||
if mode == runMode {
|
||||
c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
|
||||
c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
|
||||
c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
|
||||
}
|
||||
|
||||
if mode == watchMode {
|
||||
c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
|
||||
c.FlagSet.StringVar(&(c.WatchRegExp), "watchRegExp", `\.go$`, "Files matching this regular expression will be watched for changes")
|
||||
}
|
||||
}
|
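The three *Slot helpers above register each pass-through flag with the standard flag package while also stashing a typed pointer in the GoOpts map, keyed by the go tool flag name, so the value can later be forwarded to `go test`. A minimal standalone sketch of that pattern (illustrative names, not part of the vendored code):

package main

import (
	"flag"
	"fmt"
)

type passThroughFlags struct {
	goOpts  map[string]interface{}
	flagSet *flag.FlagSet
}

// stringSlot registers a string flag and keeps a typed pointer to its value
// in the goOpts map so it can be forwarded later.
func (c *passThroughFlags) stringSlot(name, usage string) {
	var opt string
	c.goOpts[name] = &opt
	c.flagSet.StringVar(&opt, name, "", usage)
}

func main() {
	c := &passThroughFlags{
		goOpts:  map[string]interface{}{},
		flagSet: flag.NewFlagSet("demo", flag.ExitOnError),
	}
	c.stringSlot("tags", "build tags to pass through")
	c.flagSet.Parse([]string{"-tags", "integration"})

	// Later, the stored pointer is dereferenced when assembling the go test args.
	fmt.Println("-tags =", *c.goOpts["tags"].(*string)) // -tags = integration
}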
173
vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go
generated
vendored
Normal file
|
@ -0,0 +1,173 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
|
||||
)
|
||||
|
||||
type compilationInput struct {
|
||||
runner *testrunner.TestRunner
|
||||
result chan compilationOutput
|
||||
}
|
||||
|
||||
type compilationOutput struct {
|
||||
runner *testrunner.TestRunner
|
||||
err error
|
||||
}
|
||||
|
||||
type SuiteRunner struct {
|
||||
notifier *Notifier
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
}
|
||||
|
||||
func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
|
||||
return &SuiteRunner{
|
||||
notifier: notifier,
|
||||
interruptHandler: interruptHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
|
||||
//we return this to the consumer; it yields each runner's compilation output in order as it compiles
|
||||
compilationOutputs := make(chan compilationOutput, len(runners))
|
||||
|
||||
//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
|
||||
//we read from these channels in order to ensure we run the suites in order
|
||||
orderedCompilationOutputs := []chan compilationOutput{}
|
||||
for range runners {
|
||||
orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
|
||||
}
|
||||
|
||||
//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
|
||||
//we prefill the channel and then close it; this ensures we compile things in the correct order
|
||||
workPool := make(chan compilationInput, len(runners))
|
||||
for i, runner := range runners {
|
||||
workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
|
||||
}
|
||||
close(workPool)
|
||||
|
||||
//pick a reasonable numCompilers
|
||||
if numCompilers == 0 {
|
||||
numCompilers = runtime.NumCPU()
|
||||
}
|
||||
|
||||
//a WaitGroup to help us wait for all compilers to shut down
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(numCompilers)
|
||||
|
||||
//spin up the concurrent compilers
|
||||
for i := 0; i < numCompilers; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for input := range workPool {
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
return
|
||||
}
|
||||
|
||||
if willCompile != nil {
|
||||
willCompile(input.runner.Suite)
|
||||
}
|
||||
|
||||
//We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
|
||||
var err error
|
||||
retries := 0
|
||||
for retries <= 5 {
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
return
|
||||
}
|
||||
if err = input.runner.Compile(); err == nil {
|
||||
break
|
||||
}
|
||||
retries++
|
||||
}
|
||||
|
||||
input.result <- compilationOutput{input.runner, err}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
//read from the compilation output channels *in order* and send them to the caller
|
||||
//close the compilationOutputs channel to tell the caller we're done
|
||||
go func() {
|
||||
defer close(compilationOutputs)
|
||||
for _, orderedCompilationOutput := range orderedCompilationOutputs {
|
||||
select {
|
||||
case compilationOutput := <-orderedCompilationOutput:
|
||||
compilationOutputs <- compilationOutput
|
||||
case <-r.interruptHandler.C:
|
||||
//interrupt detected, wait for the compilers to shut down then bail
|
||||
//this ensures we clean up after ourselves and don't leave any compilation processes running
|
||||
wg.Wait()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return compilationOutputs
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
|
||||
runResult := testrunner.PassingRunResult()
|
||||
|
||||
compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
|
||||
|
||||
numSuitesThatRan := 0
|
||||
suitesThatFailed := []testsuite.TestSuite{}
|
||||
for compilationOutput := range compilationOutputs {
|
||||
if compilationOutput.err != nil {
|
||||
fmt.Print(compilationOutput.err.Error())
|
||||
}
|
||||
numSuitesThatRan++
|
||||
suiteRunResult := testrunner.FailingRunResult()
|
||||
if compilationOutput.err == nil {
|
||||
suiteRunResult = compilationOutput.runner.Run()
|
||||
}
|
||||
r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
|
||||
r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
|
||||
runResult = runResult.Merge(suiteRunResult)
|
||||
if !suiteRunResult.Passed {
|
||||
suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
|
||||
if !keepGoing {
|
||||
break
|
||||
}
|
||||
}
|
||||
if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
|
||||
fmt.Println("")
|
||||
}
|
||||
}
|
||||
|
||||
if keepGoing && !runResult.Passed {
|
||||
r.listFailedSuites(suitesThatFailed)
|
||||
}
|
||||
|
||||
return runResult, numSuitesThatRan
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
|
||||
fmt.Println("")
|
||||
fmt.Println("There were failures detected in the following suites:")
|
||||
|
||||
maxPackageNameLength := 0
|
||||
for _, suite := range suitesThatFailed {
|
||||
if len(suite.PackageName) > maxPackageNameLength {
|
||||
maxPackageNameLength = len(suite.PackageName)
|
||||
}
|
||||
}
|
||||
|
||||
packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
|
||||
|
||||
for _, suite := range suitesThatFailed {
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
|
||||
} else {
|
||||
fmt.Fprintf(colorable.NewColorableStdout(), "\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
|
||||
}
|
||||
}
|
||||
}
|
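compileInParallel above fans work out to a fixed number of goroutines over a pre-filled, closed channel, but reads results back through one channel per runner so suites are reported in submission order. A minimal standalone sketch of that ordered fan-out/fan-in shape (illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []int{1, 2, 3, 4, 5}

	// One result channel per item, read back in submission order.
	results := make([]chan int, len(items))
	for i := range results {
		results[i] = make(chan int, 1)
	}

	// Pre-fill the work channel and close it so workers drain it and exit.
	work := make(chan int, len(items))
	for i := range items {
		work <- i
	}
	close(work)

	const workers = 3
	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for i := range work {
				results[i] <- items[i] * items[i] // stand-in for "compile suite i"
			}
		}()
	}

	// Consume results in order even though the workers finish out of order.
	for i := range results {
		fmt.Println(<-results[i])
	}
	wg.Wait()
}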
7
vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
// +build go1.10
|
||||
|
||||
package testrunner
|
||||
|
||||
var (
|
||||
buildArgs = []string{"test", "-c"}
|
||||
)
|
7
vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
// +build !go1.10
|
||||
|
||||
package testrunner
|
||||
|
||||
var (
|
||||
buildArgs = []string{"test", "-c", "-i"}
|
||||
)
|
52
vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
package testrunner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type logWriter struct {
|
||||
buffer *bytes.Buffer
|
||||
lock *sync.Mutex
|
||||
log *log.Logger
|
||||
}
|
||||
|
||||
func newLogWriter(target io.Writer, node int) *logWriter {
|
||||
return &logWriter{
|
||||
buffer: &bytes.Buffer{},
|
||||
lock: &sync.Mutex{},
|
||||
log: log.New(target, fmt.Sprintf("[%d] ", node), 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *logWriter) Write(data []byte) (n int, err error) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
w.buffer.Write(data)
|
||||
contents := w.buffer.String()
|
||||
|
||||
lines := strings.Split(contents, "\n")
|
||||
for _, line := range lines[0 : len(lines)-1] {
|
||||
w.log.Println(line)
|
||||
}
|
||||
|
||||
w.buffer.Reset()
|
||||
w.buffer.Write([]byte(lines[len(lines)-1]))
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (w *logWriter) Close() error {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
if w.buffer.Len() > 0 {
|
||||
w.log.Println(w.buffer.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
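logWriter above buffers partial writes and only emits complete lines, each carrying the node prefix. A small standalone sketch of the same line-buffered writer behaviour (illustrative names, not the vendored type):

package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"
	"sync"
)

type lineWriter struct {
	mu     sync.Mutex
	buffer bytes.Buffer
	logger *log.Logger
}

func (w *lineWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()

	w.buffer.Write(p)
	lines := strings.Split(w.buffer.String(), "\n")
	for _, line := range lines[:len(lines)-1] {
		w.logger.Println(line) // complete lines get the prefix
	}
	w.buffer.Reset()
	w.buffer.WriteString(lines[len(lines)-1]) // keep the trailing partial line
	return len(p), nil
}

func main() {
	var out bytes.Buffer
	w := &lineWriter{logger: log.New(&out, "[1] ", 0)}

	w.Write([]byte("hello "))       // buffered: no newline yet
	w.Write([]byte("world\nbye\n")) // flushes two prefixed lines
	fmt.Print(out.String())         // "[1] hello world" and "[1] bye"
}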
27
vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
package testrunner
|
||||
|
||||
type RunResult struct {
|
||||
Passed bool
|
||||
HasProgrammaticFocus bool
|
||||
}
|
||||
|
||||
func PassingRunResult() RunResult {
|
||||
return RunResult{
|
||||
Passed: true,
|
||||
HasProgrammaticFocus: false,
|
||||
}
|
||||
}
|
||||
|
||||
func FailingRunResult() RunResult {
|
||||
return RunResult{
|
||||
Passed: false,
|
||||
HasProgrammaticFocus: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (r RunResult) Merge(o RunResult) RunResult {
|
||||
return RunResult{
|
||||
Passed: r.Passed && o.Passed,
|
||||
HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
|
||||
}
|
||||
}
|
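RunResult.Merge aggregates results across suites and parallel nodes: Passed is AND-ed, HasProgrammaticFocus is OR-ed. A tiny standalone sketch (illustrative, not the vendored type):

package main

import "fmt"

type runResult struct {
	Passed               bool
	HasProgrammaticFocus bool
}

func merge(a, b runResult) runResult {
	return runResult{
		Passed:               a.Passed && b.Passed,
		HasProgrammaticFocus: a.HasProgrammaticFocus || b.HasProgrammaticFocus,
	}
}

func main() {
	total := runResult{Passed: true}
	for _, node := range []runResult{{Passed: true}, {Passed: false, HasProgrammaticFocus: true}} {
		total = merge(total, node)
	}
	fmt.Printf("%+v\n", total) // {Passed:false HasProgrammaticFocus:true}
}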
554
vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
generated
vendored
Normal file
|
@ -0,0 +1,554 @@
|
|||
package testrunner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
"github.com/onsi/ginkgo/internal/remote"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type TestRunner struct {
|
||||
Suite testsuite.TestSuite
|
||||
|
||||
compiled bool
|
||||
compilationTargetPath string
|
||||
|
||||
numCPU int
|
||||
parallelStream bool
|
||||
timeout time.Duration
|
||||
goOpts map[string]interface{}
|
||||
additionalArgs []string
|
||||
stderr *bytes.Buffer
|
||||
|
||||
CoverageFile string
|
||||
}
|
||||
|
||||
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner {
|
||||
runner := &TestRunner{
|
||||
Suite: suite,
|
||||
numCPU: numCPU,
|
||||
parallelStream: parallelStream,
|
||||
goOpts: goOpts,
|
||||
additionalArgs: additionalArgs,
|
||||
timeout: timeout,
|
||||
stderr: new(bytes.Buffer),
|
||||
}
|
||||
|
||||
if !suite.Precompiled {
|
||||
runner.compilationTargetPath, _ = filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
|
||||
}
|
||||
|
||||
return runner
|
||||
}
|
||||
|
||||
func (t *TestRunner) Compile() error {
|
||||
return t.CompileTo(t.compilationTargetPath)
|
||||
}
|
||||
|
||||
func (t *TestRunner) BuildArgs(path string) []string {
|
||||
args := make([]string, len(buildArgs), len(buildArgs)+3)
|
||||
copy(args, buildArgs)
|
||||
args = append(args, "-o", path, t.Suite.Path)
|
||||
|
||||
if t.getCoverMode() != "" {
|
||||
args = append(args, "-cover", fmt.Sprintf("-covermode=%s", t.getCoverMode()))
|
||||
} else {
|
||||
if t.shouldCover() || t.getCoverPackage() != "" {
|
||||
args = append(args, "-cover", "-covermode=atomic")
|
||||
}
|
||||
}
|
||||
|
||||
boolOpts := []string{
|
||||
"a",
|
||||
"n",
|
||||
"msan",
|
||||
"race",
|
||||
"x",
|
||||
"work",
|
||||
"linkshared",
|
||||
}
|
||||
|
||||
for _, opt := range boolOpts {
|
||||
if s, found := t.goOpts[opt].(*bool); found && *s {
|
||||
args = append(args, fmt.Sprintf("-%s", opt))
|
||||
}
|
||||
}
|
||||
|
||||
intOpts := []string{
|
||||
"memprofilerate",
|
||||
"blockprofilerate",
|
||||
}
|
||||
|
||||
for _, opt := range intOpts {
|
||||
if s, found := t.goOpts[opt].(*int); found {
|
||||
args = append(args, fmt.Sprintf("-%s=%d", opt, *s))
|
||||
}
|
||||
}
|
||||
|
||||
stringOpts := []string{
|
||||
"asmflags",
|
||||
"buildmode",
|
||||
"compiler",
|
||||
"gccgoflags",
|
||||
"installsuffix",
|
||||
"ldflags",
|
||||
"pkgdir",
|
||||
"toolexec",
|
||||
"coverprofile",
|
||||
"cpuprofile",
|
||||
"memprofile",
|
||||
"outputdir",
|
||||
"coverpkg",
|
||||
"tags",
|
||||
"gcflags",
|
||||
"vet",
|
||||
"mod",
|
||||
}
|
||||
|
||||
for _, opt := range stringOpts {
|
||||
if s, found := t.goOpts[opt].(*string); found && *s != "" {
|
||||
args = append(args, fmt.Sprintf("-%s=%s", opt, *s))
|
||||
}
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
func (t *TestRunner) CompileTo(path string) error {
|
||||
if t.compiled {
|
||||
return nil
|
||||
}
|
||||
|
||||
if t.Suite.Precompiled {
|
||||
return nil
|
||||
}
|
||||
|
||||
args := t.BuildArgs(path)
|
||||
cmd := exec.Command("go", args...)
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
if len(output) > 0 {
|
||||
return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, output)
|
||||
}
|
||||
return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
|
||||
}
|
||||
|
||||
if len(output) > 0 {
|
||||
fmt.Println(string(output))
|
||||
}
|
||||
|
||||
if !fileExists(path) {
|
||||
compiledFile := t.Suite.PackageName + ".test"
|
||||
if fileExists(compiledFile) {
|
||||
// seems like we are on an old go version that does not support the -o flag on go test
|
||||
// move the compiled test file to the desired location by hand
|
||||
err = os.Rename(compiledFile, path)
|
||||
if err != nil {
|
||||
// We cannot move the file, perhaps because the source and destination
|
||||
// are on different partitions. We can copy the file, however.
|
||||
err = copyFile(compiledFile, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to copy compiled file: %s", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
|
||||
}
|
||||
}
|
||||
|
||||
t.compiled = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func fileExists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return err == nil || !os.IsNotExist(err)
|
||||
}
|
||||
|
||||
// copyFile copies the contents of the file named src to the file named
|
||||
// by dst. The file will be created if it does not already exist. If the
|
||||
// destination file exists, all its contents will be replaced by the contents
|
||||
// of the source file.
|
||||
func copyFile(src, dst string) error {
|
||||
srcInfo, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mode := srcInfo.Mode()
|
||||
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer in.Close()
|
||||
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := out.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = out.Sync()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return out.Chmod(mode)
|
||||
}
|
||||
|
||||
func (t *TestRunner) Run() RunResult {
|
||||
if t.Suite.IsGinkgo {
|
||||
if t.numCPU > 1 {
|
||||
if t.parallelStream {
|
||||
return t.runAndStreamParallelGinkgoSuite()
|
||||
} else {
|
||||
return t.runParallelGinkgoSuite()
|
||||
}
|
||||
} else {
|
||||
return t.runSerialGinkgoSuite()
|
||||
}
|
||||
} else {
|
||||
return t.runGoTestSuite()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TestRunner) CleanUp() {
|
||||
if t.Suite.Precompiled {
|
||||
return
|
||||
}
|
||||
os.Remove(t.compilationTargetPath)
|
||||
}
|
||||
|
||||
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
|
||||
}
|
||||
|
||||
func (t *TestRunner) runGoTestSuite() RunResult {
|
||||
return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
|
||||
}
|
||||
|
||||
func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
|
||||
completions := make(chan RunResult)
|
||||
writers := make([]*logWriter, t.numCPU)
|
||||
|
||||
server, err := remote.NewServer(t.numCPU)
|
||||
if err != nil {
|
||||
panic("Failed to start parallel spec server")
|
||||
}
|
||||
|
||||
server.Start()
|
||||
defer server.Close()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
config.GinkgoConfig.ParallelNode = cpu + 1
|
||||
config.GinkgoConfig.ParallelTotal = t.numCPU
|
||||
config.GinkgoConfig.SyncHost = server.Address()
|
||||
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
|
||||
writers[cpu] = newLogWriter(os.Stdout, cpu+1)
|
||||
|
||||
cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
|
||||
|
||||
server.RegisterAlive(cpu+1, func() bool {
|
||||
if cmd.ProcessState == nil {
|
||||
return true
|
||||
}
|
||||
return !cmd.ProcessState.Exited()
|
||||
})
|
||||
|
||||
go t.run(cmd, completions)
|
||||
}
|
||||
|
||||
res := PassingRunResult()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
res = res.Merge(<-completions)
|
||||
}
|
||||
|
||||
for _, writer := range writers {
|
||||
writer.Close()
|
||||
}
|
||||
|
||||
os.Stdout.Sync()
|
||||
|
||||
if t.shouldCombineCoverprofiles() {
|
||||
t.combineCoverprofiles()
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (t *TestRunner) runParallelGinkgoSuite() RunResult {
|
||||
result := make(chan bool)
|
||||
completions := make(chan RunResult)
|
||||
writers := make([]*logWriter, t.numCPU)
|
||||
reports := make([]*bytes.Buffer, t.numCPU)
|
||||
|
||||
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
|
||||
aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)
|
||||
|
||||
server, err := remote.NewServer(t.numCPU)
|
||||
if err != nil {
|
||||
panic("Failed to start parallel spec server")
|
||||
}
|
||||
server.RegisterReporters(aggregator)
|
||||
server.Start()
|
||||
defer server.Close()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
config.GinkgoConfig.ParallelNode = cpu + 1
|
||||
config.GinkgoConfig.ParallelTotal = t.numCPU
|
||||
config.GinkgoConfig.SyncHost = server.Address()
|
||||
config.GinkgoConfig.StreamHost = server.Address()
|
||||
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
|
||||
reports[cpu] = &bytes.Buffer{}
|
||||
writers[cpu] = newLogWriter(reports[cpu], cpu+1)
|
||||
|
||||
cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
|
||||
|
||||
server.RegisterAlive(cpu+1, func() bool {
|
||||
if cmd.ProcessState == nil {
|
||||
return true
|
||||
}
|
||||
return !cmd.ProcessState.Exited()
|
||||
})
|
||||
|
||||
go t.run(cmd, completions)
|
||||
}
|
||||
|
||||
res := PassingRunResult()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
res = res.Merge(<-completions)
|
||||
}
|
||||
|
||||
//all test processes are done; at this point
|
||||
//we should be able to wait for the aggregator to tell us that it's done
|
||||
|
||||
select {
|
||||
case <-result:
|
||||
fmt.Println("")
|
||||
case <-time.After(time.Second):
|
||||
//the aggregator never got back to us! something must have gone wrong
|
||||
fmt.Println(`
|
||||
-------------------------------------------------------------------
|
||||
| |
|
||||
| Ginkgo timed out waiting for all parallel nodes to report back! |
|
||||
| |
|
||||
-------------------------------------------------------------------`)
|
||||
fmt.Println("\n", t.Suite.PackageName, "timed out. path:", t.Suite.Path)
|
||||
os.Stdout.Sync()
|
||||
|
||||
for _, writer := range writers {
|
||||
writer.Close()
|
||||
}
|
||||
|
||||
for _, report := range reports {
|
||||
fmt.Print(report.String())
|
||||
}
|
||||
|
||||
os.Stdout.Sync()
|
||||
}
|
||||
|
||||
if t.shouldCombineCoverprofiles() {
|
||||
t.combineCoverprofiles()
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
const CoverProfileSuffix = ".coverprofile"
|
||||
|
||||
func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
|
||||
args := []string{"--test.timeout=" + t.timeout.String()}
|
||||
|
||||
coverProfile := t.getCoverProfile()
|
||||
|
||||
if t.shouldCombineCoverprofiles() {
|
||||
|
||||
testCoverProfile := "--test.coverprofile="
|
||||
|
||||
coverageFile := ""
|
||||
// Set default name for coverage results
|
||||
if coverProfile == "" {
|
||||
coverageFile = t.Suite.PackageName + CoverProfileSuffix
|
||||
} else {
|
||||
coverageFile = coverProfile
|
||||
}
|
||||
|
||||
testCoverProfile += coverageFile
|
||||
|
||||
t.CoverageFile = filepath.Join(t.Suite.Path, coverageFile)
|
||||
|
||||
if t.numCPU > 1 {
|
||||
testCoverProfile = fmt.Sprintf("%s.%d", testCoverProfile, node)
|
||||
}
|
||||
args = append(args, testCoverProfile)
|
||||
}
|
||||
|
||||
args = append(args, ginkgoArgs...)
|
||||
args = append(args, t.additionalArgs...)
|
||||
|
||||
path := t.compilationTargetPath
|
||||
if t.Suite.Precompiled {
|
||||
path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
|
||||
}
|
||||
|
||||
cmd := exec.Command(path, args...)
|
||||
|
||||
cmd.Dir = t.Suite.Path
|
||||
cmd.Stderr = io.MultiWriter(stream, t.stderr)
|
||||
cmd.Stdout = stream
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (t *TestRunner) shouldCover() bool {
|
||||
return *t.goOpts["cover"].(*bool)
|
||||
}
|
||||
|
||||
func (t *TestRunner) shouldRequireSuite() bool {
|
||||
return *t.goOpts["requireSuite"].(*bool)
|
||||
}
|
||||
|
||||
func (t *TestRunner) getCoverProfile() string {
|
||||
return *t.goOpts["coverprofile"].(*string)
|
||||
}
|
||||
|
||||
func (t *TestRunner) getCoverPackage() string {
|
||||
return *t.goOpts["coverpkg"].(*string)
|
||||
}
|
||||
|
||||
func (t *TestRunner) getCoverMode() string {
|
||||
return *t.goOpts["covermode"].(*string)
|
||||
}
|
||||
|
||||
func (t *TestRunner) shouldCombineCoverprofiles() bool {
|
||||
return t.shouldCover() || t.getCoverPackage() != "" || t.getCoverMode() != ""
|
||||
}
|
||||
|
||||
func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
|
||||
var res RunResult
|
||||
|
||||
defer func() {
|
||||
if completions != nil {
|
||||
completions <- res
|
||||
}
|
||||
}()
|
||||
|
||||
err := cmd.Start()
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
|
||||
return res
|
||||
}
|
||||
|
||||
cmd.Wait()
|
||||
|
||||
exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
|
||||
if strings.Contains(t.stderr.String(), "warning: no tests to run") {
|
||||
if t.shouldRequireSuite() {
|
||||
res.Passed = false
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (t *TestRunner) combineCoverprofiles() {
|
||||
profiles := []string{}
|
||||
|
||||
coverProfile := t.getCoverProfile()
|
||||
|
||||
for cpu := 1; cpu <= t.numCPU; cpu++ {
|
||||
var coverFile string
|
||||
if coverProfile == "" {
|
||||
coverFile = fmt.Sprintf("%s%s.%d", t.Suite.PackageName, CoverProfileSuffix, cpu)
|
||||
} else {
|
||||
coverFile = fmt.Sprintf("%s.%d", coverProfile, cpu)
|
||||
}
|
||||
|
||||
coverFile = filepath.Join(t.Suite.Path, coverFile)
|
||||
coverProfile, err := ioutil.ReadFile(coverFile)
|
||||
os.Remove(coverFile)
|
||||
|
||||
if err == nil {
|
||||
profiles = append(profiles, string(coverProfile))
|
||||
}
|
||||
}
|
||||
|
||||
if len(profiles) != t.numCPU {
|
||||
return
|
||||
}
|
||||
|
||||
lines := map[string]int{}
|
||||
lineOrder := []string{}
|
||||
for i, coverProfile := range profiles {
|
||||
for _, line := range strings.Split(coverProfile, "\n")[1:] {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
components := strings.Split(line, " ")
|
||||
count, _ := strconv.Atoi(components[len(components)-1])
|
||||
prefix := strings.Join(components[0:len(components)-1], " ")
|
||||
lines[prefix] += count
|
||||
if i == 0 {
|
||||
lineOrder = append(lineOrder, prefix)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
output := []string{"mode: atomic"}
|
||||
for _, line := range lineOrder {
|
||||
output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
|
||||
}
|
||||
finalOutput := strings.Join(output, "\n")
|
||||
|
||||
finalFilename := ""
|
||||
|
||||
if coverProfile != "" {
|
||||
finalFilename = coverProfile
|
||||
} else {
|
||||
finalFilename = fmt.Sprintf("%s%s", t.Suite.PackageName, CoverProfileSuffix)
|
||||
}
|
||||
|
||||
coverageFilepath := filepath.Join(t.Suite.Path, finalFilename)
|
||||
ioutil.WriteFile(coverageFilepath, []byte(finalOutput), 0666)
|
||||
|
||||
t.CoverageFile = coverageFilepath
|
||||
}
|
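combineCoverprofiles above merges the per-node cover profiles by summing the hit count of each profile block and re-emitting the blocks, in first-seen order, under a single "mode: atomic" header. A minimal standalone sketch of that merge (assumed sample data, not part of the vendored code):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// mergeProfiles sums the trailing hit count of identical profile blocks.
func mergeProfiles(profiles []string) string {
	counts := map[string]int{}
	order := []string{}
	for i, p := range profiles {
		for _, line := range strings.Split(p, "\n")[1:] { // skip the "mode:" line
			if line == "" {
				continue
			}
			fields := strings.Split(line, " ")
			n, _ := strconv.Atoi(fields[len(fields)-1])
			block := strings.Join(fields[:len(fields)-1], " ")
			counts[block] += n
			if i == 0 {
				order = append(order, block)
			}
		}
	}
	out := []string{"mode: atomic"}
	for _, block := range order {
		out = append(out, fmt.Sprintf("%s %d", block, counts[block]))
	}
	return strings.Join(out, "\n")
}

func main() {
	node1 := "mode: atomic\npkg/foo.go:1.1,3.2 2 1"
	node2 := "mode: atomic\npkg/foo.go:1.1,3.2 2 4"
	fmt.Println(mergeProfiles([]string{node1, node2})) // pkg/foo.go:1.1,3.2 2 5
}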
115
vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
generated
vendored
Normal file
|
@ -0,0 +1,115 @@
|
|||
package testsuite
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type TestSuite struct {
|
||||
Path string
|
||||
PackageName string
|
||||
IsGinkgo bool
|
||||
Precompiled bool
|
||||
}
|
||||
|
||||
func PrecompiledTestSuite(path string) (TestSuite, error) {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return TestSuite{}, err
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return TestSuite{}, errors.New("this is a directory, not a file")
|
||||
}
|
||||
|
||||
if filepath.Ext(path) != ".test" {
|
||||
return TestSuite{}, errors.New("this is not a .test binary")
|
||||
}
|
||||
|
||||
if info.Mode()&0111 == 0 {
|
||||
return TestSuite{}, errors.New("this is not executable")
|
||||
}
|
||||
|
||||
dir := relPath(filepath.Dir(path))
|
||||
packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
|
||||
|
||||
return TestSuite{
|
||||
Path: dir,
|
||||
PackageName: packageName,
|
||||
IsGinkgo: true,
|
||||
Precompiled: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SuitesInDir(dir string, recurse bool) []TestSuite {
|
||||
suites := []TestSuite{}
|
||||
|
||||
if vendorExperimentCheck(dir) {
|
||||
return suites
|
||||
}
|
||||
|
||||
files, _ := ioutil.ReadDir(dir)
|
||||
re := regexp.MustCompile(`^[^._].*_test\.go$`)
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && re.Match([]byte(file.Name())) {
|
||||
suites = append(suites, New(dir, files))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if recurse {
|
||||
re = regexp.MustCompile(`^[._]`)
|
||||
for _, file := range files {
|
||||
if file.IsDir() && !re.Match([]byte(file.Name())) {
|
||||
suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return suites
|
||||
}
|
||||
|
||||
func relPath(dir string) string {
|
||||
dir, _ = filepath.Abs(dir)
|
||||
cwd, _ := os.Getwd()
|
||||
dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
|
||||
|
||||
if string(dir[0]) != "." {
|
||||
dir = "." + string(filepath.Separator) + dir
|
||||
}
|
||||
|
||||
return dir
|
||||
}
|
||||
|
||||
func New(dir string, files []os.FileInfo) TestSuite {
|
||||
return TestSuite{
|
||||
Path: relPath(dir),
|
||||
PackageName: packageNameForSuite(dir),
|
||||
IsGinkgo: filesHaveGinkgoSuite(dir, files),
|
||||
}
|
||||
}
|
||||
|
||||
func packageNameForSuite(dir string) string {
|
||||
path, _ := filepath.Abs(dir)
|
||||
return filepath.Base(path)
|
||||
}
|
||||
|
||||
func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
|
||||
reTestFile := regexp.MustCompile(`_test\.go$`)
|
||||
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)
|
||||
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
|
||||
contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
|
||||
if reGinkgo.Match(contents) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
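SuitesInDir treats a directory as a suite when it contains at least one *_test.go file whose name does not begin with '.' or '_'. A small standalone sketch of the discovery regexp used above (illustrative file names):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^[^._].*_test\.go$`)
	for _, name := range []string{"foo_test.go", "_skip_test.go", ".hidden_test.go", "foo.go"} {
		fmt.Printf("%-16s %v\n", name, re.MatchString(name)) // only foo_test.go matches
	}
}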
16
vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
// +build !go1.6
|
||||
|
||||
package testsuite
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
// "This change will only be enabled if the go command is run with
|
||||
// GO15VENDOREXPERIMENT=1 in its environment."
|
||||
// c.f. the vendor-experiment proposal https://goo.gl/2ucMeC
|
||||
func vendorExperimentCheck(dir string) bool {
|
||||
vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
|
||||
return vendorExperiment == "1" && path.Base(dir) == "vendor"
|
||||
}
|
15
vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
// +build go1.6
|
||||
|
||||
package testsuite
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
// in 1.6 the vendor directory became the default go behaviour, so now
|
||||
// check if it's disabled.
|
||||
func vendorExperimentCheck(dir string) bool {
|
||||
vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT")
|
||||
return vendorExperiment != "0" && path.Base(dir) == "vendor"
|
||||
}
|
180
vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
generated
vendored
Normal file
|
@ -0,0 +1,180 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func BuildUnfocusCommand() *Command {
|
||||
return &Command{
|
||||
Name: "unfocus",
|
||||
AltName: "blur",
|
||||
FlagSet: flag.NewFlagSet("unfocus", flag.ExitOnError),
|
||||
UsageCommand: "ginkgo unfocus (or ginkgo blur)",
|
||||
Usage: []string{
|
||||
"Recursively unfocuses any focused tests under the current directory",
|
||||
},
|
||||
Command: unfocusSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
func unfocusSpecs([]string, []string) {
|
||||
fmt.Println("Scanning for focus...")
|
||||
|
||||
goFiles := make(chan string)
|
||||
go func() {
|
||||
unfocusDir(goFiles, ".")
|
||||
close(goFiles)
|
||||
}()
|
||||
|
||||
const workers = 10
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(workers)
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
go func() {
|
||||
for path := range goFiles {
|
||||
unfocusFile(path)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func unfocusDir(goFiles chan string, path string) {
|
||||
files, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
switch {
|
||||
case f.IsDir() && shouldProcessDir(f.Name()):
|
||||
unfocusDir(goFiles, filepath.Join(path, f.Name()))
|
||||
case !f.IsDir() && shouldProcessFile(f.Name()):
|
||||
goFiles <- filepath.Join(path, f.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func shouldProcessDir(basename string) bool {
|
||||
return basename != "vendor" && !strings.HasPrefix(basename, ".")
|
||||
}
|
||||
|
||||
func shouldProcessFile(basename string) bool {
|
||||
return strings.HasSuffix(basename, ".go")
|
||||
}
|
||||
|
||||
func unfocusFile(path string) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
fmt.Printf("error reading file '%s': %s\n", path, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), 0)
|
||||
if err != nil {
|
||||
fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
eliminations := scanForFocus(ast)
|
||||
if len(eliminations) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("...updating %s\n", path)
|
||||
backup, err := writeBackup(path, data)
|
||||
if err != nil {
|
||||
fmt.Printf("error creating backup file: %s\n", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if err := updateFile(path, data, eliminations); err != nil {
|
||||
fmt.Printf("error writing file '%s': %s\n", path, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
os.Remove(backup)
|
||||
}
|
||||
|
||||
func writeBackup(path string, data []byte) (string, error) {
|
||||
t, err := ioutil.TempFile(filepath.Dir(path), filepath.Base(path))
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error creating temporary file: %w", err)
|
||||
}
|
||||
defer t.Close()
|
||||
|
||||
if _, err := io.Copy(t, bytes.NewReader(data)); err != nil {
|
||||
return "", fmt.Errorf("error writing to temporary file: %w", err)
|
||||
}
|
||||
|
||||
return t.Name(), nil
|
||||
}
|
||||
|
||||
func updateFile(path string, data []byte, eliminations []int64) error {
|
||||
to, err := os.Create(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
|
||||
}
|
||||
defer to.Close()
|
||||
|
||||
from := bytes.NewReader(data)
|
||||
var cursor int64
|
||||
for _, byteToEliminate := range eliminations {
|
||||
if _, err := io.CopyN(to, from, byteToEliminate-cursor); err != nil {
|
||||
return fmt.Errorf("error copying data: %w", err)
|
||||
}
|
||||
|
||||
cursor = byteToEliminate + 1
|
||||
|
||||
if _, err := from.Seek(1, io.SeekCurrent); err != nil {
|
||||
return fmt.Errorf("error seeking to position in buffer: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := io.Copy(to, from); err != nil {
|
||||
return fmt.Errorf("error copying end data: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func scanForFocus(file *ast.File) (eliminations []int64) {
|
||||
ast.Inspect(file, func(n ast.Node) bool {
|
||||
if c, ok := n.(*ast.CallExpr); ok {
|
||||
if i, ok := c.Fun.(*ast.Ident); ok {
|
||||
if isFocus(i.Name) {
|
||||
eliminations = append(eliminations, int64(i.Pos()-file.Pos()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return eliminations
|
||||
}
|
||||
|
||||
func isFocus(name string) bool {
|
||||
switch name {
|
||||
case "FDescribe", "FContext", "FIt", "FMeasure", "FDescribeTable", "FEntry", "FSpecify", "FWhen":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
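scanForFocus above walks the parsed file and records the byte offset of the leading 'F' of every focused container or spec call so unfocusFile can splice it out. A minimal standalone sketch of that AST walk over a hypothetical source string (not part of the vendored code):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	src := `package demo

func specs() {
	FIt("is focused", func() {})
	It("is not", func() {})
}
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}

	focused := map[string]bool{"FDescribe": true, "FContext": true, "FIt": true}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if ident, ok := call.Fun.(*ast.Ident); ok && focused[ident.Name] {
				// Offset of the leading 'F' relative to the start of the file,
				// as recorded by scanForFocus.
				fmt.Println(ident.Name, "at byte offset", int64(ident.Pos()-file.Pos()))
			}
		}
		return true
	})
}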
24
vendor/github.com/onsi/ginkgo/ginkgo/version_command.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
)
|
||||
|
||||
func BuildVersionCommand() *Command {
|
||||
return &Command{
|
||||
Name: "version",
|
||||
FlagSet: flag.NewFlagSet("version", flag.ExitOnError),
|
||||
UsageCommand: "ginkgo version",
|
||||
Usage: []string{
|
||||
"Print Ginkgo's version",
|
||||
},
|
||||
Command: printVersion,
|
||||
}
|
||||
}
|
||||
|
||||
func printVersion([]string, []string) {
|
||||
fmt.Printf("Ginkgo Version %s\n", config.VERSION)
|
||||
}
|
22
vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
package watch
|
||||
|
||||
import "sort"
|
||||
|
||||
type Delta struct {
|
||||
ModifiedPackages []string
|
||||
|
||||
NewSuites []*Suite
|
||||
RemovedSuites []*Suite
|
||||
modifiedSuites []*Suite
|
||||
}
|
||||
|
||||
type DescendingByDelta []*Suite
|
||||
|
||||
func (a DescendingByDelta) Len() int { return len(a) }
|
||||
func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }
|
||||
|
||||
func (d Delta) ModifiedSuites() []*Suite {
|
||||
sort.Sort(DescendingByDelta(d.modifiedSuites))
|
||||
return d.modifiedSuites
|
||||
}
|
75
vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"regexp"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type SuiteErrors map[testsuite.TestSuite]error
|
||||
|
||||
type DeltaTracker struct {
|
||||
maxDepth int
|
||||
watchRegExp *regexp.Regexp
|
||||
suites map[string]*Suite
|
||||
packageHashes *PackageHashes
|
||||
}
|
||||
|
||||
func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
|
||||
return &DeltaTracker{
|
||||
maxDepth: maxDepth,
|
||||
watchRegExp: watchRegExp,
|
||||
packageHashes: NewPackageHashes(watchRegExp),
|
||||
suites: map[string]*Suite{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) {
|
||||
errors = SuiteErrors{}
|
||||
delta.ModifiedPackages = d.packageHashes.CheckForChanges()
|
||||
|
||||
providedSuitePaths := map[string]bool{}
|
||||
for _, suite := range suites {
|
||||
providedSuitePaths[suite.Path] = true
|
||||
}
|
||||
|
||||
d.packageHashes.StartTrackingUsage()
|
||||
|
||||
for _, suite := range d.suites {
|
||||
if providedSuitePaths[suite.Suite.Path] {
|
||||
if suite.Delta() > 0 {
|
||||
delta.modifiedSuites = append(delta.modifiedSuites, suite)
|
||||
}
|
||||
} else {
|
||||
delta.RemovedSuites = append(delta.RemovedSuites, suite)
|
||||
}
|
||||
}
|
||||
|
||||
d.packageHashes.StopTrackingUsageAndPrune()
|
||||
|
||||
for _, suite := range suites {
|
||||
_, ok := d.suites[suite.Path]
|
||||
if !ok {
|
||||
s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
|
||||
if err != nil {
|
||||
errors[suite] = err
|
||||
continue
|
||||
}
|
||||
d.suites[suite.Path] = s
|
||||
delta.NewSuites = append(delta.NewSuites, s)
|
||||
}
|
||||
}
|
||||
|
||||
return delta, errors
|
||||
}
|
||||
|
||||
func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error {
|
||||
s, ok := d.suites[suite.Path]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown suite %s", suite.Path)
|
||||
}
|
||||
|
||||
return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
|
||||
}
|
92
vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go
generated
vendored
Normal file
|
@ -0,0 +1,92 @@
|
|||
package watch
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
|
||||
var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing
|
||||
|
||||
type Dependencies struct {
|
||||
deps map[string]int
|
||||
}
|
||||
|
||||
func NewDependencies(path string, maxDepth int) (Dependencies, error) {
|
||||
d := Dependencies{
|
||||
deps: map[string]int{},
|
||||
}
|
||||
|
||||
if maxDepth == 0 {
|
||||
return d, nil
|
||||
}
|
||||
|
||||
err := d.seedWithDepsForPackageAtPath(path)
|
||||
if err != nil {
|
||||
return d, err
|
||||
}
|
||||
|
||||
for depth := 1; depth < maxDepth; depth++ {
|
||||
n := len(d.deps)
|
||||
d.addDepsForDepth(depth)
|
||||
if n == len(d.deps) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (d Dependencies) Dependencies() map[string]int {
|
||||
return d.deps
|
||||
}
|
||||
|
||||
func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
|
||||
pkg, err := build.ImportDir(path, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.resolveAndAdd(pkg.Imports, 1)
|
||||
d.resolveAndAdd(pkg.TestImports, 1)
|
||||
d.resolveAndAdd(pkg.XTestImports, 1)
|
||||
|
||||
delete(d.deps, pkg.Dir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepsForDepth(depth int) {
|
||||
for dep, depDepth := range d.deps {
|
||||
if depDepth == depth {
|
||||
d.addDepsForDep(dep, depth+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepsForDep(dep string, depth int) {
|
||||
pkg, err := build.ImportDir(dep, 0)
|
||||
if err != nil {
|
||||
println(err.Error())
|
||||
return
|
||||
}
|
||||
d.resolveAndAdd(pkg.Imports, depth)
|
||||
}
|
||||
|
||||
func (d Dependencies) resolveAndAdd(deps []string, depth int) {
|
||||
for _, dep := range deps {
|
||||
pkg, err := build.Import(dep, ".", 0)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) {
|
||||
d.addDepIfNotPresent(pkg.Dir, depth)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
|
||||
_, ok := d.deps[dep]
|
||||
if !ok {
|
||||
d.deps[dep] = depth
|
||||
}
|
||||
}
|
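NewDependencies seeds the dependency map with the imports of the suite directory and then expands it one depth level at a time with go/build, skipping GOROOT packages. A minimal standalone sketch of the first expansion step (assumes the working directory is a Go package; not part of the vendored code):

package main

import (
	"fmt"
	"go/build"
)

func main() {
	// Import the starting directory.
	pkg, err := build.ImportDir(".", 0)
	if err != nil {
		fmt.Println("not a Go package:", err)
		return
	}

	// Depth 1: resolve each import and keep only non-GOROOT packages,
	// mirroring resolveAndAdd above.
	deps := map[string]int{}
	for _, imp := range pkg.Imports {
		if resolved, err := build.Import(imp, ".", 0); err == nil && !resolved.Goroot {
			deps[resolved.Dir] = 1
		}
	}
	fmt.Println("depth-1 dependency dirs:", deps)
}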
104
vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
generated
vendored
Normal file
|
@ -0,0 +1,104 @@
|
|||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
var goTestRegExp = regexp.MustCompile(`_test\.go$`)
|
||||
|
||||
type PackageHash struct {
|
||||
CodeModifiedTime time.Time
|
||||
TestModifiedTime time.Time
|
||||
Deleted bool
|
||||
|
||||
path string
|
||||
codeHash string
|
||||
testHash string
|
||||
watchRegExp *regexp.Regexp
|
||||
}
|
||||
|
||||
func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash {
|
||||
p := &PackageHash{
|
||||
path: path,
|
||||
watchRegExp: watchRegExp,
|
||||
}
|
||||
|
||||
p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PackageHash) CheckForChanges() bool {
|
||||
codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
|
||||
|
||||
if deleted {
|
||||
if !p.Deleted {
|
||||
t := time.Now()
|
||||
p.CodeModifiedTime = t
|
||||
p.TestModifiedTime = t
|
||||
}
|
||||
p.Deleted = true
|
||||
return true
|
||||
}
|
||||
|
||||
modified := false
|
||||
p.Deleted = false
|
||||
|
||||
if p.codeHash != codeHash {
|
||||
p.CodeModifiedTime = codeModifiedTime
|
||||
modified = true
|
||||
}
|
||||
if p.testHash != testHash {
|
||||
p.TestModifiedTime = testModifiedTime
|
||||
modified = true
|
||||
}
|
||||
|
||||
p.codeHash = codeHash
|
||||
p.testHash = testHash
|
||||
return modified
|
||||
}
|
||||
|
||||
func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
|
||||
infos, err := ioutil.ReadDir(p.path)
|
||||
|
||||
if err != nil {
|
||||
deleted = true
|
||||
return
|
||||
}
|
||||
|
||||
for _, info := range infos {
|
||||
if info.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
if goTestRegExp.Match([]byte(info.Name())) {
|
||||
testHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(testModifiedTime) {
|
||||
testModifiedTime = info.ModTime()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if p.watchRegExp.Match([]byte(info.Name())) {
|
||||
codeHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(codeModifiedTime) {
|
||||
codeModifiedTime = info.ModTime()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testHash += codeHash
|
||||
if codeModifiedTime.After(testModifiedTime) {
|
||||
testModifiedTime = codeModifiedTime
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
|
||||
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
|
||||
}
|
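PackageHash never reads file contents: each file contributes a name_size_mtime token, and the concatenation is compared between polls to decide whether the package's code or tests changed. A small standalone sketch of that pseudo-hash (illustrative, not the vendored type):

package main

import (
	"fmt"
	"io/ioutil"
)

// pseudoHash concatenates name_size_mtime for every regular file in dir;
// a change to any file changes the string, so no contents need to be read.
func pseudoHash(dir string) string {
	infos, err := ioutil.ReadDir(dir)
	if err != nil {
		return "" // the real code treats an unreadable directory as deleted
	}
	h := ""
	for _, info := range infos {
		if info.IsDir() {
			continue
		}
		h += fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
	}
	return h
}

func main() {
	before := pseudoHash(".")
	// ...edit, add, or remove a file in the directory...
	after := pseudoHash(".")
	fmt.Println("changed:", before != after)
}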
85
vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go
generated
vendored
Normal file
|
@ -0,0 +1,85 @@
|
|||
package watch
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type PackageHashes struct {
|
||||
PackageHashes map[string]*PackageHash
|
||||
usedPaths map[string]bool
|
||||
watchRegExp *regexp.Regexp
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes {
|
||||
return &PackageHashes{
|
||||
PackageHashes: map[string]*PackageHash{},
|
||||
usedPaths: nil,
|
||||
watchRegExp: watchRegExp,
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageHashes) CheckForChanges() []string {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
modified := []string{}
|
||||
|
||||
for _, packageHash := range p.PackageHashes {
|
||||
if packageHash.CheckForChanges() {
|
||||
modified = append(modified, packageHash.path)
|
||||
}
|
||||
}
|
||||
|
||||
return modified
|
||||
}
|
||||
|
||||
func (p *PackageHashes) Add(path string) *PackageHash {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
path, _ = filepath.Abs(path)
|
||||
_, ok := p.PackageHashes[path]
|
||||
if !ok {
|
||||
p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp)
|
||||
}
|
||||
|
||||
if p.usedPaths != nil {
|
||||
p.usedPaths[path] = true
|
||||
}
|
||||
return p.PackageHashes[path]
|
||||
}
|
||||
|
||||
func (p *PackageHashes) Get(path string) *PackageHash {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
path, _ = filepath.Abs(path)
|
||||
if p.usedPaths != nil {
|
||||
p.usedPaths[path] = true
|
||||
}
|
||||
return p.PackageHashes[path]
|
||||
}
|
||||
|
||||
func (p *PackageHashes) StartTrackingUsage() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
p.usedPaths = map[string]bool{}
|
||||
}
|
||||
|
||||
func (p *PackageHashes) StopTrackingUsageAndPrune() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
for path := range p.PackageHashes {
|
||||
if !p.usedPaths[path] {
|
||||
delete(p.PackageHashes, path)
|
||||
}
|
||||
}
|
||||
|
||||
p.usedPaths = nil
|
||||
}
|
87
vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go
generated
vendored
Normal file
|
@ -0,0 +1,87 @@
|
|||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type Suite struct {
|
||||
Suite testsuite.TestSuite
|
||||
RunTime time.Time
|
||||
Dependencies Dependencies
|
||||
|
||||
sharedPackageHashes *PackageHashes
|
||||
}
|
||||
|
||||
func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
|
||||
deps, err := NewDependencies(suite.Path, maxDepth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sharedPackageHashes.Add(suite.Path)
|
||||
for dep := range deps.Dependencies() {
|
||||
sharedPackageHashes.Add(dep)
|
||||
}
|
||||
|
||||
return &Suite{
|
||||
Suite: suite,
|
||||
Dependencies: deps,
|
||||
|
||||
sharedPackageHashes: sharedPackageHashes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Suite) Delta() float64 {
|
||||
delta := s.delta(s.Suite.Path, true, 0) * 1000
|
||||
for dep, depth := range s.Dependencies.Dependencies() {
|
||||
delta += s.delta(dep, false, depth)
|
||||
}
|
||||
return delta
|
||||
}
|
||||
|
||||
func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
|
||||
s.RunTime = time.Now()
|
||||
|
||||
deps, err := NewDependencies(s.Suite.Path, maxDepth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.sharedPackageHashes.Add(s.Suite.Path)
|
||||
for dep := range deps.Dependencies() {
|
||||
s.sharedPackageHashes.Add(dep)
|
||||
}
|
||||
|
||||
s.Dependencies = deps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Suite) Description() string {
|
||||
numDeps := len(s.Dependencies.Dependencies())
|
||||
pluralizer := "ies"
|
||||
if numDeps == 1 {
|
||||
pluralizer = "y"
|
||||
}
|
||||
return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
|
||||
}
|
||||
|
||||
func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
|
||||
return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
|
||||
}
|
||||
|
||||
func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
|
||||
packageHash := s.sharedPackageHashes.Get(packagePath)
|
||||
var modifiedTime time.Time
|
||||
if includeTests {
|
||||
modifiedTime = packageHash.TestModifiedTime
|
||||
} else {
|
||||
modifiedTime = packageHash.CodeModifiedTime
|
||||
}
|
||||
|
||||
return modifiedTime.Sub(s.RunTime)
|
||||
}
|

175  vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go  generated  vendored  Normal file
@@ -0,0 +1,175 @@
package main

import (
    "flag"
    "fmt"
    "regexp"
    "time"

    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/ginkgo/interrupthandler"
    "github.com/onsi/ginkgo/ginkgo/testrunner"
    "github.com/onsi/ginkgo/ginkgo/testsuite"
    "github.com/onsi/ginkgo/ginkgo/watch"
    colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
)

func BuildWatchCommand() *Command {
    commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError))
    interruptHandler := interrupthandler.NewInterruptHandler()
    notifier := NewNotifier(commandFlags)
    watcher := &SpecWatcher{
        commandFlags:     commandFlags,
        notifier:         notifier,
        interruptHandler: interruptHandler,
        suiteRunner:      NewSuiteRunner(notifier, interruptHandler),
    }

    return &Command{
        Name:         "watch",
        FlagSet:      commandFlags.FlagSet,
        UsageCommand: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
        Usage: []string{
            "Watches the tests in the passed in <PACKAGES> and runs them when changes occur.",
            "Any arguments after -- will be passed to the test.",
        },
        Command:                   watcher.WatchSpecs,
        SuppressFlagDocumentation: true,
        FlagDocSubstitute: []string{
            "Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails",
        },
    }
}

type SpecWatcher struct {
    commandFlags     *RunWatchAndBuildCommandFlags
    notifier         *Notifier
    interruptHandler *interrupthandler.InterruptHandler
    suiteRunner      *SuiteRunner
}

func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
    w.commandFlags.computeNodes()
    w.notifier.VerifyNotificationsAreAvailable()

    w.WatchSuites(args, additionalArgs)
}

func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner {
    runners := []*testrunner.TestRunner{}

    for _, suite := range suites {
        runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Timeout, w.commandFlags.GoOpts, additionalArgs))
    }

    return runners
}

func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
    suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)

    if len(suites) == 0 {
        complainAndQuit("Found no test suites")
    }

    fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
    deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth, regexp.MustCompile(w.commandFlags.WatchRegExp))
    delta, errors := deltaTracker.Delta(suites)

    fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
    for _, suite := range delta.NewSuites {
        fmt.Println(" " + suite.Description())
    }

    for suite, err := range errors {
        fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
    }

    if len(suites) == 1 {
        runners := w.runnersForSuites(suites, additionalArgs)
        w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil)
        runners[0].CleanUp()
    }

    ticker := time.NewTicker(time.Second)

    for {
        select {
        case <-ticker.C:
            suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
            delta, _ := deltaTracker.Delta(suites)
            coloredStream := colorable.NewColorableStdout()

            suitesToRun := []testsuite.TestSuite{}

            if len(delta.NewSuites) > 0 {
                fmt.Fprintf(coloredStream, greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
                for _, suite := range delta.NewSuites {
                    suitesToRun = append(suitesToRun, suite.Suite)
                    fmt.Fprintln(coloredStream, " "+suite.Description())
                }
            }

            modifiedSuites := delta.ModifiedSuites()
            if len(modifiedSuites) > 0 {
                fmt.Fprintln(coloredStream, greenColor+"\nDetected changes in:"+defaultStyle)
                for _, pkg := range delta.ModifiedPackages {
                    fmt.Fprintln(coloredStream, " "+pkg)
                }
                fmt.Fprintf(coloredStream, greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites)))
                for _, suite := range modifiedSuites {
                    suitesToRun = append(suitesToRun, suite.Suite)
                    fmt.Fprintln(coloredStream, " "+suite.Description())
                }
                fmt.Fprintln(coloredStream, "")
            }

            if len(suitesToRun) > 0 {
                w.UpdateSeed()
                w.ComputeSuccinctMode(len(suitesToRun))
                runners := w.runnersForSuites(suitesToRun, additionalArgs)
                result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) {
                    deltaTracker.WillRun(suite)
                })
                for _, runner := range runners {
                    runner.CleanUp()
                }
                if !w.interruptHandler.WasInterrupted() {
                    color := redColor
                    if result.Passed {
                        color = greenColor
                    }
                    fmt.Fprintln(coloredStream, color+"\nDone. Resuming watch..."+defaultStyle)
                }
            }

        case <-w.interruptHandler.C:
            return
        }
    }
}

func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) {
    if config.DefaultReporterConfig.Verbose {
        config.DefaultReporterConfig.Succinct = false
        return
    }

    if w.commandFlags.wasSet("succinct") {
        return
    }

    if numSuites == 1 {
        config.DefaultReporterConfig.Succinct = false
    }

    if numSuites > 1 {
        config.DefaultReporterConfig.Succinct = true
    }
}

func (w *SpecWatcher) UpdateSeed() {
    if !w.commandFlags.wasSet("seed") {
        config.GinkgoConfig.RandomSeed = time.Now().Unix()
    }
}

48  vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go  generated  vendored  Normal file
@@ -0,0 +1,48 @@
package codelocation

import (
    "regexp"
    "runtime"
    "runtime/debug"
    "strings"

    "github.com/onsi/ginkgo/types"
)

func New(skip int) types.CodeLocation {
    _, file, line, _ := runtime.Caller(skip + 1)
    stackTrace := PruneStack(string(debug.Stack()), skip+1)
    return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
}

// PruneStack removes references to functions that are internal to Ginkgo
// and the Go runtime from a stack string and a certain number of stack entries
// at the beginning of the stack. The stack string has the format
// as returned by runtime/debug.Stack. The leading goroutine information is
// optional and always removed if present. Beware that runtime/debug.Stack
// adds itself as first entry, so typically skip must be >= 1 to remove that
// entry.
func PruneStack(fullStackTrace string, skip int) string {
    stack := strings.Split(fullStackTrace, "\n")
    // Ensure that the even entries are the method names and the
    // odd entries the source code information.
    if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
        // Ignore "goroutine 29 [running]:" line.
        stack = stack[1:]
    }
    // The "+1" is for skipping over the initial entry, which is
    // runtime/debug.Stack() itself.
    if len(stack) > 2*(skip+1) {
        stack = stack[2*(skip+1):]
    }
    prunedStack := []string{}
    re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
    for i := 0; i < len(stack)/2; i++ {
        // We filter out based on the source code file name.
        if !re.Match([]byte(stack[i*2+1])) {
            prunedStack = append(prunedStack, stack[i*2])
            prunedStack = append(prunedStack, stack[i*2+1])
        }
    }
    return strings.Join(prunedStack, "\n")
}

151  vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go  generated  vendored  Normal file
@@ -0,0 +1,151 @@
package containernode

import (
    "math/rand"
    "sort"

    "github.com/onsi/ginkgo/internal/leafnodes"
    "github.com/onsi/ginkgo/types"
)

type subjectOrContainerNode struct {
    containerNode *ContainerNode
    subjectNode   leafnodes.SubjectNode
}

func (n subjectOrContainerNode) text() string {
    if n.containerNode != nil {
        return n.containerNode.Text()
    } else {
        return n.subjectNode.Text()
    }
}

type CollatedNodes struct {
    Containers []*ContainerNode
    Subject    leafnodes.SubjectNode
}

type ContainerNode struct {
    text         string
    flag         types.FlagType
    codeLocation types.CodeLocation

    setupNodes               []leafnodes.BasicNode
    subjectAndContainerNodes []subjectOrContainerNode
}

func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
    return &ContainerNode{
        text:         text,
        flag:         flag,
        codeLocation: codeLocation,
    }
}

func (container *ContainerNode) Shuffle(r *rand.Rand) {
    sort.Sort(container)
    permutation := r.Perm(len(container.subjectAndContainerNodes))
    shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
    for i, j := range permutation {
        shuffledNodes[i] = container.subjectAndContainerNodes[j]
    }
    container.subjectAndContainerNodes = shuffledNodes
}

func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
    if node.flag == types.FlagTypePending {
        return false
    }

    shouldUnfocus := false
    for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
        if subjectOrContainerNode.containerNode != nil {
            shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
        } else {
            shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
        }
    }

    if shouldUnfocus {
        if node.flag == types.FlagTypeFocused {
            node.flag = types.FlagTypeNone
        }
        return true
    }

    return node.flag == types.FlagTypeFocused
}

func (node *ContainerNode) Collate() []CollatedNodes {
    return node.collate([]*ContainerNode{})
}

func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
    collated := make([]CollatedNodes, 0)

    containers := make([]*ContainerNode, len(enclosingContainers))
    copy(containers, enclosingContainers)
    containers = append(containers, node)

    for _, subjectOrContainer := range node.subjectAndContainerNodes {
        if subjectOrContainer.containerNode != nil {
            collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
        } else {
            collated = append(collated, CollatedNodes{
                Containers: containers,
                Subject:    subjectOrContainer.subjectNode,
            })
        }
    }

    return collated
}

func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
    node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
}

func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
    node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
}

func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
    node.setupNodes = append(node.setupNodes, setupNode)
}

func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
    nodes := []leafnodes.BasicNode{}
    for _, setupNode := range node.setupNodes {
        if setupNode.Type() == nodeType {
            nodes = append(nodes, setupNode)
        }
    }
    return nodes
}

func (node *ContainerNode) Text() string {
    return node.text
}

func (node *ContainerNode) CodeLocation() types.CodeLocation {
    return node.codeLocation
}

func (node *ContainerNode) Flag() types.FlagType {
    return node.flag
}

//sort.Interface

func (node *ContainerNode) Len() int {
    return len(node.subjectAndContainerNodes)
}

func (node *ContainerNode) Less(i, j int) bool {
    return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
}

func (node *ContainerNode) Swap(i, j int) {
    node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
}

92  vendor/github.com/onsi/ginkgo/internal/failer/failer.go  generated  vendored  Normal file
@@ -0,0 +1,92 @@
package failer

import (
    "fmt"
    "sync"

    "github.com/onsi/ginkgo/types"
)

type Failer struct {
    lock    *sync.Mutex
    failure types.SpecFailure
    state   types.SpecState
}

func New() *Failer {
    return &Failer{
        lock:  &sync.Mutex{},
        state: types.SpecStatePassed,
    }
}

func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
    f.lock.Lock()
    defer f.lock.Unlock()

    if f.state == types.SpecStatePassed {
        f.state = types.SpecStatePanicked
        f.failure = types.SpecFailure{
            Message:        "Test Panicked",
            Location:       location,
            ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
        }
    }
}

func (f *Failer) Timeout(location types.CodeLocation) {
    f.lock.Lock()
    defer f.lock.Unlock()

    if f.state == types.SpecStatePassed {
        f.state = types.SpecStateTimedOut
        f.failure = types.SpecFailure{
            Message:  "Timed out",
            Location: location,
        }
    }
}

func (f *Failer) Fail(message string, location types.CodeLocation) {
    f.lock.Lock()
    defer f.lock.Unlock()

    if f.state == types.SpecStatePassed {
        f.state = types.SpecStateFailed
        f.failure = types.SpecFailure{
            Message:  message,
            Location: location,
        }
    }
}

func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
    f.lock.Lock()
    defer f.lock.Unlock()

    failure := f.failure
    outcome := f.state
    if outcome != types.SpecStatePassed {
        failure.ComponentType = componentType
        failure.ComponentIndex = componentIndex
        failure.ComponentCodeLocation = componentCodeLocation
    }

    f.state = types.SpecStatePassed
    f.failure = types.SpecFailure{}

    return failure, outcome
}

func (f *Failer) Skip(message string, location types.CodeLocation) {
    f.lock.Lock()
    defer f.lock.Unlock()

    if f.state == types.SpecStatePassed {
        f.state = types.SpecStateSkipped
        f.failure = types.SpecFailure{
            Message:  message,
            Location: location,
        }
    }
}

103  vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go  generated  vendored  Normal file
@@ -0,0 +1,103 @@
package leafnodes

import (
    "math"
    "time"

    "sync"

    "github.com/onsi/ginkgo/types"
)

type benchmarker struct {
    mu           sync.Mutex
    measurements map[string]*types.SpecMeasurement
    orderCounter int
}

func newBenchmarker() *benchmarker {
    return &benchmarker{
        measurements: make(map[string]*types.SpecMeasurement),
    }
}

func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
    t := time.Now()
    body()
    elapsedTime = time.Since(t)

    b.mu.Lock()
    defer b.mu.Unlock()
    measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
    measurement.Results = append(measurement.Results, elapsedTime.Seconds())

    return
}

func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
    b.mu.Lock()
    measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
    defer b.mu.Unlock()
    measurement.Results = append(measurement.Results, value)
}

func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
    b.mu.Lock()
    measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
    defer b.mu.Unlock()
    measurement.Results = append(measurement.Results, value)
}

func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
    measurement, ok := b.measurements[name]
    if !ok {
        var computedInfo interface{}
        computedInfo = nil
        if len(info) > 0 {
            computedInfo = info[0]
        }
        measurement = &types.SpecMeasurement{
            Name:          name,
            Info:          computedInfo,
            Order:         b.orderCounter,
            SmallestLabel: smallestLabel,
            LargestLabel:  largestLabel,
            AverageLabel:  averageLabel,
            Units:         units,
            Precision:     precision,
            Results:       make([]float64, 0),
        }
        b.measurements[name] = measurement
        b.orderCounter++
    }

    return measurement
}

func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
    b.mu.Lock()
    defer b.mu.Unlock()
    for _, measurement := range b.measurements {
        measurement.Smallest = math.MaxFloat64
        measurement.Largest = -math.MaxFloat64
        sum := float64(0)
        sumOfSquares := float64(0)

        for _, result := range measurement.Results {
            if result > measurement.Largest {
                measurement.Largest = result
            }
            if result < measurement.Smallest {
                measurement.Smallest = result
            }
            sum += result
            sumOfSquares += result * result
        }

        n := float64(len(measurement.Results))
        measurement.Average = sum / n
        measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
    }

    return b.measurements
}

19  vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go  generated  vendored  Normal file
@@ -0,0 +1,19 @@
package leafnodes

import (
    "github.com/onsi/ginkgo/types"
)

type BasicNode interface {
    Type() types.SpecComponentType
    Run() (types.SpecState, types.SpecFailure)
    CodeLocation() types.CodeLocation
}

type SubjectNode interface {
    BasicNode

    Text() string
    Flag() types.FlagType
    Samples() int
}

47  vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go  generated  vendored  Normal file
@@ -0,0 +1,47 @@
package leafnodes

import (
    "time"

    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/types"
)

type ItNode struct {
    runner *runner

    flag types.FlagType
    text string
}

func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
    return &ItNode{
        runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
        flag:   flag,
        text:   text,
    }
}

func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
    return node.runner.run()
}

func (node *ItNode) Type() types.SpecComponentType {
    return types.SpecComponentTypeIt
}

func (node *ItNode) Text() string {
    return node.text
}

func (node *ItNode) Flag() types.FlagType {
    return node.flag
}

func (node *ItNode) CodeLocation() types.CodeLocation {
    return node.runner.codeLocation
}

func (node *ItNode) Samples() int {
    return 1
}

62  vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go  generated  vendored  Normal file
@@ -0,0 +1,62 @@
package leafnodes

import (
    "reflect"

    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/types"
)

type MeasureNode struct {
    runner *runner

    text        string
    flag        types.FlagType
    samples     int
    benchmarker *benchmarker
}

func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
    benchmarker := newBenchmarker()

    wrappedBody := func() {
        reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
    }

    return &MeasureNode{
        runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),

        text:        text,
        flag:        flag,
        samples:     samples,
        benchmarker: benchmarker,
    }
}

func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
    return node.runner.run()
}

func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
    return node.benchmarker.measurementsReport()
}

func (node *MeasureNode) Type() types.SpecComponentType {
    return types.SpecComponentTypeMeasure
}

func (node *MeasureNode) Text() string {
    return node.text
}

func (node *MeasureNode) Flag() types.FlagType {
    return node.flag
}

func (node *MeasureNode) CodeLocation() types.CodeLocation {
    return node.runner.codeLocation
}

func (node *MeasureNode) Samples() int {
    return node.samples
}

117  vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go  generated  vendored  Normal file
@@ -0,0 +1,117 @@
package leafnodes

import (
    "fmt"
    "reflect"
    "time"

    "github.com/onsi/ginkgo/internal/codelocation"
    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/types"
)

type runner struct {
    isAsync          bool
    asyncFunc        func(chan<- interface{})
    syncFunc         func()
    codeLocation     types.CodeLocation
    timeoutThreshold time.Duration
    nodeType         types.SpecComponentType
    componentIndex   int
    failer           *failer.Failer
}

func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
    bodyType := reflect.TypeOf(body)
    if bodyType.Kind() != reflect.Func {
        panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
    }

    runner := &runner{
        codeLocation:     codeLocation,
        timeoutThreshold: timeout,
        failer:           failer,
        nodeType:         nodeType,
        componentIndex:   componentIndex,
    }

    switch bodyType.NumIn() {
    case 0:
        runner.syncFunc = body.(func())
        return runner
    case 1:
        if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
            panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
        }

        wrappedBody := func(done chan<- interface{}) {
            bodyValue := reflect.ValueOf(body)
            bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
        }

        runner.isAsync = true
        runner.asyncFunc = wrappedBody
        return runner
    }

    panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
}

func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
    if r.isAsync {
        return r.runAsync()
    } else {
        return r.runSync()
    }
}

func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
    done := make(chan interface{}, 1)

    go func() {
        finished := false

        defer func() {
            if e := recover(); e != nil || !finished {
                r.failer.Panic(codelocation.New(2), e)
                select {
                case <-done:
                    break
                default:
                    close(done)
                }
            }
        }()

        r.asyncFunc(done)
        finished = true
    }()

    // If this goroutine gets no CPU time before the select block,
    // the <-done case may complete even if the test took longer than the timeoutThreshold.
    // This can cause flaky behaviour, but we haven't seen it in the wild.
    select {
    case <-done:
    case <-time.After(r.timeoutThreshold):
        r.failer.Timeout(r.codeLocation)
    }

    failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
    return
}
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
    finished := false

    defer func() {
        if e := recover(); e != nil || !finished {
            r.failer.Panic(codelocation.New(2), e)
        }

        failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
    }()

    r.syncFunc()
    finished = true

    return
}

48  vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go  generated  vendored  Normal file
@@ -0,0 +1,48 @@
package leafnodes

import (
    "time"

    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/types"
)

type SetupNode struct {
    runner *runner
}

func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
    return node.runner.run()
}

func (node *SetupNode) Type() types.SpecComponentType {
    return node.runner.nodeType
}

func (node *SetupNode) CodeLocation() types.CodeLocation {
    return node.runner.codeLocation
}

func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
    return &SetupNode{
        runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
    }
}

func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
    return &SetupNode{
        runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
    }
}

func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
    return &SetupNode{
        runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
    }
}

func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
    return &SetupNode{
        runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
    }
}

55  vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go  generated  vendored  Normal file
@@ -0,0 +1,55 @@
package leafnodes

import (
    "time"

    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/types"
)

type SuiteNode interface {
    Run(parallelNode int, parallelTotal int, syncHost string) bool
    Passed() bool
    Summary() *types.SetupSummary
}

type simpleSuiteNode struct {
    runner  *runner
    outcome types.SpecState
    failure types.SpecFailure
    runTime time.Duration
}

func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
    t := time.Now()
    node.outcome, node.failure = node.runner.run()
    node.runTime = time.Since(t)

    return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Passed() bool {
    return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Summary() *types.SetupSummary {
    return &types.SetupSummary{
        ComponentType: node.runner.nodeType,
        CodeLocation:  node.runner.codeLocation,
        State:         node.outcome,
        RunTime:       node.runTime,
        Failure:       node.failure,
    }
}

func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
    return &simpleSuiteNode{
        runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
    }
}

func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
    return &simpleSuiteNode{
        runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
    }
}

90  vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go  generated  vendored  Normal file
@@ -0,0 +1,90 @@
package leafnodes

import (
    "encoding/json"
    "io/ioutil"
    "net/http"
    "time"

    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/types"
)

type synchronizedAfterSuiteNode struct {
    runnerA *runner
    runnerB *runner

    outcome types.SpecState
    failure types.SpecFailure
    runTime time.Duration
}

func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
    return &synchronizedAfterSuiteNode{
        runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
        runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
    }
}

func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
    node.outcome, node.failure = node.runnerA.run()

    if parallelNode == 1 {
        if parallelTotal > 1 {
            node.waitUntilOtherNodesAreDone(syncHost)
        }

        outcome, failure := node.runnerB.run()

        if node.outcome == types.SpecStatePassed {
            node.outcome, node.failure = outcome, failure
        }
    }

    return node.outcome == types.SpecStatePassed
}

func (node *synchronizedAfterSuiteNode) Passed() bool {
    return node.outcome == types.SpecStatePassed
}

func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
    return &types.SetupSummary{
        ComponentType: node.runnerA.nodeType,
        CodeLocation:  node.runnerA.codeLocation,
        State:         node.outcome,
        RunTime:       node.runTime,
        Failure:       node.failure,
    }
}

func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
    for {
        if node.canRun(syncHost) {
            return
        }

        time.Sleep(50 * time.Millisecond)
    }
}

func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
    resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
    if err != nil || resp.StatusCode != http.StatusOK {
        return false
    }

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return false
    }
    resp.Body.Close()

    afterSuiteData := types.RemoteAfterSuiteData{}
    err = json.Unmarshal(body, &afterSuiteData)
    if err != nil {
        return false
    }

    return afterSuiteData.CanRun
}

181  vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go  generated  vendored  Normal file
@@ -0,0 +1,181 @@
package leafnodes

import (
    "bytes"
    "encoding/json"
    "io/ioutil"
    "net/http"
    "reflect"
    "time"

    "github.com/onsi/ginkgo/internal/failer"
    "github.com/onsi/ginkgo/types"
)

type synchronizedBeforeSuiteNode struct {
    runnerA *runner
    runnerB *runner

    data []byte

    outcome types.SpecState
    failure types.SpecFailure
    runTime time.Duration
}

func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
    node := &synchronizedBeforeSuiteNode{}

    node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
    node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)

    return node
}

func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
    t := time.Now()
    defer func() {
        node.runTime = time.Since(t)
    }()

    if parallelNode == 1 {
        node.outcome, node.failure = node.runA(parallelTotal, syncHost)
    } else {
        node.outcome, node.failure = node.waitForA(syncHost)
    }

    if node.outcome != types.SpecStatePassed {
        return false
    }
    node.outcome, node.failure = node.runnerB.run()

    return node.outcome == types.SpecStatePassed
}

func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
    outcome, failure := node.runnerA.run()

    if parallelTotal > 1 {
        state := types.RemoteBeforeSuiteStatePassed
        if outcome != types.SpecStatePassed {
            state = types.RemoteBeforeSuiteStateFailed
        }
        json := (types.RemoteBeforeSuiteData{
            Data:  node.data,
            State: state,
        }).ToJSON()
        http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
    }

    return outcome, failure
}

func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
    failure := func(message string) types.SpecFailure {
        return types.SpecFailure{
            Message:               message,
            Location:              node.runnerA.codeLocation,
            ComponentType:         node.runnerA.nodeType,
            ComponentIndex:        node.runnerA.componentIndex,
            ComponentCodeLocation: node.runnerA.codeLocation,
        }
    }
    for {
        resp, err := http.Get(syncHost + "/BeforeSuiteState")
        if err != nil || resp.StatusCode != http.StatusOK {
            return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
        }

        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
        }
        resp.Body.Close()

        beforeSuiteData := types.RemoteBeforeSuiteData{}
        err = json.Unmarshal(body, &beforeSuiteData)
        if err != nil {
            return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
        }

        switch beforeSuiteData.State {
        case types.RemoteBeforeSuiteStatePassed:
            node.data = beforeSuiteData.Data
            return types.SpecStatePassed, types.SpecFailure{}
        case types.RemoteBeforeSuiteStateFailed:
            return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
        case types.RemoteBeforeSuiteStateDisappeared:
            return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
        }

        time.Sleep(50 * time.Millisecond)
    }
}

func (node *synchronizedBeforeSuiteNode) Passed() bool {
    return node.outcome == types.SpecStatePassed
}

func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
    return &types.SetupSummary{
        ComponentType: node.runnerA.nodeType,
        CodeLocation:  node.runnerA.codeLocation,
        State:         node.outcome,
        RunTime:       node.runTime,
        Failure:       node.failure,
    }
}

func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
    typeA := reflect.TypeOf(bodyA)
    if typeA.Kind() != reflect.Func {
        panic("SynchronizedBeforeSuite expects a function as its first argument")
    }

    takesNothing := typeA.NumIn() == 0
    takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
    returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8

    if !((takesNothing || takesADoneChannel) && returnsBytes) {
        panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
    }

    if takesADoneChannel {
        return func(done chan<- interface{}) {
            out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
            node.data = out[0].Interface().([]byte)
        }
    }

    return func() {
        out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
        node.data = out[0].Interface().([]byte)
    }
}

func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
    typeB := reflect.TypeOf(bodyB)
    if typeB.Kind() != reflect.Func {
        panic("SynchronizedBeforeSuite expects a function as its second argument")
    }

    returnsNothing := typeB.NumOut() == 0
    takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
    takesBytesAndDone := typeB.NumIn() == 2 &&
        typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
        typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface

    if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
        panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
    }

    if takesBytesAndDone {
        return func(done chan<- interface{}) {
            reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
        }
    }

    return func() {
        reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
    }
}

249  vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go  generated  vendored  Normal file
@@ -0,0 +1,249 @@
/*

Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:

    ginkgo -nodes=N

where N is the number of nodes you desire.
*/
package remote

import (
    "time"

    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/reporters/stenographer"
    "github.com/onsi/ginkgo/types"
)

type configAndSuite struct {
    config  config.GinkgoConfigType
    summary *types.SuiteSummary
}

type Aggregator struct {
    nodeCount    int
    config       config.DefaultReporterConfigType
    stenographer stenographer.Stenographer
    result       chan bool

    suiteBeginnings           chan configAndSuite
    aggregatedSuiteBeginnings []configAndSuite

    beforeSuites           chan *types.SetupSummary
    aggregatedBeforeSuites []*types.SetupSummary

    afterSuites           chan *types.SetupSummary
    aggregatedAfterSuites []*types.SetupSummary

    specCompletions chan *types.SpecSummary
    completedSpecs  []*types.SpecSummary

    suiteEndings           chan *types.SuiteSummary
    aggregatedSuiteEndings []*types.SuiteSummary
    specs                  []*types.SpecSummary

    startTime time.Time
}

func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
    aggregator := &Aggregator{
        nodeCount:    nodeCount,
        result:       result,
        config:       config,
        stenographer: stenographer,

        suiteBeginnings: make(chan configAndSuite),
        beforeSuites:    make(chan *types.SetupSummary),
        afterSuites:     make(chan *types.SetupSummary),
        specCompletions: make(chan *types.SpecSummary),
        suiteEndings:    make(chan *types.SuiteSummary),
    }

    go aggregator.mux()

    return aggregator
}

func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
    aggregator.suiteBeginnings <- configAndSuite{config, summary}
}

func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
    aggregator.beforeSuites <- setupSummary
}

func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
    aggregator.afterSuites <- setupSummary
}

func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
    //noop
}

func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
    aggregator.specCompletions <- specSummary
}

func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
    aggregator.suiteEndings <- summary
}

func (aggregator *Aggregator) mux() {
loop:
    for {
        select {
        case configAndSuite := <-aggregator.suiteBeginnings:
            aggregator.registerSuiteBeginning(configAndSuite)
        case setupSummary := <-aggregator.beforeSuites:
            aggregator.registerBeforeSuite(setupSummary)
        case setupSummary := <-aggregator.afterSuites:
            aggregator.registerAfterSuite(setupSummary)
        case specSummary := <-aggregator.specCompletions:
            aggregator.registerSpecCompletion(specSummary)
        case suite := <-aggregator.suiteEndings:
            finished, passed := aggregator.registerSuiteEnding(suite)
            if finished {
                aggregator.result <- passed
                break loop
            }
        }
    }
}

func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
    aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)

    if len(aggregator.aggregatedSuiteBeginnings) == 1 {
        aggregator.startTime = time.Now()
    }

    if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
        return
    }

    aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)

    totalNumberOfSpecs := 0
    if len(aggregator.aggregatedSuiteBeginnings) > 0 {
        totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
    }

    aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
    aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
    aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
    aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
    aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
    aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
    aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
    aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
    aggregator.specs = append(aggregator.specs, specSummary)
    aggregator.flushCompletedSpecs()
}

func (aggregator *Aggregator) flushCompletedSpecs() {
    if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
        return
    }

    for _, setupSummary := range aggregator.aggregatedBeforeSuites {
        aggregator.announceBeforeSuite(setupSummary)
    }

    for _, specSummary := range aggregator.completedSpecs {
        aggregator.announceSpec(specSummary)
    }

    for _, setupSummary := range aggregator.aggregatedAfterSuites {
        aggregator.announceAfterSuite(setupSummary)
    }

    aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
    aggregator.completedSpecs = []*types.SpecSummary{}
    aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
}

func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
    aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
    if setupSummary.State != types.SpecStatePassed {
        aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
    }
}

func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
    aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
    if setupSummary.State != types.SpecStatePassed {
        aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
    }
}

func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
    if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
        aggregator.stenographer.AnnounceSpecWillRun(specSummary)
    }

    aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)

    switch specSummary.State {
    case types.SpecStatePassed:
        if specSummary.IsMeasurement {
            aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct)
        } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
            aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct)
        } else {
            aggregator.stenographer.AnnounceSuccessfulSpec(specSummary)
        }

    case types.SpecStatePending:
        aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
    case types.SpecStateSkipped:
        aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
    case types.SpecStateTimedOut:
        aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
    case types.SpecStatePanicked:
        aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
    case types.SpecStateFailed:
        aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
    }
}

func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
    aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
    if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
        return false, false
    }

    aggregatedSuiteSummary := &types.SuiteSummary{}
    aggregatedSuiteSummary.SuiteSucceeded = true

    for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
        if !suiteSummary.SuiteSucceeded {
            aggregatedSuiteSummary.SuiteSucceeded = false
        }

        aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
        aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
        aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
        aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
        aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
        aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
        aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
    }

    aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)

    aggregator.stenographer.SummarizeFailures(aggregator.specs)
    aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)

    return true, aggregatedSuiteSummary.SuiteSucceeded
}
147
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
Normal file
147
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
Normal file
|
@ -0,0 +1,147 @@
|
|||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
//An interface to net/http's client to allow the injection of fakes under test
|
||||
type Poster interface {
|
||||
Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
|
||||
}
|
||||
|
||||
/*
|
||||
The ForwardingReporter is a Ginkgo reporter that forwards information to
|
||||
a Ginkgo remote server.
|
||||
|
||||
When streaming parallel test output, this repoter is automatically installed by Ginkgo.
|
||||
|
||||
This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner
|
||||
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
|
||||
in place of Ginkgo's DefaultReporter.
|
||||
*/
|
||||
|
||||
type ForwardingReporter struct {
|
||||
serverHost string
|
||||
poster Poster
|
||||
outputInterceptor OutputInterceptor
|
||||
debugMode bool
|
||||
debugFile *os.File
|
||||
nestedReporter *reporters.DefaultReporter
|
||||
}
|
||||
|
||||
func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
|
||||
reporter := &ForwardingReporter{
|
||||
serverHost: serverHost,
|
||||
poster: poster,
|
||||
outputInterceptor: outputInterceptor,
|
||||
}
|
||||
|
||||
if debugFile != "" {
|
||||
var err error
|
||||
reporter.debugMode = true
|
||||
reporter.debugFile, err = os.Create(debugFile)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !config.Verbose {
|
||||
//if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
|
||||
ginkgoWriter.AndRedirectTo(reporter.debugFile)
|
||||
}
|
||||
outputInterceptor.StreamTo(reporter.debugFile) //This is not working
|
||||
|
||||
stenographer := stenographer.New(false, true, reporter.debugFile)
|
||||
config.Succinct = false
|
||||
config.Verbose = true
|
||||
config.FullTrace = true
|
||||
reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
|
||||
}
|
||||
|
||||
return reporter
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) post(path string, data interface{}) {
|
||||
encoded, _ := json.Marshal(data)
|
||||
buffer := bytes.NewBuffer(encoded)
|
||||
reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
data := struct {
|
||||
Config config.GinkgoConfigType `json:"config"`
|
||||
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||
}{
|
||||
conf,
|
||||
summary,
|
||||
}
|
||||
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecSuiteWillBegin", data)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/BeforeSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecWillRun(specSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecWillRun", specSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
specSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecDidComplete(specSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecDidComplete", specSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/AfterSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
if reporter.debugMode {
|
||||
reporter.nestedReporter.SpecSuiteDidEnd(summary)
|
||||
reporter.debugFile.Sync()
|
||||
}
|
||||
reporter.post("/SpecSuiteDidEnd", summary)
|
||||
}
|
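The only seam the ForwardingReporter needs is the small Poster interface above, so it can be exercised without a live Ginkgo remote server. Below is a minimal sketch of such a test double; the fakePoster type, its field names, and the remote_test package placement are illustrative assumptions, not part of the vendored code.

package remote_test

import (
	"io"
	"io/ioutil"
	"net/http"
)

// fakePoster records every POST a ForwardingReporter makes instead of
// talking to a real remote server. Hypothetical test helper.
type fakePoster struct {
	urls   []string
	bodies [][]byte
}

func (p *fakePoster) Post(url string, bodyType string, body io.Reader) (*http.Response, error) {
	payload, _ := ioutil.ReadAll(body)
	p.urls = append(p.urls, url)
	p.bodies = append(p.bodies, payload)
	// The reporter discards the response, so a bare 200 is enough.
	return &http.Response{StatusCode: http.StatusOK, Body: http.NoBody}, nil
}

Because NewForwardingReporter accepts any Poster, a test can hand it a *fakePoster and then assert on the endpoints (/SpecWillRun, /SpecDidComplete, ...) and JSON bodies it accumulated.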
13
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
package remote
|
||||
|
||||
import "os"
|
||||
|
||||
/*
|
||||
The OutputInterceptor is used by the ForwardingReporter to
|
||||
intercept and capture all stdout and stderr output during a test run.
|
||||
*/
|
||||
type OutputInterceptor interface {
|
||||
StartInterceptingOutput() error
|
||||
StopInterceptingAndReturnOutput() (string, error)
|
||||
StreamTo(*os.File)
|
||||
}
|
82
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,82 @@
|
|||
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/nxadm/tail"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func NewOutputInterceptor() OutputInterceptor {
|
||||
return &outputInterceptor{}
|
||||
}
|
||||
|
||||
type outputInterceptor struct {
|
||||
redirectFile *os.File
|
||||
streamTarget *os.File
|
||||
intercepting bool
|
||||
tailer *tail.Tail
|
||||
doneTailing chan bool
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||
if interceptor.intercepting {
|
||||
return errors.New("Already intercepting output!")
|
||||
}
|
||||
interceptor.intercepting = true
|
||||
|
||||
var err error
|
||||
|
||||
interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This might call Dup3 if the dup2 syscall is not available, e.g. on
|
||||
// linux/arm64 or linux/riscv64
|
||||
unix.Dup2(int(interceptor.redirectFile.Fd()), 1)
|
||||
unix.Dup2(int(interceptor.redirectFile.Fd()), 2)
|
||||
|
||||
if interceptor.streamTarget != nil {
|
||||
interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
|
||||
interceptor.doneTailing = make(chan bool)
|
||||
|
||||
go func() {
|
||||
for line := range interceptor.tailer.Lines {
|
||||
interceptor.streamTarget.Write([]byte(line.Text + "\n"))
|
||||
}
|
||||
close(interceptor.doneTailing)
|
||||
}()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||
if !interceptor.intercepting {
|
||||
return "", errors.New("Not intercepting output!")
|
||||
}
|
||||
|
||||
interceptor.redirectFile.Close()
|
||||
output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
|
||||
os.Remove(interceptor.redirectFile.Name())
|
||||
|
||||
interceptor.intercepting = false
|
||||
|
||||
if interceptor.streamTarget != nil {
|
||||
interceptor.tailer.Stop()
|
||||
interceptor.tailer.Cleanup()
|
||||
<-interceptor.doneTailing
|
||||
interceptor.streamTarget.Sync()
|
||||
}
|
||||
|
||||
return string(output), err
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StreamTo(out *os.File) {
|
||||
interceptor.streamTarget = out
|
||||
}
|
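The interceptor above works at the file-descriptor level: it points fds 1 and 2 at a temp file with unix.Dup2 and reads the file back when interception stops. A stripped-down sketch of that trick follows, assuming a platform where unix.Dup2 exists (the vendored file's build tags list them); error handling is elided for brevity.

// +build linux darwin

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	tmp, _ := ioutil.TempFile("", "captured-output")
	saved, _ := unix.Dup(1) // remember the real stdout

	unix.Dup2(int(tmp.Fd()), 1) // anything written to fd 1 now lands in tmp
	fmt.Println("this goes to the temp file, not the terminal")

	unix.Dup2(saved, 1) // put stdout back
	unix.Close(saved)

	tmp.Close()
	captured, _ := ioutil.ReadFile(tmp.Name())
	os.Remove(tmp.Name())
	fmt.Printf("captured: %q\n", captured)
}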
36
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
Normal file
|
@ -0,0 +1,36 @@
|
|||
// +build windows
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
)
|
||||
|
||||
func NewOutputInterceptor() OutputInterceptor {
|
||||
return &outputInterceptor{}
|
||||
}
|
||||
|
||||
type outputInterceptor struct {
|
||||
intercepting bool
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||
if interceptor.intercepting {
|
||||
return errors.New("Already intercepting output!")
|
||||
}
|
||||
interceptor.intercepting = true
|
||||
|
||||
// not working on windows...
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||
// not working on windows...
|
||||
interceptor.intercepting = false
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StreamTo(*os.File) {}
|
224
vendor/github.com/onsi/ginkgo/internal/remote/server.go
generated
vendored
Normal file
|
@ -0,0 +1,224 @@
|
|||
/*
|
||||
|
||||
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
|
||||
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
|
||||
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/spec_iterator"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
/*
|
||||
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
|
||||
It then forwards that communication to attached reporters.
|
||||
*/
|
||||
type Server struct {
|
||||
listener net.Listener
|
||||
reporters []reporters.Reporter
|
||||
alives []func() bool
|
||||
lock *sync.Mutex
|
||||
beforeSuiteData types.RemoteBeforeSuiteData
|
||||
parallelTotal int
|
||||
counter int
|
||||
}
|
||||
|
||||
//Create a new server, automatically selecting a port
|
||||
func NewServer(parallelTotal int) (*Server, error) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Server{
|
||||
listener: listener,
|
||||
lock: &sync.Mutex{},
|
||||
alives: make([]func() bool, parallelTotal),
|
||||
beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
|
||||
parallelTotal: parallelTotal,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||
func (server *Server) Start() {
|
||||
httpServer := &http.Server{}
|
||||
mux := http.NewServeMux()
|
||||
httpServer.Handler = mux
|
||||
|
||||
//streaming endpoints
|
||||
mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
|
||||
mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
|
||||
mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
|
||||
mux.HandleFunc("/SpecWillRun", server.specWillRun)
|
||||
mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
|
||||
mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
|
||||
|
||||
//synchronization endpoints
|
||||
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
|
||||
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
|
||||
mux.HandleFunc("/counter", server.handleCounter)
|
||||
mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
|
||||
|
||||
go httpServer.Serve(server.listener)
|
||||
}
|
||||
|
||||
//Stop the server
|
||||
func (server *Server) Close() {
|
||||
server.listener.Close()
|
||||
}
|
||||
|
||||
//The address the server can be reached at. Pass this into the `ForwardingReporter`.
|
||||
func (server *Server) Address() string {
|
||||
return "http://" + server.listener.Addr().String()
|
||||
}
|
||||
|
||||
//
|
||||
// Streaming Endpoints
|
||||
//
|
||||
|
||||
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||
func (server *Server) readAll(request *http.Request) []byte {
|
||||
defer request.Body.Close()
|
||||
body, _ := ioutil.ReadAll(request.Body)
|
||||
return body
|
||||
}
|
||||
|
||||
func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
|
||||
server.reporters = reporters
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
|
||||
var data struct {
|
||||
Config config.GinkgoConfigType `json:"config"`
|
||||
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||
}
|
||||
|
||||
json.Unmarshal(body, &data)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteWillBegin(data.Config, data.Summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.BeforeSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.AfterSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecDidComplete(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var suiteSummary *types.SuiteSummary
|
||||
json.Unmarshal(body, &suiteSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteDidEnd(suiteSummary)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Synchronization Endpoints
|
||||
//
|
||||
|
||||
func (server *Server) RegisterAlive(node int, alive func() bool) {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
server.alives[node-1] = alive
|
||||
}
|
||||
|
||||
func (server *Server) nodeIsAlive(node int) bool {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
alive := server.alives[node-1]
|
||||
if alive == nil {
|
||||
return true
|
||||
}
|
||||
return alive()
|
||||
}
|
||||
|
||||
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||
if request.Method == "POST" {
|
||||
dec := json.NewDecoder(request.Body)
|
||||
dec.Decode(&(server.beforeSuiteData))
|
||||
} else {
|
||||
beforeSuiteData := server.beforeSuiteData
|
||||
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
|
||||
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
|
||||
}
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(beforeSuiteData)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
|
||||
afterSuiteData := types.RemoteAfterSuiteData{
|
||||
CanRun: true,
|
||||
}
|
||||
for i := 2; i <= server.parallelTotal; i++ {
|
||||
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(afterSuiteData)
|
||||
}
|
||||
|
||||
func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
|
||||
c := spec_iterator.Counter{}
|
||||
server.lock.Lock()
|
||||
c.Index = server.counter
|
||||
server.counter++
|
||||
server.lock.Unlock()
|
||||
|
||||
json.NewEncoder(writer).Encode(c)
|
||||
}
|
||||
|
||||
func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
|
||||
writer.Write([]byte(""))
|
||||
}
|
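Together with the ForwardingReporter, this gives the streaming topology: one Server runs in the ginkgo CLI process, each parallel node installs a ForwardingReporter pointing at server.Address(), and the Server replays everything to the reporters registered on it. A rough sketch of that wiring follows, using only functions visible in this commit; note that the internal/ import path is only usable from within the ginkgo module itself, and the node count and report filename are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/onsi/ginkgo/internal/remote"
	"github.com/onsi/ginkgo/reporters"
)

func main() {
	// One aggregation server; the argument is the number of parallel nodes.
	server, err := remote.NewServer(4)
	if err != nil {
		log.Fatal(err)
	}
	defer server.Close()

	// Every message POSTed by the nodes' ForwardingReporters is replayed
	// to the reporters registered here.
	server.RegisterReporters(reporters.NewJUnitReporter("junit.xml"))
	server.Start()

	// Each parallel node points its ForwardingReporter at this address.
	// (A real runner would now launch the parallel `go test` processes.)
	fmt.Println("remote reporting server listening on", server.Address())
}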
247
vendor/github.com/onsi/ginkgo/internal/spec/spec.go
generated
vendored
Normal file
|
@ -0,0 +1,247 @@
|
|||
package spec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Spec struct {
|
||||
subject leafnodes.SubjectNode
|
||||
focused bool
|
||||
announceProgress bool
|
||||
|
||||
containers []*containernode.ContainerNode
|
||||
|
||||
state types.SpecState
|
||||
runTime time.Duration
|
||||
startTime time.Time
|
||||
failure types.SpecFailure
|
||||
previousFailures bool
|
||||
|
||||
stateMutex *sync.Mutex
|
||||
}
|
||||
|
||||
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
|
||||
spec := &Spec{
|
||||
subject: subject,
|
||||
containers: containers,
|
||||
focused: subject.Flag() == types.FlagTypeFocused,
|
||||
announceProgress: announceProgress,
|
||||
stateMutex: &sync.Mutex{},
|
||||
}
|
||||
|
||||
spec.processFlag(subject.Flag())
|
||||
for i := len(containers) - 1; i >= 0; i-- {
|
||||
spec.processFlag(containers[i].Flag())
|
||||
}
|
||||
|
||||
return spec
|
||||
}
|
||||
|
||||
func (spec *Spec) processFlag(flag types.FlagType) {
|
||||
if flag == types.FlagTypeFocused {
|
||||
spec.focused = true
|
||||
} else if flag == types.FlagTypePending {
|
||||
spec.setState(types.SpecStatePending)
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) Skip() {
|
||||
spec.setState(types.SpecStateSkipped)
|
||||
}
|
||||
|
||||
func (spec *Spec) Failed() bool {
|
||||
return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
|
||||
}
|
||||
|
||||
func (spec *Spec) Passed() bool {
|
||||
return spec.getState() == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (spec *Spec) Flaked() bool {
|
||||
return spec.getState() == types.SpecStatePassed && spec.previousFailures
|
||||
}
|
||||
|
||||
func (spec *Spec) Pending() bool {
|
||||
return spec.getState() == types.SpecStatePending
|
||||
}
|
||||
|
||||
func (spec *Spec) Skipped() bool {
|
||||
return spec.getState() == types.SpecStateSkipped
|
||||
}
|
||||
|
||||
func (spec *Spec) Focused() bool {
|
||||
return spec.focused
|
||||
}
|
||||
|
||||
func (spec *Spec) IsMeasurement() bool {
|
||||
return spec.subject.Type() == types.SpecComponentTypeMeasure
|
||||
}
|
||||
|
||||
func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
|
||||
componentTexts := make([]string, len(spec.containers)+1)
|
||||
componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
|
||||
|
||||
for i, container := range spec.containers {
|
||||
componentTexts[i] = container.Text()
|
||||
componentCodeLocations[i] = container.CodeLocation()
|
||||
}
|
||||
|
||||
componentTexts[len(spec.containers)] = spec.subject.Text()
|
||||
componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
|
||||
|
||||
runTime := spec.runTime
|
||||
if runTime == 0 && !spec.startTime.IsZero() {
|
||||
runTime = time.Since(spec.startTime)
|
||||
}
|
||||
|
||||
return &types.SpecSummary{
|
||||
IsMeasurement: spec.IsMeasurement(),
|
||||
NumberOfSamples: spec.subject.Samples(),
|
||||
ComponentTexts: componentTexts,
|
||||
ComponentCodeLocations: componentCodeLocations,
|
||||
State: spec.getState(),
|
||||
RunTime: runTime,
|
||||
Failure: spec.failure,
|
||||
Measurements: spec.measurementsReport(),
|
||||
SuiteID: suiteID,
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) ConcatenatedString() string {
|
||||
s := ""
|
||||
for _, container := range spec.containers {
|
||||
s += container.Text() + " "
|
||||
}
|
||||
|
||||
return s + spec.subject.Text()
|
||||
}
|
||||
|
||||
func (spec *Spec) Run(writer io.Writer) {
|
||||
if spec.getState() == types.SpecStateFailed {
|
||||
spec.previousFailures = true
|
||||
}
|
||||
|
||||
spec.startTime = time.Now()
|
||||
defer func() {
|
||||
spec.runTime = time.Since(spec.startTime)
|
||||
}()
|
||||
|
||||
for sample := 0; sample < spec.subject.Samples(); sample++ {
|
||||
spec.runSample(sample, writer)
|
||||
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) getState() types.SpecState {
|
||||
spec.stateMutex.Lock()
|
||||
defer spec.stateMutex.Unlock()
|
||||
return spec.state
|
||||
}
|
||||
|
||||
func (spec *Spec) setState(state types.SpecState) {
|
||||
spec.stateMutex.Lock()
|
||||
defer spec.stateMutex.Unlock()
|
||||
spec.state = state
|
||||
}
|
||||
|
||||
func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||
spec.setState(types.SpecStatePassed)
|
||||
spec.failure = types.SpecFailure{}
|
||||
innerMostContainerIndexToUnwind := -1
|
||||
|
||||
defer func() {
|
||||
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||
container := spec.containers[i]
|
||||
for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
|
||||
spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
|
||||
justAfterEachState, justAfterEachFailure := justAfterEach.Run()
|
||||
if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
|
||||
spec.state = justAfterEachState
|
||||
spec.failure = justAfterEachFailure
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||
container := spec.containers[i]
|
||||
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
|
||||
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
|
||||
afterEachState, afterEachFailure := afterEach.Run()
|
||||
if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
|
||||
spec.setState(afterEachState)
|
||||
spec.failure = afterEachFailure
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for i, container := range spec.containers {
|
||||
innerMostContainerIndexToUnwind = i
|
||||
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
|
||||
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
|
||||
s, f := beforeEach.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, container := range spec.containers {
|
||||
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
|
||||
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
|
||||
s, f := justBeforeEach.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
if spec.getState() != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spec.announceSubject(writer, spec.subject)
|
||||
s, f := spec.subject.Run()
|
||||
spec.failure = f
|
||||
spec.setState(s)
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
|
||||
if spec.announceProgress {
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
|
||||
if spec.announceProgress {
|
||||
nodeType := ""
|
||||
switch subject.Type() {
|
||||
case types.SpecComponentTypeIt:
|
||||
nodeType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
nodeType = "Measure"
|
||||
}
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
|
||||
if !spec.IsMeasurement() || spec.Failed() {
|
||||
return map[string]*types.SpecMeasurement{}
|
||||
}
|
||||
|
||||
return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
|
||||
}
|
144
vendor/github.com/onsi/ginkgo/internal/spec/specs.go
generated
vendored
Normal file
|
@ -0,0 +1,144 @@
|
|||
package spec
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Specs struct {
|
||||
specs []*Spec
|
||||
names []string
|
||||
|
||||
hasProgrammaticFocus bool
|
||||
RegexScansFilePath bool
|
||||
}
|
||||
|
||||
func NewSpecs(specs []*Spec) *Specs {
|
||||
names := make([]string, len(specs))
|
||||
for i, spec := range specs {
|
||||
names[i] = spec.ConcatenatedString()
|
||||
}
|
||||
return &Specs{
|
||||
specs: specs,
|
||||
names: names,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) Specs() []*Spec {
|
||||
return e.specs
|
||||
}
|
||||
|
||||
func (e *Specs) HasProgrammaticFocus() bool {
|
||||
return e.hasProgrammaticFocus
|
||||
}
|
||||
|
||||
func (e *Specs) Shuffle(r *rand.Rand) {
|
||||
sort.Sort(e)
|
||||
permutation := r.Perm(len(e.specs))
|
||||
shuffledSpecs := make([]*Spec, len(e.specs))
|
||||
names := make([]string, len(e.specs))
|
||||
for i, j := range permutation {
|
||||
shuffledSpecs[i] = e.specs[j]
|
||||
names[i] = e.names[j]
|
||||
}
|
||||
e.specs = shuffledSpecs
|
||||
e.names = names
|
||||
}
|
||||
|
||||
func (e *Specs) ApplyFocus(description string, focus, skip []string) {
|
||||
if len(focus)+len(skip) == 0 {
|
||||
e.applyProgrammaticFocus()
|
||||
} else {
|
||||
e.applyRegExpFocusAndSkip(description, focus, skip)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyProgrammaticFocus() {
|
||||
e.hasProgrammaticFocus = false
|
||||
for _, spec := range e.specs {
|
||||
if spec.Focused() && !spec.Pending() {
|
||||
e.hasProgrammaticFocus = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if e.hasProgrammaticFocus {
|
||||
for _, spec := range e.specs {
|
||||
if !spec.Focused() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// toMatch returns a []byte to be used by regex matchers. When adding new behaviours to the matching function,
|
||||
// this is the place to which we append.
|
||||
func (e *Specs) toMatch(description string, i int) []byte {
|
||||
if i > len(e.names) {
|
||||
return nil
|
||||
}
|
||||
if e.RegexScansFilePath {
|
||||
return []byte(
|
||||
description + " " +
|
||||
e.names[i] + " " +
|
||||
e.specs[i].subject.CodeLocation().FileName)
|
||||
} else {
|
||||
return []byte(
|
||||
description + " " +
|
||||
e.names[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) {
|
||||
var focusFilter, skipFilter *regexp.Regexp
|
||||
if len(focus) > 0 {
|
||||
focusFilter = regexp.MustCompile(strings.Join(focus, "|"))
|
||||
}
|
||||
if len(skip) > 0 {
|
||||
skipFilter = regexp.MustCompile(strings.Join(skip, "|"))
|
||||
}
|
||||
|
||||
for i, spec := range e.specs {
|
||||
matchesFocus := true
|
||||
matchesSkip := false
|
||||
|
||||
toMatch := e.toMatch(description, i)
|
||||
|
||||
if focusFilter != nil {
|
||||
matchesFocus = focusFilter.Match(toMatch)
|
||||
}
|
||||
|
||||
if skipFilter != nil {
|
||||
matchesSkip = skipFilter.Match(toMatch)
|
||||
}
|
||||
|
||||
if !matchesFocus || matchesSkip {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) SkipMeasurements() {
|
||||
for _, spec := range e.specs {
|
||||
if spec.IsMeasurement() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//sort.Interface
|
||||
|
||||
func (e *Specs) Len() int {
|
||||
return len(e.specs)
|
||||
}
|
||||
|
||||
func (e *Specs) Less(i, j int) bool {
|
||||
return e.names[i] < e.names[j]
|
||||
}
|
||||
|
||||
func (e *Specs) Swap(i, j int) {
|
||||
e.names[i], e.names[j] = e.names[j], e.names[i]
|
||||
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
|
||||
}
|
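applyRegExpFocusAndSkip above ORs multiple --focus / --skip expressions together by joining them with "|" before compiling, and a spec survives only if it matches the focus filter and does not match the skip filter. A distilled, standalone illustration of just that matching rule (the spec names are made up):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	focus := []string{"DoH", "HTTP/3"}
	skip := []string{"slow"}

	// Same construction as applyRegExpFocusAndSkip: join the patterns with "|".
	focusFilter := regexp.MustCompile(strings.Join(focus, "|"))
	skipFilter := regexp.MustCompile(strings.Join(skip, "|"))

	for _, name := range []string{
		"resolver queries over DoH",
		"resolver queries over DoH (slow network)",
		"resolver falls back to TCP",
	} {
		runs := focusFilter.MatchString(name) && !skipFilter.MatchString(name)
		fmt.Printf("%-45s runs: %v\n", name, runs)
	}
	// Only the first spec runs: the second matches the skip filter,
	// and the third never matches the focus filter.
}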
55
vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
package spec_iterator
|
||||
|
||||
func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
|
||||
if length == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// We have more nodes than tests. Trivial case.
|
||||
if parallelTotal >= length {
|
||||
if parallelNode > length {
|
||||
return 0, 0
|
||||
} else {
|
||||
return parallelNode - 1, 1
|
||||
}
|
||||
}
|
||||
|
||||
// This is the minimum number of tests that a node will be required to run
|
||||
minTestsPerNode := length / parallelTotal
|
||||
|
||||
// This is the maximum number of tests that a node will be required to run
|
||||
// The algorithm guarantees that this would be equal to at least the minimum number
|
||||
// and at most one more
|
||||
maxTestsPerNode := minTestsPerNode
|
||||
if length%parallelTotal != 0 {
|
||||
maxTestsPerNode++
|
||||
}
|
||||
|
||||
// Number of nodes that will have to run the maximum number of tests per node
|
||||
numMaxLoadNodes := length % parallelTotal
|
||||
|
||||
// Number of nodes that precede the current node and will have to run the maximum number of tests per node
|
||||
var numPrecedingMaxLoadNodes int
|
||||
if parallelNode > numMaxLoadNodes {
|
||||
numPrecedingMaxLoadNodes = numMaxLoadNodes
|
||||
} else {
|
||||
numPrecedingMaxLoadNodes = parallelNode - 1
|
||||
}
|
||||
|
||||
// Number of nodes that precede the current node and will have to run the minimum number of tests per node
|
||||
var numPrecedingMinLoadNodes int
|
||||
if parallelNode <= numMaxLoadNodes {
|
||||
numPrecedingMinLoadNodes = 0
|
||||
} else {
|
||||
numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
|
||||
}
|
||||
|
||||
// Evaluate the test start index and number of tests to run
|
||||
startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
|
||||
if parallelNode > numMaxLoadNodes {
|
||||
count = minTestsPerNode
|
||||
} else {
|
||||
count = maxTestsPerNode
|
||||
}
|
||||
return
|
||||
}
|
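A concrete example of the split, assuming the call is made from within the ginkgo module (the package lives under internal/): 10 specs across 3 nodes give minTestsPerNode = 3, maxTestsPerNode = 4 and numMaxLoadNodes = 1, so node 1 takes four specs and nodes 2 and 3 take three each.

package main

import (
	"fmt"

	"github.com/onsi/ginkgo/internal/spec_iterator"
)

func main() {
	// 10 specs, 3 parallel nodes.
	for node := 1; node <= 3; node++ {
		start, count := spec_iterator.ParallelizedIndexRange(10, 3, node)
		fmt.Printf("node %d runs specs [%d, %d)\n", node, start, start+count)
	}
	// node 1 runs specs [0, 4)
	// node 2 runs specs [4, 7)
	// node 3 runs specs [7, 10)
}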
59
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
generated
vendored
Normal file
|
@ -0,0 +1,59 @@
|
|||
package spec_iterator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
)
|
||||
|
||||
type ParallelIterator struct {
|
||||
specs []*spec.Spec
|
||||
host string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
|
||||
return &ParallelIterator{
|
||||
specs: specs,
|
||||
host: host,
|
||||
client: &http.Client{},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ParallelIterator) Next() (*spec.Spec, error) {
|
||||
resp, err := s.client.Get(s.host + "/counter")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
var counter Counter
|
||||
err = json.NewDecoder(resp.Body).Decode(&counter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if counter.Index >= len(s.specs) {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
return s.specs[counter.Index], nil
|
||||
}
|
||||
|
||||
func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
|
||||
return len(s.specs)
|
||||
}
|
||||
|
||||
func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
|
||||
return -1, false
|
||||
}
|
||||
|
||||
func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
|
||||
return -1, false
|
||||
}
|
45
vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
|||
package spec_iterator
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
)
|
||||
|
||||
type SerialIterator struct {
|
||||
specs []*spec.Spec
|
||||
index int
|
||||
}
|
||||
|
||||
func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
|
||||
return &SerialIterator{
|
||||
specs: specs,
|
||||
index: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SerialIterator) Next() (*spec.Spec, error) {
|
||||
if s.index >= len(s.specs) {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
spec := s.specs[s.index]
|
||||
s.index += 1
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
|
||||
return len(s.specs)
|
||||
}
|
||||
|
||||
func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
|
||||
return len(s.specs), true
|
||||
}
|
||||
|
||||
func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
|
||||
count := 0
|
||||
for _, s := range s.specs {
|
||||
if !s.Skipped() && !s.Pending() {
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
return count, true
|
||||
}
|
47
vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
|||
package spec_iterator
|
||||
|
||||
import "github.com/onsi/ginkgo/internal/spec"
|
||||
|
||||
type ShardedParallelIterator struct {
|
||||
specs []*spec.Spec
|
||||
index int
|
||||
maxIndex int
|
||||
}
|
||||
|
||||
func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
|
||||
startIndex, count := ParallelizedIndexRange(len(specs), total, node)
|
||||
|
||||
return &ShardedParallelIterator{
|
||||
specs: specs,
|
||||
index: startIndex,
|
||||
maxIndex: startIndex + count,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
|
||||
if s.index >= s.maxIndex {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
spec := s.specs[s.index]
|
||||
s.index += 1
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
|
||||
return len(s.specs)
|
||||
}
|
||||
|
||||
func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
|
||||
return s.maxIndex - s.index, true
|
||||
}
|
||||
|
||||
func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
|
||||
count := 0
|
||||
for i := s.index; i < s.maxIndex; i += 1 {
|
||||
if !s.specs[i].Skipped() && !s.specs[i].Pending() {
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
return count, true
|
||||
}
|
20
vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
package spec_iterator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
)
|
||||
|
||||
var ErrClosed = errors.New("no more specs to run")
|
||||
|
||||
type SpecIterator interface {
|
||||
Next() (*spec.Spec, error)
|
||||
NumberOfSpecsPriorToIteration() int
|
||||
NumberOfSpecsToProcessIfKnown() (int, bool)
|
||||
NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
|
||||
}
|
||||
|
||||
type Counter struct {
|
||||
Index int `json:"index"`
|
||||
}
|
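All three iterators in this commit satisfy this interface, so a runner can be written against it without caring whether specs are served serially, by sharding, or from the remote /counter endpoint. A minimal consumer, sketched under the same internal-module caveat (the drain helper is illustrative, not part of the vendored code):

package main

import (
	"io"

	"github.com/onsi/ginkgo/internal/spec_iterator"
)

// drain runs every spec an iterator hands out until it reports ErrClosed.
// In Ginkgo proper the spec runner owns this loop.
func drain(it spec_iterator.SpecIterator, out io.Writer) error {
	for {
		s, err := it.Next()
		if err == spec_iterator.ErrClosed {
			return nil // normal termination: no more specs to run
		}
		if err != nil {
			return err // e.g. the /counter endpoint was unreachable
		}
		s.Run(out)
	}
}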
36
vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
generated
vendored
Normal file
|
@ -0,0 +1,36 @@
|
|||
package writer
|
||||
|
||||
type FakeGinkgoWriter struct {
|
||||
EventStream []string
|
||||
}
|
||||
|
||||
func NewFake() *FakeGinkgoWriter {
|
||||
return &FakeGinkgoWriter{
|
||||
EventStream: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) AddEvent(event string) {
|
||||
writer.EventStream = append(writer.EventStream, event)
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) Truncate() {
|
||||
writer.EventStream = append(writer.EventStream, "TRUNCATE")
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) DumpOut() {
|
||||
writer.EventStream = append(writer.EventStream, "DUMP")
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
|
||||
writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) Bytes() []byte {
|
||||
writer.EventStream = append(writer.EventStream, "BYTES")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
|
||||
return 0, nil
|
||||
}
|
89
vendor/github.com/onsi/ginkgo/internal/writer/writer.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
|||
package writer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type WriterInterface interface {
|
||||
io.Writer
|
||||
|
||||
Truncate()
|
||||
DumpOut()
|
||||
DumpOutWithHeader(header string)
|
||||
Bytes() []byte
|
||||
}
|
||||
|
||||
type Writer struct {
|
||||
buffer *bytes.Buffer
|
||||
outWriter io.Writer
|
||||
lock *sync.Mutex
|
||||
stream bool
|
||||
redirector io.Writer
|
||||
}
|
||||
|
||||
func New(outWriter io.Writer) *Writer {
|
||||
return &Writer{
|
||||
buffer: &bytes.Buffer{},
|
||||
lock: &sync.Mutex{},
|
||||
outWriter: outWriter,
|
||||
stream: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) AndRedirectTo(writer io.Writer) {
|
||||
w.redirector = writer
|
||||
}
|
||||
|
||||
func (w *Writer) SetStream(stream bool) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
w.stream = stream
|
||||
}
|
||||
|
||||
func (w *Writer) Write(b []byte) (n int, err error) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
n, err = w.buffer.Write(b)
|
||||
if w.redirector != nil {
|
||||
w.redirector.Write(b)
|
||||
}
|
||||
if w.stream {
|
||||
return w.outWriter.Write(b)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *Writer) Truncate() {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
w.buffer.Reset()
|
||||
}
|
||||
|
||||
func (w *Writer) DumpOut() {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
if !w.stream {
|
||||
w.buffer.WriteTo(w.outWriter)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) Bytes() []byte {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
b := w.buffer.Bytes()
|
||||
copied := make([]byte, len(b))
|
||||
copy(copied, b)
|
||||
return copied
|
||||
}
|
||||
|
||||
func (w *Writer) DumpOutWithHeader(header string) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
if !w.stream && w.buffer.Len() > 0 {
|
||||
w.outWriter.Write([]byte(header))
|
||||
w.buffer.WriteTo(w.outWriter)
|
||||
}
|
||||
}
|
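The Writer's two modes are the whole trick: every Write lands in the internal buffer and is additionally streamed to outWriter only while stream is true; with streaming off, the buffer sits there until DumpOut or DumpOutWithHeader flushes it (typically only for failing specs), and Truncate resets it between specs. A short sketch of that lifecycle, again assuming code inside the ginkgo module:

package main

import (
	"os"

	"github.com/onsi/ginkgo/internal/writer"
)

func main() {
	w := writer.New(os.Stdout)

	// stream defaults to true: this line is buffered *and* printed immediately.
	w.Write([]byte("streamed line\n"))

	// Per-spec capture mode: writes are buffered only.
	w.SetStream(false)
	w.Truncate() // start the spec with an empty buffer
	w.Write([]byte("captured line\n"))

	// A failing spec would trigger something like this, replaying the buffer.
	w.DumpOutWithHeader("--- GinkgoWriter output ---\n")
}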
87
vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
generated
vendored
Normal file
|
@ -0,0 +1,87 @@
|
|||
/*
|
||||
Ginkgo's Default Reporter
|
||||
|
||||
A number of command line flags are available to tweak Ginkgo's default output.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
|
||||
*/
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type DefaultReporter struct {
|
||||
config config.DefaultReporterConfigType
|
||||
stenographer stenographer.Stenographer
|
||||
specSummaries []*types.SpecSummary
|
||||
}
|
||||
|
||||
func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
|
||||
return &DefaultReporter{
|
||||
config: config,
|
||||
stenographer: stenographer,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
|
||||
if config.ParallelTotal > 1 {
|
||||
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
|
||||
} else {
|
||||
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||
reporter.stenographer.AnnounceSpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
switch specSummary.State {
|
||||
case types.SpecStatePassed:
|
||||
if specSummary.IsMeasurement {
|
||||
reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct)
|
||||
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
|
||||
reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct)
|
||||
} else {
|
||||
reporter.stenographer.AnnounceSuccessfulSpec(specSummary)
|
||||
if reporter.config.ReportPassed {
|
||||
reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
|
||||
}
|
||||
}
|
||||
case types.SpecStatePending:
|
||||
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
|
||||
case types.SpecStateSkipped:
|
||||
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
|
||||
case types.SpecStateTimedOut:
|
||||
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStatePanicked:
|
||||
reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStateFailed:
|
||||
reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
|
||||
reporter.specSummaries = append(reporter.specSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.stenographer.SummarizeFailures(reporter.specSummaries)
|
||||
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
|
||||
}
|
59
vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
generated
vendored
Normal file
|
@ -0,0 +1,59 @@
|
|||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
//FakeReporter is useful for testing purposes
|
||||
type FakeReporter struct {
|
||||
Config config.GinkgoConfigType
|
||||
|
||||
BeginSummary *types.SuiteSummary
|
||||
BeforeSuiteSummary *types.SetupSummary
|
||||
SpecWillRunSummaries []*types.SpecSummary
|
||||
SpecSummaries []*types.SpecSummary
|
||||
AfterSuiteSummary *types.SetupSummary
|
||||
EndSummary *types.SuiteSummary
|
||||
|
||||
SpecWillRunStub func(specSummary *types.SpecSummary)
|
||||
SpecDidCompleteStub func(specSummary *types.SpecSummary)
|
||||
}
|
||||
|
||||
func NewFakeReporter() *FakeReporter {
|
||||
return &FakeReporter{
|
||||
SpecWillRunSummaries: make([]*types.SpecSummary, 0),
|
||||
SpecSummaries: make([]*types.SpecSummary, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
fakeR.Config = config
|
||||
fakeR.BeginSummary = summary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
fakeR.BeforeSuiteSummary = setupSummary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if fakeR.SpecWillRunStub != nil {
|
||||
fakeR.SpecWillRunStub(specSummary)
|
||||
}
|
||||
fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
if fakeR.SpecDidCompleteStub != nil {
|
||||
fakeR.SpecDidCompleteStub(specSummary)
|
||||
}
|
||||
fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
fakeR.AfterSuiteSummary = setupSummary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
fakeR.EndSummary = summary
|
||||
}
|
178
vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
|
||||
JUnit XML Reporter for Ginkgo
|
||||
|
||||
For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
|
||||
|
||||
*/
|
||||
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type JUnitTestSuite struct {
|
||||
XMLName xml.Name `xml:"testsuite"`
|
||||
TestCases []JUnitTestCase `xml:"testcase"`
|
||||
Name string `xml:"name,attr"`
|
||||
Tests int `xml:"tests,attr"`
|
||||
Failures int `xml:"failures,attr"`
|
||||
Errors int `xml:"errors,attr"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
}
|
||||
|
||||
type JUnitTestCase struct {
|
||||
Name string `xml:"name,attr"`
|
||||
ClassName string `xml:"classname,attr"`
|
||||
FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
|
||||
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
SystemOut string `xml:"system-out,omitempty"`
|
||||
}
|
||||
|
||||
type JUnitFailureMessage struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Message string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type JUnitSkipped struct {
|
||||
Message string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type JUnitReporter struct {
|
||||
suite JUnitTestSuite
|
||||
filename string
|
||||
testSuiteName string
|
||||
ReporterConfig config.DefaultReporterConfigType
|
||||
}
|
||||
|
||||
//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed-in filename.
|
||||
func NewJUnitReporter(filename string) *JUnitReporter {
|
||||
return &JUnitReporter{
|
||||
filename: filename,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.suite = JUnitTestSuite{
|
||||
Name: summary.SuiteDescription,
|
||||
TestCases: []JUnitTestCase{},
|
||||
}
|
||||
reporter.testSuiteName = summary.SuiteDescription
|
||||
reporter.ReporterConfig = config.DefaultReporterConfig
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("BeforeSuite", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("AfterSuite", setupSummary)
|
||||
}
|
||||
|
||||
func failureMessage(failure types.SpecFailure) string {
|
||||
return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
testCase := JUnitTestCase{
|
||||
Name: name,
|
||||
ClassName: reporter.testSuiteName,
|
||||
}
|
||||
|
||||
testCase.FailureMessage = &JUnitFailureMessage{
|
||||
Type: reporter.failureTypeForState(setupSummary.State),
|
||||
Message: failureMessage(setupSummary.Failure),
|
||||
}
|
||||
testCase.SystemOut = setupSummary.CapturedOutput
|
||||
testCase.Time = setupSummary.RunTime.Seconds()
|
||||
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
testCase := JUnitTestCase{
|
||||
Name: strings.Join(specSummary.ComponentTexts[1:], " "),
|
||||
ClassName: reporter.testSuiteName,
|
||||
}
|
||||
if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
|
||||
testCase.SystemOut = specSummary.CapturedOutput
|
||||
}
|
||||
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
|
||||
testCase.FailureMessage = &JUnitFailureMessage{
|
||||
Type: reporter.failureTypeForState(specSummary.State),
|
||||
Message: failureMessage(specSummary.Failure),
|
||||
}
|
||||
if specSummary.State == types.SpecStatePanicked {
|
||||
testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s",
|
||||
specSummary.Failure.ForwardedPanic,
|
||||
specSummary.Failure.Location.FullStackTrace)
|
||||
}
|
||||
testCase.SystemOut = specSummary.CapturedOutput
|
||||
}
|
||||
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
|
||||
testCase.Skipped = &JUnitSkipped{}
|
||||
if specSummary.Failure.Message != "" {
|
||||
testCase.Skipped.Message = failureMessage(specSummary.Failure)
|
||||
}
|
||||
}
|
||||
testCase.Time = specSummary.RunTime.Seconds()
|
||||
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
|
||||
reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
|
||||
reporter.suite.Failures = summary.NumberOfFailedSpecs
|
||||
reporter.suite.Errors = 0
|
||||
if reporter.ReporterConfig.ReportFile != "" {
|
||||
reporter.filename = reporter.ReporterConfig.ReportFile
|
||||
fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename)
|
||||
}
|
||||
filePath, _ := filepath.Abs(reporter.filename)
|
||||
dirPath := filepath.Dir(filePath)
|
||||
err := os.MkdirAll(dirPath, os.ModePerm)
|
||||
if err != nil {
|
||||
fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error())
|
||||
}
|
||||
file, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error())
|
||||
}
|
||||
defer file.Close()
|
||||
file.WriteString(xml.Header)
|
||||
encoder := xml.NewEncoder(file)
|
||||
encoder.Indent(" ", " ")
|
||||
err = encoder.Encode(reporter.suite)
|
||||
if err == nil {
|
||||
fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "\nFailed to generate JUnit report data:\n\t%s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
|
||||
switch state {
|
||||
case types.SpecStateFailed:
|
||||
return "Failure"
|
||||
case types.SpecStateTimedOut:
|
||||
return "Timeout"
|
||||
case types.SpecStatePanicked:
|
||||
return "Panic"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
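Unlike the internal packages above, the reporters package is importable from test suites, and the JUnit reporter is the one most suites wire in by hand. The conventional Ginkgo v1 bootstrap looks like the sketch below; the suite name and report path are made-up examples, not anything specific to this repository.

package mysuite_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	. "github.com/onsi/gomega"
)

func TestMySuite(t *testing.T) {
	RegisterFailHandler(Fail)
	// Console output as usual, plus a JUnit XML file for CI to pick up.
	junit := reporters.NewJUnitReporter("reports/mysuite_junit.xml")
	RunSpecsWithDefaultAndCustomReporters(t, "MySuite Suite", []Reporter{junit})
}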
15
vendor/github.com/onsi/ginkgo/reporters/reporter.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Reporter interface {
|
||||
SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
|
||||
BeforeSuiteDidRun(setupSummary *types.SetupSummary)
|
||||
SpecWillRun(specSummary *types.SpecSummary)
|
||||
SpecDidComplete(specSummary *types.SpecSummary)
|
||||
AfterSuiteDidRun(setupSummary *types.SetupSummary)
|
||||
SpecSuiteDidEnd(summary *types.SuiteSummary)
|
||||
}
|
64
vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
|||
package stenographer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
|
||||
var out string
|
||||
|
||||
if len(args) > 0 {
|
||||
out = fmt.Sprintf(format, args...)
|
||||
} else {
|
||||
out = format
|
||||
}
|
||||
|
||||
if s.color {
|
||||
return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
|
||||
} else {
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
|
||||
fmt.Fprintln(s.w, text)
|
||||
fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text)))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printNewLine() {
|
||||
fmt.Fprintln(s.w, "")
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printDelimiter() {
|
||||
fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
|
||||
fmt.Fprint(s.w, s.indent(indentation, format, args...))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
|
||||
fmt.Fprintln(s.w, s.indent(indentation, format, args...))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
|
||||
var text string
|
||||
|
||||
if len(args) > 0 {
|
||||
text = fmt.Sprintf(format, args...)
|
||||
} else {
|
||||
text = format
|
||||
}
|
||||
|
||||
stringArray := strings.Split(text, "\n")
|
||||
padding := ""
|
||||
if indentation >= 0 {
|
||||
padding = strings.Repeat(" ", indentation)
|
||||
}
|
||||
for i, s := range stringArray {
|
||||
stringArray[i] = fmt.Sprintf("%s%s", padding, s)
|
||||
}
|
||||
|
||||
return strings.Join(stringArray, "\n")
|
||||
}
|
142
vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
generated
vendored
Normal file
|
@ -0,0 +1,142 @@
|
|||
package stenographer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
|
||||
return FakeStenographerCall{
|
||||
Method: method,
|
||||
Args: args,
|
||||
}
|
||||
}
|
||||
|
||||
type FakeStenographer struct {
|
||||
calls []FakeStenographerCall
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
type FakeStenographerCall struct {
|
||||
Method string
|
||||
Args []interface{}
|
||||
}
|
||||
|
||||
func NewFakeStenographer() *FakeStenographer {
|
||||
stenographer := &FakeStenographer{
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
stenographer.Reset()
|
||||
return stenographer
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
return stenographer.calls
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) Reset() {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
stenographer.calls = make([]FakeStenographerCall, 0)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
results := make([]FakeStenographerCall, 0)
|
||||
for _, call := range stenographer.calls {
|
||||
if call.Method == method {
|
||||
results = append(results, call)
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
|
||||
stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||
stenographer.registerCall("AnnounceSpecWillRun", spec)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
|
||||
}
|
||||
func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
|
||||
stenographer.registerCall("AnnounceCapturedOutput", output)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
|
||||
stenographer.registerCall("AnnounceSuccessfulSpec", spec)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||
stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||
stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||
stenographer.registerCall("SummarizeFailures", summaries)
|
||||
}
|
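FakeStenographer simply records every call so that reporter tests can assert on what was announced. A minimal usage sketch (assuming the upstream import path; the numbers are made up):

```go
package stenographer_test

import (
	"fmt"

	"github.com/onsi/ginkgo/reporters/stenographer"
)

func ExampleFakeStenographer() {
	fake := stenographer.NewFakeStenographer()

	// Code under test would normally receive the fake through the
	// Stenographer interface; here we call it directly.
	fake.AnnounceAggregatedParallelRun(4, true)
	fake.AnnounceTotalNumberOfSpecs(12, true)

	// Every call is recorded and can be filtered by method name.
	calls := fake.CallsTo("AnnounceTotalNumberOfSpecs")
	fmt.Println(len(calls), calls[0].Args[0])
	// Output: 1 12
}
```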
572
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
Normal file
|
@ -0,0 +1,572 @@
|
|||
/*
|
||||
The stenographer is used by Ginkgo's reporters to generate output.
|
||||
|
||||
Move along, nothing to see here.
|
||||
*/
|
||||
|
||||
package stenographer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const defaultStyle = "\x1b[0m"
|
||||
const boldStyle = "\x1b[1m"
|
||||
const redColor = "\x1b[91m"
|
||||
const greenColor = "\x1b[32m"
|
||||
const yellowColor = "\x1b[33m"
|
||||
const cyanColor = "\x1b[36m"
|
||||
const grayColor = "\x1b[90m"
|
||||
const lightGrayColor = "\x1b[37m"
|
||||
|
||||
type cursorStateType int
|
||||
|
||||
const (
|
||||
cursorStateTop cursorStateType = iota
|
||||
cursorStateStreaming
|
||||
cursorStateMidBlock
|
||||
cursorStateEndBlock
|
||||
)
|
||||
|
||||
type Stenographer interface {
|
||||
AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
|
||||
AnnounceAggregatedParallelRun(nodes int, succinct bool)
|
||||
AnnounceParallelRun(node int, nodes int, succinct bool)
|
||||
AnnounceTotalNumberOfSpecs(total int, succinct bool)
|
||||
AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
|
||||
AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
|
||||
|
||||
AnnounceSpecWillRun(spec *types.SpecSummary)
|
||||
AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||
AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||
|
||||
AnnounceCapturedOutput(output string)
|
||||
|
||||
AnnounceSuccessfulSpec(spec *types.SpecSummary)
|
||||
AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool)
|
||||
AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool)
|
||||
|
||||
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
|
||||
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
|
||||
AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
|
||||
SummarizeFailures(summaries []*types.SpecSummary)
|
||||
}
|
||||
|
||||
func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
|
||||
denoter := "•"
|
||||
if runtime.GOOS == "windows" {
|
||||
denoter = "+"
|
||||
}
|
||||
return &consoleStenographer{
|
||||
color: color,
|
||||
denoter: denoter,
|
||||
cursorState: cursorStateTop,
|
||||
enableFlakes: enableFlakes,
|
||||
w: writer,
|
||||
}
|
||||
}
|
||||
|
||||
type consoleStenographer struct {
|
||||
color bool
|
||||
denoter string
|
||||
cursorState cursorStateType
|
||||
enableFlakes bool
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
var alternatingColors = []string{defaultStyle, grayColor}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
|
||||
return
|
||||
}
|
||||
s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
|
||||
s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
|
||||
if randomizingAll {
|
||||
s.print(0, " - Will randomize all specs")
|
||||
}
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- node #%d ", node)
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Parallel test node %s/%s.",
|
||||
s.colorize(boldStyle, "%d", node),
|
||||
s.colorize(boldStyle, "%d", nodes),
|
||||
)
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d nodes ", nodes)
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Running in parallel across %s nodes",
|
||||
s.colorize(boldStyle, "%d", nodes),
|
||||
)
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d/%d specs ", specsToRun, total)
|
||||
s.stream()
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Will run %s of %s specs",
|
||||
s.colorize(boldStyle, "%d", specsToRun),
|
||||
s.colorize(boldStyle, "%d", total),
|
||||
)
|
||||
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d specs ", total)
|
||||
s.stream()
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Will run %s specs",
|
||||
s.colorize(boldStyle, "%d", total),
|
||||
)
|
||||
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||
if succinct && summary.SuiteSucceeded {
|
||||
s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
|
||||
return
|
||||
}
|
||||
s.printNewLine()
|
||||
color := greenColor
|
||||
if !summary.SuiteSucceeded {
|
||||
color = redColor
|
||||
}
|
||||
s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
|
||||
|
||||
status := ""
|
||||
if summary.SuiteSucceeded {
|
||||
status = s.colorize(boldStyle+greenColor, "SUCCESS!")
|
||||
} else {
|
||||
status = s.colorize(boldStyle+redColor, "FAIL!")
|
||||
}
|
||||
|
||||
flakes := ""
|
||||
if s.enableFlakes {
|
||||
flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs)
|
||||
}
|
||||
|
||||
s.print(0,
|
||||
"%s -- %s | %s | %s | %s\n",
|
||||
status,
|
||||
s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
|
||||
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
|
||||
s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
|
||||
s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||
s.startBlock()
|
||||
for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
|
||||
s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
|
||||
}
|
||||
|
||||
indentation := 0
|
||||
if len(spec.ComponentTexts) > 2 {
|
||||
indentation = 1
|
||||
s.printNewLine()
|
||||
}
|
||||
index := len(spec.ComponentTexts) - 1
|
||||
s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
|
||||
s.printNewLine()
|
||||
s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
|
||||
s.printNewLine()
|
||||
s.midBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.startBlock()
|
||||
var message string
|
||||
switch summary.State {
|
||||
case types.SpecStateFailed:
|
||||
message = "Failure"
|
||||
case types.SpecStatePanicked:
|
||||
message = "Panic"
|
||||
case types.SpecStateTimedOut:
|
||||
message = "Timeout"
|
||||
}
|
||||
|
||||
s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
|
||||
|
||||
s.printNewLine()
|
||||
s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
|
||||
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
|
||||
if output == "" {
|
||||
return
|
||||
}
|
||||
|
||||
s.startBlock()
|
||||
s.println(0, output)
|
||||
s.midBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
|
||||
s.print(0, s.colorize(greenColor, s.denoter))
|
||||
s.stream()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
|
||||
"",
|
||||
spec,
|
||||
succinct,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
|
||||
s.measurementReport(spec, succinct),
|
||||
spec,
|
||||
succinct,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||
if noisy {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(yellowColor, "P [PENDING]"),
|
||||
"",
|
||||
spec,
|
||||
false,
|
||||
)
|
||||
} else {
|
||||
s.print(0, s.colorize(yellowColor, "P"))
|
||||
s.stream()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
|
||||
if succinct || spec.Failure == (types.SpecFailure{}) {
|
||||
s.print(0, s.colorize(cyanColor, "S"))
|
||||
s.stream()
|
||||
} else {
|
||||
s.startBlock()
|
||||
s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||
|
||||
s.printNewLine()
|
||||
s.printSkip(indentation, spec.Failure)
|
||||
s.endBlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||
failingSpecs := []*types.SpecSummary{}
|
||||
|
||||
for _, summary := range summaries {
|
||||
if summary.HasFailureState() {
|
||||
failingSpecs = append(failingSpecs, summary)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failingSpecs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
s.printNewLine()
|
||||
s.printNewLine()
|
||||
plural := "s"
|
||||
if len(failingSpecs) == 1 {
|
||||
plural = ""
|
||||
}
|
||||
s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
|
||||
for _, summary := range failingSpecs {
|
||||
s.printNewLine()
|
||||
if summary.HasFailureState() {
|
||||
if summary.TimedOut() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
|
||||
} else if summary.Panicked() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
|
||||
} else if summary.Failed() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
|
||||
}
|
||||
s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
|
||||
s.printNewLine()
|
||||
s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) startBlock() {
|
||||
if s.cursorState == cursorStateStreaming {
|
||||
s.printNewLine()
|
||||
s.printDelimiter()
|
||||
} else if s.cursorState == cursorStateMidBlock {
|
||||
s.printNewLine()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) midBlock() {
|
||||
s.cursorState = cursorStateMidBlock
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) endBlock() {
|
||||
s.printDelimiter()
|
||||
s.cursorState = cursorStateEndBlock
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) stream() {
|
||||
s.cursorState = cursorStateStreaming
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
|
||||
s.startBlock()
|
||||
s.println(0, header)
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
|
||||
|
||||
if message != "" {
|
||||
s.printNewLine()
|
||||
s.println(indentation, message)
|
||||
}
|
||||
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.startBlock()
|
||||
s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||
|
||||
s.printNewLine()
|
||||
s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
|
||||
switch failedComponentType {
|
||||
case types.SpecComponentTypeBeforeSuite:
|
||||
return " in Suite Setup (BeforeSuite)"
|
||||
case types.SpecComponentTypeAfterSuite:
|
||||
return " in Suite Teardown (AfterSuite)"
|
||||
case types.SpecComponentTypeBeforeEach:
|
||||
return " in Spec Setup (BeforeEach)"
|
||||
case types.SpecComponentTypeJustBeforeEach:
|
||||
return " in Spec Setup (JustBeforeEach)"
|
||||
case types.SpecComponentTypeAfterEach:
|
||||
return " in Spec Teardown (AfterEach)"
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
|
||||
s.println(indentation, s.colorize(cyanColor, spec.Message))
|
||||
s.printNewLine()
|
||||
s.println(indentation, spec.Location.String())
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
|
||||
if state == types.SpecStatePanicked {
|
||||
s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
|
||||
s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
|
||||
s.println(indentation, failure.Location.String())
|
||||
s.printNewLine()
|
||||
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||
s.println(indentation, failure.Location.FullStackTrace)
|
||||
} else {
|
||||
s.println(indentation, s.colorize(redColor, failure.Message))
|
||||
s.printNewLine()
|
||||
s.println(indentation, failure.Location.String())
|
||||
if fullTrace {
|
||||
s.printNewLine()
|
||||
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||
s.println(indentation, failure.Location.FullStackTrace)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||
startIndex := 1
|
||||
indentation := 0
|
||||
|
||||
if len(componentTexts) == 1 {
|
||||
startIndex = 0
|
||||
}
|
||||
|
||||
for i := startIndex; i < len(componentTexts); i++ {
|
||||
if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
|
||||
color := redColor
|
||||
if state == types.SpecStateSkipped {
|
||||
color = cyanColor
|
||||
}
|
||||
blockType := ""
|
||||
switch failedComponentType {
|
||||
case types.SpecComponentTypeBeforeSuite:
|
||||
blockType = "BeforeSuite"
|
||||
case types.SpecComponentTypeAfterSuite:
|
||||
blockType = "AfterSuite"
|
||||
case types.SpecComponentTypeBeforeEach:
|
||||
blockType = "BeforeEach"
|
||||
case types.SpecComponentTypeJustBeforeEach:
|
||||
blockType = "JustBeforeEach"
|
||||
case types.SpecComponentTypeAfterEach:
|
||||
blockType = "AfterEach"
|
||||
case types.SpecComponentTypeIt:
|
||||
blockType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
blockType = "Measurement"
|
||||
}
|
||||
if succinct {
|
||||
s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
|
||||
} else {
|
||||
s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
|
||||
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||
}
|
||||
} else {
|
||||
if succinct {
|
||||
s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
|
||||
} else {
|
||||
s.println(indentation, componentTexts[i])
|
||||
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||
}
|
||||
}
|
||||
indentation++
|
||||
}
|
||||
|
||||
return indentation
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||
indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
|
||||
|
||||
if succinct {
|
||||
if len(componentTexts) > 0 {
|
||||
s.printNewLine()
|
||||
s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
|
||||
}
|
||||
s.printNewLine()
|
||||
indentation = 1
|
||||
} else {
|
||||
indentation--
|
||||
}
|
||||
|
||||
return indentation
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
|
||||
orderedKeys := make([]string, len(measurements))
|
||||
for key, measurement := range measurements {
|
||||
orderedKeys[measurement.Order] = key
|
||||
}
|
||||
return orderedKeys
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
|
||||
if len(spec.Measurements) == 0 {
|
||||
return "Found no measurements"
|
||||
}
|
||||
|
||||
message := []string{}
|
||||
orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
|
||||
|
||||
if succinct {
|
||||
message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
|
||||
for _, key := range orderedKeys {
|
||||
measurement := spec.Measurements[key]
|
||||
message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
|
||||
s.colorize(boldStyle, "%s", measurement.Name),
|
||||
measurement.SmallestLabel,
|
||||
s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
|
||||
measurement.Units,
|
||||
measurement.AverageLabel,
|
||||
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
|
||||
measurement.Units,
|
||||
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
|
||||
measurement.Units,
|
||||
measurement.LargestLabel,
|
||||
s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
|
||||
measurement.Units,
|
||||
))
|
||||
}
|
||||
} else {
|
||||
message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
|
||||
for _, key := range orderedKeys {
|
||||
measurement := spec.Measurements[key]
|
||||
info := ""
|
||||
if measurement.Info != nil {
|
||||
message = append(message, fmt.Sprintf("%v", measurement.Info))
|
||||
}
|
||||
|
||||
message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
|
||||
s.colorize(boldStyle, "%s", measurement.Name),
|
||||
info,
|
||||
measurement.SmallestLabel,
|
||||
s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
|
||||
measurement.Units,
|
||||
measurement.LargestLabel,
|
||||
s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
|
||||
measurement.Units,
|
||||
measurement.AverageLabel,
|
||||
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
|
||||
measurement.Units,
|
||||
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
|
||||
measurement.Units,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(message, "\n")
|
||||
}
|
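Putting the pieces together, a console stenographer can be driven directly; a small sketch (illustrative only, the suite name and seed are made up):

```go
package main

import (
	"os"

	"github.com/onsi/ginkgo/reporters/stenographer"
)

func main() {
	// color=true, enableFlakes=false, writing to stdout.
	s := stenographer.New(true, false, os.Stdout)

	// Announce a suite the way Ginkgo's default reporter does at startup.
	s.AnnounceSuite("Example Suite", 12345, true, false)
	s.AnnounceTotalNumberOfSpecs(3, false)
}
```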
21
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
43
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
generated
vendored
Normal file
|
@ -0,0 +1,43 @@
|
|||
# go-colorable
|
||||
|
||||
Colorable writer for windows.
|
||||
|
||||
For example, most logger packages don't show colors on Windows. (I know we can do it with ansicon, but I'd rather not.)
|
||||
This package makes it possible to handle ANSI color escape sequences on Windows.
|
||||
|
||||
## Too Bad!
|
||||
|
||||

|
||||
|
||||
|
||||
## So Good!
|
||||
|
||||

|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
|
||||
logrus.SetOutput(colorable.NewColorableStdout())
|
||||
|
||||
logrus.Info("succeeded")
|
||||
logrus.Warn("not correct")
|
||||
logrus.Error("something error")
|
||||
logrus.Fatal("panic")
|
||||
```
|
||||
|
||||
You can compile the above code on non-Windows OSes as well.
|
||||
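It also works without a logging library; a minimal sketch with a hand-written escape sequence:

```go
package main

import (
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	out := colorable.NewColorableStdout()
	// The ANSI red escape sequence is translated to console attributes
	// on Windows and passed through unchanged everywhere else.
	fmt.Fprintf(out, "\x1b[31mthis is red\x1b[0m\n")
}
```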
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go get github.com/mattn/go-colorable
|
||||
```
|
||||
|
||||
# License
|
||||
|
||||
MIT
|
||||
|
||||
# Author
|
||||
|
||||
Yasuhiro Matsumoto (a.k.a mattn)
|
24
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
// +build !windows
|
||||
|
||||
package colorable
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
func NewColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
func NewColorableStdout() io.Writer {
|
||||
return os.Stdout
|
||||
}
|
||||
|
||||
func NewColorableStderr() io.Writer {
|
||||
return os.Stderr
|
||||
}
|
783
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,783 @@
|
|||
package colorable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty"
|
||||
)
|
||||
|
||||
const (
|
||||
foregroundBlue = 0x1
|
||||
foregroundGreen = 0x2
|
||||
foregroundRed = 0x4
|
||||
foregroundIntensity = 0x8
|
||||
foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
|
||||
backgroundBlue = 0x10
|
||||
backgroundGreen = 0x20
|
||||
backgroundRed = 0x40
|
||||
backgroundIntensity = 0x80
|
||||
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
|
||||
)
|
||||
|
||||
type wchar uint16
|
||||
type short int16
|
||||
type dword uint32
|
||||
type word uint16
|
||||
|
||||
type coord struct {
|
||||
x short
|
||||
y short
|
||||
}
|
||||
|
||||
type smallRect struct {
|
||||
left short
|
||||
top short
|
||||
right short
|
||||
bottom short
|
||||
}
|
||||
|
||||
type consoleScreenBufferInfo struct {
|
||||
size coord
|
||||
cursorPosition coord
|
||||
attributes word
|
||||
window smallRect
|
||||
maximumWindowSize coord
|
||||
}
|
||||
|
||||
var (
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
||||
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
|
||||
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
|
||||
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
|
||||
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
|
||||
)
|
||||
|
||||
type Writer struct {
|
||||
out io.Writer
|
||||
handle syscall.Handle
|
||||
lastbuf bytes.Buffer
|
||||
oldattr word
|
||||
}
|
||||
|
||||
func NewColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
if isatty.IsTerminal(file.Fd()) {
|
||||
var csbi consoleScreenBufferInfo
|
||||
handle := syscall.Handle(file.Fd())
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
|
||||
} else {
|
||||
return file
|
||||
}
|
||||
}
|
||||
|
||||
func NewColorableStdout() io.Writer {
|
||||
return NewColorable(os.Stdout)
|
||||
}
|
||||
|
||||
func NewColorableStderr() io.Writer {
|
||||
return NewColorable(os.Stderr)
|
||||
}
|
||||
|
||||
var color256 = map[int]int{
|
||||
0: 0x000000,
|
||||
1: 0x800000,
|
||||
2: 0x008000,
|
||||
3: 0x808000,
|
||||
4: 0x000080,
|
||||
5: 0x800080,
|
||||
6: 0x008080,
|
||||
7: 0xc0c0c0,
|
||||
8: 0x808080,
|
||||
9: 0xff0000,
|
||||
10: 0x00ff00,
|
||||
11: 0xffff00,
|
||||
12: 0x0000ff,
|
||||
13: 0xff00ff,
|
||||
14: 0x00ffff,
|
||||
15: 0xffffff,
|
||||
16: 0x000000,
|
||||
17: 0x00005f,
|
||||
18: 0x000087,
|
||||
19: 0x0000af,
|
||||
20: 0x0000d7,
|
||||
21: 0x0000ff,
|
||||
22: 0x005f00,
|
||||
23: 0x005f5f,
|
||||
24: 0x005f87,
|
||||
25: 0x005faf,
|
||||
26: 0x005fd7,
|
||||
27: 0x005fff,
|
||||
28: 0x008700,
|
||||
29: 0x00875f,
|
||||
30: 0x008787,
|
||||
31: 0x0087af,
|
||||
32: 0x0087d7,
|
||||
33: 0x0087ff,
|
||||
34: 0x00af00,
|
||||
35: 0x00af5f,
|
||||
36: 0x00af87,
|
||||
37: 0x00afaf,
|
||||
38: 0x00afd7,
|
||||
39: 0x00afff,
|
||||
40: 0x00d700,
|
||||
41: 0x00d75f,
|
||||
42: 0x00d787,
|
||||
43: 0x00d7af,
|
||||
44: 0x00d7d7,
|
||||
45: 0x00d7ff,
|
||||
46: 0x00ff00,
|
||||
47: 0x00ff5f,
|
||||
48: 0x00ff87,
|
||||
49: 0x00ffaf,
|
||||
50: 0x00ffd7,
|
||||
51: 0x00ffff,
|
||||
52: 0x5f0000,
|
||||
53: 0x5f005f,
|
||||
54: 0x5f0087,
|
||||
55: 0x5f00af,
|
||||
56: 0x5f00d7,
|
||||
57: 0x5f00ff,
|
||||
58: 0x5f5f00,
|
||||
59: 0x5f5f5f,
|
||||
60: 0x5f5f87,
|
||||
61: 0x5f5faf,
|
||||
62: 0x5f5fd7,
|
||||
63: 0x5f5fff,
|
||||
64: 0x5f8700,
|
||||
65: 0x5f875f,
|
||||
66: 0x5f8787,
|
||||
67: 0x5f87af,
|
||||
68: 0x5f87d7,
|
||||
69: 0x5f87ff,
|
||||
70: 0x5faf00,
|
||||
71: 0x5faf5f,
|
||||
72: 0x5faf87,
|
||||
73: 0x5fafaf,
|
||||
74: 0x5fafd7,
|
||||
75: 0x5fafff,
|
||||
76: 0x5fd700,
|
||||
77: 0x5fd75f,
|
||||
78: 0x5fd787,
|
||||
79: 0x5fd7af,
|
||||
80: 0x5fd7d7,
|
||||
81: 0x5fd7ff,
|
||||
82: 0x5fff00,
|
||||
83: 0x5fff5f,
|
||||
84: 0x5fff87,
|
||||
85: 0x5fffaf,
|
||||
86: 0x5fffd7,
|
||||
87: 0x5fffff,
|
||||
88: 0x870000,
|
||||
89: 0x87005f,
|
||||
90: 0x870087,
|
||||
91: 0x8700af,
|
||||
92: 0x8700d7,
|
||||
93: 0x8700ff,
|
||||
94: 0x875f00,
|
||||
95: 0x875f5f,
|
||||
96: 0x875f87,
|
||||
97: 0x875faf,
|
||||
98: 0x875fd7,
|
||||
99: 0x875fff,
|
||||
100: 0x878700,
|
||||
101: 0x87875f,
|
||||
102: 0x878787,
|
||||
103: 0x8787af,
|
||||
104: 0x8787d7,
|
||||
105: 0x8787ff,
|
||||
106: 0x87af00,
|
||||
107: 0x87af5f,
|
||||
108: 0x87af87,
|
||||
109: 0x87afaf,
|
||||
110: 0x87afd7,
|
||||
111: 0x87afff,
|
||||
112: 0x87d700,
|
||||
113: 0x87d75f,
|
||||
114: 0x87d787,
|
||||
115: 0x87d7af,
|
||||
116: 0x87d7d7,
|
||||
117: 0x87d7ff,
|
||||
118: 0x87ff00,
|
||||
119: 0x87ff5f,
|
||||
120: 0x87ff87,
|
||||
121: 0x87ffaf,
|
||||
122: 0x87ffd7,
|
||||
123: 0x87ffff,
|
||||
124: 0xaf0000,
|
||||
125: 0xaf005f,
|
||||
126: 0xaf0087,
|
||||
127: 0xaf00af,
|
||||
128: 0xaf00d7,
|
||||
129: 0xaf00ff,
|
||||
130: 0xaf5f00,
|
||||
131: 0xaf5f5f,
|
||||
132: 0xaf5f87,
|
||||
133: 0xaf5faf,
|
||||
134: 0xaf5fd7,
|
||||
135: 0xaf5fff,
|
||||
136: 0xaf8700,
|
||||
137: 0xaf875f,
|
||||
138: 0xaf8787,
|
||||
139: 0xaf87af,
|
||||
140: 0xaf87d7,
|
||||
141: 0xaf87ff,
|
||||
142: 0xafaf00,
|
||||
143: 0xafaf5f,
|
||||
144: 0xafaf87,
|
||||
145: 0xafafaf,
|
||||
146: 0xafafd7,
|
||||
147: 0xafafff,
|
||||
148: 0xafd700,
|
||||
149: 0xafd75f,
|
||||
150: 0xafd787,
|
||||
151: 0xafd7af,
|
||||
152: 0xafd7d7,
|
||||
153: 0xafd7ff,
|
||||
154: 0xafff00,
|
||||
155: 0xafff5f,
|
||||
156: 0xafff87,
|
||||
157: 0xafffaf,
|
||||
158: 0xafffd7,
|
||||
159: 0xafffff,
|
||||
160: 0xd70000,
|
||||
161: 0xd7005f,
|
||||
162: 0xd70087,
|
||||
163: 0xd700af,
|
||||
164: 0xd700d7,
|
||||
165: 0xd700ff,
|
||||
166: 0xd75f00,
|
||||
167: 0xd75f5f,
|
||||
168: 0xd75f87,
|
||||
169: 0xd75faf,
|
||||
170: 0xd75fd7,
|
||||
171: 0xd75fff,
|
||||
172: 0xd78700,
|
||||
173: 0xd7875f,
|
||||
174: 0xd78787,
|
||||
175: 0xd787af,
|
||||
176: 0xd787d7,
|
||||
177: 0xd787ff,
|
||||
178: 0xd7af00,
|
||||
179: 0xd7af5f,
|
||||
180: 0xd7af87,
|
||||
181: 0xd7afaf,
|
||||
182: 0xd7afd7,
|
||||
183: 0xd7afff,
|
||||
184: 0xd7d700,
|
||||
185: 0xd7d75f,
|
||||
186: 0xd7d787,
|
||||
187: 0xd7d7af,
|
||||
188: 0xd7d7d7,
|
||||
189: 0xd7d7ff,
|
||||
190: 0xd7ff00,
|
||||
191: 0xd7ff5f,
|
||||
192: 0xd7ff87,
|
||||
193: 0xd7ffaf,
|
||||
194: 0xd7ffd7,
|
||||
195: 0xd7ffff,
|
||||
196: 0xff0000,
|
||||
197: 0xff005f,
|
||||
198: 0xff0087,
|
||||
199: 0xff00af,
|
||||
200: 0xff00d7,
|
||||
201: 0xff00ff,
|
||||
202: 0xff5f00,
|
||||
203: 0xff5f5f,
|
||||
204: 0xff5f87,
|
||||
205: 0xff5faf,
|
||||
206: 0xff5fd7,
|
||||
207: 0xff5fff,
|
||||
208: 0xff8700,
|
||||
209: 0xff875f,
|
||||
210: 0xff8787,
|
||||
211: 0xff87af,
|
||||
212: 0xff87d7,
|
||||
213: 0xff87ff,
|
||||
214: 0xffaf00,
|
||||
215: 0xffaf5f,
|
||||
216: 0xffaf87,
|
||||
217: 0xffafaf,
|
||||
218: 0xffafd7,
|
||||
219: 0xffafff,
|
||||
220: 0xffd700,
|
||||
221: 0xffd75f,
|
||||
222: 0xffd787,
|
||||
223: 0xffd7af,
|
||||
224: 0xffd7d7,
|
||||
225: 0xffd7ff,
|
||||
226: 0xffff00,
|
||||
227: 0xffff5f,
|
||||
228: 0xffff87,
|
||||
229: 0xffffaf,
|
||||
230: 0xffffd7,
|
||||
231: 0xffffff,
|
||||
232: 0x080808,
|
||||
233: 0x121212,
|
||||
234: 0x1c1c1c,
|
||||
235: 0x262626,
|
||||
236: 0x303030,
|
||||
237: 0x3a3a3a,
|
||||
238: 0x444444,
|
||||
239: 0x4e4e4e,
|
||||
240: 0x585858,
|
||||
241: 0x626262,
|
||||
242: 0x6c6c6c,
|
||||
243: 0x767676,
|
||||
244: 0x808080,
|
||||
245: 0x8a8a8a,
|
||||
246: 0x949494,
|
||||
247: 0x9e9e9e,
|
||||
248: 0xa8a8a8,
|
||||
249: 0xb2b2b2,
|
||||
250: 0xbcbcbc,
|
||||
251: 0xc6c6c6,
|
||||
252: 0xd0d0d0,
|
||||
253: 0xdadada,
|
||||
254: 0xe4e4e4,
|
||||
255: 0xeeeeee,
|
||||
}
|
||||
|
||||
func (w *Writer) Write(data []byte) (n int, err error) {
|
||||
var csbi consoleScreenBufferInfo
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
|
||||
er := bytes.NewBuffer(data)
|
||||
loop:
|
||||
for {
|
||||
r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
if r1 == 0 {
|
||||
break loop
|
||||
}
|
||||
|
||||
c1, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c1 != 0x1b {
|
||||
fmt.Fprint(w.out, string(c1))
|
||||
continue
|
||||
}
|
||||
c2, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
break loop
|
||||
}
|
||||
if c2 != 0x5b {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
continue
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var m rune
|
||||
for {
|
||||
c, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
w.lastbuf.Write(buf.Bytes())
|
||||
break loop
|
||||
}
|
||||
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||
m = c
|
||||
break
|
||||
}
|
||||
buf.Write([]byte(string(c)))
|
||||
}
|
||||
|
||||
var csbi consoleScreenBufferInfo
|
||||
switch m {
|
||||
case 'A':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.y -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'B':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.y += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'C':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'D':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if n, err = strconv.Atoi(buf.String()); err == nil {
|
||||
var csbi consoleScreenBufferInfo
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
}
|
||||
case 'E':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = 0
|
||||
csbi.cursorPosition.y += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'F':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = 0
|
||||
csbi.cursorPosition.y -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'G':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'H':
|
||||
token := strings.Split(buf.String(), ";")
|
||||
if len(token) != 2 {
|
||||
continue
|
||||
}
|
||||
n1, err := strconv.Atoi(token[0])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
n2, err := strconv.Atoi(token[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
csbi.cursorPosition.x = short(n2)
|
||||
csbi.cursorPosition.y = short(n1)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'J':
|
||||
n, err := strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var cursor coord
|
||||
switch n {
|
||||
case 0:
|
||||
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||
case 1:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||
case 2:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||
}
|
||||
var count, written dword
|
||||
count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
|
||||
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
case 'K':
|
||||
n, err := strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var cursor coord
|
||||
switch n {
|
||||
case 0:
|
||||
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||
case 1:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||
case 2:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||
}
|
||||
var count, written dword
|
||||
count = dword(csbi.size.x - csbi.cursorPosition.x)
|
||||
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
case 'm':
|
||||
attr := csbi.attributes
|
||||
cs := buf.String()
|
||||
if cs == "" {
|
||||
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
|
||||
continue
|
||||
}
|
||||
token := strings.Split(cs, ";")
|
||||
for i := 0; i < len(token); i += 1 {
|
||||
ns := token[i]
|
||||
if n, err = strconv.Atoi(ns); err == nil {
|
||||
switch {
|
||||
case n == 0 || n == 100:
|
||||
attr = w.oldattr
|
||||
case 1 <= n && n <= 5:
|
||||
attr |= foregroundIntensity
|
||||
case n == 7:
|
||||
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||
case n == 22 || n == 25:
|
||||
attr |= foregroundIntensity
|
||||
case n == 27:
|
||||
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||
case 30 <= n && n <= 37:
|
||||
attr = (attr & backgroundMask)
|
||||
if (n-30)&1 != 0 {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if (n-30)&2 != 0 {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if (n-30)&4 != 0 {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
case n == 38: // set foreground color.
|
||||
if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
|
||||
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||
if n256foreAttr == nil {
|
||||
n256setup()
|
||||
}
|
||||
attr &= backgroundMask
|
||||
attr |= n256foreAttr[n256]
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
attr = attr & (w.oldattr & backgroundMask)
|
||||
}
|
||||
case n == 39: // reset foreground color.
|
||||
attr &= backgroundMask
|
||||
attr |= w.oldattr & foregroundMask
|
||||
case 40 <= n && n <= 47:
|
||||
attr = (attr & foregroundMask)
|
||||
if (n-40)&1 != 0 {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if (n-40)&2 != 0 {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if (n-40)&4 != 0 {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
case n == 48: // set background color.
|
||||
if i < len(token)-2 && token[i+1] == "5" {
|
||||
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||
if n256backAttr == nil {
|
||||
n256setup()
|
||||
}
|
||||
attr &= foregroundMask
|
||||
attr |= n256backAttr[n256]
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
attr = attr & (w.oldattr & foregroundMask)
|
||||
}
|
||||
case n == 49: // reset background color.
|
||||
attr &= foregroundMask
|
||||
attr |= w.oldattr & backgroundMask
|
||||
case 90 <= n && n <= 97:
|
||||
attr = (attr & backgroundMask)
|
||||
attr |= foregroundIntensity
|
||||
if (n-90)&1 != 0 {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if (n-90)&2 != 0 {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if (n-90)&4 != 0 {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
case 100 <= n && n <= 107:
|
||||
attr = (attr & foregroundMask)
|
||||
attr |= backgroundIntensity
|
||||
if (n-100)&1 != 0 {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if (n-100)&2 != 0 {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if (n-100)&4 != 0 {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
}
|
||||
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(data) - w.lastbuf.Len(), nil
|
||||
}
|
||||
|
||||
type consoleColor struct {
|
||||
rgb int
|
||||
red bool
|
||||
green bool
|
||||
blue bool
|
||||
intensity bool
|
||||
}
|
||||
|
||||
func (c consoleColor) foregroundAttr() (attr word) {
|
||||
if c.red {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if c.green {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if c.blue {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
if c.intensity {
|
||||
attr |= foregroundIntensity
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c consoleColor) backgroundAttr() (attr word) {
|
||||
if c.red {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if c.green {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if c.blue {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
if c.intensity {
|
||||
attr |= backgroundIntensity
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var color16 = []consoleColor{
|
||||
consoleColor{0x000000, false, false, false, false},
|
||||
consoleColor{0x000080, false, false, true, false},
|
||||
consoleColor{0x008000, false, true, false, false},
|
||||
consoleColor{0x008080, false, true, true, false},
|
||||
consoleColor{0x800000, true, false, false, false},
|
||||
consoleColor{0x800080, true, false, true, false},
|
||||
consoleColor{0x808000, true, true, false, false},
|
||||
consoleColor{0xc0c0c0, true, true, true, false},
|
||||
consoleColor{0x808080, false, false, false, true},
|
||||
consoleColor{0x0000ff, false, false, true, true},
|
||||
consoleColor{0x00ff00, false, true, false, true},
|
||||
consoleColor{0x00ffff, false, true, true, true},
|
||||
consoleColor{0xff0000, true, false, false, true},
|
||||
consoleColor{0xff00ff, true, false, true, true},
|
||||
consoleColor{0xffff00, true, true, false, true},
|
||||
consoleColor{0xffffff, true, true, true, true},
|
||||
}
|
||||
|
||||
type hsv struct {
|
||||
h, s, v float32
|
||||
}
|
||||
|
||||
func (a hsv) dist(b hsv) float32 {
|
||||
dh := a.h - b.h
|
||||
switch {
|
||||
case dh > 0.5:
|
||||
dh = 1 - dh
|
||||
case dh < -0.5:
|
||||
dh = -1 - dh
|
||||
}
|
||||
ds := a.s - b.s
|
||||
dv := a.v - b.v
|
||||
return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
|
||||
}
|
||||
|
||||
func toHSV(rgb int) hsv {
|
||||
r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
|
||||
float32((rgb&0x00FF00)>>8)/256.0,
|
||||
float32(rgb&0x0000FF)/256.0
|
||||
min, max := minmax3f(r, g, b)
|
||||
h := max - min
|
||||
if h > 0 {
|
||||
if max == r {
|
||||
h = (g - b) / h
|
||||
if h < 0 {
|
||||
h += 6
|
||||
}
|
||||
} else if max == g {
|
||||
h = 2 + (b-r)/h
|
||||
} else {
|
||||
h = 4 + (r-g)/h
|
||||
}
|
||||
}
|
||||
h /= 6.0
|
||||
s := max - min
|
||||
if max != 0 {
|
||||
s /= max
|
||||
}
|
||||
v := max
|
||||
return hsv{h: h, s: s, v: v}
|
||||
}
|
||||
|
||||
type hsvTable []hsv
|
||||
|
||||
func toHSVTable(rgbTable []consoleColor) hsvTable {
|
||||
t := make(hsvTable, len(rgbTable))
|
||||
for i, c := range rgbTable {
|
||||
t[i] = toHSV(c.rgb)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t hsvTable) find(rgb int) consoleColor {
|
||||
hsv := toHSV(rgb)
|
||||
n := 7
|
||||
l := float32(5.0)
|
||||
for i, p := range t {
|
||||
d := hsv.dist(p)
|
||||
if d < l {
|
||||
l, n = d, i
|
||||
}
|
||||
}
|
||||
return color16[n]
|
||||
}
|
||||
|
||||
func minmax3f(a, b, c float32) (min, max float32) {
|
||||
if a < b {
|
||||
if b < c {
|
||||
return a, c
|
||||
} else if a < c {
|
||||
return a, b
|
||||
} else {
|
||||
return c, b
|
||||
}
|
||||
} else {
|
||||
if a < c {
|
||||
return b, c
|
||||
} else if b < c {
|
||||
return b, a
|
||||
} else {
|
||||
return c, a
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var n256foreAttr []word
|
||||
var n256backAttr []word
|
||||
|
||||
func n256setup() {
|
||||
n256foreAttr = make([]word, 256)
|
||||
n256backAttr = make([]word, 256)
|
||||
t := toHSVTable(color16)
|
||||
for i, rgb := range color256 {
|
||||
c := t.find(rgb)
|
||||
n256foreAttr[i] = c.foregroundAttr()
|
||||
n256backAttr[i] = c.backgroundAttr()
|
||||
}
|
||||
}
|
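The 256-color handling above maps each palette entry to the nearest of the 16 console colors by distance in HSV space. A self-contained sketch of that lookup (not part of the vendored package; the sample palette is just the first eight console colors):

```go
package main

import (
	"fmt"
	"math"
)

// rgbToHSV mirrors toHSV above: h, s and v are all in [0, 1].
func rgbToHSV(rgb int) (h, s, v float64) {
	r := float64((rgb&0xFF0000)>>16) / 256.0
	g := float64((rgb&0x00FF00)>>8) / 256.0
	b := float64(rgb&0x0000FF) / 256.0
	min, max := math.Min(r, math.Min(g, b)), math.Max(r, math.Max(g, b))
	h = max - min
	if h > 0 {
		switch max {
		case r:
			h = (g - b) / h
			if h < 0 {
				h += 6
			}
		case g:
			h = 2 + (b-r)/h
		default:
			h = 4 + (r-g)/h
		}
	}
	h /= 6
	s = max - min
	if max != 0 {
		s /= max
	}
	return h, s, max
}

// nearest returns the index of the palette entry closest to rgb,
// using the same hue-wrapping distance as hsvTable.find above.
func nearest(rgb int, palette []int) int {
	h, s, v := rgbToHSV(rgb)
	best, bestDist := 0, math.Inf(1)
	for i, p := range palette {
		ph, ps, pv := rgbToHSV(p)
		dh := h - ph
		if dh > 0.5 {
			dh = 1 - dh
		} else if dh < -0.5 {
			dh = -1 - dh
		}
		d := math.Sqrt(dh*dh + (s-ps)*(s-ps) + (v-pv)*(v-pv))
		if d < bestDist {
			best, bestDist = i, d
		}
	}
	return best
}

func main() {
	// First eight entries of the 16-color console palette, as in color16.
	palette := []int{0x000000, 0x000080, 0x008000, 0x008080, 0x800000, 0x800080, 0x808000, 0xc0c0c0}
	fmt.Println(nearest(0x00ffff, palette)) // pure cyan -> index 3 (the teal entry)
}
```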
57
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
generated
vendored
Normal file
|
@ -0,0 +1,57 @@
|
|||
package colorable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type NonColorable struct {
|
||||
out io.Writer
|
||||
lastbuf bytes.Buffer
|
||||
}
|
||||
|
||||
func NewNonColorable(w io.Writer) io.Writer {
|
||||
return &NonColorable{out: w}
|
||||
}
|
||||
|
||||
func (w *NonColorable) Write(data []byte) (n int, err error) {
|
||||
er := bytes.NewBuffer(data)
|
||||
loop:
|
||||
for {
|
||||
c1, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c1 != 0x1b {
|
||||
fmt.Fprint(w.out, string(c1))
|
||||
continue
|
||||
}
|
||||
c2, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
break loop
|
||||
}
|
||||
if c2 != 0x5b {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
continue
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
for {
|
||||
c, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
w.lastbuf.Write(buf.Bytes())
|
||||
break loop
|
||||
}
|
||||
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||
break
|
||||
}
|
||||
buf.Write([]byte(string(c)))
|
||||
}
|
||||
}
|
||||
return len(data) - w.lastbuf.Len(), nil
|
||||
}
|
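NonColorable is the inverse helper: it strips escape sequences instead of translating them, which is useful when the output goes to a file or a pipe. A small sketch (assuming the upstream import path; the vendored copy behaves the same):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	var buf bytes.Buffer
	w := colorable.NewNonColorable(&buf)

	// The escape sequences are consumed; only the plain text reaches buf.
	fmt.Fprintf(w, "\x1b[32mok\x1b[0m\n")
	fmt.Printf("%q\n", buf.String()) // "ok\n"
}
```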
9
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
|
||||
|
||||
MIT License (Expat)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
37
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
|||
# go-isatty
|
||||
|
||||
isatty for golang
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/mattn/go-isatty"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
fmt.Println("Is Terminal")
|
||||
} else {
|
||||
fmt.Println("Is Not Terminal")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
$ go get github.com/mattn/go-isatty
|
||||
```
|
||||
|
||||
# License
|
||||
|
||||
MIT
|
||||
|
||||
# Author
|
||||
|
||||
Yasuhiro Matsumoto (a.k.a mattn)
|
2
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
// Package isatty implements an interface to isatty.
|
||||
package isatty
|
9
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
// +build appengine
|
||||
|
||||
package isatty
|
||||
|
||||
// IsTerminal returns true if the file descriptor is a terminal, which
|
||||
// is always false on App Engine classic, which is a sandboxed PaaS.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
return false
|
||||
}
|
18
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
// +build darwin freebsd openbsd netbsd
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const ioctlReadTermios = syscall.TIOCGETA
|
||||
|
||||
// IsTerminal returns true if the file descriptor refers to a terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termios syscall.Termios
|
||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||
return err == 0
|
||||
}
|
18
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
// +build linux
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const ioctlReadTermios = syscall.TCGETS
|
||||
|
||||
// IsTerminal returns true if the file descriptor refers to a terminal.
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termios syscall.Termios
|
||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
||||
return err == 0
|
||||
}
|
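The same check can be expressed with golang.org/x/sys/unix instead of raw syscall plumbing; a Linux-only sketch (not part of this vendor tree, shown for comparison):

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// isTerminal reports whether fd refers to a terminal, using the same
// TCGETS ioctl as the vendored implementation above.
func isTerminal(fd uintptr) bool {
	_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
	return err == nil
}

func main() {
	fmt.Println(isTerminal(os.Stdout.Fd()))
}
```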
16
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
// +build solaris
|
||||
// +build !appengine
|
||||
|
||||
package isatty
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
||||
// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
|
||||
func IsTerminal(fd uintptr) bool {
|
||||
var termio unix.Termio
|
||||
err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
|
||||
return err == nil
|
||||
}
|
19
vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
// +build windows
// +build !appengine

package isatty

import (
	"syscall"
	"unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")

// IsTerminal returns true if the file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
	var st uint32
	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
	return r != 0 && e == 0
}
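The platform files above all expose the same single-function API. As a quick orientation (not part of the vendored code, and using the upstream import path `github.com/mattn/go-isatty` rather than this vendored copy), a caller typically uses IsTerminal to decide whether to emit ANSI colour codes:

```go
package main

import (
	"fmt"
	"os"

	isatty "github.com/mattn/go-isatty"
)

func main() {
	// Fall back to plain output when stdout is redirected (e.g. piped to a CI log).
	if isatty.IsTerminal(os.Stdout.Fd()) {
		fmt.Println("\x1b[32mterminal detected: colour output enabled\x1b[0m")
	} else {
		fmt.Println("not a terminal: colour output disabled")
	}
}
```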
106
vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
/*

TeamCity Reporter for Ginkgo

Makes use of TeamCity's support for Service Messages
http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
*/

package reporters

import (
	"fmt"
	"io"
	"strings"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

const (
	messageId = "##teamcity"
)

type TeamCityReporter struct {
	writer         io.Writer
	testSuiteName  string
	ReporterConfig config.DefaultReporterConfigType
}

func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
	return &TeamCityReporter{
		writer: writer,
	}
}

func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	reporter.testSuiteName = escape(summary.SuiteDescription)
	fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']\n", messageId, reporter.testSuiteName)
}

func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("BeforeSuite", setupSummary)
}

func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("AfterSuite", setupSummary)
}

func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		testName := escape(name)
		fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
		message := reporter.failureMessage(setupSummary.Failure)
		details := reporter.failureDetails(setupSummary.Failure)
		fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
		durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
		fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
	}
}

func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
	fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
}

func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))

	if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
		details := escape(specSummary.CapturedOutput)
		fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']\n", messageId, testName, details)
	}
	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
		message := reporter.failureMessage(specSummary.Failure)
		details := reporter.failureDetails(specSummary.Failure)
		fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
	}
	if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
		fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']\n", messageId, testName)
	}

	durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
	fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
}

func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']\n", messageId, reporter.testSuiteName)
}

func (reporter *TeamCityReporter) failureMessage(failure types.SpecFailure) string {
	return escape(failure.ComponentCodeLocation.String())
}

func (reporter *TeamCityReporter) failureDetails(failure types.SpecFailure) string {
	return escape(fmt.Sprintf("%s\n%s", failure.Message, failure.Location.String()))
}

func escape(output string) string {
	output = strings.Replace(output, "|", "||", -1)
	output = strings.Replace(output, "'", "|'", -1)
	output = strings.Replace(output, "\n", "|n", -1)
	output = strings.Replace(output, "\r", "|r", -1)
	output = strings.Replace(output, "[", "|[", -1)
	output = strings.Replace(output, "]", "|]", -1)
	return output
}
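For context (this wiring is not part of the diff), a Ginkgo v1 suite would typically attach this reporter through RunSpecsWithCustomReporters; the sketch below assumes a hypothetical test package and writes the ##teamcity service messages to stdout:

```go
package mysuite_test

import (
	"os"
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/gomega"
)

func TestMySuite(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	// Emits ##teamcity[testSuiteStarted ...], [testStarted ...], [testFinished ...] lines to stdout.
	ginkgo.RunSpecsWithCustomReporters(t, "My Suite", []ginkgo.Reporter{
		reporters.NewTeamCityReporter(os.Stdout),
	})
}
```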
15
vendor/github.com/onsi/ginkgo/types/code_location.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
package types

import (
	"fmt"
)

type CodeLocation struct {
	FileName       string
	LineNumber     int
	FullStackTrace string
}

func (codeLocation CodeLocation) String() string {
	return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
}
150
vendor/github.com/onsi/ginkgo/types/deprecation_support.go
generated
vendored
Normal file
@@ -0,0 +1,150 @@
package types

import (
	"os"
	"strconv"
	"strings"
	"unicode"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/formatter"
)

type Deprecation struct {
	Message string
	DocLink string
	Version string
}

type deprecations struct{}

var Deprecations = deprecations{}

func (d deprecations) CustomReporter() Deprecation {
	return Deprecation{
		Message: "You are using a custom reporter. Support for custom reporters will likely be removed in V2. Most users were using them to generate junit or teamcity reports and this functionality will be merged into the core reporter. In addition, Ginkgo 2.0 will support emitting a JSON-formatted report that users can then manipulate to generate custom reports.\n\n{{red}}{{bold}}If this change will be impactful to you please leave a comment on {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}",
		DocLink: "removed-custom-reporters",
		Version: "1.16.0",
	}
}

func (d deprecations) V1Reporter() Deprecation {
	return Deprecation{
		Message: "You are using a V1 Ginkgo Reporter. Please update your custom reporter to the new V2 Reporter interface.",
		DocLink: "changed-reporter-interface",
		Version: "1.16.0",
	}
}

func (d deprecations) Async() Deprecation {
	return Deprecation{
		Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.",
		DocLink: "removed-async-testing",
		Version: "1.16.0",
	}
}

func (d deprecations) Measure() Deprecation {
	return Deprecation{
		Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.",
		DocLink: "removed-measure",
		Version: "1.16.3",
	}
}

func (d deprecations) Convert() Deprecation {
	return Deprecation{
		Message: "The convert command is deprecated in Ginkgo V2",
		DocLink: "removed-ginkgo-convert",
		Version: "1.16.0",
	}
}

func (d deprecations) Blur() Deprecation {
	return Deprecation{
		Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.",
		Version: "1.16.0",
	}
}

type DeprecationTracker struct {
	deprecations map[Deprecation][]CodeLocation
}

func NewDeprecationTracker() *DeprecationTracker {
	return &DeprecationTracker{
		deprecations: map[Deprecation][]CodeLocation{},
	}
}

func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) {
	ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS")
	if deprecation.Version != "" && ackVersion != "" {
		ack := ParseSemVer(ackVersion)
		version := ParseSemVer(deprecation.Version)
		if ack.GreaterThanOrEqualTo(version) {
			return
		}
	}

	if len(cl) == 1 {
		d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0])
	} else {
		d.deprecations[deprecation] = []CodeLocation{}
	}
}

func (d *DeprecationTracker) DidTrackDeprecations() bool {
	return len(d.deprecations) > 0
}

func (d *DeprecationTracker) DeprecationsReport() string {
	out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
	out += formatter.F("{{light-yellow}}============================================={{/}}\n")
	out += formatter.F("Ginkgo 2.0 is under active development and will introduce (a small number of) breaking changes.\n")
	out += formatter.F("To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md{{/}}\n")
	out += formatter.F("To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")

	for deprecation, locations := range d.deprecations {
		out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
		if deprecation.DocLink != "" {
			out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink)
		}
		for _, location := range locations {
			out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
		}
	}
	out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n")
	out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", config.VERSION)
	return out
}

type SemVer struct {
	Major int
	Minor int
	Patch int
}

func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool {
	return (s.Major > o.Major) ||
		(s.Major == o.Major && s.Minor > o.Minor) ||
		(s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch)
}

func ParseSemVer(semver string) SemVer {
	out := SemVer{}
	semver = strings.TrimFunc(semver, func(r rune) bool {
		return !(unicode.IsNumber(r) || r == '.')
	})
	components := strings.Split(semver, ".")
	if len(components) > 0 {
		out.Major, _ = strconv.Atoi(components[0])
	}
	if len(components) > 1 {
		out.Minor, _ = strconv.Atoi(components[1])
	}
	if len(components) > 2 {
		out.Patch, _ = strconv.Atoi(components[2])
	}
	return out
}
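To make the flow above concrete, here is a small, hypothetical driver (not part of the vendored code) showing how the tracker is fed and how the ACK_GINKGO_DEPRECATIONS environment variable suppresses acknowledged versions:

```go
package main

import (
	"fmt"
	"os"

	"github.com/onsi/ginkgo/types"
)

func main() {
	tracker := types.NewDeprecationTracker()

	// Hypothetical call site; in Ginkgo itself the code location comes from the spec tree.
	tracker.TrackDeprecation(types.Deprecations.Measure(), types.CodeLocation{
		FileName:   "widgets_test.go",
		LineNumber: 42,
	})

	// Exporting ACK_GINKGO_DEPRECATIONS=1.16.4 (or higher) before TrackDeprecation runs
	// would have silenced this entry, since 1.16.4 >= the deprecation's version 1.16.3.
	if tracker.DidTrackDeprecations() {
		fmt.Fprint(os.Stderr, tracker.DeprecationsReport())
	}
}
```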
30
vendor/github.com/onsi/ginkgo/types/synchronization.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
package types

import (
	"encoding/json"
)

type RemoteBeforeSuiteState int

const (
	RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota

	RemoteBeforeSuiteStatePending
	RemoteBeforeSuiteStatePassed
	RemoteBeforeSuiteStateFailed
	RemoteBeforeSuiteStateDisappeared
)

type RemoteBeforeSuiteData struct {
	Data  []byte
	State RemoteBeforeSuiteState
}

func (r RemoteBeforeSuiteData) ToJSON() []byte {
	data, _ := json.Marshal(r)
	return data
}

type RemoteAfterSuiteData struct {
	CanRun bool
}
174
vendor/github.com/onsi/ginkgo/types/types.go
generated
vendored
Normal file
@@ -0,0 +1,174 @@
package types

import (
	"strconv"
	"time"
)

const GINKGO_FOCUS_EXIT_CODE = 197

/*
SuiteSummary represents a summary of the test suite and is passed to both
Reporter.SpecSuiteWillBegin
Reporter.SpecSuiteDidEnd

this is unfortunate as these two methods should receive different objects. When running in parallel
each node does not deterministically know how many specs it will end up running.

Unfortunately making such a change would break backward compatibility.

Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields
with -1.
*/
type SuiteSummary struct {
	SuiteDescription string
	SuiteSucceeded   bool
	SuiteID          string

	NumberOfSpecsBeforeParallelization int
	NumberOfTotalSpecs                 int
	NumberOfSpecsThatWillBeRun         int
	NumberOfPendingSpecs               int
	NumberOfSkippedSpecs               int
	NumberOfPassedSpecs                int
	NumberOfFailedSpecs                int
	// Flaked specs are those that failed initially, but then passed on a
	// subsequent try.
	NumberOfFlakedSpecs int
	RunTime             time.Duration
}

type SpecSummary struct {
	ComponentTexts         []string
	ComponentCodeLocations []CodeLocation

	State           SpecState
	RunTime         time.Duration
	Failure         SpecFailure
	IsMeasurement   bool
	NumberOfSamples int
	Measurements    map[string]*SpecMeasurement

	CapturedOutput string
	SuiteID        string
}

func (s SpecSummary) HasFailureState() bool {
	return s.State.IsFailure()
}

func (s SpecSummary) TimedOut() bool {
	return s.State == SpecStateTimedOut
}

func (s SpecSummary) Panicked() bool {
	return s.State == SpecStatePanicked
}

func (s SpecSummary) Failed() bool {
	return s.State == SpecStateFailed
}

func (s SpecSummary) Passed() bool {
	return s.State == SpecStatePassed
}

func (s SpecSummary) Skipped() bool {
	return s.State == SpecStateSkipped
}

func (s SpecSummary) Pending() bool {
	return s.State == SpecStatePending
}

type SetupSummary struct {
	ComponentType SpecComponentType
	CodeLocation  CodeLocation

	State   SpecState
	RunTime time.Duration
	Failure SpecFailure

	CapturedOutput string
	SuiteID        string
}

type SpecFailure struct {
	Message        string
	Location       CodeLocation
	ForwardedPanic string

	ComponentIndex        int
	ComponentType         SpecComponentType
	ComponentCodeLocation CodeLocation
}

type SpecMeasurement struct {
	Name  string
	Info  interface{}
	Order int

	Results []float64

	Smallest     float64
	Largest      float64
	Average      float64
	StdDeviation float64

	SmallestLabel string
	LargestLabel  string
	AverageLabel  string
	Units         string
	Precision     int
}

func (s SpecMeasurement) PrecisionFmt() string {
	if s.Precision == 0 {
		return "%f"
	}

	str := strconv.Itoa(s.Precision)

	return "%." + str + "f"
}

type SpecState uint

const (
	SpecStateInvalid SpecState = iota

	SpecStatePending
	SpecStateSkipped
	SpecStatePassed
	SpecStateFailed
	SpecStatePanicked
	SpecStateTimedOut
)

func (state SpecState) IsFailure() bool {
	return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
}

type SpecComponentType uint

const (
	SpecComponentTypeInvalid SpecComponentType = iota

	SpecComponentTypeContainer
	SpecComponentTypeBeforeSuite
	SpecComponentTypeAfterSuite
	SpecComponentTypeBeforeEach
	SpecComponentTypeJustBeforeEach
	SpecComponentTypeJustAfterEach
	SpecComponentTypeAfterEach
	SpecComponentTypeIt
	SpecComponentTypeMeasure
)

type FlagType uint

const (
	FlagTypeNone FlagType = iota
	FlagTypeFocused
	FlagTypePending
)
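As a brief illustration of how a reporter consumes these types (hypothetical values, not taken from the diff), PrecisionFmt builds the printf verb used to render a measurement:

```go
package main

import (
	"fmt"

	"github.com/onsi/ginkgo/types"
)

func main() {
	m := types.SpecMeasurement{Name: "request latency", Units: "ms", Precision: 3, Average: 1.23456}
	// Precision 3 yields the verb "%.3f", so this prints: request latency: 1.235 ms
	fmt.Printf("%s: "+m.PrecisionFmt()+" %s\n", m.Name, m.Average, m.Units)
}
```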