Compare commits


No commits in common. "master" and "2.1.7" have entirely different histories.

315 changed files with 9294 additions and 7984 deletions

View file

@ -83,7 +83,7 @@ jobs:
prerelease: false prerelease: false
- name: Upload release assets - name: Upload release assets
uses: softprops/action-gh-release@ab50eebb6488051c6788d97fa95232267c6a4e23 uses: softprops/action-gh-release@c43d7637b9b9ce3e953168c325d27253a5d48d8e
if: startsWith(github.ref, 'refs/tags/') if: startsWith(github.ref, 'refs/tags/')
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View file

@ -1,21 +1,3 @@
# Version 2.1.8
- Dependencies have been updated, notably the QUIC implementation,
which could be vulnerable to denial-of-service attacks.
- In forwarding rules, the target can now optionally include a
non-standard DNS port number. The port number is also now optional when
using IPv6.
- An annoying log message related to permissions on Windows has been
suppressed.
- Resolver IP addresses can now be refreshed more frequently.
Additionally, jitter has been introduced to prevent all resolvers from
being refreshed simultaneously. Further changes have been implemented
to mitigate issues arising from multiple concurrent attempts to resolve
a resolver's IP address.
- An empty value for "tls_cipher_suite" is now equivalent to leaving
the property undefined. Previously, it disabled all TLS cipher suites,
which had little practical justification.
- In forwarding rules, an optional `*.` prefix is now accepted.
# Version 2.1.7 # Version 2.1.7
- This version reintroduces support for XSalsa20 encryption in DNSCrypt, - This version reintroduces support for XSalsa20 encryption in DNSCrypt,
which was removed in 2.1.6. Unfortunately, a bunch of servers still which was removed in 2.1.6. Unfortunately, a bunch of servers still
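
The changelog entry above about refreshing resolver IP addresses with jitter corresponds to the new `ResolverIPTTLMaxJitter` constant in the xtransport.go hunk further down. A minimal, self-contained sketch of the idea, using a hypothetical `ttlWithJitter` helper rather than the project's code:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// ttlWithJitter spreads cache expirations by adding a random offset to the
// base TTL, so that all resolver IPs are not refreshed at the same instant.
func ttlWithJitter(base, maxJitter time.Duration) time.Duration {
	return base + time.Duration(rand.Int63n(int64(maxJitter)))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(ttlWithJitter(12*time.Hour, 15*time.Minute))
	}
}
```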

View file

@ -3,6 +3,9 @@
[![Financial Contributors on Open Collective](https://opencollective.com/dnscrypt/all/badge.svg?label=financial+contributors)](https://opencollective.com/dnscrypt) [![Financial Contributors on Open Collective](https://opencollective.com/dnscrypt/all/badge.svg?label=financial+contributors)](https://opencollective.com/dnscrypt)
[![DNSCrypt-Proxy Release](https://img.shields.io/github/release/dnscrypt/dnscrypt-proxy.svg?label=Latest%20Release&style=popout)](https://github.com/dnscrypt/dnscrypt-proxy/releases/latest) [![DNSCrypt-Proxy Release](https://img.shields.io/github/release/dnscrypt/dnscrypt-proxy.svg?label=Latest%20Release&style=popout)](https://github.com/dnscrypt/dnscrypt-proxy/releases/latest)
[![Build Status](https://github.com/DNSCrypt/dnscrypt-proxy/actions/workflows/releases.yml/badge.svg)](https://github.com/DNSCrypt/dnscrypt-proxy/actions/workflows/releases.yml) [![Build Status](https://github.com/DNSCrypt/dnscrypt-proxy/actions/workflows/releases.yml/badge.svg)](https://github.com/DNSCrypt/dnscrypt-proxy/actions/workflows/releases.yml)
![CodeQL scan](https://github.com/DNSCrypt/dnscrypt-proxy/workflows/CodeQL%20scan/badge.svg)
![ShiftLeft Scan](https://github.com/DNSCrypt/dnscrypt-proxy/workflows/ShiftLeft%20Scan/badge.svg)
[![#dnscrypt-proxy:matrix.org](https://img.shields.io/matrix/dnscrypt-proxy:matrix.org.svg?label=DNSCrypt-Proxy%20Matrix%20Chat&server_fqdn=matrix.org&style=popout)](https://matrix.to/#/#dnscrypt-proxy:matrix.org)
## Overview ## Overview

View file

@ -170,12 +170,6 @@ func ColdStart(proxy *Proxy) (*CaptivePortalHandler, error) {
if err != nil { if err != nil {
continue continue
} }
if strings.Contains(ipsStr, "*") {
return nil, fmt.Errorf(
"A captive portal rule must use an exact host name at line %d",
1+lineNo,
)
}
var ips []net.IP var ips []net.IP
for _, ip := range strings.Split(ipsStr, ",") { for _, ip := range strings.Split(ipsStr, ",") {
ipStr := strings.TrimSpace(ip) ipStr := strings.TrimSpace(ip)

View file

@ -6,9 +6,12 @@ import (
"errors" "errors"
"net" "net"
"os" "os"
"path"
"strconv" "strconv"
"strings" "strings"
"unicode" "unicode"
"github.com/jedisct1/dlog"
) )
type CryptoConstruction uint16 type CryptoConstruction uint16
@ -164,3 +167,31 @@ func ReadTextFile(filename string) (string, error) {
} }
func isDigit(b byte) bool { return b >= '0' && b <= '9' } func isDigit(b byte) bool { return b >= '0' && b <= '9' }
func maybeWritableByOtherUsers(p string) (bool, string, error) {
p = path.Clean(p)
for p != "/" && p != "." {
st, err := os.Stat(p)
if err != nil {
return false, p, err
}
mode := st.Mode()
if mode.Perm()&2 != 0 && !(st.IsDir() && mode&os.ModeSticky == os.ModeSticky) {
return true, p, nil
}
p = path.Dir(p)
}
return false, "", nil
}
func WarnIfMaybeWritableByOtherUsers(p string) {
if ok, px, err := maybeWritableByOtherUsers(p); ok {
if px == p {
dlog.Criticalf("[%s] is writable by other system users - If this is not intentional, it is recommended to fix the access permissions", p)
} else {
dlog.Warnf("[%s] can be modified by other system users because [%s] is writable by other users - If this is not intentional, it is recommended to fix the access permissions", p, px)
}
} else if err != nil {
dlog.Warnf("Error while checking if [%s] is accessible: [%s] : [%s]", p, px, err)
}
}
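
A minimal sketch of the permission test used by `maybeWritableByOtherUsers` above, reduced to a single path with no parent-directory walk; `worldWritable` is a hypothetical name used only for illustration:

```go
package main

import (
	"fmt"
	"os"
)

// worldWritable flags an entry whose "others write" bit is set, unless it is
// a directory with the sticky bit (e.g. /tmp), which is the usual safe case.
func worldWritable(p string) (bool, error) {
	st, err := os.Stat(p)
	if err != nil {
		return false, err
	}
	mode := st.Mode()
	flagged := mode.Perm()&0o002 != 0 && !(st.IsDir() && mode&os.ModeSticky != 0)
	return flagged, nil
}

func main() {
	flagged, err := worldWritable("/tmp")
	fmt.Println(flagged, err) // typically "false <nil>": /tmp is sticky
}
```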

View file

@ -2,8 +2,7 @@
# IP blocklist # # IP blocklist #
############################## ##############################
## Rules for blocking DNS responses if they contain ## Rules for IP-based response blocking
## IP addresses matching patterns.
## ##
## Sample feeds of suspect IP addresses: ## Sample feeds of suspect IP addresses:
## - https://github.com/stamparm/ipsum ## - https://github.com/stamparm/ipsum

View file

@ -221,12 +221,11 @@ cert_refresh_delay = 240
## 52393 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 ## 52393 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
## ##
## On non-Intel CPUs such as MIPS routers and ARM systems (Android, Raspberry Pi...), ## On non-Intel CPUs such as MIPS routers and ARM systems (Android, Raspberry Pi...),
## uncommenting the following line may improve performance. ## the following suite improves performance.
## This may also help on Intel CPUs running 32-bit operating systems. ## This may also help on Intel CPUs running 32-bit operating systems.
## However, this can cause issues fetching sources or connecting to some HTTP servers,
## and should not be set on regular CPUs.
## ##
## Keep tls_cipher_suite undefined to let the app automatically choose secure parameters. ## Keep tls_cipher_suite empty if you have issues fetching sources or
## connecting to some DoH servers.
# tls_cipher_suite = [52392, 49199] # tls_cipher_suite = [52392, 49199]
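
As an aside (not part of the configuration file), the numeric IDs accepted by `tls_cipher_suite` can be translated into cipher-suite names with Go's `crypto/tls` package:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Map the numeric IDs used in tls_cipher_suite to the standard
	// cipher-suite names known to the Go TLS stack.
	for _, id := range []uint16{52392, 49199} {
		fmt.Printf("%d = %s\n", id, tls.CipherSuiteName(id))
	}
}
```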

View file

@ -36,11 +36,7 @@
# example.com 9.9.9.9,8.8.8.8 # example.com 9.9.9.9,8.8.8.8
## Forward queries to a resolver using IPv6 ## Forward queries to a resolver using IPv6
# ipv6.example.com [2001:DB8::42] # ipv6.example.com [2001:DB8::42]:53
## Forward to a non-standard port number
# x.example.com 192.168.0.1:1053
# y.example.com [2001:DB8::42]:1053
## Forward queries for .onion names to a local Tor client ## Forward queries for .onion names to a local Tor client
## Tor must be configured with the following in the torrc file: ## Tor must be configured with the following in the torrc file:

View file

@ -15,7 +15,7 @@ import (
) )
const ( const (
AppVersion = "2.1.8" AppVersion = "2.1.7"
DefaultConfigFileName = "dnscrypt-proxy.toml" DefaultConfigFileName = "dnscrypt-proxy.toml"
) )

View file

@ -1,7 +0,0 @@
//go:build !unix
package main
func WarnIfMaybeWritableByOtherUsers(p string) {
// No-op
}

View file

@ -1,38 +0,0 @@
//go:build unix
package main
import (
"os"
"path"
"github.com/jedisct1/dlog"
)
func maybeWritableByOtherUsers(p string) (bool, string, error) {
p = path.Clean(p)
for p != "/" && p != "." {
st, err := os.Stat(p)
if err != nil {
return false, p, err
}
mode := st.Mode()
if mode.Perm()&2 != 0 && !(st.IsDir() && mode&os.ModeSticky == os.ModeSticky) {
return true, p, nil
}
p = path.Dir(p)
}
return false, "", nil
}
func WarnIfMaybeWritableByOtherUsers(p string) {
if ok, px, err := maybeWritableByOtherUsers(p); ok {
if px == p {
dlog.Criticalf("[%s] is writable by other system users - If this is not intentional, it is recommended to fix the access permissions", p)
} else {
dlog.Warnf("[%s] can be modified by other system users because [%s] is writable by other users - If this is not intentional, it is recommended to fix the access permissions", p, px)
}
} else if err != nil {
dlog.Warnf("Error while checking if [%s] is accessible: [%s] : [%s]", p, px, err)
}
}

View file

@ -54,17 +54,12 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
if err != nil { if err != nil {
return err return err
} }
requiresDHCP := false
for lineNo, line := range strings.Split(lines, "\n") { for lineNo, line := range strings.Split(lines, "\n") {
line = TrimAndStripInlineComments(line) line = TrimAndStripInlineComments(line)
if len(line) == 0 { if len(line) == 0 {
continue continue
} }
domain, serversStr, ok := StringTwoFields(line) domain, serversStr, ok := StringTwoFields(line)
domain = strings.TrimPrefix(domain, "*.")
if strings.Contains(domain, "*") {
ok = false
}
if !ok { if !ok {
return fmt.Errorf( return fmt.Errorf(
"Syntax error for a forwarding rule at line %d. Expected syntax: example.com 9.9.9.9,8.8.8.8", "Syntax error for a forwarding rule at line %d. Expected syntax: example.com 9.9.9.9,8.8.8.8",
@ -72,6 +67,7 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
) )
} }
domain = strings.ToLower(domain) domain = strings.ToLower(domain)
requiresDHCP := false
var sequence []SearchSequenceItem var sequence []SearchSequenceItem
for _, server := range strings.Split(serversStr, ",") { for _, server := range strings.Split(serversStr, ",") {
server = strings.TrimSpace(server) server = strings.TrimSpace(server)
@ -102,9 +98,14 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
dlog.Criticalf("Unknown keyword [%s] at line %d", server, 1+lineNo) dlog.Criticalf("Unknown keyword [%s] at line %d", server, 1+lineNo)
continue continue
} }
if server, err = normalizeIPAndOptionalPort(server, "53"); err != nil { server = strings.TrimPrefix(server, "[")
dlog.Criticalf("Syntax error for a forwarding rule at line %d: %s", 1+lineNo, err) server = strings.TrimSuffix(server, "]")
continue if ip := net.ParseIP(server); ip != nil {
if ip.To4() != nil {
server = fmt.Sprintf("%s:%d", server, 53)
} else {
server = fmt.Sprintf("[%s]:%d", server, 53)
}
} }
idxServers := -1 idxServers := -1
for i, item := range sequence { for i, item := range sequence {
@ -120,28 +121,36 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
dlog.Infof("Forwarding [%s] to [%s]", domain, server) dlog.Infof("Forwarding [%s] to [%s]", domain, server)
} }
} }
if requiresDHCP {
if len(proxy.userName) > 0 {
dlog.Warn("DHCP/DNS detection may not work when 'user_name' is set or when starting as a non-root user")
}
if proxy.SourceIPv6 {
dlog.Notice("Starting a DHCP/DNS detector for IPv6")
d6 := &dhcpdns.Detector{RemoteIPPort: "[2001:DB8::53]:80"}
if err := d6.Detect(); err != nil {
dlog.Criticalf("Failed to start the DHCP/DNS IPv6 server: %s", err)
continue
}
go d6.Serve(9, 10)
plugin.dhcpdns = append(plugin.dhcpdns, d6)
}
if proxy.SourceIPv4 {
dlog.Notice("Starting a DHCP/DNS detector for IPv4")
d4 := &dhcpdns.Detector{RemoteIPPort: "192.0.2.53:80"}
if err := d4.Detect(); err != nil {
dlog.Criticalf("Failed to start the DHCP/DNS IPv4 server: %s", err)
continue
}
go d4.Serve(9, 10)
plugin.dhcpdns = append(plugin.dhcpdns, d4)
}
}
plugin.forwardMap = append(plugin.forwardMap, PluginForwardEntry{ plugin.forwardMap = append(plugin.forwardMap, PluginForwardEntry{
domain: domain, domain: domain,
sequence: sequence, sequence: sequence,
}) })
} }
if requiresDHCP {
if len(proxy.userName) > 0 {
dlog.Warn("DHCP/DNS detection may not work when 'user_name' is set or when starting as a non-root user")
}
if proxy.SourceIPv6 {
dlog.Notice("Starting a DHCP/DNS detector for IPv6")
d6 := &dhcpdns.Detector{RemoteIPPort: "[2001:DB8::53]:80"}
go d6.Serve(9, 10)
plugin.dhcpdns = append(plugin.dhcpdns, d6)
}
if proxy.SourceIPv4 {
dlog.Notice("Starting a DHCP/DNS detector for IPv4")
d4 := &dhcpdns.Detector{RemoteIPPort: "192.0.2.53:80"}
go d4.Serve(9, 10)
plugin.dhcpdns = append(plugin.dhcpdns, d4)
}
}
return nil return nil
} }
@ -190,7 +199,7 @@ func (plugin *PluginForward) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
dlog.Infof("No response from the DHCP server while resolving [%s]", qName) dlog.Infof("No response from the DHCP server while resolving [%s]", qName)
continue continue
} }
if len(dhcpDNS) > 0 { if dhcpDNS != nil && len(dhcpDNS) > 0 {
server = net.JoinHostPort(dhcpDNS[rand.Intn(len(dhcpDNS))].String(), "53") server = net.JoinHostPort(dhcpDNS[rand.Intn(len(dhcpDNS))].String(), "53")
break break
} }
@ -239,37 +248,3 @@ func (plugin *PluginForward) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
} }
return err return err
} }
func normalizeIPAndOptionalPort(addr string, defaultPort string) (string, error) {
var host, port string
var err error
if strings.HasPrefix(addr, "[") {
if !strings.Contains(addr, "]:") {
if addr[len(addr)-1] != ']' {
return "", fmt.Errorf("invalid IPv6 format: missing closing ']'")
}
host = addr[1 : len(addr)-1]
port = defaultPort
} else {
host, port, err = net.SplitHostPort(addr)
if err != nil {
return "", err
}
}
} else {
host, port, err = net.SplitHostPort(addr)
if err != nil {
host = addr
port = defaultPort
}
}
ip := net.ParseIP(host)
if ip == nil {
return "", fmt.Errorf("invalid IP address: [%s]", host)
}
if ip.To4() != nil {
return fmt.Sprintf("%s:%s", ip.String(), port), nil
}
return fmt.Sprintf("[%s]:%s", ip.String(), port), nil
}
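
The `normalizeIPAndOptionalPort` helper above (master side) is what lets forwarding targets carry an optional port. A self-contained sketch of the same optional-port handling, assuming the target is already a valid IP literal; `withDefaultPort` is a hypothetical name:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// withDefaultPort appends a default port to a forwarding target when none was
// specified, accepting bracketed IPv6 literals.
func withDefaultPort(addr, defaultPort string) string {
	if _, _, err := net.SplitHostPort(addr); err == nil {
		return addr // already has a port
	}
	host := strings.TrimSuffix(strings.TrimPrefix(addr, "["), "]")
	return net.JoinHostPort(host, defaultPort)
}

func main() {
	fmt.Println(withDefaultPort("9.9.9.9", "53"))          // 9.9.9.9:53
	fmt.Println(withDefaultPort("192.168.0.1:1053", "53")) // 192.168.0.1:1053
	fmt.Println(withDefaultPort("[2001:DB8::42]", "53"))   // [2001:DB8::42]:53
}
```

Bracketed and unbracketed IPv6 literals both end up in `[host]:port` form, since net.JoinHostPort adds the brackets whenever the host contains a colon.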

View file

@ -34,16 +34,14 @@ const (
DefaultBootstrapResolver = "9.9.9.9:53" DefaultBootstrapResolver = "9.9.9.9:53"
DefaultKeepAlive = 5 * time.Second DefaultKeepAlive = 5 * time.Second
DefaultTimeout = 30 * time.Second DefaultTimeout = 30 * time.Second
SystemResolverIPTTL = 12 * time.Hour SystemResolverIPTTL = 24 * time.Hour
MinResolverIPTTL = 4 * time.Hour MinResolverIPTTL = 12 * time.Hour
ResolverIPTTLMaxJitter = 15 * time.Minute
ExpiredCachedIPGraceTTL = 15 * time.Minute ExpiredCachedIPGraceTTL = 15 * time.Minute
) )
type CachedIPItem struct { type CachedIPItem struct {
ip net.IP ip net.IP
expiration *time.Time expiration *time.Time
updatingUntil *time.Time
} }
type CachedIPs struct { type CachedIPs struct {
@ -58,7 +56,7 @@ type AltSupport struct {
type XTransport struct { type XTransport struct {
transport *http.Transport transport *http.Transport
h3Transport *http3.Transport h3Transport *http3.RoundTripper
keepAlive time.Duration keepAlive time.Duration
timeout time.Duration timeout time.Duration
cachedIPs CachedIPs cachedIPs CachedIPs
@ -107,54 +105,31 @@ func ParseIP(ipStr string) net.IP {
// If ttl < 0, never expire // If ttl < 0, never expire
// Otherwise, ttl is set to max(ttl, MinResolverIPTTL) // Otherwise, ttl is set to max(ttl, MinResolverIPTTL)
func (xTransport *XTransport) saveCachedIP(host string, ip net.IP, ttl time.Duration) { func (xTransport *XTransport) saveCachedIP(host string, ip net.IP, ttl time.Duration) {
item := &CachedIPItem{ip: ip, expiration: nil, updatingUntil: nil} item := &CachedIPItem{ip: ip, expiration: nil}
if ttl >= 0 { if ttl >= 0 {
if ttl < MinResolverIPTTL { if ttl < MinResolverIPTTL {
ttl = MinResolverIPTTL ttl = MinResolverIPTTL
} }
ttl += time.Duration(rand.Int63n(int64(ResolverIPTTLMaxJitter)))
expiration := time.Now().Add(ttl) expiration := time.Now().Add(ttl)
item.expiration = &expiration item.expiration = &expiration
} }
xTransport.cachedIPs.Lock() xTransport.cachedIPs.Lock()
xTransport.cachedIPs.cache[host] = item xTransport.cachedIPs.cache[host] = item
xTransport.cachedIPs.Unlock() xTransport.cachedIPs.Unlock()
dlog.Debugf("[%s] IP address [%s] stored to the cache, valid for %v", host, ip, ttl)
} }
// Mark an entry as being updated func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool) {
func (xTransport *XTransport) markUpdatingCachedIP(host string) { ip, expired = nil, false
xTransport.cachedIPs.Lock()
item, ok := xTransport.cachedIPs.cache[host]
if ok {
now := time.Now()
until := now.Add(xTransport.timeout)
item.updatingUntil = &until
xTransport.cachedIPs.cache[host] = item
dlog.Debugf("[%s] IP addresss marked as updating", host)
}
xTransport.cachedIPs.Unlock()
}
func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool, updating bool) {
ip, expired, updating = nil, false, false
xTransport.cachedIPs.RLock() xTransport.cachedIPs.RLock()
item, ok := xTransport.cachedIPs.cache[host] item, ok := xTransport.cachedIPs.cache[host]
xTransport.cachedIPs.RUnlock() xTransport.cachedIPs.RUnlock()
if !ok { if !ok {
dlog.Debugf("[%s] IP address not found in the cache", host)
return return
} }
ip = item.ip ip = item.ip
expiration := item.expiration expiration := item.expiration
if expiration != nil && time.Until(*expiration) < 0 { if expiration != nil && time.Until(*expiration) < 0 {
expired = true expired = true
if item.updatingUntil != nil && time.Until(*item.updatingUntil) > 0 {
updating = true
dlog.Debugf("[%s] IP address is being updated", host)
} else {
dlog.Debugf("[%s] IP address expired, not being updated yet", host)
}
} }
return return
} }
@ -178,7 +153,7 @@ func (xTransport *XTransport) rebuildTransport() {
ipOnly := host ipOnly := host
// resolveAndUpdateCache() is always called in `Fetch()` before the `Dial()` // resolveAndUpdateCache() is always called in `Fetch()` before the `Dial()`
// method is used, so that a cached entry must be present at this point. // method is used, so that a cached entry must be present at this point.
cachedIP, _, _ := xTransport.loadCachedIP(host) cachedIP, _ := xTransport.loadCachedIP(host)
if cachedIP != nil { if cachedIP != nil {
if ipv4 := cachedIP.To4(); ipv4 != nil { if ipv4 := cachedIP.To4(); ipv4 != nil {
ipOnly = ipv4.String() ipOnly = ipv4.String()
@ -242,13 +217,12 @@ func (xTransport *XTransport) rebuildTransport() {
tlsClientConfig.Certificates = []tls.Certificate{cert} tlsClientConfig.Certificates = []tls.Certificate{cert}
} }
overrideCipherSuite := len(xTransport.tlsCipherSuite) > 0 if xTransport.tlsDisableSessionTickets || xTransport.tlsCipherSuite != nil {
if xTransport.tlsDisableSessionTickets || overrideCipherSuite {
tlsClientConfig.SessionTicketsDisabled = xTransport.tlsDisableSessionTickets tlsClientConfig.SessionTicketsDisabled = xTransport.tlsDisableSessionTickets
if !xTransport.tlsDisableSessionTickets { if !xTransport.tlsDisableSessionTickets {
tlsClientConfig.ClientSessionCache = tls.NewLRUClientSessionCache(10) tlsClientConfig.ClientSessionCache = tls.NewLRUClientSessionCache(10)
} }
if overrideCipherSuite { if xTransport.tlsCipherSuite != nil {
tlsClientConfig.PreferServerCipherSuites = false tlsClientConfig.PreferServerCipherSuites = false
tlsClientConfig.CipherSuites = xTransport.tlsCipherSuite tlsClientConfig.CipherSuites = xTransport.tlsCipherSuite
@ -261,7 +235,7 @@ func (xTransport *XTransport) rebuildTransport() {
continue continue
} }
for _, supportedVersion := range suite.SupportedVersions { for _, supportedVersion := range suite.SupportedVersions {
if supportedVersion == tls.VersionTLS12 { if supportedVersion != tls.VersionTLS13 {
for _, expectedSuiteID := range xTransport.tlsCipherSuite { for _, expectedSuiteID := range xTransport.tlsCipherSuite {
if expectedSuiteID == suite.ID { if expectedSuiteID == suite.ID {
compatibleSuitesCount += 1 compatibleSuitesCount += 1
@ -288,7 +262,7 @@ func (xTransport *XTransport) rebuildTransport() {
dlog.Debugf("Dialing for H3: [%v]", addrStr) dlog.Debugf("Dialing for H3: [%v]", addrStr)
host, port := ExtractHostAndPort(addrStr, stamps.DefaultPort) host, port := ExtractHostAndPort(addrStr, stamps.DefaultPort)
ipOnly := host ipOnly := host
cachedIP, _, _ := xTransport.loadCachedIP(host) cachedIP, _ := xTransport.loadCachedIP(host)
network := "udp4" network := "udp4"
if cachedIP != nil { if cachedIP != nil {
if ipv4 := cachedIP.To4(); ipv4 != nil { if ipv4 := cachedIP.To4(); ipv4 != nil {
@ -319,7 +293,7 @@ func (xTransport *XTransport) rebuildTransport() {
tlsCfg.ServerName = host tlsCfg.ServerName = host
return quic.DialEarly(ctx, udpConn, udpAddr, tlsCfg, cfg) return quic.DialEarly(ctx, udpConn, udpAddr, tlsCfg, cfg)
} }
h3Transport := &http3.Transport{DisableCompression: true, TLSClientConfig: &tlsClientConfig, Dial: dial} h3Transport := &http3.RoundTripper{DisableCompression: true, TLSClientConfig: &tlsClientConfig, Dial: dial}
xTransport.h3Transport = h3Transport xTransport.h3Transport = h3Transport
} }
} }
@ -427,12 +401,10 @@ func (xTransport *XTransport) resolveAndUpdateCache(host string) error {
if ParseIP(host) != nil { if ParseIP(host) != nil {
return nil return nil
} }
cachedIP, expired, updating := xTransport.loadCachedIP(host) cachedIP, expired := xTransport.loadCachedIP(host)
if cachedIP != nil && (!expired || updating) { if cachedIP != nil && !expired {
return nil return nil
} }
xTransport.markUpdatingCachedIP(host)
var foundIP net.IP var foundIP net.IP
var ttl time.Duration var ttl time.Duration
var err error var err error
@ -500,6 +472,7 @@ func (xTransport *XTransport) resolveAndUpdateCache(host string) error {
} }
} }
xTransport.saveCachedIP(host, foundIP, ttl) xTransport.saveCachedIP(host, foundIP, ttl)
dlog.Debugf("[%s] IP address [%s] added to the cache, valid for %v", host, foundIP, ttl)
return nil return nil
} }
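
A minimal sketch, with hypothetical types, of the idea behind the `updatingUntil` field and `markUpdatingCachedIP` on the master side: an entry whose TTL has expired keeps being served as long as one refresh holds a short-lived claim on it, so concurrent lookups for the same host are avoided:

```go
package main

import (
	"fmt"
	"time"
)

// entry is a toy stand-in for a cached resolver IP.
type entry struct {
	expiration    time.Time
	updatingUntil time.Time
}

// usable reports whether the entry can still be served: either it has not
// expired, or a refresh is already in flight and will replace it shortly.
func (e entry) usable(now time.Time) bool {
	return now.Before(e.expiration) || now.Before(e.updatingUntil)
}

func main() {
	now := time.Now()
	e := entry{expiration: now.Add(-time.Minute)} // already expired
	fmt.Println(e.usable(now))                    // false: a refresh should start
	e.updatingUntil = now.Add(30 * time.Second)   // a refresh claimed the entry
	fmt.Println(e.usable(now))                    // true: keep serving the stale IP
}
```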

go.mod (38 changes)
View file

@ -1,53 +1,53 @@
module github.com/dnscrypt/dnscrypt-proxy module github.com/dnscrypt/dnscrypt-proxy
go 1.24.1 go 1.23.4
require ( require (
github.com/BurntSushi/toml v1.5.0 github.com/BurntSushi/toml v1.4.0
github.com/VividCortex/ewma v1.2.0 github.com/VividCortex/ewma v1.2.0
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/go-immutable-radix v1.3.1
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb
github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405 github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405
github.com/jedisct1/go-clocksmith v0.0.0-20250224222044-e151f21a353a github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e
github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774 github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774
github.com/jedisct1/go-hpke-compact v0.0.0-20241212093903-5caa4621366f github.com/jedisct1/go-hpke-compact v0.0.0-20241212093903-5caa4621366f
github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7 github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7
github.com/jedisct1/xsecretbox v0.0.0-20241212092125-3afc4917ac41 github.com/jedisct1/xsecretbox v0.0.0-20241212092125-3afc4917ac41
github.com/k-sone/critbitgo v1.4.0 github.com/k-sone/critbitgo v1.4.0
github.com/kardianos/service v1.2.2 github.com/kardianos/service v1.2.2
github.com/lifenjoiner/dhcpdns v0.0.7 github.com/lifenjoiner/dhcpdns v0.0.6
github.com/miekg/dns v1.1.64 github.com/miekg/dns v1.1.62
github.com/opencoff/go-sieve v0.2.1 github.com/opencoff/go-sieve v0.2.1
github.com/powerman/check v1.8.0 github.com/powerman/check v1.8.0
github.com/quic-go/quic-go v0.50.1 github.com/quic-go/quic-go v0.48.2
golang.org/x/crypto v0.36.0 golang.org/x/crypto v0.32.0
golang.org/x/net v0.38.0 golang.org/x/net v0.34.0
golang.org/x/sys v0.31.0 golang.org/x/sys v0.29.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/natefinch/lumberjack.v2 v2.2.1
) )
require ( require (
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect github.com/golang/protobuf v1.5.3 // indirect
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/hashicorp/go-syslog v1.0.0 // indirect github.com/hashicorp/go-syslog v1.0.0 // indirect
github.com/hashicorp/golang-lru v0.5.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/powerman/deepequal v0.1.0 // indirect github.com/powerman/deepequal v0.1.0 // indirect
github.com/quic-go/qpack v0.5.1 // indirect github.com/quic-go/qpack v0.5.1 // indirect
github.com/smartystreets/goconvey v1.8.1 // indirect github.com/smartystreets/goconvey v1.8.1 // indirect
go.uber.org/mock v0.5.0 // indirect go.uber.org/mock v0.5.0 // indirect
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
golang.org/x/mod v0.23.0 // indirect golang.org/x/mod v0.22.0 // indirect
golang.org/x/sync v0.12.0 // indirect golang.org/x/sync v0.10.0 // indirect
golang.org/x/text v0.23.0 // indirect golang.org/x/text v0.21.0 // indirect
golang.org/x/tools v0.30.0 // indirect golang.org/x/tools v0.29.0 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.56.3 // indirect google.golang.org/grpc v1.56.3 // indirect
google.golang.org/protobuf v1.34.2 // indirect google.golang.org/protobuf v1.36.1 // indirect
) )

go.sum (89 changes)
View file

@ -1,29 +1,25 @@
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 h1:3T8ZyTDp5QxTx3NU48JVb2u+75xc040fofcBaN+6jPA= github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 h1:3T8ZyTDp5QxTx3NU48JVb2u+75xc040fofcBaN+6jPA=
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185/go.mod h1:cFRxtTwTOJkz2x3rQUNCYKWC93yP1VKjR8NUhqFxZNU= github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185/go.mod h1:cFRxtTwTOJkz2x3rQUNCYKWC93yP1VKjR8NUhqFxZNU=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
@ -32,15 +28,15 @@ github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwM
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM= github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM=
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405 h1:6j/0utSiy3KhZSpFJgobk+ME1BIwXeq9jepJaDLW3Yg= github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405 h1:6j/0utSiy3KhZSpFJgobk+ME1BIwXeq9jepJaDLW3Yg=
github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405/go.mod h1:OO1HpQNlMCMaPdHPuI00fhChZQZ8npbVTTjMvJUxUqQ= github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405/go.mod h1:OO1HpQNlMCMaPdHPuI00fhChZQZ8npbVTTjMvJUxUqQ=
github.com/jedisct1/go-clocksmith v0.0.0-20250224222044-e151f21a353a h1:8z8OvuZGZYGuvTeT5RD80ii6B6LftADl0EQr2z5asCg= github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e h1:tzG4EjKgHIqKVkLIAC4pXTIapuM2BR05uXokEEysAXA=
github.com/jedisct1/go-clocksmith v0.0.0-20250224222044-e151f21a353a/go.mod h1:SAINchklztk2jcLWJ4bpNF4KnwDUSUTX+cJbspWC2Rw= github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e/go.mod h1:SAINchklztk2jcLWJ4bpNF4KnwDUSUTX+cJbspWC2Rw=
github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774 h1:DobL5d8UxrYzlD0PbU/EVBAGHuDiFyH46gr6povMw50= github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774 h1:DobL5d8UxrYzlD0PbU/EVBAGHuDiFyH46gr6povMw50=
github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774/go.mod h1:mEGEFZsGe4sG5Mb3Xi89pmsy+TZ0946ArbYMGKAM5uA= github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774/go.mod h1:mEGEFZsGe4sG5Mb3Xi89pmsy+TZ0946ArbYMGKAM5uA=
github.com/jedisct1/go-hpke-compact v0.0.0-20241212093903-5caa4621366f h1:h5/HKrLaDfrb/Zi1y8eCsPGQpcOnKIslT/OpFc81i4c= github.com/jedisct1/go-hpke-compact v0.0.0-20241212093903-5caa4621366f h1:h5/HKrLaDfrb/Zi1y8eCsPGQpcOnKIslT/OpFc81i4c=
@ -55,14 +51,14 @@ github.com/k-sone/critbitgo v1.4.0 h1:l71cTyBGeh6X5ATh6Fibgw3+rtNT80BA0uNNWgkPrb
github.com/k-sone/critbitgo v1.4.0/go.mod h1:7E6pyoyADnFxlUBEKcnfS49b7SUAQGMK+OAp/UQvo0s= github.com/k-sone/critbitgo v1.4.0/go.mod h1:7E6pyoyADnFxlUBEKcnfS49b7SUAQGMK+OAp/UQvo0s=
github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60= github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/lifenjoiner/dhcpdns v0.0.7 h1:VJM2aFWHU9V7M5v4UYYNaHhIHZkbdvSI6WGGpq6/TNQ= github.com/lifenjoiner/dhcpdns v0.0.6 h1:rn4Y5RRR5sgQ6RjWenwhA7i/uHzHW9hbZpCobA4CAJs=
github.com/lifenjoiner/dhcpdns v0.0.7/go.mod h1:BixeaGeafYzDIuDCYIUbSOdi4m+TScpzI9cZGYgzgSk= github.com/lifenjoiner/dhcpdns v0.0.6/go.mod h1:BixeaGeafYzDIuDCYIUbSOdi4m+TScpzI9cZGYgzgSk=
github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/opencoff/go-sieve v0.2.1 h1:5Pv6rd3zRquNmXcYHFndjVoolTgcv0ua2XTdMQ+gw0M= github.com/opencoff/go-sieve v0.2.1 h1:5Pv6rd3zRquNmXcYHFndjVoolTgcv0ua2XTdMQ+gw0M=
github.com/opencoff/go-sieve v0.2.1/go.mod h1:CndxLpW4R8fDq04XfBSCOZ+qWwDCcxjfUJbr0GPqWHY= github.com/opencoff/go-sieve v0.2.1/go.mod h1:CndxLpW4R8fDq04XfBSCOZ+qWwDCcxjfUJbr0GPqWHY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@ -75,39 +71,36 @@ github.com/powerman/deepequal v0.1.0 h1:sVwtyTsBuYIvdbLR1O2wzRY63YgPqdGZmk/o80l+
github.com/powerman/deepequal v0.1.0/go.mod h1:3k7aG/slufBhUANdN67o/UPg8i5YaiJ6FmibWX0cn04= github.com/powerman/deepequal v0.1.0/go.mod h1:3k7aG/slufBhUANdN67o/UPg8i5YaiJ6FmibWX0cn04=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q= github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E= github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
@ -115,11 +108,9 @@ google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml Documentation: https://godocs.io/github.com/BurntSushi/toml
See the [releases page](https://github.com/BurntSushi/toml/releases) for a See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show changelog; this information is also in the git tag annotations (e.g. `git show

View file

@ -196,19 +196,6 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
return md.unify(primValue.undecoded, rvalue(v)) return md.unify(primValue.undecoded, rvalue(v))
} }
// markDecodedRecursive is a helper to mark any key under the given tmap as
// decoded, recursing as needed
func markDecodedRecursive(md *MetaData, tmap map[string]any) {
for key := range tmap {
md.decoded[md.context.add(key).String()] = struct{}{}
if tmap, ok := tmap[key].(map[string]any); ok {
md.context = append(md.context, key)
markDecodedRecursive(md, tmap)
md.context = md.context[0 : len(md.context)-1]
}
}
}
// unify performs a sort of type unification based on the structure of `rv`, // unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation. // which is the client representation.
// //
@ -235,16 +222,6 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
if err != nil { if err != nil {
return md.parseErr(err) return md.parseErr(err)
} }
// Assume the Unmarshaler decoded everything, so mark all keys under
// this table as decoded.
if tmap, ok := data.(map[string]any); ok {
markDecodedRecursive(md, tmap)
}
if aot, ok := data.([]map[string]any); ok {
for _, tmap := range aot {
markDecodedRecursive(md, tmap)
}
}
return nil return nil
} }
if v, ok := rvi.(encoding.TextUnmarshaler); ok { if v, ok := rvi.(encoding.TextUnmarshaler); ok {
@ -563,14 +540,12 @@ func (md *MetaData) badtype(dst string, data any) error {
func (md *MetaData) parseErr(err error) error { func (md *MetaData) parseErr(err error) error {
k := md.context.String() k := md.context.String()
d := string(md.data)
return ParseError{ return ParseError{
Message: err.Error(),
err: err,
LastKey: k, LastKey: k,
Position: md.keyInfo[k].pos.withCol(d), Position: md.keyInfo[k].pos,
Line: md.keyInfo[k].pos.Line, Line: md.keyInfo[k].pos.Line,
input: d, err: err,
input: string(md.data),
} }
} }

View file

@ -402,30 +402,31 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
// Sort keys so that we have deterministic output. And write keys directly // Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps. // underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []reflect.Value var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() { for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
mapKeysSub = append(mapKeysSub, mapKey) mapKeysSub = append(mapKeysSub, k)
} else { } else {
mapKeysDirect = append(mapKeysDirect, mapKey) mapKeysDirect = append(mapKeysDirect, k)
} }
} }
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) sort.Strings(mapKeys)
for i, mapKey := range mapKeys { for i, mapKey := range mapKeys {
val := eindirect(rv.MapIndex(mapKey)) val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
if isNil(val) { if isNil(val) {
continue continue
} }
if inline { if inline {
enc.writeKeyValue(Key{mapKey.String()}, val, true) enc.writeKeyValue(Key{mapKey}, val, true)
if trailC || i != len(mapKeys)-1 { if trailC || i != len(mapKeys)-1 {
enc.wf(", ") enc.wf(", ")
} }
} else { } else {
enc.encode(key.add(mapKey.String()), val) enc.encode(key.add(mapKey), val)
} }
} }
} }
@ -440,6 +441,8 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
} }
} }
const is32Bit = (32 << (^uint(0) >> 63)) == 32
func pointerTo(t reflect.Type) reflect.Type { func pointerTo(t reflect.Type) reflect.Type {
if t.Kind() == reflect.Ptr { if t.Kind() == reflect.Ptr {
return pointerTo(t.Elem()) return pointerTo(t.Elem())
@ -474,14 +477,15 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i)) frv := eindirect(rv.Field(i))
// Need to make a copy because ... ehm, I don't know why... I guess if is32Bit {
// allocating a new array can cause it to fail(?) // Copy so it works correct on 32bit archs; not clear why this
// // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
// Done for: https://github.com/BurntSushi/toml/issues/430 // This also works fine on 64bit, but 32bit archs are somewhat
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 // rare and this is a wee bit faster.
copyStart := make([]int, len(start)) copyStart := make([]int, len(start))
copy(copyStart, start) copy(copyStart, start)
start = copyStart start = copyStart
}
// Treat anonymous struct fields with tag names as though they are // Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does. // not anonymous, like encoding/json does.
@ -503,7 +507,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
} }
addFields(rt, rv, nil) addFields(rt, rv, nil)
writeFields := func(fields [][]int, totalFields int) { writeFields := func(fields [][]int) {
for _, fieldIndex := range fields { for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex) fieldType := rt.FieldByIndex(fieldIndex)
fieldVal := rv.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex)
@ -533,7 +537,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline { if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true) enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != totalFields-1 { if fieldIndex[0] != len(fields)-1 {
enc.wf(", ") enc.wf(", ")
} }
} else { } else {
@ -545,10 +549,8 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if inline { if inline {
enc.wf("{") enc.wf("{")
} }
writeFields(fieldsDirect)
l := len(fieldsDirect) + len(fieldsSub) writeFields(fieldsSub)
writeFields(fieldsDirect, l)
writeFields(fieldsSub, l)
if inline { if inline {
enc.wf("}") enc.wf("}")
} }

View file

@ -67,36 +67,21 @@ type ParseError struct {
// Position of an error. // Position of an error.
type Position struct { type Position struct {
Line int // Line number, starting at 1. Line int // Line number, starting at 1.
Col int // Error column, starting at 1.
Start int // Start of error, as byte offset starting at 0. Start int // Start of error, as byte offset starting at 0.
Len int // Length of the error in bytes. Len int // Lenght in bytes.
}
func (p Position) withCol(tomlFile string) Position {
var (
pos int
lines = strings.Split(tomlFile, "\n")
)
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= p.Start {
p.Col = p.Start - pos + 1
if p.Col < 1 { // Should never happen, but just in case.
p.Col = 1
}
break
}
pos += ll
}
return p
} }
func (pe ParseError) Error() string { func (pe ParseError) Error() string {
msg := pe.Message
if msg == "" { // Error from errorf()
msg = pe.err.Error()
}
if pe.LastKey == "" { if pe.LastKey == "" {
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
} }
return fmt.Sprintf("toml: line %d (last key %q): %s", return fmt.Sprintf("toml: line %d (last key %q): %s",
pe.Position.Line, pe.LastKey, pe.Message) pe.Position.Line, pe.LastKey, msg)
} }
// ErrorWithPosition returns the error with detailed location context. // ErrorWithPosition returns the error with detailed location context.
@ -107,19 +92,26 @@ func (pe ParseError) ErrorWithPosition() string {
return pe.Error() return pe.Error()
} }
var (
lines = strings.Split(pe.input, "\n")
col = pe.column(lines)
b = new(strings.Builder)
)
msg := pe.Message
if msg == "" {
msg = pe.err.Error()
}
// TODO: don't show control characters as literals? This may not show up // TODO: don't show control characters as literals? This may not show up
// well everywhere. // well everywhere.
var (
lines = strings.Split(pe.input, "\n")
b = new(strings.Builder)
)
if pe.Position.Len == 1 { if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
pe.Message, pe.Position.Line, pe.Position.Col) msg, pe.Position.Line, col+1)
} else { } else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) msg, pe.Position.Line, col, col+pe.Position.Len)
} }
if pe.Position.Line > 2 { if pe.Position.Line > 2 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
@ -137,7 +129,7 @@ func (pe ParseError) ErrorWithPosition() string {
diff := len(expanded) - len(lines[pe.Position.Line-1]) diff := len(expanded) - len(lines[pe.Position.Line-1])
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
return b.String() return b.String()
} }
@ -159,6 +151,23 @@ func (pe ParseError) ErrorWithUsage() string {
return m return m
} }
func (pe ParseError) column(lines []string) int {
var pos, col int
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= pe.Position.Start {
col = pe.Position.Start - pos
if col < 0 { // Should never happen, but just in case.
col = 0
}
break
}
pos += ll
}
return col
}
func expandTab(s string) string { func expandTab(s string) string {
var ( var (
b strings.Builder b strings.Builder

View file

@ -275,9 +275,7 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
func (lx *lexer) errorf(format string, values ...any) stateFn { func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF { if lx.atEOF {
pos := lx.getPos() pos := lx.getPos()
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { pos.Line--
pos.Line--
}
pos.Len = 1 pos.Len = 1
pos.Start = lx.pos - 1 pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@ -494,9 +492,6 @@ func lexKeyEnd(lx *lexer) stateFn {
lx.emit(itemKeyEnd) lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue) return lexSkip(lx, lexValue)
default: default:
if r == '\n' {
return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
}
return lx.errorf("expected '.' or '=', but got %q instead", r) return lx.errorf("expected '.' or '=', but got %q instead", r)
} }
} }
@ -565,9 +560,6 @@ func lexValue(lx *lexer) stateFn {
if r == eof { if r == eof {
return lx.errorf("unexpected EOF; expected value") return lx.errorf("unexpected EOF; expected value")
} }
if r == '\n' {
return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
}
return lx.errorf("expected value but found %q instead", r) return lx.errorf("expected value but found %q instead", r)
} }
@ -1119,7 +1111,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
case 'x': case 'x':
r = lx.peek() r = lx.peek()
if !isHex(r) { if !isHex(r) {
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
} }
return lexHexInteger return lexHexInteger
} }
@ -1267,6 +1259,23 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
func isBareKeyChar(r rune, tomlNext bool) bool { func isBareKeyChar(r rune, tomlNext bool) bool {
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || if tomlNext {
(r >= '0' && r <= '9') || r == '_' || r == '-' return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-' ||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
(r >= 0x037f && r <= 0x1fff) ||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
(r >= 0x10000 && r <= 0xeffff)
}
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-'
} }


@ -135,6 +135,9 @@ func (k Key) maybeQuoted(i int) string {
// Like append(), but only increase the cap by 1. // Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key { func (k Key) add(piece string) Key {
if cap(k) > len(k) {
return append(k, piece)
}
newKey := make(Key, len(k)+1) newKey := make(Key, len(k)+1)
copy(newKey, k) copy(newKey, k)
newKey[len(k)] = piece newKey[len(k)] = piece


@ -50,6 +50,7 @@ func parse(data string) (p *parser, err error) {
// it anyway. // it anyway.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:] data = data[2:]
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
data = data[3:] data = data[3:]
} }
@ -64,7 +65,7 @@ func parse(data string) (p *parser, err error) {
if i := strings.IndexRune(data[:ex], 0); i > -1 { if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{ return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, Position: Position{Line: 1, Start: i, Len: 1},
Line: 1, Line: 1,
input: data, input: data,
} }
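The hunk above strips a UTF-16 byte-order mark (0xFF 0xFE / 0xFE 0xFF) or a UTF-8 BOM (0xEF 0xBB 0xBF) before lexing, and rejects NUL bytes as a sign of UTF-16 input. A hedged standalone sketch of that pre-processing (the real parser only scans a short window for NUL; this one scans everything):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// stripBOM removes a leading byte-order mark and rejects input that still
// contains NUL bytes, a strong hint the file is UTF-16 rather than UTF-8.
func stripBOM(data string) (string, error) {
	if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 BOM
		data = data[2:]
	} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 BOM
		data = data[3:]
	}
	if strings.IndexByte(data, 0) >= 0 {
		return "", errors.New("input contains NUL bytes; TOML must be UTF-8")
	}
	return data, nil
}

func main() {
	s, err := stripBOM("\xef\xbb\xbfkey = \"value\"")
	fmt.Printf("%q %v\n", s, err) // "key = \"value\"" <nil>
}
```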
@ -91,9 +92,8 @@ func parse(data string) (p *parser, err error) {
func (p *parser) panicErr(it item, err error) { func (p *parser) panicErr(it item, err error) {
panic(ParseError{ panic(ParseError{
Message: err.Error(),
err: err, err: err,
Position: it.pos.withCol(p.lx.input), Position: it.pos,
Line: it.pos.Len, Line: it.pos.Len,
LastKey: p.current(), LastKey: p.current(),
}) })
@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
func (p *parser) panicItemf(it item, format string, v ...any) { func (p *parser) panicItemf(it item, format string, v ...any) {
panic(ParseError{ panic(ParseError{
Message: fmt.Sprintf(format, v...), Message: fmt.Sprintf(format, v...),
Position: it.pos.withCol(p.lx.input), Position: it.pos,
Line: it.pos.Len, Line: it.pos.Len,
LastKey: p.current(), LastKey: p.current(),
}) })
@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
func (p *parser) panicf(format string, v ...any) { func (p *parser) panicf(format string, v ...any) {
panic(ParseError{ panic(ParseError{
Message: fmt.Sprintf(format, v...), Message: fmt.Sprintf(format, v...),
Position: p.pos.withCol(p.lx.input), Position: p.pos,
Line: p.pos.Line, Line: p.pos.Line,
LastKey: p.current(), LastKey: p.current(),
}) })
@ -123,11 +123,10 @@ func (p *parser) next() item {
if it.typ == itemError { if it.typ == itemError {
if it.err != nil { if it.err != nil {
panic(ParseError{ panic(ParseError{
Message: it.err.Error(), Position: it.pos,
err: it.err,
Position: it.pos.withCol(p.lx.input),
Line: it.pos.Line, Line: it.pos.Line,
LastKey: p.current(), LastKey: p.current(),
err: it.err,
}) })
} }
@ -528,7 +527,7 @@ func numUnderscoresOK(s string) bool {
} }
} }
// isHex is a superset of all the permissible characters surrounding an // isHexis a superset of all the permissable characters surrounding an
// underscore. // underscore.
accept = isHex(r) accept = isHex(r)
} }


@ -1,5 +1,24 @@
# Changelog # Changelog
## Release 3.2.3 (2022-11-29)
### Changed
- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
- #353: Updated masterminds/semver which included bug fixes
- #354: Updated golang.org/x/crypto which included bug fixes
## Release 3.2.2 (2021-02-04)
This is a re-release of 3.2.1 to satisfy something with the Go module system.
## Release 3.2.1 (2021-02-04)
### Changed
- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
## Release 3.2.0 (2020-12-14) ## Release 3.2.0 (2020-12-14)
### Added ### Added


@ -1,4 +1,4 @@
# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) # Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
all functions that depend on external (non standard library) or crypto packages all functions that depend on external (non standard library) or crypto packages


@ -1,6 +1,6 @@
# https://taskfile.dev # https://taskfile.dev
version: '2' version: '3'
tasks: tasks:
default: default:


@ -17,6 +17,7 @@ package profile
import ( import (
"errors" "errors"
"sort" "sort"
"strings"
) )
func (p *Profile) decoder() []decoder { func (p *Profile) decoder() []decoder {
@ -121,6 +122,7 @@ func (p *Profile) preEncode() {
} }
p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
p.docURLX = addString(strings, p.DocURL)
p.stringTable = make([]string, len(strings)) p.stringTable = make([]string, len(strings))
for s, i := range strings { for s, i := range strings {
@ -155,6 +157,7 @@ func (p *Profile) encode(b *buffer) {
encodeInt64Opt(b, 12, p.Period) encodeInt64Opt(b, 12, p.Period)
encodeInt64s(b, 13, p.commentX) encodeInt64s(b, 13, p.commentX)
encodeInt64(b, 14, p.defaultSampleTypeX) encodeInt64(b, 14, p.defaultSampleTypeX)
encodeInt64Opt(b, 15, p.docURLX)
} }
var profileDecoder = []decoder{ var profileDecoder = []decoder{
@ -183,12 +186,13 @@ var profileDecoder = []decoder{
// repeated Location location = 4 // repeated Location location = 4
func(b *buffer, m message) error { func(b *buffer, m message) error {
x := new(Location) x := new(Location)
x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer x.Line = b.tmpLines[:0] // Use shared space temporarily
pp := m.(*Profile) pp := m.(*Profile)
pp.Location = append(pp.Location, x) pp.Location = append(pp.Location, x)
err := decodeMessage(b, x) err := decodeMessage(b, x)
var tmp []Line b.tmpLines = x.Line[:0]
x.Line = append(tmp, x.Line...) // Shrink to allocated size // Copy to shrink size and detach from shared space.
x.Line = append([]Line(nil), x.Line...)
return err return err
}, },
// repeated Function function = 5 // repeated Function function = 5
@ -235,6 +239,8 @@ var profileDecoder = []decoder{
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
// int64 defaultSampleType = 14 // int64 defaultSampleType = 14
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
// string doc_link = 15;
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) },
} }
// postDecode takes the unexported fields populated by decode (with // postDecode takes the unexported fields populated by decode (with
@ -252,6 +258,14 @@ func (p *Profile) postDecode() error {
} else { } else {
mappings[m.ID] = m mappings[m.ID] = m
} }
// If this a main linux kernel mapping with a relocation symbol suffix
// ("[kernel.kallsyms]_text"), extract said suffix.
// It is fairly hacky to handle at this level, but the alternatives appear even worse.
const prefix = "[kernel.kallsyms]"
if strings.HasPrefix(m.File, prefix) {
m.KernelRelocationSymbol = m.File[len(prefix):]
}
} }
functions := make(map[uint64]*Function, len(p.Function)) functions := make(map[uint64]*Function, len(p.Function))
@ -298,41 +312,52 @@ func (p *Profile) postDecode() error {
st.Unit, err = getString(p.stringTable, &st.unitX, err) st.Unit, err = getString(p.stringTable, &st.unitX, err)
} }
// Pre-allocate space for all locations.
numLocations := 0
for _, s := range p.Sample { for _, s := range p.Sample {
labels := make(map[string][]string, len(s.labelX)) numLocations += len(s.locationIDX)
numLabels := make(map[string][]int64, len(s.labelX)) }
numUnits := make(map[string][]string, len(s.labelX)) locBuffer := make([]*Location, numLocations)
for _, l := range s.labelX {
var key, value string for _, s := range p.Sample {
key, err = getString(p.stringTable, &l.keyX, err) if len(s.labelX) > 0 {
if l.strX != 0 { labels := make(map[string][]string, len(s.labelX))
value, err = getString(p.stringTable, &l.strX, err) numLabels := make(map[string][]int64, len(s.labelX))
labels[key] = append(labels[key], value) numUnits := make(map[string][]string, len(s.labelX))
} else if l.numX != 0 || l.unitX != 0 { for _, l := range s.labelX {
numValues := numLabels[key] var key, value string
units := numUnits[key] key, err = getString(p.stringTable, &l.keyX, err)
if l.unitX != 0 { if l.strX != 0 {
var unit string value, err = getString(p.stringTable, &l.strX, err)
unit, err = getString(p.stringTable, &l.unitX, err) labels[key] = append(labels[key], value)
units = padStringArray(units, len(numValues)) } else if l.numX != 0 || l.unitX != 0 {
numUnits[key] = append(units, unit) numValues := numLabels[key]
} units := numUnits[key]
numLabels[key] = append(numLabels[key], l.numX) if l.unitX != 0 {
} var unit string
} unit, err = getString(p.stringTable, &l.unitX, err)
if len(labels) > 0 { units = padStringArray(units, len(numValues))
s.Label = labels numUnits[key] = append(units, unit)
} }
if len(numLabels) > 0 { numLabels[key] = append(numLabels[key], l.numX)
s.NumLabel = numLabels
for key, units := range numUnits {
if len(units) > 0 {
numUnits[key] = padStringArray(units, len(numLabels[key]))
} }
} }
s.NumUnit = numUnits if len(labels) > 0 {
s.Label = labels
}
if len(numLabels) > 0 {
s.NumLabel = numLabels
for key, units := range numUnits {
if len(units) > 0 {
numUnits[key] = padStringArray(units, len(numLabels[key]))
}
}
s.NumUnit = numUnits
}
} }
s.Location = make([]*Location, len(s.locationIDX))
s.Location = locBuffer[:len(s.locationIDX)]
locBuffer = locBuffer[len(s.locationIDX):]
for i, lid := range s.locationIDX { for i, lid := range s.locationIDX {
if lid < uint64(len(locationIds)) { if lid < uint64(len(locationIds)) {
s.Location[i] = locationIds[lid] s.Location[i] = locationIds[lid]
@ -363,6 +388,7 @@ func (p *Profile) postDecode() error {
p.commentX = nil p.commentX = nil
p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
p.DocURL, err = getString(p.stringTable, &p.docURLX, err)
p.stringTable = nil p.stringTable = nil
return err return err
} }
@ -509,6 +535,7 @@ func (p *Line) decoder() []decoder {
func (p *Line) encode(b *buffer) { func (p *Line) encode(b *buffer) {
encodeUint64Opt(b, 1, p.functionIDX) encodeUint64Opt(b, 1, p.functionIDX)
encodeInt64Opt(b, 2, p.Line) encodeInt64Opt(b, 2, p.Line)
encodeInt64Opt(b, 3, p.Column)
} }
var lineDecoder = []decoder{ var lineDecoder = []decoder{
@ -517,6 +544,8 @@ var lineDecoder = []decoder{
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
// optional int64 line = 2 // optional int64 line = 2
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
// optional int64 column = 3
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) },
} }
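Several of the hunks above (the new `docURLX` field, `Line.Column`, the label decoding) revolve around pprof's string table: every string in the profile is interned once and referenced elsewhere by an int64 index, with index 0 reserved for the empty string. A small illustrative sketch of that encoding pattern (the helper and URL below are mine, not the package's):

```go
package main

import "fmt"

// addString interns s and returns its table index; index 0 is reserved for "".
// Illustrative only: the real package keeps a map while encoding and a
// []string slice after decoding.
func addString(table map[string]int64, s string) int64 {
	if i, ok := table[s]; ok {
		return i
	}
	i := int64(len(table))
	table[s] = i
	return i
}

func main() {
	table := map[string]int64{"": 0}
	docURLX := addString(table, "https://example.org/profile-docs") // hypothetical DocURL
	fmt.Println(docURLX, addString(table, "https://example.org/profile-docs")) // 1 1
}
```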
func (p *Function) decoder() []decoder { func (p *Function) decoder() []decoder {


@ -22,6 +22,10 @@ import "regexp"
// samples where at least one frame matches focus but none match ignore. // samples where at least one frame matches focus but none match ignore.
// Returns true is the corresponding regexp matched at least one sample. // Returns true is the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
if focus == nil && ignore == nil && hide == nil && show == nil {
fm = true // Missing focus implies a match
return
}
focusOrIgnore := make(map[uint64]bool) focusOrIgnore := make(map[uint64]bool)
hidden := make(map[uint64]bool) hidden := make(map[uint64]bool)
for _, l := range p.Location { for _, l := range p.Location {


@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte
} }
// Strip out addresses for better merge. // Strip out addresses for better merge.
if err = p.Aggregate(true, true, true, true, false); err != nil { if err = p.Aggregate(true, true, true, true, false, false); err != nil {
return nil, err return nil, err
} }
@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) {
} }
// Strip out addresses for better merge. // Strip out addresses for better merge.
if err = p.Aggregate(true, true, true, true, false); err != nil { if err = p.Aggregate(true, true, true, true, false, false); err != nil {
return nil, err return nil, err
} }


@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) {
// //
// The general format for profilez samples is a sequence of words in // The general format for profilez samples is a sequence of words in
// binary format. The first words are a header with the following data: // binary format. The first words are a header with the following data:
// 1st word -- 0 //
// 2nd word -- 3 // 1st word -- 0
// 3rd word -- 0 if a c++ application, 1 if a java application. // 2nd word -- 3
// 4th word -- Sampling period (in microseconds). // 3rd word -- 0 if a c++ application, 1 if a java application.
// 5th word -- Padding. // 4th word -- Sampling period (in microseconds).
// 5th word -- Padding.
func parseCPU(b []byte) (*Profile, error) { func parseCPU(b []byte) (*Profile, error) {
var parse func([]byte) (uint64, []byte) var parse func([]byte) (uint64, []byte)
var n1, n2, n3, n4, n5 uint64 var n1, n2, n3, n4, n5 uint64
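The reflowed comments above describe the legacy profilez header: five machine words holding 0, 3, a C++/Java flag, the sampling period in microseconds, and padding. A minimal sketch of validating such a header, assuming 64-bit little-endian words (an assumption: the real parser also probes 32-bit and big-endian layouts via the `parse` callback):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// parseProfilezHeader checks the five-word legacy CPU profile header and
// returns the sampling period in microseconds.
func parseProfilezHeader(b []byte) (periodMicros uint64, err error) {
	if len(b) < 5*8 {
		return 0, errors.New("short header")
	}
	w := make([]uint64, 5)
	for i := range w {
		w[i] = binary.LittleEndian.Uint64(b[i*8:])
	}
	if w[0] != 0 || w[1] != 3 {
		return 0, errors.New("not a profilez header")
	}
	return w[3], nil // w[2] is the C++/Java flag, w[4] is padding
}

func main() {
	hdr := make([]byte, 40)
	binary.LittleEndian.PutUint64(hdr[8:], 3)      // 2nd word
	binary.LittleEndian.PutUint64(hdr[24:], 10000) // sampling period
	p, err := parseProfilezHeader(hdr)
	fmt.Println(p, err) // 10000 <nil>
}
```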
@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) {
// //
// profilez samples are a repeated sequence of stack frames of the // profilez samples are a repeated sequence of stack frames of the
// form: // form:
// 1st word -- The number of times this stack was encountered. //
// 2nd word -- The size of the stack (StackSize). // 1st word -- The number of times this stack was encountered.
// 3rd word -- The first address on the stack. // 2nd word -- The size of the stack (StackSize).
// ... // 3rd word -- The first address on the stack.
// StackSize + 2 -- The last address on the stack // ...
// StackSize + 2 -- The last address on the stack
//
// The last stack trace is of the form: // The last stack trace is of the form:
// 1st word -- 0 //
// 2nd word -- 1 // 1st word -- 0
// 3rd word -- 0 // 2nd word -- 1
// 3rd word -- 0
// //
// Addresses from stack traces may point to the next instruction after // Addresses from stack traces may point to the next instruction after
// each call. Optionally adjust by -1 to land somewhere on the actual // each call. Optionally adjust by -1 to land somewhere on the actual
@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) {
// Recognize each thread and populate profile samples. // Recognize each thread and populate profile samples.
for !isMemoryMapSentinel(line) { for !isMemoryMapSentinel(line) {
if strings.HasPrefix(line, "---- no stack trace for") { if strings.HasPrefix(line, "---- no stack trace for") {
line = ""
break break
} }
if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {


@ -15,6 +15,7 @@
package profile package profile
import ( import (
"encoding/binary"
"fmt" "fmt"
"sort" "sort"
"strconv" "strconv"
@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) {
for _, src := range srcs { for _, src := range srcs {
// Clear the profile-specific hash tables // Clear the profile-specific hash tables
pm.locationsByID = make(map[uint64]*Location, len(src.Location)) pm.locationsByID = makeLocationIDMap(len(src.Location))
pm.functionsByID = make(map[uint64]*Function, len(src.Function)) pm.functionsByID = make(map[uint64]*Function, len(src.Function))
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
@ -136,7 +137,7 @@ type profileMerger struct {
p *Profile p *Profile
// Memoization tables within a profile. // Memoization tables within a profile.
locationsByID map[uint64]*Location locationsByID locationIDMap
functionsByID map[uint64]*Function functionsByID map[uint64]*Function
mappingsByID map[uint64]mapInfo mappingsByID map[uint64]mapInfo
@ -153,6 +154,16 @@ type mapInfo struct {
} }
func (pm *profileMerger) mapSample(src *Sample) *Sample { func (pm *profileMerger) mapSample(src *Sample) *Sample {
// Check memoization table
k := pm.sampleKey(src)
if ss, ok := pm.samples[k]; ok {
for i, v := range src.Value {
ss.Value[i] += v
}
return ss
}
// Make new sample.
s := &Sample{ s := &Sample{
Location: make([]*Location, len(src.Location)), Location: make([]*Location, len(src.Location)),
Value: make([]int64, len(src.Value)), Value: make([]int64, len(src.Value)),
@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample {
s.NumLabel[k] = vv s.NumLabel[k] = vv
s.NumUnit[k] = uu s.NumUnit[k] = uu
} }
// Check memoization table. Must be done on the remapped location to
// account for the remapped mapping. Add current values to the
// existing sample.
k := s.key()
if ss, ok := pm.samples[k]; ok {
for i, v := range src.Value {
ss.Value[i] += v
}
return ss
}
copy(s.Value, src.Value) copy(s.Value, src.Value)
pm.samples[k] = s pm.samples[k] = s
pm.p.Sample = append(pm.p.Sample, s) pm.p.Sample = append(pm.p.Sample, s)
return s return s
} }
// key generates sampleKey to be used as a key for maps. func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
func (sample *Sample) key() sampleKey { // Accumulate contents into a string.
ids := make([]string, len(sample.Location)) var buf strings.Builder
for i, l := range sample.Location { buf.Grow(64) // Heuristic to avoid extra allocs
ids[i] = strconv.FormatUint(l.ID, 16)
// encode a number
putNumber := func(v uint64) {
var num [binary.MaxVarintLen64]byte
n := binary.PutUvarint(num[:], v)
buf.Write(num[:n])
} }
labels := make([]string, 0, len(sample.Label)) // encode a string prefixed with its length.
for k, v := range sample.Label { putDelimitedString := func(s string) {
labels = append(labels, fmt.Sprintf("%q%q", k, v)) putNumber(uint64(len(s)))
buf.WriteString(s)
} }
sort.Strings(labels)
numlabels := make([]string, 0, len(sample.NumLabel)) for _, l := range sample.Location {
for k, v := range sample.NumLabel { // Get the location in the merged profile, which may have a different ID.
numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) if loc := pm.mapLocation(l); loc != nil {
putNumber(loc.ID)
}
} }
sort.Strings(numlabels) putNumber(0) // Delimiter
return sampleKey{ for _, l := range sortedKeys1(sample.Label) {
strings.Join(ids, "|"), putDelimitedString(l)
strings.Join(labels, ""), values := sample.Label[l]
strings.Join(numlabels, ""), putNumber(uint64(len(values)))
for _, v := range values {
putDelimitedString(v)
}
} }
for _, l := range sortedKeys2(sample.NumLabel) {
putDelimitedString(l)
values := sample.NumLabel[l]
putNumber(uint64(len(values)))
for _, v := range values {
putNumber(uint64(v))
}
units := sample.NumUnit[l]
putNumber(uint64(len(units)))
for _, v := range units {
putDelimitedString(v)
}
}
return sampleKey(buf.String())
} }
type sampleKey struct { type sampleKey string
locations string
labels string // sortedKeys1 returns the sorted keys found in a string->[]string map.
numlabels string //
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys2 and made into a generic function.
func sortedKeys1(m map[string][]string) []string {
if len(m) == 0 {
return nil
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys1 and made into a generic function.
func sortedKeys2(m map[string][]int64) []string {
if len(m) == 0 {
return nil
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
} }
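The new `sampleKey` above replaces string concatenation with a buffer of uvarints and length-prefixed strings, which cannot produce accidental collisions and avoids the per-sample `fmt`/`strconv` work. A self-contained sketch of the same framing trick:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"strings"
)

// delimitedKey builds an unambiguous map key from a list of strings by
// prefixing each element with its length as a uvarint.
func delimitedKey(parts []string) string {
	var b strings.Builder
	var tmp [binary.MaxVarintLen64]byte
	for _, p := range parts {
		n := binary.PutUvarint(tmp[:], uint64(len(p)))
		b.Write(tmp[:n])
		b.WriteString(p)
	}
	return b.String()
}

func main() {
	// Unlike plain concatenation, {"ab","c"} and {"a","bc"} cannot collide.
	fmt.Println(delimitedKey([]string{"ab", "c"}) == delimitedKey([]string{"a", "bc"})) // false
}
```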
func (pm *profileMerger) mapLocation(src *Location) *Location { func (pm *profileMerger) mapLocation(src *Location) *Location {
@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
return nil return nil
} }
if l, ok := pm.locationsByID[src.ID]; ok { if l := pm.locationsByID.get(src.ID); l != nil {
return l return l
} }
@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
// account for the remapped mapping ID. // account for the remapped mapping ID.
k := l.key() k := l.key()
if ll, ok := pm.locations[k]; ok { if ll, ok := pm.locations[k]; ok {
pm.locationsByID[src.ID] = ll pm.locationsByID.set(src.ID, ll)
return ll return ll
} }
pm.locationsByID[src.ID] = l pm.locationsByID.set(src.ID, l)
pm.locations[k] = l pm.locations[k] = l
pm.p.Location = append(pm.p.Location, l) pm.p.Location = append(pm.p.Location, l)
return l return l
@ -269,12 +326,13 @@ func (l *Location) key() locationKey {
key.addr -= l.Mapping.Start key.addr -= l.Mapping.Start
key.mappingID = l.Mapping.ID key.mappingID = l.Mapping.ID
} }
lines := make([]string, len(l.Line)*2) lines := make([]string, len(l.Line)*3)
for i, line := range l.Line { for i, line := range l.Line {
if line.Function != nil { if line.Function != nil {
lines[i*2] = strconv.FormatUint(line.Function.ID, 16) lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
} }
lines[i*2+1] = strconv.FormatInt(line.Line, 16) lines[i*2+1] = strconv.FormatInt(line.Line, 16)
lines[i*2+2] = strconv.FormatInt(line.Column, 16)
} }
key.lines = strings.Join(lines, "|") key.lines = strings.Join(lines, "|")
return key return key
@ -303,16 +361,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
return mi return mi
} }
m := &Mapping{ m := &Mapping{
ID: uint64(len(pm.p.Mapping) + 1), ID: uint64(len(pm.p.Mapping) + 1),
Start: src.Start, Start: src.Start,
Limit: src.Limit, Limit: src.Limit,
Offset: src.Offset, Offset: src.Offset,
File: src.File, File: src.File,
BuildID: src.BuildID, KernelRelocationSymbol: src.KernelRelocationSymbol,
HasFunctions: src.HasFunctions, BuildID: src.BuildID,
HasFilenames: src.HasFilenames, HasFunctions: src.HasFunctions,
HasLineNumbers: src.HasLineNumbers, HasFilenames: src.HasFilenames,
HasInlineFrames: src.HasInlineFrames, HasLineNumbers: src.HasLineNumbers,
HasInlineFrames: src.HasInlineFrames,
} }
pm.p.Mapping = append(pm.p.Mapping, m) pm.p.Mapping = append(pm.p.Mapping, m)
@ -360,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line {
ln := Line{ ln := Line{
Function: pm.mapFunction(src.Function), Function: pm.mapFunction(src.Function),
Line: src.Line, Line: src.Line,
Column: src.Column,
} }
return ln return ln
} }
@ -416,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
var timeNanos, durationNanos, period int64 var timeNanos, durationNanos, period int64
var comments []string var comments []string
seenComments := map[string]bool{} seenComments := map[string]bool{}
var docURL string
var defaultSampleType string var defaultSampleType string
for _, s := range srcs { for _, s := range srcs {
if timeNanos == 0 || s.TimeNanos < timeNanos { if timeNanos == 0 || s.TimeNanos < timeNanos {
@ -434,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
if defaultSampleType == "" { if defaultSampleType == "" {
defaultSampleType = s.DefaultSampleType defaultSampleType = s.DefaultSampleType
} }
if docURL == "" {
docURL = s.DocURL
}
} }
p := &Profile{ p := &Profile{
@ -449,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
Comments: comments, Comments: comments,
DefaultSampleType: defaultSampleType, DefaultSampleType: defaultSampleType,
DocURL: docURL,
} }
copy(p.SampleType, srcs[0].SampleType) copy(p.SampleType, srcs[0].SampleType)
return p, nil return p, nil
@ -479,3 +544,131 @@ func (p *Profile) compatible(pb *Profile) error {
func equalValueType(st1, st2 *ValueType) bool { func equalValueType(st1, st2 *ValueType) bool {
return st1.Type == st2.Type && st1.Unit == st2.Unit return st1.Type == st2.Type && st1.Unit == st2.Unit
} }
// locationIDMap is like a map[uint64]*Location, but provides efficiency for
// ids that are densely numbered, which is often the case.
type locationIDMap struct {
dense []*Location // indexed by id for id < len(dense)
sparse map[uint64]*Location // indexed by id for id >= len(dense)
}
func makeLocationIDMap(n int) locationIDMap {
return locationIDMap{
dense: make([]*Location, n),
sparse: map[uint64]*Location{},
}
}
func (lm locationIDMap) get(id uint64) *Location {
if id < uint64(len(lm.dense)) {
return lm.dense[int(id)]
}
return lm.sparse[id]
}
func (lm locationIDMap) set(id uint64, loc *Location) {
if id < uint64(len(lm.dense)) {
lm.dense[id] = loc
return
}
lm.sparse[id] = loc
}
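`locationIDMap` above is a hybrid lookup: IDs below the preallocated length hit a dense slice, everything else falls back to a map, which is fast when profiles number their locations 1..N as they usually do. A hedged sketch of the same pattern with plain strings instead of `*Location`:

```go
package main

import "fmt"

type idMap struct {
	dense  []string          // ids < len(dense)
	sparse map[uint64]string // everything else
}

func newIDMap(n int) idMap {
	return idMap{dense: make([]string, n), sparse: map[uint64]string{}}
}

func (m idMap) set(id uint64, v string) {
	if id < uint64(len(m.dense)) {
		m.dense[id] = v
		return
	}
	m.sparse[id] = v
}

func (m idMap) get(id uint64) string {
	if id < uint64(len(m.dense)) {
		return m.dense[id]
	}
	return m.sparse[id]
}

func main() {
	m := newIDMap(4)
	m.set(1, "main")         // dense path
	m.set(1_000_000, "libc") // sparse path
	fmt.Println(m.get(1), m.get(1_000_000))
}
```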
// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
// keeps sample types that appear in all profiles only and drops/reorders the
// sample types as necessary.
//
// In the case of sample types order is not the same for given profiles the
// order is derived from the first profile.
//
// Profiles are modified in-place.
//
// It returns an error if the sample type's intersection is empty.
func CompatibilizeSampleTypes(ps []*Profile) error {
sTypes := commonSampleTypes(ps)
if len(sTypes) == 0 {
return fmt.Errorf("profiles have empty common sample type list")
}
for _, p := range ps {
if err := compatibilizeSampleTypes(p, sTypes); err != nil {
return err
}
}
return nil
}
// commonSampleTypes returns sample types that appear in all profiles in the
// order how they ordered in the first profile.
func commonSampleTypes(ps []*Profile) []string {
if len(ps) == 0 {
return nil
}
sTypes := map[string]int{}
for _, p := range ps {
for _, st := range p.SampleType {
sTypes[st.Type]++
}
}
var res []string
for _, st := range ps[0].SampleType {
if sTypes[st.Type] == len(ps) {
res = append(res, st.Type)
}
}
return res
}
// compatibilizeSampleTypes drops sample types that are not present in sTypes
// list and reorder them if needed.
//
// It sets DefaultSampleType to sType[0] if it is not in sType list.
//
// It assumes that all sample types from the sTypes list are present in the
// given profile otherwise it returns an error.
func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
if len(sTypes) == 0 {
return fmt.Errorf("sample type list is empty")
}
defaultSampleType := sTypes[0]
reMap, needToModify := make([]int, len(sTypes)), false
for i, st := range sTypes {
if st == p.DefaultSampleType {
defaultSampleType = p.DefaultSampleType
}
idx := searchValueType(p.SampleType, st)
if idx < 0 {
return fmt.Errorf("%q sample type is not found in profile", st)
}
reMap[i] = idx
if idx != i {
needToModify = true
}
}
if !needToModify && len(sTypes) == len(p.SampleType) {
return nil
}
p.DefaultSampleType = defaultSampleType
oldSampleTypes := p.SampleType
p.SampleType = make([]*ValueType, len(sTypes))
for i, idx := range reMap {
p.SampleType[i] = oldSampleTypes[idx]
}
values := make([]int64, len(sTypes))
for _, s := range p.Sample {
for i, idx := range reMap {
values[i] = s.Value[idx]
}
s.Value = s.Value[:len(values)]
copy(s.Value, values)
}
return nil
}
func searchValueType(vts []*ValueType, s string) int {
for i, vt := range vts {
if vt.Type == s {
return i
}
}
return -1
}
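`CompatibilizeSampleTypes` trims every profile down to the sample types they all share, ordered as in the first profile, so that `Merge` can combine them. A hedged usage sketch against the exported API shown above (package path assumed to be the upstream github.com/google/pprof/profile module vendored here):

```go
package pprofutil

import (
	"fmt"

	"github.com/google/pprof/profile"
)

// mergeCompatible trims profiles to their common sample types, then merges them.
func mergeCompatible(ps []*profile.Profile) (*profile.Profile, error) {
	if err := profile.CompatibilizeSampleTypes(ps); err != nil {
		return nil, fmt.Errorf("no common sample types: %w", err)
	}
	return profile.Merge(ps)
}
```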


@ -21,7 +21,6 @@ import (
"compress/gzip" "compress/gzip"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math" "math"
"path/filepath" "path/filepath"
"regexp" "regexp"
@ -40,6 +39,7 @@ type Profile struct {
Location []*Location Location []*Location
Function []*Function Function []*Function
Comments []string Comments []string
DocURL string
DropFrames string DropFrames string
KeepFrames string KeepFrames string
@ -54,6 +54,7 @@ type Profile struct {
encodeMu sync.Mutex encodeMu sync.Mutex
commentX []int64 commentX []int64
docURLX int64
dropFramesX int64 dropFramesX int64
keepFramesX int64 keepFramesX int64
stringTable []string stringTable []string
@ -73,9 +74,23 @@ type ValueType struct {
type Sample struct { type Sample struct {
Location []*Location Location []*Location
Value []int64 Value []int64
Label map[string][]string // Label is a per-label-key map to values for string labels.
//
// In general, having multiple values for the given label key is strongly
// discouraged - see docs for the sample label field in profile.proto. The
// main reason this unlikely state is tracked here is to make the
// decoding->encoding roundtrip not lossy. But we expect that the value
// slices present in this map are always of length 1.
Label map[string][]string
// NumLabel is a per-label-key map to values for numeric labels. See a note
// above on handling multiple values for a label.
NumLabel map[string][]int64 NumLabel map[string][]int64
NumUnit map[string][]string // NumUnit is a per-label-key map to the unit names of corresponding numeric
// label values. The unit info may be missing even if the label is in
// NumLabel, see the docs in profile.proto for details. When the value is
// slice is present and not nil, its length must be equal to the length of
// the corresponding value slice in NumLabel.
NumUnit map[string][]string
locationIDX []uint64 locationIDX []uint64
labelX []label labelX []label
@ -106,6 +121,15 @@ type Mapping struct {
fileX int64 fileX int64
buildIDX int64 buildIDX int64
// Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
// For linux kernel mappings generated by some tools, correct symbolization depends
// on knowing which of the two possible relocation symbols was used for `Start`.
// This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
//
// Note, this public field is not persisted in the proto. For the purposes of
// copying / merging / hashing profiles, it is considered subsumed by `File`.
KernelRelocationSymbol string
} }
// Location corresponds to Profile.Location // Location corresponds to Profile.Location
@ -123,6 +147,7 @@ type Location struct {
type Line struct { type Line struct {
Function *Function Function *Function
Line int64 Line int64
Column int64
functionIDX uint64 functionIDX uint64
} }
@ -144,7 +169,7 @@ type Function struct {
// may be a gzip-compressed encoded protobuf or one of many legacy // may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future. // profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) { func Parse(r io.Reader) (*Profile, error) {
data, err := ioutil.ReadAll(r) data, err := io.ReadAll(r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -159,7 +184,7 @@ func ParseData(data []byte) (*Profile, error) {
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
gz, err := gzip.NewReader(bytes.NewBuffer(data)) gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err == nil { if err == nil {
data, err = ioutil.ReadAll(gz) data, err = io.ReadAll(gz)
} }
if err != nil { if err != nil {
return nil, fmt.Errorf("decompressing profile: %v", err) return nil, fmt.Errorf("decompressing profile: %v", err)
@ -414,7 +439,7 @@ func (p *Profile) CheckValid() error {
// Aggregate merges the locations in the profile into equivalence // Aggregate merges the locations in the profile into equivalence
// classes preserving the request attributes. It also updates the // classes preserving the request attributes. It also updates the
// samples to point to the merged locations. // samples to point to the merged locations.
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
for _, m := range p.Mapping { for _, m := range p.Mapping {
m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasInlineFrames = m.HasInlineFrames && inlineFrame
m.HasFunctions = m.HasFunctions && function m.HasFunctions = m.HasFunctions && function
@ -436,7 +461,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address
} }
// Aggregate locations // Aggregate locations
if !inlineFrame || !address || !linenumber { if !inlineFrame || !address || !linenumber || !columnnumber {
for _, l := range p.Location { for _, l := range p.Location {
if !inlineFrame && len(l.Line) > 1 { if !inlineFrame && len(l.Line) > 1 {
l.Line = l.Line[len(l.Line)-1:] l.Line = l.Line[len(l.Line)-1:]
@ -444,6 +469,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address
if !linenumber { if !linenumber {
for i := range l.Line { for i := range l.Line {
l.Line[i].Line = 0 l.Line[i].Line = 0
l.Line[i].Column = 0
}
}
if !columnnumber {
for i := range l.Line {
l.Line[i].Column = 0
} }
} }
if !address { if !address {
@ -526,6 +557,9 @@ func (p *Profile) String() string {
for _, c := range p.Comments { for _, c := range p.Comments {
ss = append(ss, "Comment: "+c) ss = append(ss, "Comment: "+c)
} }
if url := p.DocURL; url != "" {
ss = append(ss, fmt.Sprintf("Doc: %s", url))
}
if pt := p.PeriodType; pt != nil { if pt := p.PeriodType; pt != nil {
ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
} }
@ -605,10 +639,11 @@ func (l *Location) string() string {
for li := range l.Line { for li := range l.Line {
lnStr := "??" lnStr := "??"
if fn := l.Line[li].Function; fn != nil { if fn := l.Line[li].Function; fn != nil {
lnStr = fmt.Sprintf("%s %s:%d s=%d", lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
fn.Name, fn.Name,
fn.Filename, fn.Filename,
l.Line[li].Line, l.Line[li].Line,
l.Line[li].Column,
fn.StartLine) fn.StartLine)
if fn.Name != fn.SystemName { if fn.Name != fn.SystemName {
lnStr = lnStr + "(" + fn.SystemName + ")" lnStr = lnStr + "(" + fn.SystemName + ")"
@ -707,6 +742,35 @@ func (s *Sample) HasLabel(key, value string) bool {
return false return false
} }
// SetNumLabel sets the specified key to the specified value for all samples in the
// profile. "unit" is a slice that describes the units that each corresponding member
// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
// unit for a given value, that member of "unit" should be the empty string.
// "unit" must either have the same length as "value", or be nil.
func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
for _, sample := range p.Sample {
if sample.NumLabel == nil {
sample.NumLabel = map[string][]int64{key: value}
} else {
sample.NumLabel[key] = value
}
if sample.NumUnit == nil {
sample.NumUnit = map[string][]string{key: unit}
} else {
sample.NumUnit[key] = unit
}
}
}
// RemoveNumLabel removes all numerical labels associated with the specified key for all
// samples in the profile.
func (p *Profile) RemoveNumLabel(key string) {
for _, sample := range p.Sample {
delete(sample.NumLabel, key)
delete(sample.NumUnit, key)
}
}
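The new `SetNumLabel`/`RemoveNumLabel` helpers attach (or drop) a numeric label, with optional units, on every sample in the profile. A short usage sketch; the "bytes" key is a hypothetical label of my choosing, and the profile is assumed to be already parsed:

```go
package pprofutil

import "github.com/google/pprof/profile"

// tagAllocSize attaches a hypothetical "bytes" numeric label to every sample.
func tagAllocSize(p *profile.Profile, size int64) {
	p.SetNumLabel("bytes", []int64{size}, []string{"bytes"})
	// ...and to strip it again later:
	// p.RemoveNumLabel("bytes")
}
```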
// DiffBaseSample returns true if a sample belongs to the diff base and false // DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise. // otherwise.
func (s *Sample) DiffBaseSample() bool { func (s *Sample) DiffBaseSample() bool {
@ -785,10 +849,10 @@ func (p *Profile) HasFileLines() bool {
// Unsymbolizable returns true if a mapping points to a binary for which // Unsymbolizable returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are // locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code. // "[vdso]", "[vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool { func (m *Mapping) Unsymbolizable() bool {
name := filepath.Base(m.File) name := filepath.Base(m.File)
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
} }
// Copy makes a fully independent copy of a profile. // Copy makes a fully independent copy of a profile.


@ -39,11 +39,12 @@ import (
) )
type buffer struct { type buffer struct {
field int // field tag field int // field tag
typ int // proto wire type code for field typ int // proto wire type code for field
u64 uint64 u64 uint64
data []byte data []byte
tmp [16]byte tmp [16]byte
tmpLines []Line // temporary storage used while decoding "repeated Line".
} }
type decoder func(*buffer, message) error type decoder func(*buffer, message) error
@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error {
if b.typ == 2 { if b.typ == 2 {
// Packed encoding // Packed encoding
data := b.data data := b.data
tmp := make([]int64, 0, len(data)) // Maximally sized
for len(data) > 0 { for len(data) > 0 {
var u uint64 var u uint64
var err error var err error
@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error {
if u, data, err = decodeVarint(data); err != nil { if u, data, err = decodeVarint(data); err != nil {
return err return err
} }
tmp = append(tmp, int64(u)) *x = append(*x, int64(u))
} }
*x = append(*x, tmp...)
return nil return nil
} }
var i int64 var i int64
@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
if b.typ == 2 { if b.typ == 2 {
data := b.data data := b.data
// Packed encoding // Packed encoding
tmp := make([]uint64, 0, len(data)) // Maximally sized
for len(data) > 0 { for len(data) > 0 {
var u uint64 var u uint64
var err error var err error
@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
if u, data, err = decodeVarint(data); err != nil { if u, data, err = decodeVarint(data); err != nil {
return err return err
} }
tmp = append(tmp, u) *x = append(*x, u)
} }
*x = append(*x, tmp...)
return nil return nil
} }
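The `decodeInt64s`/`decodeUint64s` change above appends straight into the destination slice instead of staging values in a maximally sized temporary. The wire format being decoded is protobuf packed encoding: a length-delimited field whose payload is a run of varints. A minimal sketch of decoding such a payload with the standard library:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodePacked reads consecutive uvarints out of a packed field payload.
func decodePacked(data []byte) ([]uint64, error) {
	var out []uint64
	for len(data) > 0 {
		u, n := binary.Uvarint(data)
		if n <= 0 {
			return nil, errors.New("bad varint")
		}
		out = append(out, u)
		data = data[n:]
	}
	return out, nil
}

func main() {
	var buf []byte
	for _, v := range []uint64{1, 300, 7} {
		buf = binary.AppendUvarint(buf, v)
	}
	vals, err := decodePacked(buf)
	fmt.Println(vals, err) // [1 300 7] <nil>
}
```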
var u uint64 var u uint64


@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
prune := make(map[uint64]bool) prune := make(map[uint64]bool)
pruneBeneath := make(map[uint64]bool) pruneBeneath := make(map[uint64]bool)
// simplifyFunc can be expensive, so cache results.
// Note that the same function name can be encountered many times due
// different lines and addresses in the same function.
pruneCache := map[string]bool{} // Map from function to whether or not to prune
pruneFromHere := func(s string) bool {
if r, ok := pruneCache[s]; ok {
return r
}
funcName := simplifyFunc(s)
if dropRx.MatchString(funcName) {
if keepRx == nil || !keepRx.MatchString(funcName) {
pruneCache[s] = true
return true
}
}
pruneCache[s] = false
return false
}
for _, loc := range p.Location { for _, loc := range p.Location {
var i int var i int
for i = len(loc.Line) - 1; i >= 0; i-- { for i = len(loc.Line) - 1; i >= 0; i-- {
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
funcName := simplifyFunc(fn.Name) if pruneFromHere(fn.Name) {
if dropRx.MatchString(funcName) { break
if keepRx == nil || !keepRx.MatchString(funcName) {
break
}
} }
} }
} }
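The `Prune` change above caches the drop/keep decision per raw function name, since `simplifyFunc` plus the regexp matches are comparatively expensive and the same name recurs across many lines and addresses. The pattern is plain memoization of a pure predicate; a hedged sketch (the `simplifyFunc` stand-in below is mine):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// memoize wraps a pure string predicate with a result cache.
func memoize(f func(string) bool) func(string) bool {
	cache := map[string]bool{}
	return func(s string) bool {
		if r, ok := cache[s]; ok {
			return r
		}
		r := f(s)
		cache[s] = r
		return r
	}
}

func main() {
	dropRx := regexp.MustCompile(`^runtime\.`)
	shouldPrune := memoize(func(name string) bool {
		simplified := strings.TrimSuffix(name, " (inline)") // stand-in for simplifyFunc
		return dropRx.MatchString(simplified)
	})
	fmt.Println(shouldPrune("runtime.mallocgc"), shouldPrune("main.work")) // true false
}
```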


@ -1,3 +1,5 @@
Copyright (c) 2014 HashiCorp, Inc.
Mozilla Public License, version 2.0 Mozilla Public License, version 2.0
1. Definitions 1. Definitions


@ -25,7 +25,7 @@ type entry struct {
// NewLRU constructs an LRU of the given size // NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
if size <= 0 { if size <= 0 {
return nil, errors.New("Must provide a positive size") return nil, errors.New("must provide a positive size")
} }
c := &LRU{ c := &LRU{
size: size, size: size,
@ -73,6 +73,9 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) {
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok { if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent) c.evictList.MoveToFront(ent)
if ent.Value.(*entry) == nil {
return nil, false
}
return ent.Value.(*entry).value, true return ent.Value.(*entry).value, true
} }
return return
@ -106,7 +109,7 @@ func (c *LRU) Remove(key interface{}) (present bool) {
} }
// RemoveOldest removes the oldest item from the cache. // RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) {
ent := c.evictList.Back() ent := c.evictList.Back()
if ent != nil { if ent != nil {
c.removeElement(ent) c.removeElement(ent)
@ -117,7 +120,7 @@ func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
} }
// GetOldest returns the oldest entry // GetOldest returns the oldest entry
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { func (c *LRU) GetOldest() (key, value interface{}, ok bool) {
ent := c.evictList.Back() ent := c.evictList.Back()
if ent != nil { if ent != nil {
kv := ent.Value.(*entry) kv := ent.Value.(*entry)
@ -142,6 +145,19 @@ func (c *LRU) Len() int {
return c.evictList.Len() return c.evictList.Len()
} }
// Resize changes the cache size.
func (c *LRU) Resize(size int) (evicted int) {
diff := c.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.removeOldest()
}
c.size = size
return diff
}
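The golang-lru hunks above add `Resize` and tighten `Get`. A hedged usage sketch against the simplelru API shown here (`NewLRU`, `Add`, `Get`, `Resize`), assuming the pre-generics v1 import path and a `func(key, value interface{})` evict callback:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The evict callback fires for entries pushed out by Add or Resize.
	c, err := simplelru.NewLRU(2, func(k, v interface{}) { fmt.Println("evicted", k) })
	if err != nil {
		panic(err)
	}
	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a"
	if v, ok := c.Get("b"); ok {
		fmt.Println("b =", v)
	}
	fmt.Println("evicted by resize:", c.Resize(1)) // shrinks the cache to one entry
}
```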
// removeOldest removes the oldest item from the cache. // removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() { func (c *LRU) removeOldest() {
ent := c.evictList.Back() ent := c.evictList.Back()


@ -1,3 +1,4 @@
// Package simplelru provides simple LRU implementation based on build-in container/list.
package simplelru package simplelru
// LRUCache is the interface for simple LRU cache. // LRUCache is the interface for simple LRU cache.
@ -10,7 +11,7 @@ type LRUCache interface {
// updates the "recently used"-ness of the key. #value, isFound // updates the "recently used"-ness of the key. #value, isFound
Get(key interface{}) (value interface{}, ok bool) Get(key interface{}) (value interface{}, ok bool)
// Check if a key exsists in cache without updating the recent-ness. // Checks if a key exists in cache without updating the recent-ness.
Contains(key interface{}) (ok bool) Contains(key interface{}) (ok bool)
// Returns key's value without updating the "recently used"-ness of the key. // Returns key's value without updating the "recently used"-ness of the key.
@ -31,6 +32,9 @@ type LRUCache interface {
// Returns the number of items in the cache. // Returns the number of items in the cache.
Len() int Len() int
// Clear all cache entries // Clears all cache entries.
Purge() Purge()
// Resizes cache, returning number evicted
Resize(int) int
} }


@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2018-2025 Frank Denis Copyright (c) 2018-2023 Frank Denis
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal


@ -241,11 +241,7 @@ func getOutboundParams(ip string) (*net.IPAddr, *net.Interface, error) {
var ipUnicast net.IP var ipUnicast net.IP
var got bool var got bool
for _, addr := range addrs { for _, addr := range addrs {
ipa, ok := addr.(*net.IPNet) ipi := addr.(*net.IPNet).IP
if !ok {
continue
}
ipi := ipa.IP
if ipi.Equal(ipAddr.IP) { if ipi.Equal(ipAddr.IP) {
got = true got = true
} }
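The `getOutboundParams` hunk above guards the `addr.(*net.IPNet)` type assertion with the two-value form instead of assuming every interface address is an IP network. A hedged standalone sketch of the same defensive iteration:

```go
package main

import (
	"fmt"
	"net"
)

// interfaceIPs collects the IPs of an interface, skipping address types other
// than *net.IPNet instead of panicking on the type assertion.
func interfaceIPs(ifi *net.Interface) ([]net.IP, error) {
	addrs, err := ifi.Addrs()
	if err != nil {
		return nil, err
	}
	var ips []net.IP
	for _, addr := range addrs {
		ipn, ok := addr.(*net.IPNet)
		if !ok {
			continue
		}
		ips = append(ips, ipn.IP)
	}
	return ips, nil
}

func main() {
	ifaces, _ := net.Interfaces()
	for _, ifi := range ifaces {
		ips, _ := interfaceIPs(&ifi)
		fmt.Println(ifi.Name, ips)
	}
}
```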
@ -402,7 +398,6 @@ type Detector struct {
sync.RWMutex sync.RWMutex
got bool got bool
// RemoteIPPort is the remote IPPort to detect within UDP. // RemoteIPPort is the remote IPPort to detect within UDP.
// Won't send any data to it. `Dial` in UDP only detects if the network is available.
RemoteIPPort string RemoteIPPort string
lastActiveIP string lastActiveIP string
dns []net.IP dns []net.IP


@ -12,8 +12,6 @@ go build
cli cli
``` ```
It will need root privileges on operating systems other than Windows.
## Homepage ## Homepage
https://github.com/lifenjoiner/dhcpdns https://github.com/lifenjoiner/dhcpdns


@ -85,8 +85,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/wintbiit/NineDNS * https://github.com/wintbiit/NineDNS
* https://linuxcontainers.org/incus/ * https://linuxcontainers.org/incus/
* https://ifconfig.es * https://ifconfig.es
* https://github.com/zmap/zdns
* https://framagit.org/bortzmeyer/check-soa
Send pull request if you want to be listed here. Send pull request if you want to be listed here.
@ -193,9 +192,6 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 9460 - Service Binding and Parameter Specification via the DNS * 9460 - Service Binding and Parameter Specification via the DNS
* 9461 - Service Binding Mapping for DNS Servers * 9461 - Service Binding Mapping for DNS Servers
* 9462 - Discovery of Designated Resolvers * 9462 - Discovery of Designated Resolvers
* 9460 - SVCB and HTTPS Records
* 9606 - DNS Resolver Information
* Draft - Compact Denial of Existence in DNSSEC
## Loosely Based Upon ## Loosely Based Upon


@ -250,6 +250,14 @@ func (d *DS) ToCDS() *CDS {
// zero, it is used as-is, otherwise the TTL of the RRset is used as the // zero, it is used as-is, otherwise the TTL of the RRset is used as the
// OrigTTL. // OrigTTL.
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
if k == nil {
return ErrPrivKey
}
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
h0 := rrset[0].Header() h0 := rrset[0].Header()
rr.Hdr.Rrtype = TypeRRSIG rr.Hdr.Rrtype = TypeRRSIG
rr.Hdr.Name = h0.Name rr.Hdr.Name = h0.Name
@ -264,18 +272,6 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
rr.Labels-- // wildcard, remove from label count rr.Labels-- // wildcard, remove from label count
} }
return rr.signAsIs(k, rrset)
}
func (rr *RRSIG) signAsIs(k crypto.Signer, rrset []RR) error {
if k == nil {
return ErrPrivKey
}
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
sigwire := new(rrsigWireFmt) sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm sigwire.Algorithm = rr.Algorithm
@ -374,12 +370,9 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
if rr.Algorithm != k.Algorithm { if rr.Algorithm != k.Algorithm {
return ErrKey return ErrKey
} }
if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
signerName := CanonicalName(rr.SignerName)
if !equal(signerName, k.Hdr.Name) {
return ErrKey return ErrKey
} }
if k.Protocol != 3 { if k.Protocol != 3 {
return ErrKey return ErrKey
} }
@ -391,18 +384,9 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
} }
// IsRRset checked that we have at least one RR and that the RRs in // IsRRset checked that we have at least one RR and that the RRs in
// the set have consistent type, class, and name. Also check that type, // the set have consistent type, class, and name. Also check that type and
// class and name matches the RRSIG record. // class matches the RRSIG record.
// Also checks RFC 4035 5.3.1 the number of labels in the RRset owner if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
// name MUST be greater than or equal to the value in the RRSIG RR's Labels field.
// RFC 4035 5.3.1 Signer's Name MUST be the name of the zone that [contains the RRset].
// Since we don't have SOA info, checking suffix may be the best we can do...?
if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class ||
h0.Rrtype != rr.TypeCovered ||
uint8(CountLabel(h0.Name)) < rr.Labels ||
!equal(h0.Name, rr.Hdr.Name) ||
!strings.HasSuffix(CanonicalName(h0.Name), signerName) {
return ErrRRset return ErrRRset
} }
@ -416,7 +400,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
sigwire.Expiration = rr.Expiration sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag sigwire.KeyTag = rr.KeyTag
sigwire.SignerName = signerName sigwire.SignerName = CanonicalName(rr.SignerName)
// Create the desired binary blob // Create the desired binary blob
signeddata := make([]byte, DefaultMsgSize) signeddata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signeddata) n, err := packSigWire(sigwire, signeddata)
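The dnssec.go hunks above tighten the validation in `RRSIG.Sign` and `Verify` (key presence, signer name, label count). A hedged usage sketch of verifying a signed RRset with miekg/dns; `ValidityPeriod` and `ErrTime` are part of the library but not shown in this diff, so treat them as my assumption:

```go
package dnssecutil

import (
	"time"

	"github.com/miekg/dns"
)

// verifyRRset checks an RRSIG over rrset with the given DNSKEY and also
// confirms the signature's inception/expiration window covers "now".
func verifyRRset(key *dns.DNSKEY, sig *dns.RRSIG, rrset []dns.RR) error {
	if err := sig.Verify(key, rrset); err != nil {
		return err
	}
	if !sig.ValidityPeriod(time.Now()) {
		return dns.ErrTime
	}
	return nil
}
```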

vendor/github.com/miekg/dns/edns.go

@ -27,7 +27,6 @@ const (
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
_DO = 1 << 15 // DNSSEC OK _DO = 1 << 15 // DNSSEC OK
_CO = 1 << 14 // Compact Answers OK
) )
// makeDataOpt is used to unpack the EDNS0 option(s) from a message. // makeDataOpt is used to unpack the EDNS0 option(s) from a message.
@ -59,7 +58,7 @@ func makeDataOpt(code uint16) EDNS0 {
case EDNS0EDE: case EDNS0EDE:
return new(EDNS0_EDE) return new(EDNS0_EDE)
case EDNS0ESU: case EDNS0ESU:
return new(EDNS0_ESU) return &EDNS0_ESU{Code: EDNS0ESU}
default: default:
e := new(EDNS0_LOCAL) e := new(EDNS0_LOCAL)
e.Code = code e.Code = code
@ -67,7 +66,8 @@ func makeDataOpt(code uint16) EDNS0 {
} }
} }
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. See RFC 6891. // OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
// See RFC 6891.
type OPT struct { type OPT struct {
Hdr RR_Header Hdr RR_Header
Option []EDNS0 `dns:"opt"` Option []EDNS0 `dns:"opt"`
@ -76,11 +76,7 @@ type OPT struct {
func (rr *OPT) String() string { func (rr *OPT) String() string {
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
if rr.Do() { if rr.Do() {
if rr.Co() { s += "flags: do; "
s += "flags: do, co; "
} else {
s += "flags: do; "
}
} else { } else {
s += "flags:; " s += "flags:; "
} }
@ -148,6 +144,8 @@ func (*OPT) parse(c *zlexer, origin string) *ParseError {
func (rr *OPT) isDuplicate(r2 RR) bool { return false } func (rr *OPT) isDuplicate(r2 RR) bool { return false }
// return the old value -> delete SetVersion?
// Version returns the EDNS version used. Only zero is defined. // Version returns the EDNS version used. Only zero is defined.
func (rr *OPT) Version() uint8 { func (rr *OPT) Version() uint8 {
return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16) return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
@ -200,34 +198,14 @@ func (rr *OPT) SetDo(do ...bool) {
} }
} }
// Co returns the value of the CO (Compact Answers OK) bit. // Z returns the Z part of the OPT RR as a uint16 with only the 15 least significant bits used.
func (rr *OPT) Co() bool {
return rr.Hdr.Ttl&_CO == _CO
}
// SetCo sets the CO (Compact Answers OK) bit.
// If we pass an argument, set the CO bit to that value.
// It is possible to pass 2 or more arguments, but they will be ignored.
func (rr *OPT) SetCo(co ...bool) {
if len(co) == 1 {
if co[0] {
rr.Hdr.Ttl |= _CO
} else {
rr.Hdr.Ttl &^= _CO
}
} else {
rr.Hdr.Ttl |= _CO
}
}
// Z returns the Z part of the OPT RR as a uint16 with only the 14 least significant bits used.
func (rr *OPT) Z() uint16 { func (rr *OPT) Z() uint16 {
return uint16(rr.Hdr.Ttl & 0x3FFF) return uint16(rr.Hdr.Ttl & 0x7FFF)
} }
// SetZ sets the Z part of the OPT RR, note only the 14 least significant bits of z are used. // SetZ sets the Z part of the OPT RR, note only the 15 least significant bits of z are used.
func (rr *OPT) SetZ(z uint16) { func (rr *OPT) SetZ(z uint16) {
rr.Hdr.Ttl = rr.Hdr.Ttl&^0x3FFF | uint32(z&0x3FFF) rr.Hdr.Ttl = rr.Hdr.Ttl&^0x7FFF | uint32(z&0x7FFF)
} }
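The edns.go hunks above change the OPT flag layout in the TTL field: on the master side, bit 14 becomes a CO (Compact Answers OK) flag and Z shrinks to 14 bits, while the 2.1.7 side still masks 15 bits of Z. A hedged sketch of the part common to both sides, building a query with an OPT record and the DO bit set using the library's standard helpers:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.com.", dns.TypeA)
	m.SetEdns0(1232, true) // attach an OPT RR advertising a 1232-byte UDP size with DO=1
	if opt := m.IsEdns0(); opt != nil {
		fmt.Println("DO:", opt.Do(), "UDP size:", opt.UDPSize())
	}
}
```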
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. // EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
@ -258,8 +236,8 @@ type EDNS0 interface {
// e.Nsid = "AA" // e.Nsid = "AA"
// o.Option = append(o.Option, e) // o.Option = append(o.Option, e)
type EDNS0_NSID struct { type EDNS0_NSID struct {
Code uint16 // always EDNS0NSID Code uint16 // Always EDNS0NSID
Nsid string // string needs to be hex encoded Nsid string // This string needs to be hex encoded
} }
func (e *EDNS0_NSID) pack() ([]byte, error) { func (e *EDNS0_NSID) pack() ([]byte, error) {
@ -297,7 +275,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid}
// When packing it will apply SourceNetmask. If you need more advanced logic, // When packing it will apply SourceNetmask. If you need more advanced logic,
// patches welcome and good luck. // patches welcome and good luck.
type EDNS0_SUBNET struct { type EDNS0_SUBNET struct {
Code uint16 // always EDNS0SUBNET Code uint16 // Always EDNS0SUBNET
Family uint16 // 1 for IP, 2 for IP6 Family uint16 // 1 for IP, 2 for IP6
SourceNetmask uint8 SourceNetmask uint8
SourceScope uint8 SourceScope uint8
@ -421,8 +399,8 @@ func (e *EDNS0_SUBNET) copy() EDNS0 {
// //
// There is no guarantee that the Cookie string has a specific length. // There is no guarantee that the Cookie string has a specific length.
type EDNS0_COOKIE struct { type EDNS0_COOKIE struct {
Code uint16 // always EDNS0COOKIE Code uint16 // Always EDNS0COOKIE
Cookie string // hex encoded cookie data Cookie string // Hex-encoded cookie data
} }
func (e *EDNS0_COOKIE) pack() ([]byte, error) { func (e *EDNS0_COOKIE) pack() ([]byte, error) {
@ -452,7 +430,7 @@ func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.C
// e.Lease = 120 // in seconds // e.Lease = 120 // in seconds
// o.Option = append(o.Option, e) // o.Option = append(o.Option, e)
type EDNS0_UL struct { type EDNS0_UL struct {
Code uint16 // always EDNS0UL Code uint16 // Always EDNS0UL
Lease uint32 Lease uint32
KeyLease uint32 KeyLease uint32
} }
@ -491,7 +469,7 @@ func (e *EDNS0_UL) unpack(b []byte) error {
// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 // EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
// Implemented for completeness, as the EDNS0 type code is assigned. // Implemented for completeness, as the EDNS0 type code is assigned.
type EDNS0_LLQ struct { type EDNS0_LLQ struct {
Code uint16 // always EDNS0LLQ Code uint16 // Always EDNS0LLQ
Version uint16 Version uint16
Opcode uint16 Opcode uint16
Error uint16 Error uint16
@ -537,7 +515,7 @@ func (e *EDNS0_LLQ) copy() EDNS0 {
// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. // EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
type EDNS0_DAU struct { type EDNS0_DAU struct {
Code uint16 // always EDNS0DAU Code uint16 // Always EDNS0DAU
AlgCode []uint8 AlgCode []uint8
} }
@ -561,7 +539,7 @@ func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. // EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
type EDNS0_DHU struct { type EDNS0_DHU struct {
Code uint16 // always EDNS0DHU Code uint16 // Always EDNS0DHU
AlgCode []uint8 AlgCode []uint8
} }
@ -585,7 +563,7 @@ func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. // EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
type EDNS0_N3U struct { type EDNS0_N3U struct {
Code uint16 // always EDNS0N3U Code uint16 // Always EDNS0N3U
AlgCode []uint8 AlgCode []uint8
} }
@ -610,7 +588,7 @@ func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314. // EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
type EDNS0_EXPIRE struct { type EDNS0_EXPIRE struct {
Code uint16 // always EDNS0EXPIRE Code uint16 // Always EDNS0EXPIRE
Expire uint32 Expire uint32
Empty bool // Empty is used to signal an empty Expire option in a backwards compatible way, it's not used on the wire. Empty bool // Empty is used to signal an empty Expire option in a backwards compatible way, it's not used on the wire.
} }
@ -690,7 +668,7 @@ func (e *EDNS0_LOCAL) unpack(b []byte) error {
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep // EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
// the TCP connection alive. See RFC 7828. // the TCP connection alive. See RFC 7828.
type EDNS0_TCP_KEEPALIVE struct { type EDNS0_TCP_KEEPALIVE struct {
Code uint16 // always EDNSTCPKEEPALIVE Code uint16 // Always EDNSTCPKEEPALIVE
// Timeout is an idle timeout value for the TCP connection, specified in // Timeout is an idle timeout value for the TCP connection, specified in
// units of 100 milliseconds, encoded in network byte order. If set to 0, // units of 100 milliseconds, encoded in network byte order. If set to 0,
@ -861,12 +839,13 @@ func (e *EDNS0_EDE) unpack(b []byte) error {
return nil return nil
} }
// The EDNS0_ESU option for ENUM Source-URI Extension. // The EDNS0_ESU option for ENUM Source-URI Extension
type EDNS0_ESU struct { type EDNS0_ESU struct {
Code uint16 // always EDNS0ESU Code uint16
Uri string Uri string
} }
// Option implements the EDNS0 interface.
func (e *EDNS0_ESU) Option() uint16 { return EDNS0ESU } func (e *EDNS0_ESU) Option() uint16 { return EDNS0ESU }
func (e *EDNS0_ESU) String() string { return e.Uri } func (e *EDNS0_ESU) String() string { return e.Uri }
func (e *EDNS0_ESU) copy() EDNS0 { return &EDNS0_ESU{e.Code, e.Uri} } func (e *EDNS0_ESU) copy() EDNS0 { return &EDNS0_ESU{e.Code, e.Uri} }
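All of the option types in this file are attached to a message the same way; a small usage sketch following the NSID pattern quoted in the comments above (the name and NSID value are placeholders):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	o := new(dns.OPT)
	o.Hdr.Name = "."
	o.Hdr.Rrtype = dns.TypeOPT

	e := new(dns.EDNS0_NSID)
	e.Code = dns.EDNS0NSID
	e.Nsid = "AA" // hex-encoded, as the field comment requires

	o.Option = append(o.Option, e)

	m := new(dns.Msg)
	m.SetQuestion("example.com.", dns.TypeA)
	m.Extra = append(m.Extra, o)
	fmt.Println(m)
}
```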

View file

@ -3,15 +3,9 @@
package dns package dns
import ( import "net"
"fmt"
"net"
)
const ( const supportsReusePort = false
supportsReusePort = false
supportsReuseAddr = false
)
func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) {
if reuseport || reuseaddr { if reuseport || reuseaddr {
@ -21,6 +15,8 @@ func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, e
return net.Listen(network, addr) return net.Listen(network, addr)
} }
const supportsReuseAddr = false
func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) { func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) {
if reuseport || reuseaddr { if reuseport || reuseaddr {
// TODO(tmthrgd): return an error? // TODO(tmthrgd): return an error?
@ -28,13 +24,3 @@ func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn,
return net.ListenPacket(network, addr) return net.ListenPacket(network, addr)
} }
// this is just for test compatibility
func checkReuseport(fd uintptr) (bool, error) {
return false, fmt.Errorf("not supported")
}
// this is just for test compatibility
func checkReuseaddr(fd uintptr) (bool, error) {
return false, fmt.Errorf("not supported")
}

View file

@ -39,40 +39,10 @@ func reuseaddrControl(network, address string, c syscall.RawConn) error {
return opErr return opErr
} }
func reuseaddrandportControl(network, address string, c syscall.RawConn) error {
err := reuseaddrControl(network, address, c)
if err != nil {
return err
}
return reuseportControl(network, address, c)
}
// this is just for test compatibility
func checkReuseport(fd uintptr) (bool, error) {
v, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT)
if err != nil {
return false, err
}
return v == 1, nil
}
// this is just for test compatibility
func checkReuseaddr(fd uintptr) (bool, error) {
v, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR)
if err != nil {
return false, err
}
return v == 1, nil
}
func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) {
var lc net.ListenConfig var lc net.ListenConfig
switch { switch {
case reuseaddr && reuseport: case reuseaddr && reuseport:
lc.Control = reuseaddrandportControl
case reuseport: case reuseport:
lc.Control = reuseportControl lc.Control = reuseportControl
case reuseaddr: case reuseaddr:
@ -86,7 +56,6 @@ func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn,
var lc net.ListenConfig var lc net.ListenConfig
switch { switch {
case reuseaddr && reuseport: case reuseaddr && reuseport:
lc.Control = reuseaddrandportControl
case reuseport: case reuseport:
lc.Control = reuseportControl lc.Control = reuseportControl
case reuseaddr: case reuseaddr:

View file

@ -1620,16 +1620,6 @@ func (rr *NINFO) parse(c *zlexer, o string) *ParseError {
return nil return nil
} }
// Uses the same format as TXT
func (rr *RESINFO) parse(c *zlexer, o string) *ParseError {
s, e := endingToTxtSlice(c, "bad RESINFO Resinfo")
if e != nil {
return e
}
rr.Txt = s
return nil
}
func (rr *URI) parse(c *zlexer, o string) *ParseError { func (rr *URI) parse(c *zlexer, o string) *ParseError {
l, _ := c.Next() l, _ := c.Next()
i, e := strconv.ParseUint(l.token, 10, 16) i, e := strconv.ParseUint(l.token, 10, 16)

View file

@ -226,7 +226,6 @@ type Server struct {
// If NotifyStartedFunc is set it is called once the server has started listening. // If NotifyStartedFunc is set it is called once the server has started listening.
NotifyStartedFunc func() NotifyStartedFunc func()
// DecorateReader is optional, allows customization of the process that reads raw DNS messages. // DecorateReader is optional, allows customization of the process that reads raw DNS messages.
// The decorated reader must not mutate the data read from the conn.
DecorateReader DecorateReader DecorateReader DecorateReader
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages. // DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
DecorateWriter DecorateWriter DecorateWriter DecorateWriter

View file

@ -7,6 +7,7 @@ import (
"crypto/rsa" "crypto/rsa"
"encoding/binary" "encoding/binary"
"math/big" "math/big"
"strings"
"time" "time"
) )
@ -150,7 +151,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
} }
// If key has come from the DNS name compression might // If key has come from the DNS name compression might
// have mangled the case of the name // have mangled the case of the name
if !equal(signername, k.Header().Name) { if !strings.EqualFold(signername, k.Header().Name) {
return &Error{err: "signer name doesn't match key name"} return &Error{err: "signer name doesn't match key name"}
} }
sigend := offset sigend := offset


vendor/github.com/miekg/dns/svcb.go generated vendored
View file

@ -214,7 +214,11 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue {
} }
} }
// SVCB RR. See RFC 9460. // SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-08).
//
// NOTE: The HTTPS/SVCB RFCs are in the draft stage.
// The API, including constants and types related to SVCBKeyValues, may
// change in future versions in accordance with the latest drafts.
type SVCB struct { type SVCB struct {
Hdr RR_Header Hdr RR_Header
Priority uint16 // If zero, Value must be empty or discarded by the user of this library Priority uint16 // If zero, Value must be empty or discarded by the user of this library
@ -222,8 +226,12 @@ type SVCB struct {
Value []SVCBKeyValue `dns:"pairs"` Value []SVCBKeyValue `dns:"pairs"`
} }
// HTTPS RR. See RFC 9460. Everything valid for SVCB applies to HTTPS as well. // HTTPS RR. Everything valid for SVCB applies to HTTPS as well.
// Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols. // Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols.
//
// NOTE: The HTTPS/SVCB RFCs are in the draft stage.
// The API, including constants and types related to SVCBKeyValues, may
// change in future versions in accordance with the latest drafts.
type HTTPS struct { type HTTPS struct {
SVCB SVCB
} }
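Whichever RFC reference the comment carries, the SVCB/HTTPS types are constructed the same way; a hedged sketch (field values are illustrative, and the ALPN key type is assumed from the library's SVCBKeyValue implementations):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr := &dns.HTTPS{SVCB: dns.SVCB{
		Hdr:      dns.RR_Header{Name: "example.com.", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET, Ttl: 300},
		Priority: 1,
		Target:   ".",
		Value:    []dns.SVCBKeyValue{&dns.SVCBAlpn{Alpn: []string{"h2"}}},
	}}
	fmt.Println(rr)
}
```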

vendor/github.com/miekg/dns/types.go generated vendored
View file

@ -101,7 +101,6 @@ const (
TypeCAA uint16 = 257 TypeCAA uint16 = 257
TypeAVC uint16 = 258 TypeAVC uint16 = 258
TypeAMTRELAY uint16 = 260 TypeAMTRELAY uint16 = 260
TypeRESINFO uint16 = 261
TypeTKEY uint16 = 249 TypeTKEY uint16 = 249
TypeTSIG uint16 = 250 TypeTSIG uint16 = 250
@ -1509,15 +1508,6 @@ func (rr *ZONEMD) String() string {
" " + rr.Digest " " + rr.Digest
} }
// RESINFO RR. See RFC 9606.
type RESINFO struct {
Hdr RR_Header
Txt []string `dns:"txt"`
}
func (rr *RESINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
// APL RR. See RFC 3123. // APL RR. See RFC 3123.
type APL struct { type APL struct {
Hdr RR_Header Hdr RR_Header

View file

@ -3,7 +3,7 @@ package dns
import "fmt" import "fmt"
// Version is current version of this library. // Version is current version of this library.
var Version = v{1, 1, 64} var Version = v{1, 1, 62}
// v holds the version of this library. // v holds the version of this library.
type v struct { type v struct {

View file

@ -957,23 +957,6 @@ func (r1 *PX) isDuplicate(_r2 RR) bool {
return true return true
} }
func (r1 *RESINFO) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*RESINFO)
if !ok {
return false
}
_ = r2
if len(r1.Txt) != len(r2.Txt) {
return false
}
for i := 0; i < len(r1.Txt); i++ {
if r1.Txt[i] != r2.Txt[i] {
return false
}
}
return true
}
func (r1 *RFC3597) isDuplicate(_r2 RR) bool { func (r1 *RFC3597) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*RFC3597) r2, ok := _r2.(*RFC3597)
if !ok { if !ok {

vendor/github.com/miekg/dns/zmsg.go generated vendored
View file

@ -762,14 +762,6 @@ func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress boo
return off, nil return off, nil
} }
func (rr *RESINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
off, err = packStringTxt(rr.Txt, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
off, err = packStringHex(rr.Rdata, msg, off) off, err = packStringHex(rr.Rdata, msg, off)
if err != nil { if err != nil {
@ -2361,17 +2353,6 @@ func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) {
return off, nil return off, nil
} }
func (rr *RESINFO) unpack(msg []byte, off int) (off1 int, err error) {
rdStart := off
_ = rdStart
rr.Txt, off, err = unpackStringTxt(msg, off)
if err != nil {
return off, err
}
return off, nil
}
func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) { func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) {
rdStart := off rdStart := off
_ = rdStart _ = rdStart

View file

@ -66,7 +66,6 @@ var TypeToRR = map[uint16]func() RR{
TypeOPT: func() RR { return new(OPT) }, TypeOPT: func() RR { return new(OPT) },
TypePTR: func() RR { return new(PTR) }, TypePTR: func() RR { return new(PTR) },
TypePX: func() RR { return new(PX) }, TypePX: func() RR { return new(PX) },
TypeRESINFO: func() RR { return new(RESINFO) },
TypeRKEY: func() RR { return new(RKEY) }, TypeRKEY: func() RR { return new(RKEY) },
TypeRP: func() RR { return new(RP) }, TypeRP: func() RR { return new(RP) },
TypeRRSIG: func() RR { return new(RRSIG) }, TypeRRSIG: func() RR { return new(RRSIG) },
@ -155,7 +154,6 @@ var TypeToString = map[uint16]string{
TypeOPT: "OPT", TypeOPT: "OPT",
TypePTR: "PTR", TypePTR: "PTR",
TypePX: "PX", TypePX: "PX",
TypeRESINFO: "RESINFO",
TypeRKEY: "RKEY", TypeRKEY: "RKEY",
TypeRP: "RP", TypeRP: "RP",
TypeRRSIG: "RRSIG", TypeRRSIG: "RRSIG",
@ -240,7 +238,6 @@ func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *OPT) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
func (rr *PTR) Header() *RR_Header { return &rr.Hdr } func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
func (rr *PX) Header() *RR_Header { return &rr.Hdr } func (rr *PX) Header() *RR_Header { return &rr.Hdr }
func (rr *RESINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *RP) Header() *RR_Header { return &rr.Hdr } func (rr *RP) Header() *RR_Header { return &rr.Hdr }
@ -625,14 +622,6 @@ func (rr *PX) len(off int, compression map[string]struct{}) int {
return l return l
} }
func (rr *RESINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt {
l += len(x) + 1
}
return l
}
func (rr *RFC3597) len(off int, compression map[string]struct{}) int { func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression) l := rr.Hdr.len(off, compression)
l += len(rr.Rdata) / 2 l += len(rr.Rdata) / 2
@ -1159,10 +1148,6 @@ func (rr *PX) copy() RR {
} }
} }
func (rr *RESINFO) copy() RR {
return &RESINFO{rr.Hdr, cloneSlice(rr.Txt)}
}
func (rr *RFC3597) copy() RR { func (rr *RFC3597) copy() RR {
return &RFC3597{rr.Hdr, rr.Rdata} return &RFC3597{rr.Hdr, rr.Rdata}
} }

View file

@ -82,6 +82,10 @@ func New(colorMode ColorMode) Formatter {
return fmt.Sprintf("\x1b[38;5;%dm", colorCode) return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
} }
if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor {
colorMode = ColorModeNone
}
f := Formatter{ f := Formatter{
ColorMode: colorMode, ColorMode: colorMode,
colors: map[string]string{ colors: map[string]string{

View file

@ -2,6 +2,8 @@ package build
import ( import (
"fmt" "fmt"
"os"
"path"
"github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/ginkgo/internal"
@ -53,7 +55,18 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
if suite.State.Is(internal.TestSuiteStateFailedToCompile) { if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
fmt.Println(suite.CompilationError.Error()) fmt.Println(suite.CompilationError.Error())
} else { } else {
fmt.Printf("Compiled %s.test\n", suite.PackageName) if len(goFlagsConfig.O) == 0 {
goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test")
} else {
stat, err := os.Stat(goFlagsConfig.O)
if err != nil {
panic(err)
}
if stat.IsDir() {
goFlagsConfig.O += "/" + suite.PackageName + ".test"
}
}
fmt.Printf("Compiled %s\n", goFlagsConfig.O)
} }
} }
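The new output handling reduces to a small path rule: default to `<suite path>/<package>.test`, and append the package name when the user-supplied output points at a directory. A standalone sketch of that rule (the helper name is ours, and unlike the code above it does not panic on a stat error):

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// resolveOutput is an illustrative helper, not part of ginkgo itself.
func resolveOutput(o, suitePath, pkg string) string {
	if o == "" {
		return path.Join(suitePath, pkg+".test")
	}
	if stat, err := os.Stat(o); err == nil && stat.IsDir() {
		return o + "/" + pkg + ".test"
	}
	return o
}

func main() {
	fmt.Println(resolveOutput("", "./integration", "integration"))
	fmt.Println(resolveOutput(os.TempDir(), "./integration", "integration"))
}
```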

View file

@ -7,7 +7,7 @@ import (
"os" "os"
"text/template" "text/template"
sprig "github.com/go-task/slim-sprig" sprig "github.com/go-task/slim-sprig/v3"
"github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/ginkgo/internal"
"github.com/onsi/ginkgo/v2/types" "github.com/onsi/ginkgo/v2/types"

View file

@ -10,7 +10,7 @@ import (
"strings" "strings"
"text/template" "text/template"
sprig "github.com/go-task/slim-sprig" sprig "github.com/go-task/slim-sprig/v3"
"github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/ginkgo/internal"
"github.com/onsi/ginkgo/v2/types" "github.com/onsi/ginkgo/v2/types"
@ -32,6 +32,9 @@ func BuildGenerateCommand() command.Command {
{Name: "template-data", KeyPath: "CustomTemplateData", {Name: "template-data", KeyPath: "CustomTemplateData",
UsageArgument: "template-data-file", UsageArgument: "template-data-file",
Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
{Name: "tags", KeyPath: "Tags",
UsageArgument: "build-tags",
Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"},
}, },
&conf, &conf,
types.GinkgoFlagSections{}, types.GinkgoFlagSections{},
@ -59,6 +62,7 @@ You can also pass a <filename> of the form "file.go" and generate will emit "fil
} }
type specData struct { type specData struct {
BuildTags string
Package string Package string
Subject string Subject string
PackageImportPath string PackageImportPath string
@ -93,6 +97,7 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
} }
data := specData{ data := specData{
BuildTags: getBuildTags(conf.Tags),
Package: determinePackageName(packageName, conf.Internal), Package: determinePackageName(packageName, conf.Internal),
Subject: formattedName, Subject: formattedName,
PackageImportPath: getPackageImportPath(), PackageImportPath: getPackageImportPath(),
@ -169,6 +174,7 @@ func moduleName(modRoot string) string {
if err != nil { if err != nil {
return "" return ""
} }
defer modFile.Close()
mod := make([]byte, 128) mod := make([]byte, 128)
_, err = modFile.Read(mod) _, err = modFile.Read(mod)

View file

@ -1,6 +1,7 @@
package generators package generators
var specText = `package {{.Package}} var specText = `{{.BuildTags}}
package {{.Package}}
import ( import (
{{.GinkgoImport}} {{.GinkgoImport}}
@ -14,7 +15,8 @@ var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
}) })
` `
var agoutiSpecText = `package {{.Package}} var agoutiSpecText = `{{.BuildTags}}
package {{.Package}}
import ( import (
{{.GinkgoImport}} {{.GinkgoImport}}

View file

@ -1,6 +1,7 @@
package generators package generators
import ( import (
"fmt"
"go/build" "go/build"
"os" "os"
"path/filepath" "path/filepath"
@ -14,6 +15,7 @@ type GeneratorsConfig struct {
Agouti, NoDot, Internal bool Agouti, NoDot, Internal bool
CustomTemplate string CustomTemplate string
CustomTemplateData string CustomTemplateData string
Tags string
} }
func getPackageAndFormattedName() (string, string, string) { func getPackageAndFormattedName() (string, string, string) {
@ -62,3 +64,13 @@ func determinePackageName(name string, internal bool) string {
return name + "_test" return name + "_test"
} }
// getBuildTags returns the resultant string to be added.
// If the input string is not empty, then returns a `//go:build {}` string,
// otherwise returns an empty string.
func getBuildTags(tags string) string {
if tags != "" {
return fmt.Sprintf("//go:build %s\n", tags)
}
return ""
}
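A quick illustration of what the helper above produces, with the function copied verbatim so the snippet runs on its own:

```go
package main

import "fmt"

func getBuildTags(tags string) string {
	if tags != "" {
		return fmt.Sprintf("//go:build %s\n", tags)
	}
	return ""
}

func main() {
	fmt.Printf("%q\n", getBuildTags("e2e,!unit")) // "//go:build e2e,!unit\n"
	fmt.Printf("%q\n", getBuildTags(""))          // ""
}
```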

View file

@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
return suite return suite
} }
if len(goFlagsConfig.O) > 0 {
userDefinedPath, err := filepath.Abs(goFlagsConfig.O)
if err != nil {
suite.State = TestSuiteStateFailedToCompile
suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error())
return suite
}
path = userDefinedPath
}
goFlagsConfig.O = path
ginkgoInvocationPath, _ := os.Getwd() ginkgoInvocationPath, _ := os.Getwd()
ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
packagePath := suite.AbsPath() packagePath := suite.AbsPath()
@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
return suite return suite
} }
args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath)
if err != nil { if err != nil {
suite.State = TestSuiteStateFailedToCompile suite.State = TestSuiteStateFailedToCompile
suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())

View file

@ -0,0 +1,129 @@
// Copyright (c) 2015, Wade Simmons
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package gocovmerge takes the results from multiple `go test -coverprofile`
// runs and merges them into one profile
// this file was originally taken from the gocovmerge project
// see also: https://go.shabbyrobe.org/gocovmerge
package internal
import (
"fmt"
"io"
"sort"
"golang.org/x/tools/cover"
)
func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
if i < len(profiles) && profiles[i].FileName == p.FileName {
MergeCoverProfiles(profiles[i], p)
} else {
profiles = append(profiles, nil)
copy(profiles[i+1:], profiles[i:])
profiles[i] = p
}
return profiles
}
func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
if len(profiles) == 0 {
return nil
}
if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
return err
}
for _, p := range profiles {
for _, b := range p.Blocks {
if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
return err
}
}
}
return nil
}
func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
if into.Mode != merge.Mode {
return fmt.Errorf("cannot merge profiles with different modes")
}
// Since the blocks are sorted, we can keep track of where the last block
// was inserted and only look at the blocks after that as targets for merge
startIndex := 0
for _, b := range merge.Blocks {
var err error
startIndex, err = mergeProfileBlock(into, b, startIndex)
if err != nil {
return err
}
}
return nil
}
func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
sortFunc := func(i int) bool {
pi := p.Blocks[i+startIndex]
return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
}
i := 0
if sortFunc(i) != true {
i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
}
i += startIndex
if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
}
switch p.Mode {
case "set":
p.Blocks[i].Count |= pb.Count
case "count", "atomic":
p.Blocks[i].Count += pb.Count
default:
return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
}
} else {
if i > 0 {
pa := p.Blocks[i-1]
if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
}
}
if i < len(p.Blocks)-1 {
pa := p.Blocks[i+1]
if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
}
}
p.Blocks = append(p.Blocks, cover.ProfileBlock{})
copy(p.Blocks[i+1:], p.Blocks[i:])
p.Blocks[i] = pb
}
return i + 1, nil
}
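The merge rule at the heart of mergeProfileBlock is small enough to show on its own: blocks covering the same span are combined by ORing counts in `set` mode and adding them in `count`/`atomic` mode. A self-contained sketch of just that rule:

```go
package main

import "fmt"

// mergeCount mirrors the covermode switch in mergeProfileBlock above.
func mergeCount(mode string, a, b int) (int, error) {
	switch mode {
	case "set":
		return a | b, nil
	case "count", "atomic":
		return a + b, nil
	default:
		return 0, fmt.Errorf("unsupported covermode %q", mode)
	}
}

func main() {
	for _, mode := range []string{"set", "count"} {
		n, _ := mergeCount(mode, 1, 3)
		fmt.Printf("%s: %d\n", mode, n)
	}
}
```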

View file

@ -1,7 +1,6 @@
package internal package internal
import ( import (
"bytes"
"fmt" "fmt"
"os" "os"
"os/exec" "os/exec"
@ -12,6 +11,7 @@ import (
"github.com/google/pprof/profile" "github.com/google/pprof/profile"
"github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types" "github.com/onsi/ginkgo/v2/types"
"golang.org/x/tools/cover"
) )
func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
@ -144,38 +144,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
return messages, nil return messages, nil
} }
//loads each profile, combines them, deletes them, stores them in destination // loads each profile, merges them, deletes them, stores them in destination
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
combined := &bytes.Buffer{} var merged []*cover.Profile
modeRegex := regexp.MustCompile(`^mode: .*\n`) for _, file := range profiles {
for i, profile := range profiles { parsedProfiles, err := cover.ParseProfiles(file)
contents, err := os.ReadFile(profile)
if err != nil { if err != nil {
return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) return err
} }
os.Remove(profile) os.Remove(file)
for _, p := range parsedProfiles {
// remove the cover mode line from every file merged = AddCoverProfile(merged, p)
// except the first one
if i > 0 {
contents = modeRegex.ReplaceAll(contents, []byte{})
}
_, err = combined.Write(contents)
// Add a newline to the end of every file if missing.
if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
_, err = combined.Write([]byte("\n"))
}
if err != nil {
return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
} }
} }
dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
err := os.WriteFile(destination, combined.Bytes(), 0666)
if err != nil { if err != nil {
return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) return err
}
defer dst.Close()
err = DumpCoverProfiles(merged, dst)
if err != nil {
return err
} }
return nil return nil
} }
@ -184,7 +173,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) {
cmd := exec.Command("go", "tool", "cover", "-func", profile) cmd := exec.Command("go", "tool", "cover", "-func", profile)
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()
if err != nil { if err != nil {
return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output))
} }
re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
matches := re.FindStringSubmatch(string(output)) matches := re.FindStringSubmatch(string(output))
@ -208,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error {
return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
} }
prof, err := profile.Parse(proFile) prof, err := profile.Parse(proFile)
_ = proFile.Close()
if err != nil { if err != nil {
return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
} }

View file

@ -7,6 +7,7 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime"
"strings" "strings"
"github.com/onsi/ginkgo/v2/types" "github.com/onsi/ginkgo/v2/types"
@ -192,7 +193,7 @@ func precompiledTestSuite(path string) (TestSuite, error) {
return TestSuite{}, errors.New("this is not a .test binary") return TestSuite{}, errors.New("this is not a .test binary")
} }
if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
return TestSuite{}, errors.New("this is not executable") return TestSuite{}, errors.New("this is not executable")
} }
@ -225,7 +226,7 @@ func suitesInDir(dir string, recurse bool) TestSuites {
files, _ := os.ReadDir(dir) files, _ := os.ReadDir(dir)
re := regexp.MustCompile(`^[^._].*_test\.go$`) re := regexp.MustCompile(`^[^._].*_test\.go$`)
for _, file := range files { for _, file := range files {
if !file.IsDir() && re.Match([]byte(file.Name())) { if !file.IsDir() && re.MatchString(file.Name()) {
suite := TestSuite{ suite := TestSuite{
Path: relPath(dir), Path: relPath(dir),
PackageName: packageNameForSuite(dir), PackageName: packageNameForSuite(dir),
@ -240,7 +241,7 @@ func suitesInDir(dir string, recurse bool) TestSuites {
if recurse { if recurse {
re = regexp.MustCompile(`^[._]`) re = regexp.MustCompile(`^[._]`)
for _, file := range files { for _, file := range files {
if file.IsDir() && !re.Match([]byte(file.Name())) { if file.IsDir() && !re.MatchString(file.Name()) {
suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
} }
} }
@ -271,7 +272,7 @@ func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
for _, file := range files { for _, file := range files {
if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { if !file.IsDir() && reTestFile.MatchString(file.Name()) {
contents, _ := os.ReadFile(dir + "/" + file.Name()) contents, _ := os.ReadFile(dir + "/" + file.Name())
if reGinkgo.Match(contents) { if reGinkgo.Match(contents) {
return true return true

View file

@ -1,10 +1,11 @@
package outline package outline
import ( import (
"github.com/onsi/ginkgo/v2/types"
"go/ast" "go/ast"
"go/token" "go/token"
"strconv" "strconv"
"github.com/onsi/ginkgo/v2/types"
) )
const ( const (
@ -244,9 +245,7 @@ func labelFromCallExpr(ce *ast.CallExpr) []string {
} }
if id.Name == "Label" { if id.Name == "Label" {
ls := extractLabels(expr) ls := extractLabels(expr)
for _, label := range ls { labels = append(labels, ls...)
labels = append(labels, label)
}
} }
} }
} }

View file

@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string {
} }
name := spec.Name.String() name := spec.Name.String()
if name == "<nil>" { if name == "<nil>" {
// If the package name is not explicitly specified, name = "ginkgo"
// make an educated guess. This is not guaranteed to be correct.
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 {
name = path
} else {
name = path[lastSlash+1:]
}
} }
if name == "." { if name == "." {
name = "" name = ""

View file

@ -1,10 +1,13 @@
package outline package outline
import ( import (
"bytes"
"encoding/csv"
"encoding/json" "encoding/json"
"fmt" "fmt"
"go/ast" "go/ast"
"go/token" "go/token"
"strconv"
"strings" "strings"
"golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/ast/inspector"
@ -84,9 +87,11 @@ func (o *outline) String() string {
// StringIndent returns a CSV-formated outline, but every line is indented by // StringIndent returns a CSV-formated outline, but every line is indented by
// one 'width' of spaces for every level of nesting. // one 'width' of spaces for every level of nesting.
func (o *outline) StringIndent(width int) string { func (o *outline) StringIndent(width int) string {
var b strings.Builder var b bytes.Buffer
b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
csvWriter := csv.NewWriter(&b)
currentIndent := 0 currentIndent := 0
pre := func(n *ginkgoNode) { pre := func(n *ginkgoNode) {
b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
@ -96,8 +101,22 @@ func (o *outline) StringIndent(width int) string {
} else { } else {
labels = strings.Join(n.Labels, ", ") labels = strings.Join(n.Labels, ", ")
} }
//enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings
b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) row := []string{
n.Name,
n.Text,
strconv.Itoa(n.Start),
strconv.Itoa(n.End),
strconv.FormatBool(n.Spec),
strconv.FormatBool(n.Focused),
strconv.FormatBool(n.Pending),
labels,
}
csvWriter.Write(row)
// Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation
csvWriter.Flush()
currentIndent += width currentIndent += width
} }
post := func(n *ginkgoNode) { post := func(n *ginkgoNode) {
@ -106,5 +125,6 @@ func (o *outline) StringIndent(width int) string {
for _, n := range o.Nodes { for _, n := range o.Nodes {
n.Walk(pre, post) n.Walk(pre, post)
} }
return b.String() return b.String()
} }
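The switch to `encoding/csv` matters because label lists can contain commas; the writer quotes such fields instead of relying on hand-rolled quoting. A small demonstration with made-up column values:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	var b strings.Builder
	w := csv.NewWriter(&b)
	// The last field contains a comma, so the csv writer quotes it.
	w.Write([]string{"It", "resolves names", "10", "14", "true", "false", "false", "fast, network"})
	w.Flush()
	fmt.Print(b.String())
}
```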

View file

@ -78,7 +78,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) {
if err != nil { if err != nil {
continue continue
} }
if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) {
d.addDepIfNotPresent(pkg.Dir, depth) d.addDepIfNotPresent(pkg.Dir, depth)
} }
} }

View file

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"os" "os"
"regexp" "regexp"
"strings"
"time" "time"
) )
@ -79,7 +80,11 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
continue continue
} }
if goTestRegExp.Match([]byte(info.Name())) { if isHiddenFile(info) {
continue
}
if goTestRegExp.MatchString(info.Name()) {
testHash += p.hashForFileInfo(info) testHash += p.hashForFileInfo(info)
if info.ModTime().After(testModifiedTime) { if info.ModTime().After(testModifiedTime) {
testModifiedTime = info.ModTime() testModifiedTime = info.ModTime()
@ -87,7 +92,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
continue continue
} }
if p.watchRegExp.Match([]byte(info.Name())) { if p.watchRegExp.MatchString(info.Name()) {
codeHash += p.hashForFileInfo(info) codeHash += p.hashForFileInfo(info)
if info.ModTime().After(codeModifiedTime) { if info.ModTime().After(codeModifiedTime) {
codeModifiedTime = info.ModTime() codeModifiedTime = info.ModTime()
@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti
return return
} }
func isHiddenFile(info os.FileInfo) bool {
return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_")
}
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
} }

View file

@ -182,10 +182,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) {
r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
} }
func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
r.emitBlock("\n")
if r.conf.GithubOutput {
r.emitBlock(r.fi(1, "::group::%s", sectionName))
} else {
r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
}
fn()
if r.conf.GithubOutput {
r.emitBlock(r.fi(1, "::endgroup::"))
} else {
r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
}
}
func (r *DefaultReporter) DidRun(report types.SpecReport) { func (r *DefaultReporter) DidRun(report types.SpecReport) {
v := r.conf.Verbosity() v := r.conf.Verbosity()
inParallel := report.RunningInParallel inParallel := report.RunningInParallel
//should we completely omit this spec?
if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
return
}
header := r.specDenoter header := r.specDenoter
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
header = fmt.Sprintf("[%s]", report.LeafNodeType) header = fmt.Sprintf("[%s]", report.LeafNodeType)
@ -262,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
} }
} }
// If we have no content to show, jsut emit the header and return // If we have no content to show, just emit the header and return
if !reportHasContent { if !reportHasContent {
r.emit(r.f(highlightColor + header + "{{/}}")) r.emit(r.f(highlightColor + header + "{{/}}"))
if r.conf.ForceNewlines {
r.emit("\n")
}
return return
} }
@ -283,26 +307,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) {
//Emit Stdout/Stderr Output //Emit Stdout/Stderr Output
if showSeparateStdSection { if showSeparateStdSection {
r.emitBlock("\n") r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) })
r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}"))
} }
if showSeparateVisibilityAlwaysReportsSection { if showSeparateVisibilityAlwaysReportsSection {
r.emitBlock("\n") r.wrapTextBlock("Report Entries", func() {
r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { r.emitReportEntry(1, entry)
r.emitReportEntry(1, entry) }
} })
r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}"))
} }
if showTimeline { if showTimeline {
r.emitBlock("\n") r.wrapTextBlock("Timeline", func() {
r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) r.emitTimeline(1, report, timeline)
r.emitTimeline(1, report, timeline) })
r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}"))
} }
// Emit Failure Message // Emit Failure Message
@ -405,7 +426,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f
func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
highlightColor := r.highlightColorForState(state) highlightColor := r.highlightColorForState(state)
r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) if r.conf.GithubOutput {
level := "error"
if state.Is(types.SpecStateSkipped) {
level = "notice"
}
r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
} else {
r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
}
if failure.ForwardedPanic != "" { if failure.ForwardedPanic != "" {
r.emitBlock("\n") r.emitBlock("\n")
r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
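When `--github-output` is set, the reporter swaps its plain section markers for GitHub Actions workflow commands, which render as collapsible groups and inline file annotations. A sketch of the raw command strings involved (file, line, and message are placeholders):

```go
package main

import "fmt"

func main() {
	// Collapsible section, as emitted by wrapTextBlock above.
	fmt.Println("::group::Captured StdOut/StdErr Output")
	fmt.Println("... captured output ...")
	fmt.Println("::endgroup::")

	// Inline annotation, as emitted by emitFailure above.
	fmt.Println("::error file=resolver_test.go,line=42::[It] failure details here")
}
```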

View file

@ -4,16 +4,21 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"os" "os"
"path"
"github.com/onsi/ginkgo/v2/types" "github.com/onsi/ginkgo/v2/types"
) )
//GenerateJSONReport produces a JSON-formatted report at the passed in destination // GenerateJSONReport produces a JSON-formatted report at the passed in destination
func GenerateJSONReport(report types.Report, destination string) error { func GenerateJSONReport(report types.Report, destination string) error {
if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
return err
}
f, err := os.Create(destination) f, err := os.Create(destination)
if err != nil { if err != nil {
return err return err
} }
defer f.Close()
enc := json.NewEncoder(f) enc := json.NewEncoder(f)
enc.SetIndent("", " ") enc.SetIndent("", " ")
err = enc.Encode([]types.Report{ err = enc.Encode([]types.Report{
@ -22,11 +27,11 @@ func GenerateJSONReport(report types.Report, destination string) error {
if err != nil { if err != nil {
return err return err
} }
return f.Close() return nil
} }
//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources // MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
//It skips over reports that fail to decode but reports on them via the returned messages []string // It skips over reports that fail to decode but reports on them via the returned messages []string
func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
messages := []string{} messages := []string{}
allReports := []types.Report{} allReports := []types.Report{}
@ -46,15 +51,19 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string,
allReports = append(allReports, reports...) allReports = append(allReports, reports...)
} }
if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
return messages, err
}
f, err := os.Create(destination) f, err := os.Create(destination)
if err != nil { if err != nil {
return messages, err return messages, err
} }
defer f.Close()
enc := json.NewEncoder(f) enc := json.NewEncoder(f)
enc.SetIndent("", " ") enc.SetIndent("", " ")
err = enc.Encode(allReports) err = enc.Encode(allReports)
if err != nil { if err != nil {
return messages, err return messages, err
} }
return messages, f.Close() return messages, nil
} }
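The report writers now share a common pattern: create the destination directory first, then rely on a deferred Close so early returns do not leak the file handle. A generic sketch of that pattern (the helper name and paths are ours):

```go
package main

import (
	"encoding/json"
	"log"
	"os"
	"path"
)

// writeJSON is an illustrative stand-in for the report generators above.
func writeJSON(destination string, v any) error {
	if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
		return err
	}
	f, err := os.Create(destination)
	if err != nil {
		return err
	}
	defer f.Close()
	enc := json.NewEncoder(f)
	enc.SetIndent("", "  ")
	return enc.Encode(v)
}

func main() {
	if err := writeJSON("reports/example.json", map[string]string{"suite": "ok"}); err != nil {
		log.Fatal(err)
	}
}
```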

View file

@ -14,6 +14,8 @@ import (
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"os" "os"
"path"
"regexp"
"strings" "strings"
"github.com/onsi/ginkgo/v2/config" "github.com/onsi/ginkgo/v2/config"
@ -103,6 +105,8 @@ type JUnitProperty struct {
Value string `xml:"value,attr"` Value string `xml:"value,attr"`
} }
var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)
type JUnitTestCase struct { type JUnitTestCase struct {
// Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
Name string `xml:"name,attr"` Name string `xml:"name,attr"`
@ -112,6 +116,8 @@ type JUnitTestCase struct {
Status string `xml:"status,attr"` Status string `xml:"status,attr"`
// Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
Time float64 `xml:"time,attr"` Time float64 `xml:"time,attr"`
// Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes.
Owner string `xml:"owner,attr,omitempty"`
//Skipped is populated with a message if the test was skipped or pending //Skipped is populated with a message if the test was skipped or pending
Skipped *JUnitSkipped `xml:"skipped,omitempty"` Skipped *JUnitSkipped `xml:"skipped,omitempty"`
//Error is populated if the test panicked or was interrupted //Error is populated if the test panicked or was interrupted
@ -171,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
{"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
{"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
{"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
{"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)},
{"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
{"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
{"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
@ -194,6 +201,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
if len(labels) > 0 && !config.OmitSpecLabels { if len(labels) > 0 && !config.OmitSpecLabels {
name = name + " [" + strings.Join(labels, ", ") + "]" name = name + " [" + strings.Join(labels, ", ") + "]"
} }
owner := ""
for _, label := range labels {
if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 {
owner = matches[1]
}
}
name = strings.TrimSpace(name) name = strings.TrimSpace(name)
test := JUnitTestCase{ test := JUnitTestCase{
@ -201,6 +214,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
Classname: report.SuiteDescription, Classname: report.SuiteDescription,
Status: spec.State.String(), Status: spec.State.String(),
Time: spec.RunTime.Seconds(), Time: spec.RunTime.Seconds(),
Owner: owner,
} }
if !spec.State.Is(config.OmitTimelinesForSpecState) { if !spec.State.Is(config.OmitTimelinesForSpecState) {
test.SystemErr = systemErrForUnstructuredReporters(spec) test.SystemErr = systemErrForUnstructuredReporters(spec)
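The owner attribute is derived purely from labels: the case-insensitive `owner:` prefix is matched and the last hit wins, so a spec-level label overrides one set on a container node. A standalone sketch of that lookup using the same regular expression:

```go
package main

import (
	"fmt"
	"regexp"
)

var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)

func main() {
	owner := ""
	for _, label := range []string{"smoke", "Owner:networking", "owner:storage"} {
		if m := ownerRE.FindStringSubmatch(label); len(m) == 2 {
			owner = m[1]
		}
	}
	fmt.Println(owner) // storage
}
```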
@ -285,6 +299,9 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
TestSuites: []JUnitTestSuite{suite}, TestSuites: []JUnitTestSuite{suite},
} }
if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
return err
}
f, err := os.Create(dst) f, err := os.Create(dst)
if err != nil { if err != nil {
return err return err
@ -308,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
continue continue
} }
err = xml.NewDecoder(f).Decode(&report) err = xml.NewDecoder(f).Decode(&report)
_ = f.Close()
if err != nil { if err != nil {
messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
continue continue
@ -322,6 +340,9 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error)
mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
} }
if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
return messages, err
}
f, err := os.Create(dst) f, err := os.Create(dst)
if err != nil { if err != nil {
return messages, err return messages, err

View file

@ -11,6 +11,7 @@ package reporters
import ( import (
"fmt" "fmt"
"os" "os"
"path"
"strings" "strings"
"github.com/onsi/ginkgo/v2/types" "github.com/onsi/ginkgo/v2/types"
@ -27,6 +28,9 @@ func tcEscape(s string) string {
} }
func GenerateTeamcityReport(report types.Report, dst string) error { func GenerateTeamcityReport(report types.Report, dst string) error {
if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
return err
}
f, err := os.Create(dst) f, err := os.Create(dst)
if err != nil { if err != nil {
return err return err

View file

@ -149,7 +149,7 @@ func PruneStack(fullStackTrace string, skip int) string {
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
for i := 0; i < len(stack)/2; i++ { for i := 0; i < len(stack)/2; i++ {
// We filter out based on the source code file name. // We filter out based on the source code file name.
if !re.Match([]byte(stack[i*2+1])) { if !re.MatchString(stack[i*2+1]) {
prunedStack = append(prunedStack, stack[i*2]) prunedStack = append(prunedStack, stack[i*2])
prunedStack = append(prunedStack, stack[i*2+1]) prunedStack = append(prunedStack, stack[i*2+1])
} }

View file

@ -25,8 +25,10 @@ type SuiteConfig struct {
SkipFiles []string SkipFiles []string
LabelFilter string LabelFilter string
FailOnPending bool FailOnPending bool
FailOnEmpty bool
FailFast bool FailFast bool
FlakeAttempts int FlakeAttempts int
MustPassRepeatedly int
DryRun bool DryRun bool
PollProgressAfter time.Duration PollProgressAfter time.Duration
PollProgressInterval time.Duration PollProgressInterval time.Duration
@ -88,6 +90,9 @@ type ReporterConfig struct {
VeryVerbose bool VeryVerbose bool
FullTrace bool FullTrace bool
ShowNodeEvents bool ShowNodeEvents bool
GithubOutput bool
SilenceSkips bool
ForceNewlines bool
JSONReport string JSONReport string
JUnitReport string JUnitReport string
@ -197,6 +202,7 @@ type GoFlagsConfig struct {
A bool A bool
ASMFlags string ASMFlags string
BuildMode string BuildMode string
BuildVCS bool
Compiler string Compiler string
GCCGoFlags string GCCGoFlags string
GCFlags string GCFlags string
@ -214,6 +220,7 @@ type GoFlagsConfig struct {
ToolExec string ToolExec string
Work bool Work bool
X bool X bool
O string
} }
func NewDefaultGoFlagsConfig() GoFlagsConfig { func NewDefaultGoFlagsConfig() GoFlagsConfig {
@ -263,7 +270,7 @@ var FlagSections = GinkgoFlagSections{
// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI
var SuiteConfigFlags = GinkgoFlags{ var SuiteConfigFlags = GinkgoFlags{
{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
Usage: "The seed used to randomize the spec suite."}, Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},
@ -273,6 +280,8 @@ var SuiteConfigFlags = GinkgoFlags{
Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
{KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
{KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure",
Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."},
{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
@ -319,7 +328,7 @@ var ParallelConfigFlags = GinkgoFlags{
// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI // ReporterConfigFlags provides flags for the Ginkgo test process, and CLI
var ReporterConfigFlags = GinkgoFlags{ var ReporterConfigFlags = GinkgoFlags{
{KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
Usage: "If set, suppress color output in default reporter."}, Usage: "If set, suppress color output in default reporter. You can also set the environment variable GINKGO_NO_COLOR=TRUE"},
{KeyPath: "R.Verbose", Name: "v", SectionKey: "output", {KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
Usage: "If set, emits more output including GinkgoWriter contents."}, Usage: "If set, emits more output including GinkgoWriter contents."},
{KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
@ -330,6 +339,12 @@ var ReporterConfigFlags = GinkgoFlags{
Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
{KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
{KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
Usage: "If set, default reporter prints easier to manage output in Github Actions."},
{KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output",
Usage: "If set, default reporter will not print out skipped tests."},
{KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output",
Usage: "If set, default reporter will ensure a newline appears after each test."},
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
@ -498,7 +513,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{
// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
var GoBuildFlags = GinkgoFlags{ var GoBuildFlags = GinkgoFlags{
{KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."},
{KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
{KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
@ -514,6 +529,8 @@ var GoBuildFlags = GinkgoFlags{
Usage: "arguments to pass on each go tool asm invocation."}, Usage: "arguments to pass on each go tool asm invocation."},
{KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
Usage: "build mode to use. See 'go help buildmode' for more."}, Usage: "build mode to use. See 'go help buildmode' for more."},
{KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build",
Usage: "adds version control information."},
{KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
{KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
@ -548,6 +565,8 @@ var GoBuildFlags = GinkgoFlags{
Usage: "print the name of the temporary work directory and do not delete it when exiting."}, Usage: "print the name of the temporary work directory and do not delete it when exiting."},
{KeyPath: "Go.X", Name: "x", SectionKey: "go-build", {KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
Usage: "print the commands."}, Usage: "print the commands."},
{KeyPath: "Go.O", Name: "o", SectionKey: "go-build",
Usage: "output binary path (including name)."},
} }
// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
@ -601,7 +620,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
} }
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) {
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
// the built test binary can generate a coverprofile // the built test binary can generate a coverprofile
if goFlagsConfig.CoverProfile != "" { if goFlagsConfig.CoverProfile != "" {
@ -624,7 +643,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string,
goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
} }
args := []string{"test", "-c", "-o", destination, packageToBuild} args := []string{"test", "-c", packageToBuild}
goArgs, err := GenerateFlagArgs( goArgs, err := GenerateFlagArgs(
GoBuildFlags, GoBuildFlags,
map[string]interface{}{ map[string]interface{}{

View file

@ -453,8 +453,8 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
return GinkgoError{ return GinkgoError{
Heading: fmt.Sprintf("No parameters have been passed to the Table Function"), Heading: "No parameters have been passed to the Table Function",
Message: fmt.Sprintf("The Table Function expected at least 1 parameter"), Message: "The Table Function expected at least 1 parameter",
CodeLocation: cl, CodeLocation: cl,
DocLink: "table-specs", DocLink: "table-specs",
} }
@ -505,6 +505,15 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac
} }
} }
func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error {
return GinkgoError{
Heading: "Contexts cannot be used in subtree tables",
Message: "You've defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.",
CodeLocation: cl,
DocLink: "table-specs",
}
}
/* Parallel Synchronization errors */ /* Parallel Synchronization errors */
func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
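
The new ContextsCannotBeUsedInSubtreeTables error above fires when a subtree body function asks for a context. A minimal sketch of the pattern the error message points to (names and timeout are illustrative, and it assumes a Ginkgo v2 release recent enough to ship DescribeTableSubtree): the subtree body itself takes no context, and the It inside it requests a SpecContext so SpecTimeout can cancel the spec.

```go
package resolver_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = DescribeTableSubtree("resolving records",
	func(hostname string) { // no context parameter here
		It("answers within the timeout", func(ctx SpecContext) {
			// hypothetical: pass ctx and hostname to the code under test
			_, _ = ctx, hostname
		}, SpecTimeout(2*time.Second))
	},
	Entry("IPv4 lookup", "example.com"),
	Entry("IPv6 lookup", "example.org"),
)
```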

View file

@ -24,7 +24,8 @@ type GinkgoFlag struct {
DeprecatedDocLink string DeprecatedDocLink string
DeprecatedVersion string DeprecatedVersion string
ExportAs string ExportAs string
AlwaysExport bool
} }
type GinkgoFlags []GinkgoFlag type GinkgoFlags []GinkgoFlag
@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error {
return nil return nil
} }
//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. // given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
result := []string{} result := []string{}
for _, flag := range flags { for _, flag := range flags {
@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
iface := value.Interface() iface := value.Interface()
switch value.Type() { switch value.Type() {
case reflect.TypeOf(string("")): case reflect.TypeOf(string("")):
if iface.(string) != "" { if iface.(string) != "" || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%s", name, iface)) result = append(result, fmt.Sprintf("--%s=%s", name, iface))
} }
case reflect.TypeOf(int64(0)): case reflect.TypeOf(int64(0)):
if iface.(int64) != 0 { if iface.(int64) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%d", name, iface)) result = append(result, fmt.Sprintf("--%s=%d", name, iface))
} }
case reflect.TypeOf(float64(0)): case reflect.TypeOf(float64(0)):
if iface.(float64) != 0 { if iface.(float64) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%f", name, iface)) result = append(result, fmt.Sprintf("--%s=%f", name, iface))
} }
case reflect.TypeOf(int(0)): case reflect.TypeOf(int(0)):
if iface.(int) != 0 { if iface.(int) != 0 || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%d", name, iface)) result = append(result, fmt.Sprintf("--%s=%d", name, iface))
} }
case reflect.TypeOf(bool(true)): case reflect.TypeOf(bool(true)):
@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error)
result = append(result, fmt.Sprintf("--%s", name)) result = append(result, fmt.Sprintf("--%s", name))
} }
case reflect.TypeOf(time.Duration(0)): case reflect.TypeOf(time.Duration(0)):
if iface.(time.Duration) != time.Duration(0) { if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
result = append(result, fmt.Sprintf("--%s=%s", name, iface)) result = append(result, fmt.Sprintf("--%s=%s", name, iface))
} }

View file

@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter {
return func(labels []string) bool { return a(labels) || b(labels) } return func(labels []string) bool { return a(labels) || b(labels) }
} }
func labelSetFor(key string, labels []string) map[string]bool {
key = strings.ToLower(strings.TrimSpace(key))
out := map[string]bool{}
for _, label := range labels {
components := strings.SplitN(label, ":", 2)
if len(components) < 2 {
continue
}
if key == strings.ToLower(strings.TrimSpace(components[0])) {
out[strings.ToLower(strings.TrimSpace(components[1]))] = true
}
}
return out
}
func isEmptyLabelSetAction(key string) LabelFilter {
return func(labels []string) bool {
return len(labelSetFor(key, labels)) == 0
}
}
func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter {
return func(labels []string) bool {
set := labelSetFor(key, labels)
for _, value := range expectedValues {
if set[value] {
return true
}
}
return false
}
}
func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter {
return func(labels []string) bool {
set := labelSetFor(key, labels)
for _, value := range expectedValues {
if !set[value] {
return false
}
}
return true
}
}
func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter {
return func(labels []string) bool {
set := labelSetFor(key, labels)
if len(set) != len(expectedValues) {
return false
}
for _, value := range expectedValues {
if !set[value] {
return false
}
}
return true
}
}
func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter {
expectedSet := map[string]bool{}
for _, value := range expectedValues {
expectedSet[value] = true
}
return func(labels []string) bool {
set := labelSetFor(key, labels)
for value := range set {
if !expectedSet[value] {
return false
}
}
return true
}
}
type lfToken uint type lfToken uint
const ( const (
@ -58,6 +135,9 @@ const (
lfTokenOr lfTokenOr
lfTokenRegexp lfTokenRegexp
lfTokenLabel lfTokenLabel
lfTokenSetKey
lfTokenSetOperation
lfTokenSetArgument
lfTokenEOF lfTokenEOF
) )
@ -71,6 +151,8 @@ func (l lfToken) Precedence() int {
return 2 return 2
case lfTokenNot: case lfTokenNot:
return 3 return 3
case lfTokenSetOperation:
return 4
} }
return -1 return -1
} }
@ -93,6 +175,12 @@ func (l lfToken) String() string {
return "/regexp/" return "/regexp/"
case lfTokenLabel: case lfTokenLabel:
return "label" return "label"
case lfTokenSetKey:
return "set_key"
case lfTokenSetOperation:
return "set_operation"
case lfTokenSetArgument:
return "set_argument"
case lfTokenEOF: case lfTokenEOF:
return "EOF" return "EOF"
} }
@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
} }
return matchLabelRegexAction(re), nil return matchLabelRegexAction(re), nil
case lfTokenSetOperation:
tokenSetOperation := strings.ToLower(tn.value)
if tokenSetOperation == "isempty" {
return isEmptyLabelSetAction(tn.leftNode.value), nil
}
if tn.rightNode == nil {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value))
}
rawValues := strings.Split(tn.rightNode.value, ",")
values := make([]string, len(rawValues))
for i := range rawValues {
values[i] = strings.ToLower(strings.TrimSpace(rawValues[i]))
if strings.ContainsAny(values[i], "&|!,()/") {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i]))
} else if values[i] == "" {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.")
}
}
switch tokenSetOperation {
case "containsany":
return containsAnyLabelSetAction(tn.leftNode.value, values), nil
case "containsall":
return containsAllLabelSetAction(tn.leftNode.value, values), nil
case "consistsof":
return consistsOfLabelSetAction(tn.leftNode.value, values), nil
case "issubsetof":
return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil
}
} }
if tn.rightNode == nil { if tn.rightNode == nil {
@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string {
return out return out
} }
var validSetOperations = map[string]string{
"containsany": "containsAny",
"containsall": "containsAll",
"consistsof": "consistsOf",
"issubsetof": "isSubsetOf",
"isempty": "isEmpty",
}
func tokenize(input string) func() (*treeNode, error) { func tokenize(input string) func() (*treeNode, error) {
lastToken := lfTokenInvalid
lastValue := ""
runes, i := []rune(input), 0 runes, i := []rune(input), 0
peekIs := func(r rune) bool { peekIs := func(r rune) bool {
@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) {
} }
node := &treeNode{location: i} node := &treeNode{location: i}
defer func() {
lastToken = node.token
lastValue = node.value
}()
if lastToken == lfTokenSetKey {
//we should get a valid set operation next
value, n := consumeUntil(" )")
if validSetOperations[strings.ToLower(value)] == "" {
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value))
}
i += n
node.token, node.value = lfTokenSetOperation, value
return node, nil
}
if lastToken == lfTokenSetOperation {
//we should get an argument next, if we aren't isempty
var arg = ""
origI := i
if runes[i] == '{' {
i += 1
value, n := consumeUntil("}")
if i+n >= len(runes) {
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?")
}
i += n + 1
arg = value
} else {
value, n := consumeUntil("&|!,()/")
i += n
arg = strings.TrimSpace(value)
}
if strings.ToLower(lastValue) == "isempty" && arg != "" {
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg))
}
if arg == "" && strings.ToLower(lastValue) != "isempty" {
if i < len(runes) && runes[i] == '/' {
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.")
} else {
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue))
}
}
// note that we sent an empty SetArgument token if we are isempty
node.token, node.value = lfTokenSetArgument, arg
return node, nil
}
switch runes[i] { switch runes[i] {
case '&': case '&':
if !peekIs('&') { if !peekIs('&') {
@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) {
i += n + 1 i += n + 1
node.token, node.value = lfTokenRegexp, value node.token, node.value = lfTokenRegexp, value
default: default:
value, n := consumeUntil("&|!,()/") value, n := consumeUntil("&|!,()/:")
i += n i += n
value = strings.TrimSpace(value)
//are we the beginning of a set operation?
if i < len(runes) && runes[i] == ':' {
if peekIs(' ') {
if value == "" {
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.")
}
i += 1
//we are the beginning of a set operation
node.token, node.value = lfTokenSetKey, value
return node, nil
}
additionalValue, n := consumeUntil("&|!,()/")
additionalValue = strings.TrimSpace(additionalValue)
if additionalValue == ":" {
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.")
}
i += n
value += additionalValue
}
valueToCheckForSetOperation := strings.ToLower(value)
for setOperation := range validSetOperations {
idx := strings.Index(valueToCheckForSetOperation, " "+setOperation)
if idx > 0 {
return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation]))
}
}
node.token, node.value = lfTokenLabel, strings.TrimSpace(value) node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
} }
return node, nil return node, nil
@ -307,7 +511,7 @@ LOOP:
switch node.token { switch node.token {
case lfTokenEOF: case lfTokenEOF:
break LOOP break LOOP
case lfTokenLabel, lfTokenRegexp: case lfTokenLabel, lfTokenRegexp, lfTokenSetKey:
if current.rightNode != nil { if current.rightNode != nil {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.") return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
} }
@ -326,6 +530,18 @@ LOOP:
node.setLeftNode(nodeToStealFrom.rightNode) node.setLeftNode(nodeToStealFrom.rightNode)
nodeToStealFrom.setRightNode(node) nodeToStealFrom.setRightNode(node)
current = node current = node
case lfTokenSetOperation:
if current.rightNode == nil {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value))
}
node.setLeftNode(current.rightNode)
current.setRightNode(node)
current = node
case lfTokenSetArgument:
if current.rightNode != nil {
return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token))
}
current.setRightNode(node)
case lfTokenCloseGroup: case lfTokenCloseGroup:
firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
if firstUnmatchedOpenNode == nil { if firstUnmatchedOpenNode == nil {
@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
if strings.ContainsAny(out, "&|!,()/") { if strings.ContainsAny(out, "&|!,()/") {
return "", GinkgoErrors.InvalidLabel(label, cl) return "", GinkgoErrors.InvalidLabel(label, cl)
} }
if out[0] == ':' {
return "", GinkgoErrors.InvalidLabel(label, cl)
}
if strings.Contains(out, ":") {
components := strings.SplitN(out, ":", 2)
if len(components) < 2 || components[1] == "" {
return "", GinkgoErrors.InvalidLabel(label, cl)
}
}
return out, nil return out, nil
} }
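
The tokenizer and tree changes above add "label set" queries of the form `key: operation {values}`, with containsAny, containsAll, consistsOf, isSubsetOf and isEmpty as the recognised operations. A hedged sketch of how specs might carry key:value labels that these filters can match (spec names and labels are illustrative; exact CLI quoting depends on your shell):

```go
package example_test

import (
	. "github.com/onsi/ginkgo/v2"
)

// With labels like these, filter queries such as the following are
// intended to work, based on the operations defined in this diff:
//
//	ginkgo --label-filter='os: containsAny {linux, darwin}'
//	ginkgo --label-filter='os: consistsOf {linux}'
//	ginkgo --label-filter='!(os: isEmpty)'
var _ = Describe("cache behaviour", Label("os: linux", "os: darwin"), func() {
	It("serves cached answers", Label("feature: caching"), func() {})
})
```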

View file

@ -3,13 +3,21 @@ package types
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"os"
"sort" "sort"
"strings" "strings"
"time" "time"
) )
const GINKGO_FOCUS_EXIT_CODE = 197 const GINKGO_FOCUS_EXIT_CODE = 197
const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
func init() {
if os.Getenv("GINKGO_TIME_FORMAT") != "" {
GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT")
}
}
// Report captures information about a Ginkgo test run // Report captures information about a Ginkgo test run
type Report struct { type Report struct {
@ -97,9 +105,7 @@ func (report Report) Add(other Report) Report {
report.RunTime = report.EndTime.Sub(report.StartTime) report.RunTime = report.EndTime.Sub(report.StartTime)
reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
for i := range report.SpecReports { copy(reports, report.SpecReports)
reports[i] = report.SpecReports[i]
}
offset := len(report.SpecReports) offset := len(report.SpecReports)
for i := range other.SpecReports { for i := range other.SpecReports {
reports[i+offset] = other.SpecReports[i] reports[i+offset] = other.SpecReports[i]
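
The GINKGO_TIME_FORMAT change above turns a constant into a variable that an environment variable can override at start-up. A standalone sketch of that pattern, with an illustrative override value:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// Read once at start-up; the environment variable replaces the default format.
var timeFormat = "01/02/06 15:04:05.999"

func init() {
	if v := os.Getenv("GINKGO_TIME_FORMAT"); v != "" {
		timeFormat = v
	}
}

func main() {
	// e.g. GINKGO_TIME_FORMAT="15:04:05" go run .
	fmt.Println(time.Now().Format(timeFormat))
}
```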

View file

@ -1,3 +1,3 @@
package types package types
const VERSION = "2.9.5" const VERSION = "2.22.2"

View file

@ -16,9 +16,9 @@ linters:
disable-all: true disable-all: true
enable: enable:
- asciicheck - asciicheck
- copyloopvar
- depguard - depguard
- exhaustive - exhaustive
- exportloopref
- goimports - goimports
- gofmt # redundant, since gofmt *should* be a no-op after gofumpt - gofmt # redundant, since gofmt *should* be a no-op after gofumpt
- gofumpt - gofumpt
@ -44,8 +44,3 @@ issues:
linters: linters:
- exhaustive - exhaustive
- prealloc - prealloc
- unparam
- path: _test\.go
text: "SA1029:"
linters:
- staticcheck

View file

@ -9,8 +9,7 @@
quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go. It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) and HTTP Datagrams ([RFC 9297](https://datatracker.ietf.org/doc/html/rfc9297)). quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go. It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) and HTTP Datagrams ([RFC 9297](https://datatracker.ietf.org/doc/html/rfc9297)).
In addition to these base RFCs, it also implements the following RFCs: In addition to these base RFCs, it also implements the following RFCs:
* Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) * Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221))
* Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)) * Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899))
* QUIC Version 2 ([RFC 9369](https://datatracker.ietf.org/doc/html/rfc9369)) * QUIC Version 2 ([RFC 9369](https://datatracker.ietf.org/doc/html/rfc9369))
@ -34,7 +33,6 @@ Detailed documentation can be found on [quic-go.net](https://quic-go.net/docs/).
| [Hysteria](https://github.com/apernet/hysteria) | A powerful, lightning fast and censorship resistant proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/apernet/hysteria?style=flat-square) | | [Hysteria](https://github.com/apernet/hysteria) | A powerful, lightning fast and censorship resistant proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/apernet/hysteria?style=flat-square) |
| [Mercure](https://github.com/dunglas/mercure) | An open, easy, fast, reliable and battery-efficient solution for real-time communications | ![GitHub Repo stars](https://img.shields.io/github/stars/dunglas/mercure?style=flat-square) | | [Mercure](https://github.com/dunglas/mercure) | An open, easy, fast, reliable and battery-efficient solution for real-time communications | ![GitHub Repo stars](https://img.shields.io/github/stars/dunglas/mercure?style=flat-square) |
| [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) | | [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) |
| [reverst](https://github.com/flipt-io/reverst) | Reverse Tunnels in Go over HTTP/3 and QUIC | ![GitHub Repo stars](https://img.shields.io/github/stars/flipt-io/reverst?style=flat-square) |
| [RoadRunner](https://github.com/roadrunner-server/roadrunner) | High-performance PHP application server, process manager written in Go and powered with plugins | ![GitHub Repo stars](https://img.shields.io/github/stars/roadrunner-server/roadrunner?style=flat-square) | | [RoadRunner](https://github.com/roadrunner-server/roadrunner) | High-performance PHP application server, process manager written in Go and powered with plugins | ![GitHub Repo stars](https://img.shields.io/github/stars/roadrunner-server/roadrunner?style=flat-square) |
| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) | | [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) |
| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) | | [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) |
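
For readers unfamiliar with the library, a minimal client sketch follows. Exact signatures differ between quic-go releases; this assumes a recent version in which DialAddr takes a context, and the target address and ALPN value are placeholders.

```go
package main

import (
	"context"
	"crypto/tls"
	"log"

	"github.com/quic-go/quic-go"
)

func main() {
	tlsConf := &tls.Config{
		NextProtos: []string{"example-proto"}, // hypothetical ALPN
	}
	// Dial a QUIC connection and open a single bidirectional stream.
	conn, err := quic.DialAddr(context.Background(), "example.com:4242", tlsConf, &quic.Config{})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.CloseWithError(0, "done")

	stream, err := conn.OpenStreamSync(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()

	if _, err := stream.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
}
```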

View file

@ -7,8 +7,38 @@ import (
"net" "net"
"github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/protocol"
"github.com/quic-go/quic-go/internal/utils"
"github.com/quic-go/quic-go/logging"
) )
type client struct {
sendConn sendConn
use0RTT bool
packetHandlers packetHandlerManager
onClose func()
tlsConf *tls.Config
config *Config
connIDGenerator ConnectionIDGenerator
srcConnID protocol.ConnectionID
destConnID protocol.ConnectionID
initialPacketNumber protocol.PacketNumber
hasNegotiatedVersion bool
version protocol.Version
handshakeChan chan struct{}
conn quicConn
tracer *logging.ConnectionTracer
tracingID ConnectionTracingID
logger utils.Logger
}
// make it possible to mock connection ID for initial generation in the tests // make it possible to mock connection ID for initial generation in the tests
var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
@ -102,3 +132,120 @@ func setupTransport(c net.PacketConn, tlsConf *tls.Config, createdPacketConn boo
isSingleUse: true, isSingleUse: true,
}, nil }, nil
} }
func dial(
ctx context.Context,
conn sendConn,
connIDGenerator ConnectionIDGenerator,
packetHandlers packetHandlerManager,
tlsConf *tls.Config,
config *Config,
onClose func(),
use0RTT bool,
) (quicConn, error) {
c, err := newClient(conn, connIDGenerator, config, tlsConf, onClose, use0RTT)
if err != nil {
return nil, err
}
c.packetHandlers = packetHandlers
c.tracingID = nextConnTracingID()
if c.config.Tracer != nil {
c.tracer = c.config.Tracer(context.WithValue(ctx, ConnectionTracingKey, c.tracingID), protocol.PerspectiveClient, c.destConnID)
}
if c.tracer != nil && c.tracer.StartedConnection != nil {
c.tracer.StartedConnection(c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID)
}
if err := c.dial(ctx); err != nil {
return nil, err
}
return c.conn, nil
}
func newClient(sendConn sendConn, connIDGenerator ConnectionIDGenerator, config *Config, tlsConf *tls.Config, onClose func(), use0RTT bool) (*client, error) {
srcConnID, err := connIDGenerator.GenerateConnectionID()
if err != nil {
return nil, err
}
destConnID, err := generateConnectionIDForInitial()
if err != nil {
return nil, err
}
c := &client{
connIDGenerator: connIDGenerator,
srcConnID: srcConnID,
destConnID: destConnID,
sendConn: sendConn,
use0RTT: use0RTT,
onClose: onClose,
tlsConf: tlsConf,
config: config,
version: config.Versions[0],
handshakeChan: make(chan struct{}),
logger: utils.DefaultLogger.WithPrefix("client"),
}
return c, nil
}
func (c *client) dial(ctx context.Context) error {
c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID, c.version)
c.conn = newClientConnection(
context.WithValue(context.WithoutCancel(ctx), ConnectionTracingKey, c.tracingID),
c.sendConn,
c.packetHandlers,
c.destConnID,
c.srcConnID,
c.connIDGenerator,
c.config,
c.tlsConf,
c.initialPacketNumber,
c.use0RTT,
c.hasNegotiatedVersion,
c.tracer,
c.logger,
c.version,
)
c.packetHandlers.Add(c.srcConnID, c.conn)
errorChan := make(chan error, 1)
recreateChan := make(chan errCloseForRecreating)
go func() {
err := c.conn.run()
var recreateErr *errCloseForRecreating
if errors.As(err, &recreateErr) {
recreateChan <- *recreateErr
return
}
if c.onClose != nil {
c.onClose()
}
errorChan <- err // returns as soon as the connection is closed
}()
// only set when we're using 0-RTT
// Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever.
var earlyConnChan <-chan struct{}
if c.use0RTT {
earlyConnChan = c.conn.earlyConnReady()
}
select {
case <-ctx.Done():
c.conn.destroy(nil)
return context.Cause(ctx)
case err := <-errorChan:
return err
case recreateErr := <-recreateChan:
c.initialPacketNumber = recreateErr.nextPacketNumber
c.version = recreateErr.nextVersion
c.hasNegotiatedVersion = true
return c.dial(ctx)
case <-earlyConnChan:
// ready to send 0-RTT data
return nil
case <-c.conn.HandshakeComplete():
// handshake successfully completed
return nil
}
}
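
The select at the end of dial() relies on a Go property: receiving from a nil channel blocks forever, so earlyConnChan is only non-nil when 0-RTT is in use and the case is otherwise effectively removed from the select. A standalone sketch of that idiom (names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// wait shows the nil-channel idiom: a select case on a nil channel never
// fires, so leaving earlyReady nil disables the 0-RTT branch.
func wait(use0RTT bool) string {
	var earlyReady chan struct{} // nil unless 0-RTT is enabled
	if use0RTT {
		earlyReady = make(chan struct{}, 1)
		earlyReady <- struct{}{}
	}
	handshakeDone := time.After(10 * time.Millisecond)

	select {
	case <-earlyReady:
		return "0-RTT ready"
	case <-handshakeDone:
		return "handshake complete"
	}
}

func main() {
	fmt.Println(wait(true))  // "0-RTT ready"
	fmt.Println(wait(false)) // "handshake complete" (nil case never fires)
}
```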

View file

@ -6,8 +6,6 @@ coverage:
- internal/handshake/cipher_suite.go - internal/handshake/cipher_suite.go
- internal/utils/linkedlist/linkedlist.go - internal/utils/linkedlist/linkedlist.go
- internal/testdata - internal/testdata
- logging/connection_tracer_multiplexer.go
- logging/tracer_multiplexer.go
- testutils/ - testutils/
- fuzzing/ - fuzzing/
- metrics/ - metrics/

Some files were not shown because too many files have changed in this diff.