Mirror of https://github.com/DNSCrypt/dnscrypt-proxy.git (synced 2025-03-31 11:47:39 +03:00)

Commit 5d2519e2c1 (parent 5a46f0db9a): Update deps, especially quic-go

111 changed files with 7205 additions and 1779 deletions
Changed paths (as listed in the file tree):

- go.mod, go.sum
- vendor/github.com/BurntSushi/toml
- vendor/github.com/miekg/dns
- vendor/github.com/quic-go/quic-go/internal/ackhandler
- vendor/golang.org/x/mod
- vendor/golang.org/x/tools: LICENSE; go/ast, go/gcexportdata, go/internal/packagesdriver, go/packages, go/types; internal/aliases, internal/astutil/edge, internal/gcimporter (bimport.go, exportdata.go, gcimporter.go, iexport.go, iimport.go, iimport_go122.go, newInterface10.go, newInterface11.go, predeclared.go, support.go, support_go118.go, unified_no.go, unified_yes.go, ureader_yes.go), internal/gocommand, internal/imports, internal/modindex, internal/packagesinternal, internal/pkgbits, internal/stdlib, internal/tokeninternal, internal/typeparams, internal/typesinternal
go.mod (10 changes)

@@ -3,7 +3,7 @@ module github.com/dnscrypt/dnscrypt-proxy
 go 1.24.1

 require (
-	github.com/BurntSushi/toml v1.4.0
+	github.com/BurntSushi/toml v1.5.0
 	github.com/VividCortex/ewma v1.2.0
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
 	github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
@@ -18,10 +18,10 @@ require (
 	github.com/k-sone/critbitgo v1.4.0
 	github.com/kardianos/service v1.2.2
 	github.com/lifenjoiner/dhcpdns v0.0.7
-	github.com/miekg/dns v1.1.63
+	github.com/miekg/dns v1.1.64
 	github.com/opencoff/go-sieve v0.2.1
 	github.com/powerman/check v1.8.0
-	github.com/quic-go/quic-go v0.50.0
+	github.com/quic-go/quic-go v0.50.1
 	golang.org/x/crypto v0.36.0
 	golang.org/x/net v0.37.0
 	golang.org/x/sys v0.31.0
@@ -43,10 +43,10 @@ require (
 	github.com/smartystreets/goconvey v1.8.1 // indirect
 	go.uber.org/mock v0.5.0 // indirect
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
-	golang.org/x/mod v0.18.0 // indirect
+	golang.org/x/mod v0.23.0 // indirect
 	golang.org/x/sync v0.12.0 // indirect
 	golang.org/x/text v0.23.0 // indirect
-	golang.org/x/tools v0.22.0 // indirect
+	golang.org/x/tools v0.30.0 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.56.3 // indirect
 	google.golang.org/protobuf v1.34.2 // indirect
go.sum (20 changes)

@@ -1,5 +1,5 @@
-github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
-github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -57,8 +57,8 @@ github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX
 github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
 github.com/lifenjoiner/dhcpdns v0.0.7 h1:VJM2aFWHU9V7M5v4UYYNaHhIHZkbdvSI6WGGpq6/TNQ=
 github.com/lifenjoiner/dhcpdns v0.0.7/go.mod h1:BixeaGeafYzDIuDCYIUbSOdi4m+TScpzI9cZGYgzgSk=
-github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
-github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
+github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ=
+github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
 github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
 github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
 github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
@@ -75,8 +75,8 @@ github.com/powerman/deepequal v0.1.0 h1:sVwtyTsBuYIvdbLR1O2wzRY63YgPqdGZmk/o80l+
 github.com/powerman/deepequal v0.1.0/go.mod h1:3k7aG/slufBhUANdN67o/UPg8i5YaiJ6FmibWX0cn04=
 github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
 github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
-github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo=
-github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
+github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
+github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
 github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
 github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
 github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
@@ -91,8 +91,8 @@ golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
 golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
-golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
-golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
+golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
 golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
@@ -106,8 +106,8 @@ golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
 golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
-golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
+golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
vendor/github.com/BurntSushi/toml/README.md (2 changes, generated, vendored)

@@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
 Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).

-Documentation: https://godocs.io/github.com/BurntSushi/toml
+Documentation: https://pkg.go.dev/github.com/BurntSushi/toml

 See the [releases page](https://github.com/BurntSushi/toml/releases) for a
 changelog; this information is also in the git tag annotations (e.g. `git show
vendor/github.com/BurntSushi/toml/decode.go (33 changes, generated, vendored)

@@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
 	return md.unify(primValue.undecoded, rvalue(v))
 }

+// markDecodedRecursive is a helper to mark any key under the given tmap as
+// decoded, recursing as needed
+func markDecodedRecursive(md *MetaData, tmap map[string]any) {
+	for key := range tmap {
+		md.decoded[md.context.add(key).String()] = struct{}{}
+		if tmap, ok := tmap[key].(map[string]any); ok {
+			md.context = append(md.context, key)
+			markDecodedRecursive(md, tmap)
+			md.context = md.context[0 : len(md.context)-1]
+		}
+	}
+}
+
 // unify performs a sort of type unification based on the structure of `rv`,
 // which is the client representation.
 //
@@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
 		if err != nil {
 			return md.parseErr(err)
 		}
+		// Assume the Unmarshaler decoded everything, so mark all keys under
+		// this table as decoded.
+		if tmap, ok := data.(map[string]any); ok {
+			markDecodedRecursive(md, tmap)
+		}
+		if aot, ok := data.([]map[string]any); ok {
+			for _, tmap := range aot {
+				markDecodedRecursive(md, tmap)
+			}
+		}
 		return nil
 	}
 	if v, ok := rvi.(encoding.TextUnmarshaler); ok {
@@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error {

 func (md *MetaData) parseErr(err error) error {
 	k := md.context.String()
+	d := string(md.data)
 	return ParseError{
-		LastKey:  k,
-		Position: md.keyInfo[k].pos,
-		Line:     md.keyInfo[k].pos.Line,
 		Message:  err.Error(),
 		err:      err,
-		input:    string(md.data),
+		LastKey:  k,
+		Position: md.keyInfo[k].pos.withCol(d),
+		Line:     md.keyInfo[k].pos.Line,
+		input:    d,
 	}
 }
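With markDecodedRecursive above, keys consumed by a custom toml.Unmarshaler are now reported as decoded, so MetaData.Undecoded no longer lists them. A minimal sketch of that behavior; the fruit/config types and the inline TOML are made up for illustration, not taken from dnscrypt-proxy:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// fruit implements toml.Unmarshaler, so the decoder hands it the raw table.
type fruit struct{ Name string }

func (f *fruit) UnmarshalTOML(v any) error {
	if m, ok := v.(map[string]any); ok {
		if s, ok := m["name"].(string); ok {
			f.Name = s
		}
	}
	return nil
}

type config struct {
	Fruit fruit `toml:"fruit"`
}

func main() {
	var c config
	md, err := toml.Decode("[fruit]\nname = \"apple\"\n", &c)
	if err != nil {
		panic(err)
	}
	// With toml v1.5.0 the keys handled by UnmarshalTOML count as decoded,
	// so Undecoded should report nothing here.
	fmt.Println(c.Fruit.Name, md.Undecoded())
}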
46
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
46
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
|
@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
var mapKeysDirect, mapKeysSub []reflect.Value
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
mapKeysSub = append(mapKeysSub, mapKey)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
mapKeysDirect = append(mapKeysDirect, mapKey)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string, trailC bool) {
|
||||
sort.Strings(mapKeys)
|
||||
writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
|
||||
sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
|
||||
for i, mapKey := range mapKeys {
|
||||
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
|
||||
val := eindirect(rv.MapIndex(mapKey))
|
||||
if isNil(val) {
|
||||
continue
|
||||
}
|
||||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{mapKey}, val, true)
|
||||
enc.writeKeyValue(Key{mapKey.String()}, val, true)
|
||||
if trailC || i != len(mapKeys)-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
enc.encode(key.add(mapKey), val)
|
||||
enc.encode(key.add(mapKey.String()), val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
|||
}
|
||||
}
|
||||
|
||||
const is32Bit = (32 << (^uint(0) >> 63)) == 32
|
||||
|
||||
func pointerTo(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
return pointerTo(t.Elem())
|
||||
|
@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
if is32Bit {
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
}
|
||||
// Need to make a copy because ... ehm, I don't know why... I guess
|
||||
// allocating a new array can cause it to fail(?)
|
||||
//
|
||||
// Done for: https://github.com/BurntSushi/toml/issues/430
|
||||
// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
|
@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
writeFields := func(fields [][]int) {
|
||||
writeFields := func(fields [][]int, totalFields int) {
|
||||
for _, fieldIndex := range fields {
|
||||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||
|
@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
if inline {
|
||||
enc.writeKeyValue(Key{keyName}, fieldVal, true)
|
||||
if fieldIndex[0] != len(fields)-1 {
|
||||
if fieldIndex[0] != totalFields-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
} else {
|
||||
|
@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
if inline {
|
||||
enc.wf("{")
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
|
||||
l := len(fieldsDirect) + len(fieldsSub)
|
||||
writeFields(fieldsDirect, l)
|
||||
writeFields(fieldsSub, l)
|
||||
if inline {
|
||||
enc.wf("}")
|
||||
}
|
||||
|
|
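The eMap changes in encode.go above keep map keys as reflect.Values while still sorting them by their string form, so the observable behavior stays the same: map keys are emitted in sorted order and TOML output is deterministic across runs. A small sketch with a made-up map, not specific to dnscrypt-proxy:

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	m := map[string]any{
		"port":    53,
		"servers": map[string]string{"b": "10.0.0.2", "a": "10.0.0.1"},
	}
	// Direct keys are written first, then sub-tables, each in sorted order,
	// so repeated runs produce identical TOML.
	if err := toml.NewEncoder(os.Stdout).Encode(m); err != nil {
		panic(err)
	}
}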
69
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
69
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
|
@ -67,21 +67,36 @@ type ParseError struct {
|
|||
// Position of an error.
|
||||
type Position struct {
|
||||
Line int // Line number, starting at 1.
|
||||
Col int // Error column, starting at 1.
|
||||
Start int // Start of error, as byte offset starting at 0.
|
||||
Len int // Lenght in bytes.
|
||||
Len int // Length of the error in bytes.
|
||||
}
|
||||
|
||||
func (p Position) withCol(tomlFile string) Position {
|
||||
var (
|
||||
pos int
|
||||
lines = strings.Split(tomlFile, "\n")
|
||||
)
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= p.Start {
|
||||
p.Col = p.Start - pos + 1
|
||||
if p.Col < 1 { // Should never happen, but just in case.
|
||||
p.Col = 1
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (pe ParseError) Error() string {
|
||||
msg := pe.Message
|
||||
if msg == "" { // Error from errorf()
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
if pe.LastKey == "" {
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
|
||||
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
|
||||
}
|
||||
return fmt.Sprintf("toml: line %d (last key %q): %s",
|
||||
pe.Position.Line, pe.LastKey, msg)
|
||||
pe.Position.Line, pe.LastKey, pe.Message)
|
||||
}
|
||||
|
||||
// ErrorWithPosition returns the error with detailed location context.
|
||||
|
@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string {
|
|||
return pe.Error()
|
||||
}
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
col = pe.column(lines)
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
|
||||
msg := pe.Message
|
||||
if msg == "" {
|
||||
msg = pe.err.Error()
|
||||
}
|
||||
|
||||
// TODO: don't show control characters as literals? This may not show up
|
||||
// well everywhere.
|
||||
|
||||
var (
|
||||
lines = strings.Split(pe.input, "\n")
|
||||
b = new(strings.Builder)
|
||||
)
|
||||
if pe.Position.Len == 1 {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
|
||||
msg, pe.Position.Line, col+1)
|
||||
pe.Message, pe.Position.Line, pe.Position.Col)
|
||||
} else {
|
||||
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
|
||||
msg, pe.Position.Line, col, col+pe.Position.Len)
|
||||
pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
|
||||
}
|
||||
if pe.Position.Line > 2 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
|
||||
|
@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string {
|
|||
diff := len(expanded) - len(lines[pe.Position.Line-1])
|
||||
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
|
@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string {
|
|||
return m
|
||||
}
|
||||
|
||||
func (pe ParseError) column(lines []string) int {
|
||||
var pos, col int
|
||||
for i := range lines {
|
||||
ll := len(lines[i]) + 1 // +1 for the removed newline
|
||||
if pos+ll >= pe.Position.Start {
|
||||
col = pe.Position.Start - pos
|
||||
if col < 0 { // Should never happen, but just in case.
|
||||
col = 0
|
||||
}
|
||||
break
|
||||
}
|
||||
pos += ll
|
||||
}
|
||||
|
||||
return col
|
||||
}
|
||||
|
||||
func expandTab(s string) string {
|
||||
var (
|
||||
b strings.Builder
|
||||
|
|
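The Position.withCol helper in error.go above fills in ParseError.Position.Col, which previously had to be derived from the byte offset by the caller. A hedged sketch of reading it from a decode error; the broken TOML snippet is invented:

package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]any
	_, err := toml.Decode("key = # missing value\n", &v)
	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Position.Col is now populated, so callers no longer need to
		// recompute the column from Position.Start themselves.
		fmt.Println(perr.Position.Line, perr.Position.Col)
		fmt.Println(perr.ErrorWithPosition())
	}
}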
33
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
33
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
|
@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
|
|||
func (lx *lexer) errorf(format string, values ...any) stateFn {
|
||||
if lx.atEOF {
|
||||
pos := lx.getPos()
|
||||
pos.Line--
|
||||
if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
|
||||
pos.Line--
|
||||
}
|
||||
pos.Len = 1
|
||||
pos.Start = lx.pos - 1
|
||||
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
|
||||
|
@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
|
|||
lx.emit(itemKeyEnd)
|
||||
return lexSkip(lx, lexValue)
|
||||
default:
|
||||
if r == '\n' {
|
||||
return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
|
||||
}
|
||||
return lx.errorf("expected '.' or '=', but got %q instead", r)
|
||||
}
|
||||
}
|
||||
|
@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
|
|||
if r == eof {
|
||||
return lx.errorf("unexpected EOF; expected value")
|
||||
}
|
||||
if r == '\n' {
|
||||
return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", r)
|
||||
}
|
||||
|
||||
|
@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
|
|||
case 'x':
|
||||
r = lx.peek()
|
||||
if !isHex(r) {
|
||||
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
|
||||
lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
|
||||
}
|
||||
return lexHexInteger
|
||||
}
|
||||
|
@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
|
|||
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
||||
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
|
||||
func isBareKeyChar(r rune, tomlNext bool) bool {
|
||||
if tomlNext {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-' ||
|
||||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
|
||||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
|
||||
(r >= 0x037f && r <= 0x1fff) ||
|
||||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
|
||||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
|
||||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
|
||||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
|
||||
(r >= 0x10000 && r <= 0xeffff)
|
||||
}
|
||||
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-'
|
||||
return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') || r == '_' || r == '-'
|
||||
}
|
||||
|
|
3
vendor/github.com/BurntSushi/toml/meta.go
generated
vendored
3
vendor/github.com/BurntSushi/toml/meta.go
generated
vendored
|
@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {
|
|||
|
||||
// Like append(), but only increase the cap by 1.
|
||||
func (k Key) add(piece string) Key {
|
||||
if cap(k) > len(k) {
|
||||
return append(k, piece)
|
||||
}
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
|
|
17
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
17
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
|
@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
|
|||
// it anyway.
|
||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
|
||||
data = data[2:]
|
||||
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
|
||||
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
|
||||
data = data[3:]
|
||||
}
|
||||
|
@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) {
|
|||
if i := strings.IndexRune(data[:ex], 0); i > -1 {
|
||||
return nil, ParseError{
|
||||
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
|
||||
Position: Position{Line: 1, Start: i, Len: 1},
|
||||
Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
|
||||
Line: 1,
|
||||
input: data,
|
||||
}
|
||||
|
@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) {
|
|||
|
||||
func (p *parser) panicErr(it item, err error) {
|
||||
panic(ParseError{
|
||||
Message: err.Error(),
|
||||
err: err,
|
||||
Position: it.pos,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
|
@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
|
|||
func (p *parser) panicItemf(it item, format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: it.pos,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
|
@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
|
|||
func (p *parser) panicf(format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: p.pos,
|
||||
Position: p.pos.withCol(p.lx.input),
|
||||
Line: p.pos.Line,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
|
@ -123,10 +123,11 @@ func (p *parser) next() item {
|
|||
if it.typ == itemError {
|
||||
if it.err != nil {
|
||||
panic(ParseError{
|
||||
Position: it.pos,
|
||||
Message: it.err.Error(),
|
||||
err: it.err,
|
||||
Position: it.pos.withCol(p.lx.input),
|
||||
Line: it.pos.Line,
|
||||
LastKey: p.current(),
|
||||
err: it.err,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
// isHexis a superset of all the permissable characters surrounding an
|
||||
// isHex is a superset of all the permissible characters surrounding an
|
||||
// underscore.
|
||||
accept = isHex(r)
|
||||
}
|
||||
|
|
5
vendor/github.com/miekg/dns/README.md
generated
vendored
5
vendor/github.com/miekg/dns/README.md
generated
vendored
|
@ -86,7 +86,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
|
|||
* https://linuxcontainers.org/incus/
|
||||
* https://ifconfig.es
|
||||
* https://github.com/zmap/zdns
|
||||
|
||||
* https://framagit.org/bortzmeyer/check-soa
|
||||
|
||||
Send pull request if you want to be listed here.
|
||||
|
||||
|
@ -193,6 +193,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
|||
* 9460 - Service Binding and Parameter Specification via the DNS
|
||||
* 9461 - Service Binding Mapping for DNS Servers
|
||||
* 9462 - Discovery of Designated Resolvers
|
||||
* 9460 - SVCB and HTTPS Records
|
||||
* 9606 - DNS Resolver Information
|
||||
* Draft - Compact Denial of Existence in DNSSEC
|
||||
|
||||
## Loosely Based Upon
|
||||
|
||||
|
|
vendor/github.com/miekg/dns/edns.go (37 changes, generated, vendored)

@@ -27,6 +27,7 @@ const (
 	EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
 	EDNS0LOCALEND   = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
 	_DO = 1 << 15 // DNSSEC OK
+	_CO = 1 << 14 // Compact Answers OK
 )

 // makeDataOpt is used to unpack the EDNS0 option(s) from a message.
@@ -75,7 +76,11 @@ type OPT struct {
 func (rr *OPT) String() string {
 	s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
 	if rr.Do() {
-		s += "flags: do; "
+		if rr.Co() {
+			s += "flags: do, co; "
+		} else {
+			s += "flags: do; "
+		}
 	} else {
 		s += "flags:; "
 	}
@@ -195,14 +200,34 @@ func (rr *OPT) SetDo(do ...bool) {
 	}
 }

-// Z returns the Z part of the OPT RR as a uint16 with only the 15 least significant bits used.
-func (rr *OPT) Z() uint16 {
-	return uint16(rr.Hdr.Ttl & 0x7FFF)
+// Co returns the value of the CO (Compact Answers OK) bit.
+func (rr *OPT) Co() bool {
+	return rr.Hdr.Ttl&_CO == _CO
 }

-// SetZ sets the Z part of the OPT RR, note only the 15 least significant bits of z are used.
+// SetCo sets the CO (Compact Answers OK) bit.
+// If we pass an argument, set the CO bit to that value.
+// It is possible to pass 2 or more arguments, but they will be ignored.
+func (rr *OPT) SetCo(co ...bool) {
+	if len(co) == 1 {
+		if co[0] {
+			rr.Hdr.Ttl |= _CO
+		} else {
+			rr.Hdr.Ttl &^= _CO
+		}
+	} else {
+		rr.Hdr.Ttl |= _CO
+	}
+}
+
+// Z returns the Z part of the OPT RR as a uint16 with only the 14 least significant bits used.
+func (rr *OPT) Z() uint16 {
+	return uint16(rr.Hdr.Ttl & 0x3FFF)
+}
+
+// SetZ sets the Z part of the OPT RR, note only the 14 least significant bits of z are used.
 func (rr *OPT) SetZ(z uint16) {
-	rr.Hdr.Ttl = rr.Hdr.Ttl&^0x7FFF | uint32(z&0x7FFF)
+	rr.Hdr.Ttl = rr.Hdr.Ttl&^0x3FFF | uint32(z&0x3FFF)
 }

 // EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
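The new Co and SetCo accessors above expose the EDNS0 CO (Compact Answers OK) bit next to the existing DO bit. A minimal sketch of setting it on a query, assuming dns v1.1.64 as vendored here:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.com.", dns.TypeA)
	m.SetEdns0(1232, true) // attach an OPT RR with the DO bit set

	if opt := m.IsEdns0(); opt != nil {
		opt.SetCo(true) // new in dns v1.1.64: Compact Answers OK
		fmt.Println("DO:", opt.Do(), "CO:", opt.Co())
	}
}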
vendor/github.com/miekg/dns/scan_rr.go (10 changes, generated, vendored)

@@ -1620,6 +1620,16 @@ func (rr *NINFO) parse(c *zlexer, o string) *ParseError {
 	return nil
 }

+// Uses the same format as TXT
+func (rr *RESINFO) parse(c *zlexer, o string) *ParseError {
+	s, e := endingToTxtSlice(c, "bad RESINFO Resinfo")
+	if e != nil {
+		return e
+	}
+	rr.Txt = s
+	return nil
+}
+
 func (rr *URI) parse(c *zlexer, o string) *ParseError {
 	l, _ := c.Next()
 	i, e := strconv.ParseUint(l.token, 10, 16)
12
vendor/github.com/miekg/dns/svcb.go
generated
vendored
12
vendor/github.com/miekg/dns/svcb.go
generated
vendored
|
@ -214,11 +214,7 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue {
|
|||
}
|
||||
}
|
||||
|
||||
// SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-08).
|
||||
//
|
||||
// NOTE: The HTTPS/SVCB RFCs are in the draft stage.
|
||||
// The API, including constants and types related to SVCBKeyValues, may
|
||||
// change in future versions in accordance with the latest drafts.
|
||||
// SVCB RR. See RFC 9460.
|
||||
type SVCB struct {
|
||||
Hdr RR_Header
|
||||
Priority uint16 // If zero, Value must be empty or discarded by the user of this library
|
||||
|
@ -226,12 +222,8 @@ type SVCB struct {
|
|||
Value []SVCBKeyValue `dns:"pairs"`
|
||||
}
|
||||
|
||||
// HTTPS RR. Everything valid for SVCB applies to HTTPS as well.
|
||||
// HTTPS RR. See RFC 9460. Everything valid for SVCB applies to HTTPS as well.
|
||||
// Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols.
|
||||
//
|
||||
// NOTE: The HTTPS/SVCB RFCs are in the draft stage.
|
||||
// The API, including constants and types related to SVCBKeyValues, may
|
||||
// change in future versions in accordance with the latest drafts.
|
||||
type HTTPS struct {
|
||||
SVCB
|
||||
}
|
||||
|
|
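The svcb.go comments above now cite RFC 9460, the published SVCB/HTTPS specification, instead of the old drafts. For reference, a small sketch of parsing an HTTPS record with this package; the owner name and parameters are invented:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR(`example.com. 3600 IN HTTPS 1 . alpn=h2`)
	if err != nil {
		panic(err)
	}
	if h, ok := rr.(*dns.HTTPS); ok {
		// HTTPS embeds SVCB, so Priority, Target and Value are promoted fields.
		fmt.Println(h.Priority, h.Target, h.Value)
	}
}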
vendor/github.com/miekg/dns/types.go (10 changes, generated, vendored)

@@ -101,6 +101,7 @@ const (
 	TypeCAA      uint16 = 257
 	TypeAVC      uint16 = 258
 	TypeAMTRELAY uint16 = 260
+	TypeRESINFO  uint16 = 261

 	TypeTKEY uint16 = 249
 	TypeTSIG uint16 = 250
@@ -1508,6 +1509,15 @@ func (rr *ZONEMD) String() string {
 		" " + rr.Digest
 }

+// RESINFO RR. See RFC 9606.
+
+type RESINFO struct {
+	Hdr RR_Header
+	Txt []string `dns:"txt"`
+}
+
+func (rr *RESINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
 // APL RR. See RFC 3123.
 type APL struct {
 	Hdr RR_Header
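RESINFO (RFC 9606), added above, shares the TXT wire and presentation format, so it can be built from zone-file syntax like any TXT-style record. A minimal sketch; the owner name and key/value strings are illustrative only:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// RESINFO uses the same presentation format as TXT.
	rr, err := dns.NewRR(`resolver.example. 3600 IN RESINFO "qnamemin" "exterr=15-17"`)
	if err != nil {
		panic(err)
	}
	if ri, ok := rr.(*dns.RESINFO); ok {
		fmt.Println(ri.Txt)
	}
}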
vendor/github.com/miekg/dns/version.go (2 changes, generated, vendored)

@@ -3,7 +3,7 @@ package dns
 import "fmt"

 // Version is current version of this library.
-var Version = v{1, 1, 63}
+var Version = v{1, 1, 64}

 // v holds the version of this library.
 type v struct {
17
vendor/github.com/miekg/dns/zduplicate.go
generated
vendored
17
vendor/github.com/miekg/dns/zduplicate.go
generated
vendored
|
@ -957,6 +957,23 @@ func (r1 *PX) isDuplicate(_r2 RR) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (r1 *RESINFO) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*RESINFO)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_ = r2
|
||||
if len(r1.Txt) != len(r2.Txt) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(r1.Txt); i++ {
|
||||
if r1.Txt[i] != r2.Txt[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (r1 *RFC3597) isDuplicate(_r2 RR) bool {
|
||||
r2, ok := _r2.(*RFC3597)
|
||||
if !ok {
|
||||
|
|
19
vendor/github.com/miekg/dns/zmsg.go
generated
vendored
19
vendor/github.com/miekg/dns/zmsg.go
generated
vendored
|
@ -762,6 +762,14 @@ func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress boo
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *RESINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
off, err = packStringTxt(rr.Txt, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
|
||||
off, err = packStringHex(rr.Rdata, msg, off)
|
||||
if err != nil {
|
||||
|
@ -2353,6 +2361,17 @@ func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) {
|
|||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *RESINFO) unpack(msg []byte, off int) (off1 int, err error) {
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
||||
rr.Txt, off, err = unpackStringTxt(msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
return off, nil
|
||||
}
|
||||
|
||||
func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) {
|
||||
rdStart := off
|
||||
_ = rdStart
|
||||
|
|
15
vendor/github.com/miekg/dns/ztypes.go
generated
vendored
15
vendor/github.com/miekg/dns/ztypes.go
generated
vendored
|
@ -66,6 +66,7 @@ var TypeToRR = map[uint16]func() RR{
|
|||
TypeOPT: func() RR { return new(OPT) },
|
||||
TypePTR: func() RR { return new(PTR) },
|
||||
TypePX: func() RR { return new(PX) },
|
||||
TypeRESINFO: func() RR { return new(RESINFO) },
|
||||
TypeRKEY: func() RR { return new(RKEY) },
|
||||
TypeRP: func() RR { return new(RP) },
|
||||
TypeRRSIG: func() RR { return new(RRSIG) },
|
||||
|
@ -154,6 +155,7 @@ var TypeToString = map[uint16]string{
|
|||
TypeOPT: "OPT",
|
||||
TypePTR: "PTR",
|
||||
TypePX: "PX",
|
||||
TypeRESINFO: "RESINFO",
|
||||
TypeRKEY: "RKEY",
|
||||
TypeRP: "RP",
|
||||
TypeRRSIG: "RRSIG",
|
||||
|
@ -238,6 +240,7 @@ func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
|
|||
func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *PX) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RESINFO) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *RP) Header() *RR_Header { return &rr.Hdr }
|
||||
|
@ -622,6 +625,14 @@ func (rr *PX) len(off int, compression map[string]struct{}) int {
|
|||
return l
|
||||
}
|
||||
|
||||
func (rr *RESINFO) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
for _, x := range rr.Txt {
|
||||
l += len(x) + 1
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
|
||||
l := rr.Hdr.len(off, compression)
|
||||
l += len(rr.Rdata) / 2
|
||||
|
@ -1148,6 +1159,10 @@ func (rr *PX) copy() RR {
|
|||
}
|
||||
}
|
||||
|
||||
func (rr *RESINFO) copy() RR {
|
||||
return &RESINFO{rr.Hdr, cloneSlice(rr.Txt)}
|
||||
}
|
||||
|
||||
func (rr *RFC3597) copy() RR {
|
||||
return &RFC3597{rr.Hdr, rr.Rdata}
|
||||
}
|
||||
|
|
vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go (7 changes, generated, vendored)

@@ -460,10 +460,10 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
 		}
 		if p.isPathProbePacket {
 			probePacket := pnSpace.history.RemovePathProbe(p.PacketNumber)
-			if probePacket == nil {
-				panic(fmt.Sprintf("path probe doesn't exist: %d", p.PacketNumber))
+			// the probe packet might already have been declared lost
+			if probePacket != nil {
+				h.ackedPackets = append(h.ackedPackets, probePacket)
 			}
-			h.ackedPackets = append(h.ackedPackets, probePacket)
 			continue
 		}
 		h.ackedPackets = append(h.ackedPackets, p)
@@ -658,7 +658,6 @@ func (h *sentPacketHandler) detectLostPathProbes(now time.Time) {
 		for _, f := range p.Frames {
 			f.Handler.OnLost(f.Frame)
 		}
-		h.appDataPackets.history.Remove(p.PacketNumber)
 		h.appDataPackets.history.RemovePathProbe(p.PacketNumber)
 	}
 }
4
vendor/golang.org/x/mod/LICENSE
generated
vendored
4
vendor/golang.org/x/mod/LICENSE
generated
vendored
|
@ -1,4 +1,4 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
|
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
|||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
|
12
vendor/golang.org/x/mod/modfile/read.go
generated
vendored
12
vendor/golang.org/x/mod/modfile/read.go
generated
vendored
|
@ -226,8 +226,9 @@ func (x *FileSyntax) Cleanup() {
|
|||
continue
|
||||
}
|
||||
if ww == 1 && len(stmt.RParen.Comments.Before) == 0 {
|
||||
// Collapse block into single line.
|
||||
line := &Line{
|
||||
// Collapse block into single line but keep the Line reference used by the
|
||||
// parsed File structure.
|
||||
*stmt.Line[0] = Line{
|
||||
Comments: Comments{
|
||||
Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
|
||||
Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
|
||||
|
@ -235,7 +236,7 @@ func (x *FileSyntax) Cleanup() {
|
|||
},
|
||||
Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
|
||||
}
|
||||
x.Stmt[w] = line
|
||||
x.Stmt[w] = stmt.Line[0]
|
||||
w++
|
||||
continue
|
||||
}
|
||||
|
@ -876,6 +877,11 @@ func (in *input) parseLineBlock(start Position, token []string, lparen token) *L
|
|||
in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
|
||||
case ')':
|
||||
rparen := in.lex()
|
||||
// Don't preserve blank lines (denoted by a single empty comment, added above)
|
||||
// at the end of the block.
|
||||
if len(comments) == 1 && comments[0] == (Comment{}) {
|
||||
comments = nil
|
||||
}
|
||||
x.RParen.Before = comments
|
||||
x.RParen.Pos = rparen.pos
|
||||
if !in.peek().isEOL() {
|
||||
|
|
80
vendor/golang.org/x/mod/modfile/rule.go
generated
vendored
80
vendor/golang.org/x/mod/modfile/rule.go
generated
vendored
|
@ -43,6 +43,7 @@ type File struct {
|
|||
Exclude []*Exclude
|
||||
Replace []*Replace
|
||||
Retract []*Retract
|
||||
Tool []*Tool
|
||||
|
||||
Syntax *FileSyntax
|
||||
}
|
||||
|
@ -93,6 +94,12 @@ type Retract struct {
|
|||
Syntax *Line
|
||||
}
|
||||
|
||||
// A Tool is a single tool statement.
|
||||
type Tool struct {
|
||||
Path string
|
||||
Syntax *Line
|
||||
}
|
||||
|
||||
// A VersionInterval represents a range of versions with upper and lower bounds.
|
||||
// Intervals are closed: both bounds are included. When Low is equal to High,
|
||||
// the interval may refer to a single version ('v1.2.3') or an interval
|
||||
|
@ -297,7 +304,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse
|
|||
})
|
||||
}
|
||||
continue
|
||||
case "module", "godebug", "require", "exclude", "replace", "retract":
|
||||
case "module", "godebug", "require", "exclude", "replace", "retract", "tool":
|
||||
for _, l := range x.Line {
|
||||
f.add(&errs, x, l, x.Token[0], l.Token, fix, strict)
|
||||
}
|
||||
|
@ -509,6 +516,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
|
|||
Syntax: line,
|
||||
}
|
||||
f.Retract = append(f.Retract, retract)
|
||||
|
||||
case "tool":
|
||||
if len(args) != 1 {
|
||||
errorf("tool directive expects exactly one argument")
|
||||
return
|
||||
}
|
||||
s, err := parseString(&args[0])
|
||||
if err != nil {
|
||||
errorf("invalid quoted string: %v", err)
|
||||
return
|
||||
}
|
||||
f.Tool = append(f.Tool, &Tool{
|
||||
Path: s,
|
||||
Syntax: line,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1567,6 +1589,36 @@ func (f *File) DropRetract(vi VersionInterval) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// AddTool adds a new tool directive with the given path.
|
||||
// It does nothing if the tool line already exists.
|
||||
func (f *File) AddTool(path string) error {
|
||||
for _, t := range f.Tool {
|
||||
if t.Path == path {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
f.Tool = append(f.Tool, &Tool{
|
||||
Path: path,
|
||||
Syntax: f.Syntax.addLine(nil, "tool", path),
|
||||
})
|
||||
|
||||
f.SortBlocks()
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveTool removes a tool directive with the given path.
|
||||
// It does nothing if no such tool directive exists.
|
||||
func (f *File) DropTool(path string) error {
|
||||
for _, t := range f.Tool {
|
||||
if t.Path == path {
|
||||
t.Syntax.markRemoved()
|
||||
*t = Tool{}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) SortBlocks() {
|
||||
f.removeDups() // otherwise sorting is unsafe
|
||||
|
||||
|
@ -1593,9 +1645,9 @@ func (f *File) SortBlocks() {
|
|||
}
|
||||
}
|
||||
|
||||
// removeDups removes duplicate exclude and replace directives.
|
||||
// removeDups removes duplicate exclude, replace and tool directives.
|
||||
//
|
||||
// Earlier exclude directives take priority.
|
||||
// Earlier exclude and tool directives take priority.
|
||||
//
|
||||
// Later replace directives take priority.
|
||||
//
|
||||
|
@ -1605,10 +1657,10 @@ func (f *File) SortBlocks() {
|
|||
// retract directives are not de-duplicated since comments are
|
||||
// meaningful, and versions may be retracted multiple times.
|
||||
func (f *File) removeDups() {
|
||||
removeDups(f.Syntax, &f.Exclude, &f.Replace)
|
||||
removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool)
|
||||
}
|
||||
|
||||
func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
|
||||
func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) {
|
||||
kill := make(map[*Line]bool)
|
||||
|
||||
// Remove duplicate excludes.
|
||||
|
@ -1649,6 +1701,24 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
|
|||
}
|
||||
*replace = repl
|
||||
|
||||
if tool != nil {
|
||||
haveTool := make(map[string]bool)
|
||||
for _, t := range *tool {
|
||||
if haveTool[t.Path] {
|
||||
kill[t.Syntax] = true
|
||||
continue
|
||||
}
|
||||
haveTool[t.Path] = true
|
||||
}
|
||||
var newTool []*Tool
|
||||
for _, t := range *tool {
|
||||
if !kill[t.Syntax] {
|
||||
newTool = append(newTool, t)
|
||||
}
|
||||
}
|
||||
*tool = newTool
|
||||
}
|
||||
|
||||
// Duplicate require and retract directives are not removed.
|
||||
|
||||
// Drop killed statements from the syntax tree.
|
||||
|
|
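The new Tool support above (the `tool` verb, AddTool, DropTool and its de-duplication in removeDups) plugs into the usual modfile workflow. A hedged sketch, using a made-up module and tool path:

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte("module example.com/m\n\ngo 1.24\n")
	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		panic(err)
	}
	// AddTool appends a `tool` directive (and is a no-op if it already exists).
	if err := f.AddTool("golang.org/x/tools/cmd/stringer"); err != nil {
		panic(err)
	}
	out, err := f.Format()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}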
2
vendor/golang.org/x/mod/modfile/work.go
generated
vendored
2
vendor/golang.org/x/mod/modfile/work.go
generated
vendored
|
@ -331,5 +331,5 @@ func (f *WorkFile) SortBlocks() {
|
|||
// retract directives are not de-duplicated since comments are
|
||||
// meaningful, and versions may be retracted multiple times.
|
||||
func (f *WorkFile) removeDups() {
|
||||
removeDups(f.Syntax, nil, &f.Replace)
|
||||
removeDups(f.Syntax, nil, &f.Replace, nil)
|
||||
}
|
||||
|
|
2
vendor/golang.org/x/mod/module/module.go
generated
vendored
2
vendor/golang.org/x/mod/module/module.go
generated
vendored
|
@ -506,7 +506,6 @@ var badWindowsNames = []string{
|
|||
"PRN",
|
||||
"AUX",
|
||||
"NUL",
|
||||
"COM0",
|
||||
"COM1",
|
||||
"COM2",
|
||||
"COM3",
|
||||
|
@ -516,7 +515,6 @@ var badWindowsNames = []string{
|
|||
"COM7",
|
||||
"COM8",
|
||||
"COM9",
|
||||
"LPT0",
|
||||
"LPT1",
|
||||
"LPT2",
|
||||
"LPT3",
|
||||
|
|
4
vendor/golang.org/x/tools/LICENSE
generated
vendored
4
vendor/golang.org/x/tools/LICENSE
generated
vendored
|
@ -1,4 +1,4 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
|
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
|||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
|
24
vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
generated
vendored
24
vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
generated
vendored
|
@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
|
|||
|
||||
// Does augmented child strictly contain [start, end)?
|
||||
if augPos <= start && end <= augEnd {
|
||||
_, isToken := child.(tokenNode)
|
||||
return isToken || visit(child)
|
||||
if is[tokenNode](child) {
|
||||
return true
|
||||
}
|
||||
|
||||
// childrenOf elides the FuncType node beneath FuncDecl.
|
||||
// Add it back here for TypeParams, Params, Results,
|
||||
// all FieldLists). But we don't add it back for the "func" token
|
||||
// even though it is is the tree at FuncDecl.Type.Func.
|
||||
if decl, ok := node.(*ast.FuncDecl); ok {
|
||||
if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv {
|
||||
path = append(path, decl.Type)
|
||||
}
|
||||
}
|
||||
|
||||
return visit(child)
|
||||
}
|
||||
|
||||
// Does [start, end) overlap multiple children?
|
||||
|
@ -313,6 +326,8 @@ func childrenOf(n ast.Node) []ast.Node {
|
|||
//
|
||||
// As a workaround, we inline the case for FuncType
|
||||
// here and order things correctly.
|
||||
// We also need to insert the elided FuncType just
|
||||
// before the 'visit' recursion.
|
||||
//
|
||||
children = nil // discard ast.Walk(FuncDecl) info subtrees
|
||||
children = append(children, tok(n.Type.Func, len("func")))
|
||||
|
@ -632,3 +647,8 @@ func NodeDescription(n ast.Node) string {
|
|||
}
|
||||
panic(fmt.Sprintf("unexpected node type: %T", n))
|
||||
}
|
||||
|
||||
func is[T any](x any) bool {
|
||||
_, ok := x.(T)
|
||||
return ok
|
||||
}
|
||||
|
|
5
vendor/golang.org/x/tools/go/ast/astutil/imports.go
generated
vendored
5
vendor/golang.org/x/tools/go/ast/astutil/imports.go
generated
vendored
|
@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r
|
|||
}
|
||||
|
||||
// UsesImport reports whether a given import is used.
|
||||
// The provided File must have been parsed with syntactic object resolution
|
||||
// (not using go/parser.SkipObjectResolution).
|
||||
func UsesImport(f *ast.File, path string) (used bool) {
|
||||
if f.Scope == nil {
|
||||
panic("file f was not parsed with syntactic object resolution")
|
||||
}
|
||||
spec := importSpec(f, path)
|
||||
if spec == nil {
|
||||
return
|
||||
|
|
vendor/golang.org/x/tools/go/ast/astutil/util.go (11 changes, generated, vendored)

@@ -7,12 +7,5 @@ package astutil
 import "go/ast"

 // Unparen returns e with any enclosing parentheses stripped.
-func Unparen(e ast.Expr) ast.Expr {
-	for {
-		p, ok := e.(*ast.ParenExpr)
-		if !ok {
-			return e
-		}
-		e = p.X
-	}
-}
+// Deprecated: use [ast.Unparen].
+func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) }
152
vendor/golang.org/x/tools/go/ast/inspector/inspector.go
generated
vendored
152
vendor/golang.org/x/tools/go/ast/inspector/inspector.go
generated
vendored
|
@ -36,6 +36,9 @@ package inspector
|
|||
|
||||
import (
|
||||
"go/ast"
|
||||
_ "unsafe"
|
||||
|
||||
"golang.org/x/tools/internal/astutil/edge"
|
||||
)
|
||||
|
||||
// An Inspector provides methods for inspecting
|
||||
|
@ -44,6 +47,24 @@ type Inspector struct {
|
|||
events []event
|
||||
}
|
||||
|
||||
//go:linkname events
|
||||
func events(in *Inspector) []event { return in.events }
|
||||
|
||||
func packEdgeKindAndIndex(ek edge.Kind, index int) int32 {
|
||||
return int32(uint32(index+1)<<7 | uint32(ek))
|
||||
}
|
||||
|
||||
// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within
|
||||
// an []ast.Node slice) from the parent field of a pop event.
|
||||
//
|
||||
//go:linkname unpackEdgeKindAndIndex
|
||||
func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) {
|
||||
// The "parent" field of a pop node holds the
|
||||
// edge Kind in the lower 7 bits and the index+1
|
||||
// in the upper 25.
|
||||
return edge.Kind(x & 0x7f), int(x>>7) - 1
|
||||
}
|
||||
|
||||
// New returns an Inspector for the specified syntax trees.
|
||||
func New(files []*ast.File) *Inspector {
|
||||
return &Inspector{traverse(files)}
|
||||
|
@ -52,9 +73,10 @@ func New(files []*ast.File) *Inspector {
|
|||
// An event represents a push or a pop
|
||||
// of an ast.Node during a traversal.
|
||||
type event struct {
|
||||
node ast.Node
|
||||
typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
|
||||
index int // index of corresponding push or pop event
|
||||
node ast.Node
|
||||
typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
|
||||
index int32 // index of corresponding push or pop event
|
||||
parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only)
|
||||
}
|
||||
|
||||
// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
|
||||
|
@ -73,8 +95,17 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
|
|||
// check, Preorder is almost twice as fast as Nodes. The two
|
||||
// features seem to contribute similar slowdowns (~1.4x each).
|
||||
|
||||
// This function is equivalent to the PreorderSeq call below,
|
||||
// but to avoid the additional dynamic call (which adds 13-35%
|
||||
// to the benchmarks), we expand it out.
|
||||
//
|
||||
// in.PreorderSeq(types...)(func(n ast.Node) bool {
|
||||
// f(n)
|
||||
// return true
|
||||
// })
|
||||
|
||||
mask := maskOf(types)
|
||||
for i := 0; i < len(in.events); {
|
||||
for i := int32(0); i < int32(len(in.events)); {
|
||||
ev := in.events[i]
|
||||
if ev.index > i {
|
||||
// push
|
||||
|
@ -104,7 +135,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
|
|||
// matches an element of the types slice.
|
||||
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
|
||||
mask := maskOf(types)
|
||||
for i := 0; i < len(in.events); {
|
||||
for i := int32(0); i < int32(len(in.events)); {
|
||||
ev := in.events[i]
|
||||
if ev.index > i {
|
||||
// push
|
||||
|
@ -138,7 +169,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc
|
|||
func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
|
||||
mask := maskOf(types)
|
||||
var stack []ast.Node
|
||||
for i := 0; i < len(in.events); {
|
||||
for i := int32(0); i < int32(len(in.events)); {
|
||||
ev := in.events[i]
|
||||
if ev.index > i {
|
||||
// push
|
||||
|
@ -171,50 +202,83 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s
|
|||
// traverse builds the table of events representing a traversal.
|
||||
func traverse(files []*ast.File) []event {
|
||||
// Preallocate approximate number of events
|
||||
// based on source file extent.
|
||||
// based on source file extent of the declarations.
|
||||
// (We use End-Pos not FileStart-FileEnd to neglect
|
||||
// the effect of long doc comments.)
|
||||
// This makes traverse faster by 4x (!).
|
||||
var extent int
|
||||
for _, f := range files {
|
||||
extent += int(f.End() - f.Pos())
|
||||
}
|
||||
// This estimate is based on the net/http package.
|
||||
capacity := extent * 33 / 100
|
||||
if capacity > 1e6 {
|
||||
capacity = 1e6 // impose some reasonable maximum
|
||||
capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M)
|
||||
|
||||
v := &visitor{
|
||||
events: make([]event, 0, capacity),
|
||||
stack: []item{{index: -1}}, // include an extra event so file nodes have a parent
|
||||
}
|
||||
events := make([]event, 0, capacity)
|
||||
|
||||
var stack []event
|
||||
stack = append(stack, event{}) // include an extra event so file nodes have a parent
|
||||
for _, f := range files {
|
||||
ast.Inspect(f, func(n ast.Node) bool {
|
||||
if n != nil {
|
||||
// push
|
||||
ev := event{
|
||||
node: n,
|
||||
typ: 0, // temporarily used to accumulate type bits of subtree
|
||||
index: len(events), // push event temporarily holds own index
|
||||
}
|
||||
stack = append(stack, ev)
|
||||
events = append(events, ev)
|
||||
} else {
|
||||
// pop
|
||||
top := len(stack) - 1
|
||||
ev := stack[top]
|
||||
typ := typeOf(ev.node)
|
||||
push := ev.index
|
||||
parent := top - 1
|
||||
|
||||
events[push].typ = typ // set type of push
|
||||
stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs.
|
||||
events[push].index = len(events) // make push refer to pop
|
||||
|
||||
stack = stack[:top]
|
||||
events = append(events, ev)
|
||||
}
|
||||
return true
|
||||
})
|
||||
for _, file := range files {
|
||||
walk(v, edge.Invalid, -1, file)
|
||||
}
|
||||
|
||||
return events
|
||||
return v.events
|
||||
}
|
||||
|
||||
type visitor struct {
|
||||
events []event
|
||||
stack []item
|
||||
}
|
||||
|
||||
type item struct {
|
||||
index int32 // index of current node's push event
|
||||
parentIndex int32 // index of parent node's push event
|
||||
typAccum uint64 // accumulated type bits of current node's descendents
|
||||
edgeKindAndIndex int32 // edge.Kind and index, bit packed
|
||||
}
|
||||
|
||||
func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) {
|
||||
var (
|
||||
index = int32(len(v.events))
|
||||
parentIndex = v.stack[len(v.stack)-1].index
|
||||
)
|
||||
v.events = append(v.events, event{
|
||||
node: node,
|
||||
parent: parentIndex,
|
||||
typ: typeOf(node),
|
||||
index: 0, // (pop index is set later by visitor.pop)
|
||||
})
|
||||
v.stack = append(v.stack, item{
|
||||
index: index,
|
||||
parentIndex: parentIndex,
|
||||
edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex),
|
||||
})
|
||||
|
||||
// 2B nodes ought to be enough for anyone!
|
||||
if int32(len(v.events)) < 0 {
|
||||
panic("event index exceeded int32")
|
||||
}
|
||||
|
||||
// 32M elements in an []ast.Node ought to be enough for anyone!
|
||||
if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex {
|
||||
panic("Node slice index exceeded uint25")
|
||||
}
|
||||
}
|
||||
|
||||
func (v *visitor) pop(node ast.Node) {
|
||||
top := len(v.stack) - 1
|
||||
current := v.stack[top]
|
||||
|
||||
push := &v.events[current.index]
|
||||
parent := &v.stack[top-1]
|
||||
|
||||
push.index = int32(len(v.events)) // make push event refer to pop
|
||||
parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent
|
||||
|
||||
v.stack = v.stack[:top]
|
||||
|
||||
v.events = append(v.events, event{
|
||||
node: node,
|
||||
typ: current.typAccum,
|
||||
index: current.index,
|
||||
parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex]
|
||||
})
|
||||
}
|
||||
|
|
85
vendor/golang.org/x/tools/go/ast/inspector/iter.go
generated
vendored
Normal file
85
vendor/golang.org/x/tools/go/ast/inspector/iter.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.23

package inspector

import (
	"go/ast"
	"iter"
)

// PreorderSeq returns an iterator that visits all the
// nodes of the files supplied to New in depth-first order.
// It visits each node n before n's children.
// The complete traversal sequence is determined by ast.Inspect.
//
// The types argument, if non-empty, enables type-based
// filtering of events: only nodes whose type matches an
// element of the types slice are included in the sequence.
func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] {

	// This implementation is identical to Preorder,
	// except that it supports breaking out of the loop.

	return func(yield func(ast.Node) bool) {
		mask := maskOf(types)
		for i := int32(0); i < int32(len(in.events)); {
			ev := in.events[i]
			if ev.index > i {
				// push
				if ev.typ&mask != 0 {
					if !yield(ev.node) {
						break
					}
				}
				pop := ev.index
				if in.events[pop].typ&mask == 0 {
					// Subtrees do not contain types: skip them and pop.
					i = pop + 1
					continue
				}
			}
			i++
		}
	}
}

// All[N] returns an iterator over all the nodes of type N.
// N must be a pointer-to-struct type that implements ast.Node.
//
// Example:
//
//	for call := range All[*ast.CallExpr](in) { ... }
func All[N interface {
	*S
	ast.Node
}, S any](in *Inspector) iter.Seq[N] {

	// To avoid additional dynamic call overheads,
	// we duplicate rather than call the logic of PreorderSeq.

	mask := typeOf((N)(nil))
	return func(yield func(N) bool) {
		for i := int32(0); i < int32(len(in.events)); {
			ev := in.events[i]
			if ev.index > i {
				// push
				if ev.typ&mask != 0 {
					if !yield(ev.node.(N)) {
						break
					}
				}
				pop := ev.index
				if in.events[pop].typ&mask == 0 {
					// Subtrees do not contain types: skip them and pop.
					i = pop + 1
					continue
				}
			}
			i++
		}
	}
}
|
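PreorderSeq and All plug into Go 1.23 range-over-func loops. A minimal, self-contained usage sketch (the file name and source text here are made up for illustration):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"

		"golang.org/x/tools/go/ast/inspector"
	)

	func main() {
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "example.go", "package p\nfunc f() { g(1) }", 0)
		if err != nil {
			panic(err)
		}
		in := inspector.New([]*ast.File{f})

		// Visit every node; early exit works because the sequence supports break.
		for n := range in.PreorderSeq() {
			fmt.Printf("%T\n", n)
		}

		// Visit only call expressions, with no type switch needed.
		for call := range inspector.All[*ast.CallExpr](in) {
			fmt.Println(fset.Position(call.Pos()))
		}
	}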
5
vendor/golang.org/x/tools/go/ast/inspector/typeof.go
generated
vendored
@ -12,6 +12,8 @@ package inspector
import (
	"go/ast"
	"math"

	_ "unsafe"
)

const (
@ -215,8 +217,9 @@ func typeOf(n ast.Node) uint64 {
	return 0
}

//go:linkname maskOf
func maskOf(nodes []ast.Node) uint64 {
	if nodes == nil {
	if len(nodes) == 0 {
		return math.MaxUint64 // match all node types
	}
	var mask uint64
|
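The maskOf change above replaces a nil check with a length check, so an empty but non-nil types slice now also selects all node types (previously only a nil slice did), and the new //go:linkname directive lets other x/tools packages reach the unexported function. A tiny illustration of the nil/empty distinction the old code depended on (a sketch, not the internal function itself):

	var nilTypes []ast.Node    // nil: the only "match all" case before this change
	emptyTypes := []ast.Node{} // non-nil but empty: now also means "match all"
	fmt.Println(nilTypes == nil, emptyTypes == nil, len(nilTypes), len(emptyTypes))
	// prints: true false 0 0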
341
vendor/golang.org/x/tools/go/ast/inspector/walk.go
generated
vendored
Normal file
|
@ -0,0 +1,341 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package inspector
|
||||
|
||||
// This file is a fork of ast.Inspect to reduce unnecessary dynamic
|
||||
// calls and to gather edge information.
|
||||
//
|
||||
// Consistency with the original is ensured by TestInspectAllNodes.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
|
||||
"golang.org/x/tools/internal/astutil/edge"
|
||||
)
|
||||
|
||||
func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) {
|
||||
for i, node := range list {
|
||||
walk(v, ek, i, node)
|
||||
}
|
||||
}
|
||||
|
||||
func walk(v *visitor, ek edge.Kind, index int, node ast.Node) {
|
||||
v.push(ek, index, node)
|
||||
|
||||
// walk children
|
||||
// (the order of the cases matches the order
|
||||
// of the corresponding node types in ast.go)
|
||||
switch n := node.(type) {
|
||||
// Comments and fields
|
||||
case *ast.Comment:
|
||||
// nothing to do
|
||||
|
||||
case *ast.CommentGroup:
|
||||
walkList(v, edge.CommentGroup_List, n.List)
|
||||
|
||||
case *ast.Field:
|
||||
if n.Doc != nil {
|
||||
walk(v, edge.Field_Doc, -1, n.Doc)
|
||||
}
|
||||
walkList(v, edge.Field_Names, n.Names)
|
||||
if n.Type != nil {
|
||||
walk(v, edge.Field_Type, -1, n.Type)
|
||||
}
|
||||
if n.Tag != nil {
|
||||
walk(v, edge.Field_Tag, -1, n.Tag)
|
||||
}
|
||||
if n.Comment != nil {
|
||||
walk(v, edge.Field_Comment, -1, n.Comment)
|
||||
}
|
||||
|
||||
case *ast.FieldList:
|
||||
walkList(v, edge.FieldList_List, n.List)
|
||||
|
||||
// Expressions
|
||||
case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
|
||||
// nothing to do
|
||||
|
||||
case *ast.Ellipsis:
|
||||
if n.Elt != nil {
|
||||
walk(v, edge.Ellipsis_Elt, -1, n.Elt)
|
||||
}
|
||||
|
||||
case *ast.FuncLit:
|
||||
walk(v, edge.FuncLit_Type, -1, n.Type)
|
||||
walk(v, edge.FuncLit_Body, -1, n.Body)
|
||||
|
||||
case *ast.CompositeLit:
|
||||
if n.Type != nil {
|
||||
walk(v, edge.CompositeLit_Type, -1, n.Type)
|
||||
}
|
||||
walkList(v, edge.CompositeLit_Elts, n.Elts)
|
||||
|
||||
case *ast.ParenExpr:
|
||||
walk(v, edge.ParenExpr_X, -1, n.X)
|
||||
|
||||
case *ast.SelectorExpr:
|
||||
walk(v, edge.SelectorExpr_X, -1, n.X)
|
||||
walk(v, edge.SelectorExpr_Sel, -1, n.Sel)
|
||||
|
||||
case *ast.IndexExpr:
|
||||
walk(v, edge.IndexExpr_X, -1, n.X)
|
||||
walk(v, edge.IndexExpr_Index, -1, n.Index)
|
||||
|
||||
case *ast.IndexListExpr:
|
||||
walk(v, edge.IndexListExpr_X, -1, n.X)
|
||||
walkList(v, edge.IndexListExpr_Indices, n.Indices)
|
||||
|
||||
case *ast.SliceExpr:
|
||||
walk(v, edge.SliceExpr_X, -1, n.X)
|
||||
if n.Low != nil {
|
||||
walk(v, edge.SliceExpr_Low, -1, n.Low)
|
||||
}
|
||||
if n.High != nil {
|
||||
walk(v, edge.SliceExpr_High, -1, n.High)
|
||||
}
|
||||
if n.Max != nil {
|
||||
walk(v, edge.SliceExpr_Max, -1, n.Max)
|
||||
}
|
||||
|
||||
case *ast.TypeAssertExpr:
|
||||
walk(v, edge.TypeAssertExpr_X, -1, n.X)
|
||||
if n.Type != nil {
|
||||
walk(v, edge.TypeAssertExpr_Type, -1, n.Type)
|
||||
}
|
||||
|
||||
case *ast.CallExpr:
|
||||
walk(v, edge.CallExpr_Fun, -1, n.Fun)
|
||||
walkList(v, edge.CallExpr_Args, n.Args)
|
||||
|
||||
case *ast.StarExpr:
|
||||
walk(v, edge.StarExpr_X, -1, n.X)
|
||||
|
||||
case *ast.UnaryExpr:
|
||||
walk(v, edge.UnaryExpr_X, -1, n.X)
|
||||
|
||||
case *ast.BinaryExpr:
|
||||
walk(v, edge.BinaryExpr_X, -1, n.X)
|
||||
walk(v, edge.BinaryExpr_Y, -1, n.Y)
|
||||
|
||||
case *ast.KeyValueExpr:
|
||||
walk(v, edge.KeyValueExpr_Key, -1, n.Key)
|
||||
walk(v, edge.KeyValueExpr_Value, -1, n.Value)
|
||||
|
||||
// Types
|
||||
case *ast.ArrayType:
|
||||
if n.Len != nil {
|
||||
walk(v, edge.ArrayType_Len, -1, n.Len)
|
||||
}
|
||||
walk(v, edge.ArrayType_Elt, -1, n.Elt)
|
||||
|
||||
case *ast.StructType:
|
||||
walk(v, edge.StructType_Fields, -1, n.Fields)
|
||||
|
||||
case *ast.FuncType:
|
||||
if n.TypeParams != nil {
|
||||
walk(v, edge.FuncType_TypeParams, -1, n.TypeParams)
|
||||
}
|
||||
if n.Params != nil {
|
||||
walk(v, edge.FuncType_Params, -1, n.Params)
|
||||
}
|
||||
if n.Results != nil {
|
||||
walk(v, edge.FuncType_Results, -1, n.Results)
|
||||
}
|
||||
|
||||
case *ast.InterfaceType:
|
||||
walk(v, edge.InterfaceType_Methods, -1, n.Methods)
|
||||
|
||||
case *ast.MapType:
|
||||
walk(v, edge.MapType_Key, -1, n.Key)
|
||||
walk(v, edge.MapType_Value, -1, n.Value)
|
||||
|
||||
case *ast.ChanType:
|
||||
walk(v, edge.ChanType_Value, -1, n.Value)
|
||||
|
||||
// Statements
|
||||
case *ast.BadStmt:
|
||||
// nothing to do
|
||||
|
||||
case *ast.DeclStmt:
|
||||
walk(v, edge.DeclStmt_Decl, -1, n.Decl)
|
||||
|
||||
case *ast.EmptyStmt:
|
||||
// nothing to do
|
||||
|
||||
case *ast.LabeledStmt:
|
||||
walk(v, edge.LabeledStmt_Label, -1, n.Label)
|
||||
walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt)
|
||||
|
||||
case *ast.ExprStmt:
|
||||
walk(v, edge.ExprStmt_X, -1, n.X)
|
||||
|
||||
case *ast.SendStmt:
|
||||
walk(v, edge.SendStmt_Chan, -1, n.Chan)
|
||||
walk(v, edge.SendStmt_Value, -1, n.Value)
|
||||
|
||||
case *ast.IncDecStmt:
|
||||
walk(v, edge.IncDecStmt_X, -1, n.X)
|
||||
|
||||
case *ast.AssignStmt:
|
||||
walkList(v, edge.AssignStmt_Lhs, n.Lhs)
|
||||
walkList(v, edge.AssignStmt_Rhs, n.Rhs)
|
||||
|
||||
case *ast.GoStmt:
|
||||
walk(v, edge.GoStmt_Call, -1, n.Call)
|
||||
|
||||
case *ast.DeferStmt:
|
||||
walk(v, edge.DeferStmt_Call, -1, n.Call)
|
||||
|
||||
case *ast.ReturnStmt:
|
||||
walkList(v, edge.ReturnStmt_Results, n.Results)
|
||||
|
||||
case *ast.BranchStmt:
|
||||
if n.Label != nil {
|
||||
walk(v, edge.BranchStmt_Label, -1, n.Label)
|
||||
}
|
||||
|
||||
case *ast.BlockStmt:
|
||||
walkList(v, edge.BlockStmt_List, n.List)
|
||||
|
||||
case *ast.IfStmt:
|
||||
if n.Init != nil {
|
||||
walk(v, edge.IfStmt_Init, -1, n.Init)
|
||||
}
|
||||
walk(v, edge.IfStmt_Cond, -1, n.Cond)
|
||||
walk(v, edge.IfStmt_Body, -1, n.Body)
|
||||
if n.Else != nil {
|
||||
walk(v, edge.IfStmt_Else, -1, n.Else)
|
||||
}
|
||||
|
||||
case *ast.CaseClause:
|
||||
walkList(v, edge.CaseClause_List, n.List)
|
||||
walkList(v, edge.CaseClause_Body, n.Body)
|
||||
|
||||
case *ast.SwitchStmt:
|
||||
if n.Init != nil {
|
||||
walk(v, edge.SwitchStmt_Init, -1, n.Init)
|
||||
}
|
||||
if n.Tag != nil {
|
||||
walk(v, edge.SwitchStmt_Tag, -1, n.Tag)
|
||||
}
|
||||
walk(v, edge.SwitchStmt_Body, -1, n.Body)
|
||||
|
||||
case *ast.TypeSwitchStmt:
|
||||
if n.Init != nil {
|
||||
walk(v, edge.TypeSwitchStmt_Init, -1, n.Init)
|
||||
}
|
||||
walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign)
|
||||
walk(v, edge.TypeSwitchStmt_Body, -1, n.Body)
|
||||
|
||||
case *ast.CommClause:
|
||||
if n.Comm != nil {
|
||||
walk(v, edge.CommClause_Comm, -1, n.Comm)
|
||||
}
|
||||
walkList(v, edge.CommClause_Body, n.Body)
|
||||
|
||||
case *ast.SelectStmt:
|
||||
walk(v, edge.SelectStmt_Body, -1, n.Body)
|
||||
|
||||
case *ast.ForStmt:
|
||||
if n.Init != nil {
|
||||
walk(v, edge.ForStmt_Init, -1, n.Init)
|
||||
}
|
||||
if n.Cond != nil {
|
||||
walk(v, edge.ForStmt_Cond, -1, n.Cond)
|
||||
}
|
||||
if n.Post != nil {
|
||||
walk(v, edge.ForStmt_Post, -1, n.Post)
|
||||
}
|
||||
walk(v, edge.ForStmt_Body, -1, n.Body)
|
||||
|
||||
case *ast.RangeStmt:
|
||||
if n.Key != nil {
|
||||
walk(v, edge.RangeStmt_Key, -1, n.Key)
|
||||
}
|
||||
if n.Value != nil {
|
||||
walk(v, edge.RangeStmt_Value, -1, n.Value)
|
||||
}
|
||||
walk(v, edge.RangeStmt_X, -1, n.X)
|
||||
walk(v, edge.RangeStmt_Body, -1, n.Body)
|
||||
|
||||
// Declarations
|
||||
case *ast.ImportSpec:
|
||||
if n.Doc != nil {
|
||||
walk(v, edge.ImportSpec_Doc, -1, n.Doc)
|
||||
}
|
||||
if n.Name != nil {
|
||||
walk(v, edge.ImportSpec_Name, -1, n.Name)
|
||||
}
|
||||
walk(v, edge.ImportSpec_Path, -1, n.Path)
|
||||
if n.Comment != nil {
|
||||
walk(v, edge.ImportSpec_Comment, -1, n.Comment)
|
||||
}
|
||||
|
||||
case *ast.ValueSpec:
|
||||
if n.Doc != nil {
|
||||
walk(v, edge.ValueSpec_Doc, -1, n.Doc)
|
||||
}
|
||||
walkList(v, edge.ValueSpec_Names, n.Names)
|
||||
if n.Type != nil {
|
||||
walk(v, edge.ValueSpec_Type, -1, n.Type)
|
||||
}
|
||||
walkList(v, edge.ValueSpec_Values, n.Values)
|
||||
if n.Comment != nil {
|
||||
walk(v, edge.ValueSpec_Comment, -1, n.Comment)
|
||||
}
|
||||
|
||||
case *ast.TypeSpec:
|
||||
if n.Doc != nil {
|
||||
walk(v, edge.TypeSpec_Doc, -1, n.Doc)
|
||||
}
|
||||
walk(v, edge.TypeSpec_Name, -1, n.Name)
|
||||
if n.TypeParams != nil {
|
||||
walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams)
|
||||
}
|
||||
walk(v, edge.TypeSpec_Type, -1, n.Type)
|
||||
if n.Comment != nil {
|
||||
walk(v, edge.TypeSpec_Comment, -1, n.Comment)
|
||||
}
|
||||
|
||||
case *ast.BadDecl:
|
||||
// nothing to do
|
||||
|
||||
case *ast.GenDecl:
|
||||
if n.Doc != nil {
|
||||
walk(v, edge.GenDecl_Doc, -1, n.Doc)
|
||||
}
|
||||
walkList(v, edge.GenDecl_Specs, n.Specs)
|
||||
|
||||
case *ast.FuncDecl:
|
||||
if n.Doc != nil {
|
||||
walk(v, edge.FuncDecl_Doc, -1, n.Doc)
|
||||
}
|
||||
if n.Recv != nil {
|
||||
walk(v, edge.FuncDecl_Recv, -1, n.Recv)
|
||||
}
|
||||
walk(v, edge.FuncDecl_Name, -1, n.Name)
|
||||
walk(v, edge.FuncDecl_Type, -1, n.Type)
|
||||
if n.Body != nil {
|
||||
walk(v, edge.FuncDecl_Body, -1, n.Body)
|
||||
}
|
||||
|
||||
case *ast.File:
|
||||
if n.Doc != nil {
|
||||
walk(v, edge.File_Doc, -1, n.Doc)
|
||||
}
|
||||
walk(v, edge.File_Name, -1, n.Name)
|
||||
walkList(v, edge.File_Decls, n.Decls)
|
||||
// don't walk n.Comments - they have been
|
||||
// visited already through the individual
|
||||
// nodes
|
||||
|
||||
default:
|
||||
// (includes *ast.Package)
|
||||
panic(fmt.Sprintf("Walk: unexpected node type %T", n))
|
||||
}
|
||||
|
||||
v.pop(node)
|
||||
}
|
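walk.go forks ast.Inspect so the inspector can record, for every child, the edge (struct field and index) through which it was reached. That edge information is internal, but the exported WithStack method already exposes the related idea of traversal context; a rough sketch of inspecting a node together with its parent (files is assumed to be a []*ast.File prepared by the caller):

	in := inspector.New(files)
	in.WithStack([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node, push bool, stack []ast.Node) bool {
		if push && len(stack) >= 2 {
			parent := stack[len(stack)-2] // the node enclosing this call
			fmt.Printf("call at %v inside a %T\n", n.Pos(), parent)
		}
		return true
	})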
117
vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
generated
vendored
@ -2,22 +2,64 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package gcexportdata provides functions for locating, reading, and
// writing export data files containing type information produced by the
// gc compiler. This package supports go1.7 export data format and all
// later versions.
// Package gcexportdata provides functions for reading and writing
// export data, which is a serialized description of the API of a Go
// package including the names, kinds, types, and locations of all
// exported declarations.
//
// Although it might seem convenient for this package to live alongside
// go/types in the standard library, this would cause version skew
// problems for developer tools that use it, since they must be able to
// consume the outputs of the gc compiler both before and after a Go
// update such as from Go 1.7 to Go 1.8. Because this package lives in
// golang.org/x/tools, sites can update their version of this repo some
// time before the Go 1.8 release and rebuild and redeploy their
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
// Go update. (See discussion at https://golang.org/issue/15651.)
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
// The standard Go compiler (cmd/compile) writes an export data file
// for each package it compiles, which it later reads when compiling
// packages that import the earlier one. The compiler must thus
// contain logic to both write and read export data.
// (See the "Export" section in the cmd/compile/README file.)
//
// The [Read] function in this package can read files produced by the
// compiler, producing [go/types] data structures. As a matter of
// policy, Read supports export data files produced by only the last
// two Go releases plus tip; see https://go.dev/issue/68898. The
// export data files produced by the compiler contain additional
// details related to generics, inlining, and other optimizations that
// cannot be decoded by the [Read] function.
//
// In files written by the compiler, the export data is not at the
// start of the file. Before calling Read, use [NewReader] to locate
// the desired portion of the file.
//
// The [Write] function in this package encodes the exported API of a
// Go package ([types.Package]) as a file. Such files can be later
// decoded by Read, but cannot be consumed by the compiler.
//
// # Future changes
//
// Although Read supports the formats written by both Write and the
// compiler, the two are quite different, and there is an open
// proposal (https://go.dev/issue/69491) to separate these APIs.
//
// Under that proposal, this package would ultimately provide only the
// Read operation for compiler export data, which must be defined in
// this module (golang.org/x/tools), not in the standard library, to
// avoid version skew for developer tools that need to read compiler
// export data both before and after a Go release, such as from Go
// 1.23 to Go 1.24. Because this package lives in the tools module,
// clients can update their version of the module some time before the
// Go 1.24 release and rebuild and redeploy their tools, which will
// then be able to consume both Go 1.23 and Go 1.24 export data files,
// so they will work before and after the Go update. (See discussion
// at https://go.dev/issue/15651.)
//
// The operations to import and export [go/types] data structures
// would be defined in the go/types package as Import and Export.
// [Write] would (eventually) delegate to Export,
// and [Read], when it detects a file produced by Export,
// would delegate to Import.
//
// # Deprecations
//
// The [NewImporter] and [Find] functions are deprecated and should
// not be used in new code. The [WriteBundle] and [ReadBundle]
// functions are experimental, and there is an open proposal to
// deprecate them (https://go.dev/issue/69573).
package gcexportdata

import (
	"bufio"
@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) {
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
	buf := bufio.NewReader(r)
	_, size, err := gcimporter.FindExportData(buf)
	size, err := gcimporter.FindExportData(buf)
	if err != nil {
		return nil, err
	}

	if size >= 0 {
		// We were given an archive and found the __.PKGDEF in it.
		// This tells us the size of the export data, and we don't
		// need to return the entire file.
		return &io.LimitedReader{
			R: buf,
			N: size,
		}, nil
	} else {
		// We were given an object file. As such, we don't know how large
		// the export data is and must return the entire file.
		return buf, nil
	}
	// We were given an archive and found the __.PKGDEF in it.
	// This tells us the size of the export data, and we don't
	// need to return the entire file.
	return &io.LimitedReader{
		R: buf,
		N: size,
	}, nil
}

// readAll works the same way as io.ReadAll, but avoids allocations and copies
@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) {
// Read reads export data from in, decodes it, and returns type
// information for the package.
//
// Read is capable of reading export data produced by [Write] at the
// same source code version, or by the last two Go releases (plus tip)
// of the standard Go compiler. Reading files from older compilers may
// produce an error.
//
// The package path (effectively its linker symbol prefix) is
// specified by path, since unlike the package name, this information
// may not be recorded in the export data.
@ -128,14 +169,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
	// (from "version"). Select appropriate importer.
	if len(data) > 0 {
		switch data[0] {
		case 'v', 'c', 'd': // binary, till go1.10
		case 'v', 'c', 'd':
			// binary, produced by cmd/compile till go1.10
			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])

		case 'i': // indexed, till go1.19
		case 'i':
			// indexed, produced by cmd/compile till go1.19,
			// and also by [Write].
			//
			// If proposal #69491 is accepted, go/types
			// serialization will be implemented by
			// types.Export, to which Write would eventually
			// delegate (explicitly dropping any pretence at
			// inter-version Write-Read compatibility).
			// This [Read] function would delegate to types.Import
			// when it detects that the file was produced by Export.
			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
			return pkg, err

		case 'u': // unified, from go1.20
		case 'u':
			// unified, produced by cmd/compile since go1.20
			_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
			return pkg, err
|
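Putting NewReader and Read together, a typical decoding sequence looks like the sketch below (exportFile and the package path are placeholders supplied by the caller; the compiler normally embeds export data inside an archive, which is why NewReader runs first):

	f, err := os.Open(exportFile)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f) // locate the export data section
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, "example.com/somepkg")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Scope().Names())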
54
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
generated
vendored
@ -1,54 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package packagesdriver fetches type sizes for go/packages and go/analysis.
package packagesdriver

import (
	"context"
	"fmt"
	"strings"

	"golang.org/x/tools/internal/gocommand"
)

// TODO(adonovan): move back into go/packages.
func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
	inv.Verb = "list"
	inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
	stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
	var goarch, compiler string
	if rawErr != nil {
		rawErrMsg := rawErr.Error()
		if strings.Contains(rawErrMsg, "cannot find main module") ||
			strings.Contains(rawErrMsg, "go.mod file not found") {
			// User's running outside of a module.
			// All bets are off. Get GOARCH and guess compiler is gc.
			// TODO(matloob): Is this a problem in practice?
			inv.Verb = "env"
			inv.Args = []string{"GOARCH"}
			envout, enverr := gocmdRunner.Run(ctx, inv)
			if enverr != nil {
				return "", "", enverr
			}
			goarch = strings.TrimSpace(envout.String())
			compiler = "gc"
		} else if friendlyErr != nil {
			return "", "", friendlyErr
		} else {
			// This should be unreachable, but be defensive
			// in case RunRaw's error results are inconsistent.
			return "", "", rawErr
		}
	} else {
		fields := strings.Fields(stdout.String())
		if len(fields) < 2 {
			return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
				stdout.String(), stderr.String())
		}
		goarch = fields[0]
		compiler = fields[1]
	}
	return compiler, goarch, nil
}
15
vendor/golang.org/x/tools/go/packages/doc.go
generated
vendored
@ -64,7 +64,7 @@ graph using the Imports fields.

The Load function can be configured by passing a pointer to a Config as
the first argument. A nil Config is equivalent to the zero Config, which
causes Load to run in LoadFiles mode, collecting minimal information.
causes Load to run in [LoadFiles] mode, collecting minimal information.
See the documentation for type Config for details.

As noted earlier, the Config.Mode controls the amount of detail
@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode
for details.

Most tools should pass their command-line arguments (after any flags)
uninterpreted to [Load], so that it can interpret them
uninterpreted to Load, so that it can interpret them
according to the conventions of the underlying build system.

See the Example function for typical usage.

# The driver protocol

[Load] may be used to load Go packages even in Go projects that use
Load may be used to load Go packages even in Go projects that use
alternative build systems, by installing an appropriate "driver"
program for the build system and specifying its location in the
GOPACKAGESDRIVER environment variable.
@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information
is written to the driver's standard input. The driver must write a
JSON-encoded [DriverResponse] message to its standard output. (This
message differs from the JSON schema produced by 'go list'.)

The value of the PWD environment variable seen by the driver process
is the preferred name of its working directory. (The working directory
may have other aliases due to symbolic links; see the comment on the
Dir field of [exec.Cmd] for related information.)
When the driver process emits in its response the name of a file
that is a descendant of this directory, it must use an absolute path
that has the value of PWD as a prefix, to ensure that the returned
filenames satisfy the original query.
*/
package packages // import "golang.org/x/tools/go/packages"
|
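For concreteness, the skeleton of an external driver under this protocol could look like the following. This is only a sketch: a real driver must translate the request into its build system's metadata and fill in Roots and Packages rather than declining the query.

	package main

	import (
		"encoding/json"
		"log"
		"os"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		var req packages.DriverRequest
		if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
			log.Fatal(err)
		}
		patterns := os.Args[1:] // patterns arrive as arguments, after any flags
		_ = patterns

		// A driver that cannot answer the query sets NotHandled,
		// causing go/packages to fall back to 'go list'.
		resp := packages.DriverResponse{NotHandled: true}
		if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
			log.Fatal(err)
		}
	}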
15
vendor/golang.org/x/tools/go/packages/external.go
generated
vendored
@ -13,6 +13,7 @@ import (
	"fmt"
	"os"
	"os/exec"
	"slices"
	"strings"
)

@ -79,10 +80,10 @@ type DriverResponse struct {

// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
type driver func(cfg *Config, patterns []string) (*DriverResponse, error)

// findExternalDriver returns the file path of a tool that supplies
// the build system package structure, or "" if not found."
// the build system package structure, or "" if not found.
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
func findExternalDriver(cfg *Config) driver {
@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver {
		return nil
		}
	}
	return func(cfg *Config, words ...string) (*DriverResponse, error) {
	return func(cfg *Config, patterns []string) (*DriverResponse, error) {
		req, err := json.Marshal(DriverRequest{
			Mode: cfg.Mode,
			Env:  cfg.Env,
@ -117,7 +118,7 @@ func findExternalDriver(cfg *Config) driver {

		buf := new(bytes.Buffer)
		stderr := new(bytes.Buffer)
		cmd := exec.CommandContext(cfg.Context, tool, words...)
		cmd := exec.CommandContext(cfg.Context, tool, patterns...)
		cmd.Dir = cfg.Dir
		// The cwd gets resolved to the real path. On Darwin, where
		// /tmp is a symlink, this breaks anything that expects the
@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver {
		// command.
		//
		// (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
		cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir)
		cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
		cmd.Stdin = bytes.NewReader(req)
		cmd.Stdout = buf
		cmd.Stderr = stderr
@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver {
		return &response, nil
	}
}

// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)].
// TODO(adonovan): use go1.21 slices.Clip.
func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] }
|
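The removed slicesClip helper is replaced by slices.Clip from the standard library (go1.21), with the same purpose: clipping cfg.Env before appending "PWD=..." forces append to allocate a fresh backing array instead of writing into capacity shared with the caller's slice. A small, hedged illustration of the difference:

	env := make([]string, 2, 4)
	env[0], env[1] = "A=1", "B=2"

	unsafeCopy := append(env, "PWD=/x")            // may reuse env's spare capacity
	safeCopy := append(slices.Clip(env), "PWD=/x") // Clip trims capacity, forcing a copy

	fmt.Println(&unsafeCopy[0] == &env[0]) // true: same backing array
	fmt.Println(&safeCopy[0] == &env[0])   // false: fresh allocation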
90
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
|
@ -21,7 +21,6 @@ import (
|
|||
"sync"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/go/internal/packagesdriver"
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/packagesinternal"
|
||||
)
|
||||
|
@ -81,6 +80,12 @@ type golistState struct {
|
|||
cfg *Config
|
||||
ctx context.Context
|
||||
|
||||
runner *gocommand.Runner
|
||||
|
||||
// overlay is the JSON file that encodes the Config.Overlay
|
||||
// mapping, used by 'go list -overlay=...'.
|
||||
overlay string
|
||||
|
||||
envOnce sync.Once
|
||||
goEnvError error
|
||||
goEnv map[string]string
|
||||
|
@ -128,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string {
|
|||
// goListDriver uses the go list command to interpret the patterns and produce
|
||||
// the build system package structure.
|
||||
// See driver for more details.
|
||||
func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
|
||||
//
|
||||
// overlay is the JSON file that encodes the cfg.Overlay
|
||||
// mapping, used by 'go list -overlay=...'
|
||||
func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
|
||||
// Make sure that any asynchronous go commands are killed when we return.
|
||||
parentCtx := cfg.Context
|
||||
if parentCtx == nil {
|
||||
|
@ -143,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error
|
|||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
vendorDirs: map[string]bool{},
|
||||
overlay: overlay,
|
||||
runner: runner,
|
||||
}
|
||||
|
||||
// Fill in response.Sizes asynchronously if necessary.
|
||||
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
|
||||
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
|
||||
compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
|
||||
response.dr.Compiler = compiler
|
||||
response.dr.Arch = arch
|
||||
errCh <- err
|
||||
|
@ -312,6 +322,7 @@ type jsonPackage struct {
|
|||
ImportPath string
|
||||
Dir string
|
||||
Name string
|
||||
Target string
|
||||
Export string
|
||||
GoFiles []string
|
||||
CompiledGoFiles []string
|
||||
|
@ -495,13 +506,15 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
|
|||
pkg := &Package{
|
||||
Name: p.Name,
|
||||
ID: p.ImportPath,
|
||||
Dir: p.Dir,
|
||||
Target: p.Target,
|
||||
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
|
||||
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
|
||||
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
|
||||
EmbedFiles: absJoin(p.Dir, p.EmbedFiles),
|
||||
EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns),
|
||||
IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
|
||||
forTest: p.ForTest,
|
||||
ForTest: p.ForTest,
|
||||
depsErrors: p.DepsErrors,
|
||||
Module: p.Module,
|
||||
}
|
||||
|
@ -682,7 +695,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
|
|||
// getGoVersion returns the effective minor version of the go command.
|
||||
func (state *golistState) getGoVersion() (int, error) {
|
||||
state.goVersionOnce.Do(func() {
|
||||
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
|
||||
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
|
||||
})
|
||||
return state.goVersion, state.goVersionError
|
||||
}
|
||||
|
@ -752,7 +765,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
|
|||
}
|
||||
}
|
||||
addFields("Name", "ImportPath", "Error") // These fields are always needed
|
||||
if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
|
||||
if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
|
||||
addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
|
||||
"CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
|
||||
"SwigFiles", "SwigCXXFiles", "SysoFiles")
|
||||
|
@ -760,7 +773,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
|
|||
addFields("TestGoFiles", "XTestGoFiles")
|
||||
}
|
||||
}
|
||||
if cfg.Mode&NeedTypes != 0 {
|
||||
if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
|
||||
// CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
|
||||
// even when -compiled isn't passed in.
|
||||
// TODO(#52435): Should we make the test ask for -compiled, or automatically
|
||||
|
@ -785,7 +798,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
|
|||
// Request Dir in the unlikely case Export is not absolute.
|
||||
addFields("Dir", "Export")
|
||||
}
|
||||
if cfg.Mode&needInternalForTest != 0 {
|
||||
if cfg.Mode&NeedForTest != 0 {
|
||||
addFields("ForTest")
|
||||
}
|
||||
if cfg.Mode&needInternalDepsErrors != 0 {
|
||||
|
@ -800,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string {
|
|||
if cfg.Mode&NeedEmbedPatterns != 0 {
|
||||
addFields("EmbedPatterns")
|
||||
}
|
||||
if cfg.Mode&NeedTarget != 0 {
|
||||
addFields("Target")
|
||||
}
|
||||
return "-json=" + strings.Join(fields, ",")
|
||||
}
|
||||
|
||||
|
@ -841,7 +857,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation {
|
|||
Env: cfg.Env,
|
||||
Logf: cfg.Logf,
|
||||
WorkingDir: cfg.Dir,
|
||||
Overlay: cfg.goListOverlayFile,
|
||||
Overlay: state.overlay,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -852,11 +868,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
|
|||
inv := state.cfgInvocation()
|
||||
inv.Verb = verb
|
||||
inv.Args = args
|
||||
gocmdRunner := cfg.gocmdRunner
|
||||
if gocmdRunner == nil {
|
||||
gocmdRunner = &gocommand.Runner{}
|
||||
}
|
||||
stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv)
|
||||
|
||||
stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
|
||||
if err != nil {
|
||||
// Check for 'go' executable not being found.
|
||||
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
|
||||
|
@ -880,6 +893,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
|
|||
return nil, friendlyErr
|
||||
}
|
||||
|
||||
// Return an error if 'go list' failed due to missing tools in
|
||||
// $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
|
||||
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
|
||||
return nil, friendlyErr
|
||||
}
|
||||
|
||||
// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
|
||||
// and should be suppressed by go list -e.
|
||||
//
|
||||
|
@ -1024,3 +1043,44 @@ func cmdDebugStr(cmd *exec.Cmd) string {
|
|||
}
|
||||
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
|
||||
}
|
||||
|
||||
// getSizesForArgs queries 'go list' for the appropriate
|
||||
// Compiler and GOARCH arguments to pass to [types.SizesFor].
|
||||
func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
|
||||
inv.Verb = "list"
|
||||
inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
|
||||
stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
|
||||
var goarch, compiler string
|
||||
if rawErr != nil {
|
||||
rawErrMsg := rawErr.Error()
|
||||
if strings.Contains(rawErrMsg, "cannot find main module") ||
|
||||
strings.Contains(rawErrMsg, "go.mod file not found") {
|
||||
// User's running outside of a module.
|
||||
// All bets are off. Get GOARCH and guess compiler is gc.
|
||||
// TODO(matloob): Is this a problem in practice?
|
||||
inv.Verb = "env"
|
||||
inv.Args = []string{"GOARCH"}
|
||||
envout, enverr := gocmdRunner.Run(ctx, inv)
|
||||
if enverr != nil {
|
||||
return "", "", enverr
|
||||
}
|
||||
goarch = strings.TrimSpace(envout.String())
|
||||
compiler = "gc"
|
||||
} else if friendlyErr != nil {
|
||||
return "", "", friendlyErr
|
||||
} else {
|
||||
// This should be unreachable, but be defensive
|
||||
// in case RunRaw's error results are inconsistent.
|
||||
return "", "", rawErr
|
||||
}
|
||||
} else {
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
|
||||
stdout.String(), stderr.String())
|
||||
}
|
||||
goarch = fields[0]
|
||||
compiler = fields[1]
|
||||
}
|
||||
return compiler, goarch, nil
|
||||
}
|
||||
|
|
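The compiler and GOARCH strings that getSizesForArgs recovers from 'go list' are ultimately handed to types.SizesFor; the final step looks roughly like this (the values shown are illustrative, not produced by this diff):

	compiler, goarch := "gc", "amd64" // e.g. as returned by getSizesForArgs
	sizes := types.SizesFor(compiler, goarch)
	if sizes == nil {
		log.Fatalf("unsupported compiler/arch pair %q/%q", compiler, goarch)
	}
	fmt.Println(sizes.Sizeof(types.Typ[types.Int64])) // 8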
73
vendor/golang.org/x/tools/go/packages/loadmode_string.go
generated
vendored
@ -9,49 +9,48 @@ import (
	"strings"
)

var allModes = []LoadMode{
	NeedName,
	NeedFiles,
	NeedCompiledGoFiles,
	NeedImports,
	NeedDeps,
	NeedExportFile,
	NeedTypes,
	NeedSyntax,
	NeedTypesInfo,
	NeedTypesSizes,
var modes = [...]struct {
	mode LoadMode
	name string
}{
	{NeedName, "NeedName"},
	{NeedFiles, "NeedFiles"},
	{NeedCompiledGoFiles, "NeedCompiledGoFiles"},
	{NeedImports, "NeedImports"},
	{NeedDeps, "NeedDeps"},
	{NeedExportFile, "NeedExportFile"},
	{NeedTypes, "NeedTypes"},
	{NeedSyntax, "NeedSyntax"},
	{NeedTypesInfo, "NeedTypesInfo"},
	{NeedTypesSizes, "NeedTypesSizes"},
	{NeedForTest, "NeedForTest"},
	{NeedModule, "NeedModule"},
	{NeedEmbedFiles, "NeedEmbedFiles"},
	{NeedEmbedPatterns, "NeedEmbedPatterns"},
	{NeedTarget, "NeedTarget"},
}

var modeStrings = []string{
	"NeedName",
	"NeedFiles",
	"NeedCompiledGoFiles",
	"NeedImports",
	"NeedDeps",
	"NeedExportFile",
	"NeedTypes",
	"NeedSyntax",
	"NeedTypesInfo",
	"NeedTypesSizes",
}

func (mod LoadMode) String() string {
	m := mod
	if m == 0 {
func (mode LoadMode) String() string {
	if mode == 0 {
		return "LoadMode(0)"
	}
	var out []string
	for i, x := range allModes {
		if x > m {
			break
		}
		if (m & x) != 0 {
			out = append(out, modeStrings[i])
			m = m ^ x
	// named bits
	for _, item := range modes {
		if (mode & item.mode) != 0 {
			mode ^= item.mode
			out = append(out, item.name)
		}
	}
	if m != 0 {
		out = append(out, "Unknown")
	// unnamed residue
	if mode != 0 {
		if out == nil {
			return fmt.Sprintf("LoadMode(%#x)", int(mode))
		}
		out = append(out, fmt.Sprintf("%#x", int(mode)))
	}
	return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
	if len(out) == 1 {
		return out[0]
	}
	return "(" + strings.Join(out, "|") + ")"
}
|
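With the table-driven String above, known bits print by name, a single bit prints bare, and any unnamed residue is rendered in hex. Expected behaviour, assuming the high bit in the last line is unassigned:

	fmt.Println(packages.NeedName)                      // NeedName
	fmt.Println(packages.NeedName | packages.NeedFiles) // (NeedName|NeedFiles)
	fmt.Println(packages.LoadMode(0))                   // LoadMode(0)
	fmt.Println(packages.LoadMode(1 << 30))             // LoadMode(0x40000000)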
413
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
|
@ -16,13 +16,13 @@ import (
|
|||
"go/scanner"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
@ -31,7 +31,6 @@ import (
|
|||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/packagesinternal"
|
||||
"golang.org/x/tools/internal/typesinternal"
|
||||
"golang.org/x/tools/internal/versions"
|
||||
)
|
||||
|
||||
// A LoadMode controls the amount of detail to return when loading.
|
||||
|
@ -44,20 +43,33 @@ import (
|
|||
// ID and Errors (if present) will always be filled.
|
||||
// [Load] may return more information than requested.
|
||||
//
|
||||
// The Mode flag is a union of several bits named NeedName,
|
||||
// NeedFiles, and so on, each of which determines whether
|
||||
// a given field of Package (Name, Files, etc) should be
|
||||
// populated.
|
||||
//
|
||||
// For convenience, we provide named constants for the most
|
||||
// common combinations of Need flags:
|
||||
//
|
||||
// [LoadFiles] lists of files in each package
|
||||
// [LoadImports] ... plus imports
|
||||
// [LoadTypes] ... plus type information
|
||||
// [LoadSyntax] ... plus type-annotated syntax
|
||||
// [LoadAllSyntax] ... for all dependencies
|
||||
//
|
||||
// Unfortunately there are a number of open bugs related to
|
||||
// interactions among the LoadMode bits:
|
||||
// - https://github.com/golang/go/issues/48226
|
||||
// - https://github.com/golang/go/issues/56633
|
||||
// - https://github.com/golang/go/issues/56677
|
||||
// - https://github.com/golang/go/issues/58726
|
||||
// - https://github.com/golang/go/issues/63517
|
||||
// - https://go.dev/issue/56633
|
||||
// - https://go.dev/issue/56677
|
||||
// - https://go.dev/issue/58726
|
||||
// - https://go.dev/issue/63517
|
||||
type LoadMode int
|
||||
|
||||
const (
|
||||
// NeedName adds Name and PkgPath.
|
||||
NeedName LoadMode = 1 << iota
|
||||
|
||||
// NeedFiles adds GoFiles and OtherFiles.
|
||||
// NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles
|
||||
NeedFiles
|
||||
|
||||
// NeedCompiledGoFiles adds CompiledGoFiles.
|
||||
|
@ -76,10 +88,10 @@ const (
|
|||
// NeedTypes adds Types, Fset, and IllTyped.
|
||||
NeedTypes
|
||||
|
||||
// NeedSyntax adds Syntax.
|
||||
// NeedSyntax adds Syntax and Fset.
|
||||
NeedSyntax
|
||||
|
||||
// NeedTypesInfo adds TypesInfo.
|
||||
// NeedTypesInfo adds TypesInfo and Fset.
|
||||
NeedTypesInfo
|
||||
|
||||
// NeedTypesSizes adds TypesSizes.
|
||||
|
@ -88,9 +100,10 @@ const (
|
|||
// needInternalDepsErrors adds the internal deps errors field for use by gopls.
|
||||
needInternalDepsErrors
|
||||
|
||||
// needInternalForTest adds the internal forTest field.
|
||||
// NeedForTest adds ForTest.
|
||||
//
|
||||
// Tests must also be set on the context for this field to be populated.
|
||||
needInternalForTest
|
||||
NeedForTest
|
||||
|
||||
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
|
||||
// Modifies CompiledGoFiles and Types, and has no effect on its own.
|
||||
|
@ -104,27 +117,27 @@ const (
|
|||
|
||||
// NeedEmbedPatterns adds EmbedPatterns.
|
||||
NeedEmbedPatterns
|
||||
|
||||
// NeedTarget adds Target.
|
||||
NeedTarget
|
||||
|
||||
// Be sure to update loadmode_string.go when adding new items!
|
||||
)
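A typical caller sets only the Need bits it will actually read, as in this sketch (the "./..." pattern is an example, not something mandated by the API):

	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	if packages.PrintErrors(pkgs) > 0 {
		os.Exit(1)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.GoFiles))
	}

The predefined combinations defined next (LoadFiles, LoadImports, and so on) are shorthands for common unions of these bits.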
|
||||
|
||||
const (
|
||||
// Deprecated: LoadFiles exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadFiles loads the name and file names for the initial packages.
|
||||
LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
|
||||
|
||||
// Deprecated: LoadImports exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadImports loads the name, file names, and import mapping for the initial packages.
|
||||
LoadImports = LoadFiles | NeedImports
|
||||
|
||||
// Deprecated: LoadTypes exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadTypes loads exported type information for the initial packages.
|
||||
LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
|
||||
|
||||
// Deprecated: LoadSyntax exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadSyntax loads typed syntax for the initial packages.
|
||||
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
|
||||
|
||||
// Deprecated: LoadAllSyntax exists for historical compatibility
|
||||
// and should not be used. Please directly specify the needed fields using the Need values.
|
||||
// LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
|
||||
LoadAllSyntax = LoadSyntax | NeedDeps
|
||||
|
||||
// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
|
||||
|
@ -134,13 +147,7 @@ const (
|
|||
// A Config specifies details about how packages should be loaded.
|
||||
// The zero value is a valid configuration.
|
||||
//
|
||||
// Calls to Load do not modify this struct.
|
||||
//
|
||||
// TODO(adonovan): #67702: this is currently false: in fact,
|
||||
// calls to [Load] do not modify the public fields of this struct, but
|
||||
// may modify hidden fields, so concurrent calls to [Load] must not
|
||||
// use the same Config. But perhaps we should reestablish the
|
||||
// documented invariant.
|
||||
// Calls to [Load] do not modify this struct.
|
||||
type Config struct {
|
||||
// Mode controls the level of information returned for each package.
|
||||
Mode LoadMode
|
||||
|
@ -171,19 +178,10 @@ type Config struct {
|
|||
//
|
||||
Env []string
|
||||
|
||||
// gocmdRunner guards go command calls from concurrency errors.
|
||||
gocmdRunner *gocommand.Runner
|
||||
|
||||
// BuildFlags is a list of command-line flags to be passed through to
|
||||
// the build system's query tool.
|
||||
BuildFlags []string
|
||||
|
||||
// modFile will be used for -modfile in go command invocations.
|
||||
modFile string
|
||||
|
||||
// modFlag will be used for -modfile in go command invocations.
|
||||
modFlag string
|
||||
|
||||
// Fset provides source position information for syntax trees and types.
|
||||
// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
|
||||
Fset *token.FileSet
|
||||
|
@ -230,21 +228,24 @@ type Config struct {
|
|||
// drivers may vary in their level of support for overlays.
|
||||
Overlay map[string][]byte
|
||||
|
||||
// goListOverlayFile is the JSON file that encodes the Overlay
|
||||
// mapping, used by 'go list -overlay=...'
|
||||
goListOverlayFile string
|
||||
// -- Hidden configuration fields only for use in x/tools --
|
||||
|
||||
// modFile will be used for -modfile in go command invocations.
|
||||
modFile string
|
||||
|
||||
// modFlag will be used for -modfile in go command invocations.
|
||||
modFlag string
|
||||
}
|
||||
|
||||
// Load loads and returns the Go packages named by the given patterns.
|
||||
//
|
||||
// Config specifies loading options;
|
||||
// nil behaves the same as an empty Config.
|
||||
// The cfg parameter specifies loading options; nil behaves the same as an empty [Config].
|
||||
//
|
||||
// The [Config.Mode] field is a set of bits that determine what kinds
|
||||
// of information should be computed and returned. Modes that require
|
||||
// more information tend to be slower. See [LoadMode] for details
|
||||
// and important caveats. Its zero value is equivalent to
|
||||
// NeedName | NeedFiles | NeedCompiledGoFiles.
|
||||
// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles].
|
||||
//
|
||||
// Each call to Load returns a new set of [Package] instances.
|
||||
// The Packages and their Imports form a directed acyclic graph.
|
||||
|
@ -261,7 +262,7 @@ type Config struct {
|
|||
// Errors associated with a particular package are recorded in the
|
||||
// corresponding Package's Errors list, and do not cause Load to
|
||||
// return an error. Clients may need to handle such errors before
|
||||
// proceeding with further analysis. The PrintErrors function is
|
||||
// proceeding with further analysis. The [PrintErrors] function is
|
||||
// provided for convenient display of all errors.
|
||||
func Load(cfg *Config, patterns ...string) ([]*Package, error) {
|
||||
ld := newLoader(cfg)
|
||||
|
@ -324,21 +325,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro
|
|||
} else if !response.NotHandled {
|
||||
return response, true, nil
|
||||
}
|
||||
// (fall through)
|
||||
// not handled: fall through
|
||||
}
|
||||
|
||||
// go list fallback
|
||||
//
|
||||
|
||||
// Write overlays once, as there are many calls
|
||||
// to 'go list' (one per chunk plus others too).
|
||||
overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
|
||||
overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
defer cleanupOverlay()
|
||||
cfg.goListOverlayFile = overlay
|
||||
|
||||
response, err := callDriverOnChunks(goListDriver, cfg, chunks)
|
||||
var runner gocommand.Runner // (shared across many 'go list' calls)
|
||||
driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
|
||||
return goListDriver(cfg, &runner, overlayFile, patterns)
|
||||
}
|
||||
response, err := callDriverOnChunks(driver, cfg, chunks)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
@ -376,16 +380,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) {
|
|||
|
||||
func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) {
|
||||
if len(chunks) == 0 {
|
||||
return driver(cfg)
|
||||
return driver(cfg, nil)
|
||||
}
|
||||
responses := make([]*DriverResponse, len(chunks))
|
||||
errNotHandled := errors.New("driver returned NotHandled")
|
||||
var g errgroup.Group
|
||||
for i, chunk := range chunks {
|
||||
i := i
|
||||
chunk := chunk
|
||||
g.Go(func() (err error) {
|
||||
responses[i], err = driver(cfg, chunk...)
|
||||
responses[i], err = driver(cfg, chunk)
|
||||
if responses[i] != nil && responses[i].NotHandled {
|
||||
err = errNotHandled
|
||||
}
|
||||
|
@ -435,6 +437,12 @@ type Package struct {
|
|||
// PkgPath is the package path as used by the go/types package.
|
||||
PkgPath string
|
||||
|
||||
// Dir is the directory associated with the package, if it exists.
|
||||
//
|
||||
// For packages listed by the go command, this is the directory containing
|
||||
// the package files.
|
||||
Dir string
|
||||
|
||||
// Errors contains any errors encountered querying the metadata
|
||||
// of the package, or while parsing or type-checking its files.
|
||||
Errors []Error
|
||||
|
@ -474,6 +482,10 @@ type Package struct {
|
|||
// information for the package as provided by the build system.
|
||||
ExportFile string
|
||||
|
||||
// Target is the absolute install path of the .a file, for libraries,
|
||||
// and of the executable file, for binaries.
|
||||
Target string
|
||||
|
||||
// Imports maps import paths appearing in the package's Go source files
|
||||
// to corresponding loaded Packages.
|
||||
Imports map[string]*Package
|
||||
|
@ -522,8 +534,8 @@ type Package struct {
|
|||
|
||||
// -- internal --
|
||||
|
||||
// forTest is the package under test, if any.
|
||||
forTest string
|
||||
// ForTest is the package under test, if any.
|
||||
ForTest string
|
||||
|
||||
// depsErrors is the DepsErrors field from the go list response, if any.
|
||||
depsErrors []*packagesinternal.PackageError
|
||||
|
@ -552,9 +564,6 @@ type ModuleError struct {
|
|||
}
|
||||
|
||||
func init() {
|
||||
packagesinternal.GetForTest = func(p interface{}) string {
|
||||
return p.(*Package).forTest
|
||||
}
|
||||
packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
|
||||
return p.(*Package).depsErrors
|
||||
}
|
||||
|
@ -566,7 +575,6 @@ func init() {
|
|||
}
|
||||
packagesinternal.TypecheckCgo = int(typecheckCgo)
|
||||
packagesinternal.DepsErrors = int(needInternalDepsErrors)
|
||||
packagesinternal.ForTest = int(needInternalForTest)
|
||||
}
|
||||
|
||||
// An Error describes a problem with a package's metadata, syntax, or types.
|
||||
|
@ -682,18 +690,19 @@ func (p *Package) String() string { return p.ID }
|
|||
// loaderPackage augments Package with state used during the loading phase
|
||||
type loaderPackage struct {
|
||||
*Package
|
||||
importErrors map[string]error // maps each bad import to its error
|
||||
loadOnce sync.Once
|
||||
color uint8 // for cycle detection
|
||||
needsrc bool // load from source (Mode >= LoadTypes)
|
||||
needtypes bool // type information is either requested or depended on
|
||||
initial bool // package was matched by a pattern
|
||||
goVersion int // minor version number of go command on PATH
|
||||
importErrors map[string]error // maps each bad import to its error
|
||||
preds []*loaderPackage // packages that import this one
|
||||
unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded
|
||||
color uint8 // for cycle detection
|
||||
needsrc bool // load from source (Mode >= LoadTypes)
|
||||
needtypes bool // type information is either requested or depended on
|
||||
initial bool // package was matched by a pattern
|
||||
goVersion int // minor version number of go command on PATH
|
||||
}
|
||||
|
||||
// loader holds the working state of a single call to load.
|
||||
type loader struct {
|
||||
pkgs map[string]*loaderPackage
|
||||
pkgs map[string]*loaderPackage // keyed by Package.ID
|
||||
Config
|
||||
sizes types.Sizes // non-nil if needed by mode
|
||||
parseCache map[string]*parseValue
|
||||
|
@ -739,9 +748,6 @@ func newLoader(cfg *Config) *loader {
|
|||
if ld.Config.Env == nil {
|
||||
ld.Config.Env = os.Environ()
|
||||
}
|
||||
if ld.Config.gocmdRunner == nil {
|
||||
ld.Config.gocmdRunner = &gocommand.Runner{}
|
||||
}
|
||||
if ld.Context == nil {
|
||||
ld.Context = context.Background()
|
||||
}
|
||||
|
@ -755,7 +761,7 @@ func newLoader(cfg *Config) *loader {
|
|||
ld.requestedMode = ld.Mode
|
||||
ld.Mode = impliedLoadMode(ld.Mode)
|
||||
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
|
||||
if ld.Fset == nil {
|
||||
ld.Fset = token.NewFileSet()
|
||||
}
|
||||
|
@ -764,6 +770,7 @@ func newLoader(cfg *Config) *loader {
|
|||
// because we load source if export data is missing.
|
||||
if ld.ParseFile == nil {
|
||||
ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
|
||||
// We implicitly promise to keep doing ast.Object resolution. :(
|
||||
const mode = parser.AllErrors | parser.ParseComments
|
||||
return parser.ParseFile(fset, filename, src, mode)
|
||||
}
|
||||
|
@ -795,7 +802,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
|
||||
// This package needs type information if the caller requested types and the package is
|
||||
// either a root, or it's a non-root and the user requested dependencies ...
|
||||
needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
|
||||
needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
|
||||
// This package needs source if the call requested source (or types info, which implies source)
|
||||
// and the package is either a root, or it's a non-root and the user requested dependencies...
|
||||
needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
|
||||
|
@ -820,9 +827,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if ld.Mode&NeedImports != 0 {
|
||||
// Materialize the import graph.
|
||||
|
||||
// Materialize the import graph if it is needed (NeedImports),
|
||||
// or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
|
||||
var leaves []*loaderPackage // packages with no unfinished successors
|
||||
if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
|
||||
const (
|
||||
white = 0 // new
|
||||
grey = 1 // in progress
|
||||
|
@ -841,63 +849,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
// dependency on a package that does. These are the only packages
|
||||
// for which we load source code.
|
||||
var stack []*loaderPackage
|
||||
var visit func(lpkg *loaderPackage) bool
|
||||
visit = func(lpkg *loaderPackage) bool {
|
||||
switch lpkg.color {
|
||||
case black:
|
||||
return lpkg.needsrc
|
||||
case grey:
|
||||
var visit func(from, lpkg *loaderPackage) bool
|
||||
visit = func(from, lpkg *loaderPackage) bool {
|
||||
if lpkg.color == grey {
|
||||
panic("internal error: grey node")
|
||||
}
|
||||
lpkg.color = grey
|
||||
stack = append(stack, lpkg) // push
|
||||
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
|
||||
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||
for importPath, ipkg := range stubs {
|
||||
var importErr error
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
if imp == nil {
|
||||
// (includes package "C" when DisableCgo)
|
||||
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||
} else if imp.color == grey {
|
||||
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||
}
|
||||
if importErr != nil {
|
||||
if lpkg.importErrors == nil {
|
||||
lpkg.importErrors = make(map[string]error)
|
||||
if lpkg.color == white {
|
||||
lpkg.color = grey
|
||||
stack = append(stack, lpkg) // push
|
||||
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
|
||||
lpkg.Imports = make(map[string]*Package, len(stubs))
|
||||
for importPath, ipkg := range stubs {
|
||||
var importErr error
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
if imp == nil {
|
||||
// (includes package "C" when DisableCgo)
|
||||
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
|
||||
} else if imp.color == grey {
|
||||
importErr = fmt.Errorf("import cycle: %s", stack)
|
||||
}
|
||||
lpkg.importErrors[importPath] = importErr
|
||||
continue
|
||||
if importErr != nil {
|
||||
if lpkg.importErrors == nil {
|
||||
lpkg.importErrors = make(map[string]error)
|
||||
}
|
||||
lpkg.importErrors[importPath] = importErr
|
||||
continue
|
||||
}
|
||||
|
||||
if visit(lpkg, imp) {
|
||||
lpkg.needsrc = true
|
||||
}
|
||||
lpkg.Imports[importPath] = imp.Package
|
||||
}
|
||||
|
||||
if visit(imp) {
|
||||
lpkg.needsrc = true
|
||||
// -- postorder --
|
||||
|
||||
// Complete type information is required for the
|
||||
// immediate dependencies of each source package.
|
||||
if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
|
||||
for _, ipkg := range lpkg.Imports {
|
||||
ld.pkgs[ipkg.ID].needtypes = true
|
||||
}
|
||||
}
|
||||
lpkg.Imports[importPath] = imp.Package
|
||||
|
||||
// NeedTypeSizes causes TypeSizes to be set even
|
||||
// on packages for which types aren't needed.
|
||||
if ld.Mode&NeedTypesSizes != 0 {
|
||||
lpkg.TypesSizes = ld.sizes
|
||||
}
|
||||
|
||||
// Add packages with no imports directly to the queue of leaves.
|
||||
if len(lpkg.Imports) == 0 {
|
||||
leaves = append(leaves, lpkg)
|
||||
}
|
||||
|
||||
stack = stack[:len(stack)-1] // pop
|
||||
lpkg.color = black
|
||||
}
|
||||
|
||||
// Complete type information is required for the
|
||||
// immediate dependencies of each source package.
|
||||
if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
|
||||
for _, ipkg := range lpkg.Imports {
|
||||
ld.pkgs[ipkg.ID].needtypes = true
|
||||
}
|
||||
// Add edge from predecessor.
|
||||
if from != nil {
|
||||
from.unfinishedSuccs.Add(+1) // incref
|
||||
lpkg.preds = append(lpkg.preds, from)
|
||||
}
|
||||
|
||||
// NeedTypeSizes causes TypeSizes to be set even
|
||||
// on packages for which types aren't needed.
|
||||
if ld.Mode&NeedTypesSizes != 0 {
|
||||
lpkg.TypesSizes = ld.sizes
|
||||
}
|
||||
stack = stack[:len(stack)-1] // pop
|
||||
lpkg.color = black
|
||||
|
||||
return lpkg.needsrc
|
||||
}
|
||||
|
||||
// For each initial package, create its import DAG.
|
||||
for _, lpkg := range initial {
|
||||
visit(lpkg)
|
||||
visit(nil, lpkg)
|
||||
}
|
||||
|
||||
} else {
|
||||
|
@ -910,16 +931,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
|
||||
// Load type data and syntax if needed, starting at
|
||||
// the initial packages (roots of the import DAG).
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
var wg sync.WaitGroup
|
||||
for _, lpkg := range initial {
|
||||
wg.Add(1)
|
||||
go func(lpkg *loaderPackage) {
|
||||
ld.loadRecursive(lpkg)
|
||||
wg.Done()
|
||||
}(lpkg)
|
||||
if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
|
||||
|
||||
// We avoid using g.SetLimit to limit concurrency as
|
||||
// it makes g.Go stop accepting work, which prevents
|
||||
// workers from enqueuing, and thus finishing, and thus
|
||||
// allowing the group to make progress: deadlock.
|
||||
//
|
||||
// Instead we use the ioLimit and cpuLimit semaphores.
|
||||
g, _ := errgroup.WithContext(ld.Context)
|
||||
|
||||
// enqueue adds a package to the type-checking queue.
|
||||
// It must have no unfinished successors.
|
||||
var enqueue func(*loaderPackage)
|
||||
enqueue = func(lpkg *loaderPackage) {
|
||||
g.Go(func() error {
|
||||
// Parse and type-check.
|
||||
ld.loadPackage(lpkg)
|
||||
|
||||
// Notify each waiting predecessor,
|
||||
// and enqueue it when it becomes a leaf.
|
||||
for _, pred := range lpkg.preds {
|
||||
if pred.unfinishedSuccs.Add(-1) == 0 { // decref
|
||||
enqueue(pred)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Load leaves first, adding new packages
|
||||
// to the queue as they become leaves.
|
||||
for _, leaf := range leaves {
|
||||
enqueue(leaf)
|
||||
}
|
||||
|
||||
if err := g.Wait(); err != nil {
|
||||
return nil, err // cancelled
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// If the context is done, return its error and
|
||||
|
@ -961,12 +1011,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
}
|
||||
if ld.requestedMode&NeedTypes == 0 {
|
||||
ld.pkgs[i].Types = nil
|
||||
ld.pkgs[i].Fset = nil
|
||||
ld.pkgs[i].IllTyped = false
|
||||
}
|
||||
if ld.requestedMode&NeedSyntax == 0 {
|
||||
ld.pkgs[i].Syntax = nil
|
||||
}
|
||||
if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 {
|
||||
ld.pkgs[i].Fset = nil
|
||||
}
|
||||
if ld.requestedMode&NeedTypesInfo == 0 {
|
||||
ld.pkgs[i].TypesInfo = nil
|
||||
}
|
||||
|
@ -981,31 +1033,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
|||
return result, nil
|
||||
}
|
||||
|
||||
// loadRecursive loads the specified package and its dependencies,
|
||||
// recursively, in parallel, in topological order.
|
||||
// It is atomic and idempotent.
|
||||
// Precondition: ld.Mode&NeedTypes.
|
||||
func (ld *loader) loadRecursive(lpkg *loaderPackage) {
|
||||
lpkg.loadOnce.Do(func() {
|
||||
// Load the direct dependencies, in parallel.
|
||||
var wg sync.WaitGroup
|
||||
for _, ipkg := range lpkg.Imports {
|
||||
imp := ld.pkgs[ipkg.ID]
|
||||
wg.Add(1)
|
||||
go func(imp *loaderPackage) {
|
||||
ld.loadRecursive(imp)
|
||||
wg.Done()
|
||||
}(imp)
|
||||
}
|
||||
wg.Wait()
|
||||
ld.loadPackage(lpkg)
|
||||
})
|
||||
}
|
||||
|
||||
// loadPackage loads the specified package.
|
||||
// loadPackage loads/parses/typechecks the specified package.
|
||||
// It must be called only once per Package,
|
||||
// after immediate dependencies are loaded.
|
||||
// Precondition: ld.Mode & NeedTypes.
|
||||
// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
|
||||
func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
if lpkg.PkgPath == "unsafe" {
|
||||
// Fill in the blanks to avoid surprises.
|
||||
|
@ -1041,6 +1072,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
if !lpkg.needtypes && !lpkg.needsrc {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO(adonovan): this condition looks wrong:
|
||||
// I think it should be lpkg.needtypes && !lpg.needsrc,
|
||||
// so that NeedSyntax without NeedTypes can be satisfied by export data.
|
||||
if !lpkg.needsrc {
|
||||
if err := ld.loadFromExportData(lpkg); err != nil {
|
||||
lpkg.Errors = append(lpkg.Errors, Error{
|
||||
|
@ -1146,7 +1181,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
}
|
||||
|
||||
lpkg.Syntax = files
|
||||
if ld.Config.Mode&NeedTypes == 0 {
|
||||
if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1157,16 +1192,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
return
|
||||
}
|
||||
|
||||
lpkg.TypesInfo = &types.Info{
|
||||
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||
Defs: make(map[*ast.Ident]types.Object),
|
||||
Uses: make(map[*ast.Ident]types.Object),
|
||||
Implicits: make(map[ast.Node]types.Object),
|
||||
Instances: make(map[*ast.Ident]types.Instance),
|
||||
Scopes: make(map[ast.Node]*types.Scope),
|
||||
Selections: make(map[*ast.SelectorExpr]*types.Selection),
|
||||
// Populate TypesInfo only if needed, as it
|
||||
// causes the type checker to work much harder.
|
||||
if ld.Config.Mode&NeedTypesInfo != 0 {
|
||||
lpkg.TypesInfo = &types.Info{
|
||||
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||
Defs: make(map[*ast.Ident]types.Object),
|
||||
Uses: make(map[*ast.Ident]types.Object),
|
||||
Implicits: make(map[ast.Node]types.Object),
|
||||
Instances: make(map[*ast.Ident]types.Instance),
|
||||
Scopes: make(map[ast.Node]*types.Scope),
|
||||
Selections: make(map[*ast.SelectorExpr]*types.Selection),
|
||||
FileVersions: make(map[*ast.File]string),
|
||||
}
|
||||
}
|
||||
versions.InitFileVersions(lpkg.TypesInfo)
|
||||
lpkg.TypesSizes = ld.sizes
|
||||
|
||||
importer := importerFunc(func(path string) (*types.Package, error) {
|
||||
|
@ -1219,6 +1258,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
}
|
||||
}
|
||||
|
||||
// Type-checking is CPU intensive.
|
||||
cpuLimit <- unit{} // acquire a token
|
||||
defer func() { <-cpuLimit }() // release a token
|
||||
|
||||
typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
|
||||
lpkg.importErrors = nil // no longer needed
|
||||
|
||||
|
@ -1283,8 +1326,11 @@ type importerFunc func(path string) (*types.Package, error)
|
|||
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
|
||||
|
||||
// We use a counting semaphore to limit
|
||||
// the number of parallel I/O calls per process.
|
||||
var ioLimit = make(chan bool, 20)
|
||||
// the number of parallel I/O calls or CPU threads per process.
|
||||
var (
|
||||
ioLimit = make(chan unit, 20)
|
||||
cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))
|
||||
)
|
||||
|
||||
func (ld *loader) parseFile(filename string) (*ast.File, error) {
|
||||
ld.parseCacheMu.Lock()
|
||||
|
@ -1301,20 +1347,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
|
|||
|
||||
var src []byte
|
||||
for f, contents := range ld.Config.Overlay {
|
||||
// TODO(adonovan): Inefficient for large overlays.
|
||||
// Do an exact name-based map lookup
|
||||
// (for nonexistent files) followed by a
|
||||
// FileID-based map lookup (for existing ones).
|
||||
if sameFile(f, filename) {
|
||||
src = contents
|
||||
break
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if src == nil {
|
||||
ioLimit <- true // wait
|
||||
ioLimit <- unit{} // acquire a token
|
||||
src, err = os.ReadFile(filename)
|
||||
<-ioLimit // signal
|
||||
<-ioLimit // release a token
|
||||
}
|
||||
if err != nil {
|
||||
v.err = err
|
||||
} else {
|
||||
// Parsing is CPU intensive.
|
||||
cpuLimit <- unit{} // acquire a token
|
||||
v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
|
||||
<-cpuLimit // release a token
|
||||
}
|
||||
|
||||
close(v.ready)
|
||||
|
@ -1329,18 +1383,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
|
|||
// Because files are scanned in parallel, the token.Pos
|
||||
// positions of the resulting ast.Files are not ordered.
|
||||
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
|
||||
var wg sync.WaitGroup
|
||||
n := len(filenames)
|
||||
parsed := make([]*ast.File, n)
|
||||
errors := make([]error, n)
|
||||
for i, file := range filenames {
|
||||
wg.Add(1)
|
||||
go func(i int, filename string) {
|
||||
var (
|
||||
n = len(filenames)
|
||||
parsed = make([]*ast.File, n)
|
||||
errors = make([]error, n)
|
||||
)
|
||||
var g errgroup.Group
|
||||
for i, filename := range filenames {
|
||||
// This creates goroutines unnecessarily in the
|
||||
// cache-hit case, but that case is uncommon.
|
||||
g.Go(func() error {
|
||||
parsed[i], errors[i] = ld.parseFile(filename)
|
||||
wg.Done()
|
||||
}(i, file)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
wg.Wait()
|
||||
g.Wait()
|
||||
|
||||
// Eliminate nils, preserving order.
|
||||
var o int
|
||||
|
@ -1499,6 +1556,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
|
|||
// All these things require knowing the import graph.
|
||||
loadMode |= NeedImports
|
||||
}
|
||||
if loadMode&NeedTypes != 0 {
|
||||
// Types require the GoVersion from Module.
|
||||
loadMode |= NeedModule
|
||||
}
|
||||
|
||||
return loadMode
|
||||
}
|
||||
|
@ -1507,4 +1568,4 @@ func usesExportData(cfg *Config) bool {
|
|||
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
|
||||
}
|
||||
|
||||
var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
|
||||
type unit struct{}
|
||||
|
|
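The reworked loader above type-checks packages from the leaves of the import graph upward instead of recursing down from the roots, bounded by the ioLimit and cpuLimit semaphores. The public entry point is unchanged; below is a minimal, illustrative sketch of driving it through packages.Load (not taken from the vendored files; it assumes the standard golang.org/x/tools/go/packages import path and uses "fmt" as an arbitrary pattern).

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Requesting syntax, types and type info exercises the parallel
	// parse/type-check path above: leaves are loaded first, and each
	// predecessor is enqueued once its successor count reaches zero.
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedImports |
			packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, pkg := range pkgs {
		fmt.Printf("%s: %d files, typed=%v\n", pkg.ID, len(pkg.Syntax), pkg.Types != nil)
	}
}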
9
vendor/golang.org/x/tools/go/packages/visit.go
generated
vendored
9
vendor/golang.org/x/tools/go/packages/visit.go
generated
vendored
|
@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
|
|||
// PrintErrors returns the number of errors printed.
|
||||
func PrintErrors(pkgs []*Package) int {
|
||||
var n int
|
||||
errModules := make(map[*Module]bool)
|
||||
Visit(pkgs, nil, func(pkg *Package) {
|
||||
for _, err := range pkg.Errors {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
n++
|
||||
}
|
||||
|
||||
// Print pkg.Module.Error once if present.
|
||||
mod := pkg.Module
|
||||
if mod != nil && mod.Error != nil && !errModules[mod] {
|
||||
errModules[mod] = true
|
||||
fmt.Fprintln(os.Stderr, mod.Error.Err)
|
||||
n++
|
||||
}
|
||||
})
|
||||
return n
|
||||
}
|
||||
|
|
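PrintErrors now also reports a module error at most once per module. The traversal underneath is packages.Visit; an illustrative sketch (same assumptions as the previous sketch) of using it to obtain a dependencies-first ordering:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports}
	pkgs, err := packages.Load(cfg, "bytes")
	if err != nil {
		log.Fatal(err)
	}
	// Visit calls the post function after a package's imports have been
	// visited, so deps ends up ordered with dependencies before dependents.
	var deps []*packages.Package
	packages.Visit(pkgs, nil, func(p *packages.Package) {
		deps = append(deps, p)
	})
	for _, p := range deps {
		fmt.Println(p.PkgPath)
	}
}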
186
vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
generated
vendored
186
vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
generated
vendored
|
@ -51,7 +51,7 @@ type Path string
|
|||
//
|
||||
// PO package->object Package.Scope.Lookup
|
||||
// OT object->type Object.Type
|
||||
// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
|
||||
// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa]
|
||||
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
|
||||
//
|
||||
// All valid paths start with a package and end at an object
|
||||
|
@ -63,8 +63,8 @@ type Path string
|
|||
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
|
||||
// - The only OT operator is Object.Type,
|
||||
// which we encode as '.' because dot cannot appear in an identifier.
|
||||
// - The TT operators are encoded as [EKPRUTC];
|
||||
// one of these (TypeParam) requires an integer operand,
|
||||
// - The TT operators are encoded as [EKPRUTrCa];
|
||||
// two of these ({,Recv}TypeParams) require an integer operand,
|
||||
// which is encoded as a string of decimal digits.
|
||||
// - The TO operators are encoded as [AFMO];
|
||||
// three of these (At,Field,Method) require an integer operand,
|
||||
|
@ -98,19 +98,21 @@ const (
|
|||
opType = '.' // .Type() (Object)
|
||||
|
||||
// type->type operators
|
||||
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
|
||||
opKey = 'K' // .Key() (Map)
|
||||
opParams = 'P' // .Params() (Signature)
|
||||
opResults = 'R' // .Results() (Signature)
|
||||
opUnderlying = 'U' // .Underlying() (Named)
|
||||
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
|
||||
opConstraint = 'C' // .Constraint() (TypeParam)
|
||||
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
|
||||
opKey = 'K' // .Key() (Map)
|
||||
opParams = 'P' // .Params() (Signature)
|
||||
opResults = 'R' // .Results() (Signature)
|
||||
opUnderlying = 'U' // .Underlying() (Named)
|
||||
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
|
||||
opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature)
|
||||
opConstraint = 'C' // .Constraint() (TypeParam)
|
||||
opRhs = 'a' // .Rhs() (Alias)
|
||||
|
||||
// type->object operators
|
||||
opAt = 'A' // .At(i) (Tuple)
|
||||
opField = 'F' // .Field(i) (Struct)
|
||||
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
|
||||
opObj = 'O' // .Obj() (Named, TypeParam)
|
||||
opAt = 'A' // .At(i) (Tuple)
|
||||
opField = 'F' // .Field(i) (Struct)
|
||||
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
|
||||
opObj = 'O' // .Obj() (Named, TypeParam)
|
||||
)
|
||||
|
||||
// For is equivalent to new(Encoder).For(obj).
|
||||
|
@ -226,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
// Reject obviously non-viable cases.
|
||||
switch obj := obj.(type) {
|
||||
case *types.TypeName:
|
||||
if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok {
|
||||
if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok {
|
||||
// With the exception of type parameters, only package-level type names
|
||||
// have a path.
|
||||
return "", fmt.Errorf("no path for %v", obj)
|
||||
|
@ -278,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
path = append(path, opType)
|
||||
|
||||
T := o.Type()
|
||||
|
||||
if tname.IsAlias() {
|
||||
// type alias
|
||||
if r := find(obj, T, path, nil); r != nil {
|
||||
if alias, ok := T.(*types.Alias); ok {
|
||||
if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
} else {
|
||||
if named, _ := T.(*types.Named); named != nil {
|
||||
if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil {
|
||||
// generic named type
|
||||
return Path(r), nil
|
||||
}
|
||||
if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
|
||||
} else if tname.IsAlias() {
|
||||
// legacy alias
|
||||
if r := find(obj, T, path); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
|
||||
} else if named, ok := T.(*types.Named); ok {
|
||||
// defined (named) type
|
||||
if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
|
||||
if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
|
@ -305,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
if _, ok := o.(*types.TypeName); !ok {
|
||||
if o.Exported() {
|
||||
// exported non-type (const, var, func)
|
||||
if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
|
||||
if r := find(obj, o.Type(), append(path, opType)); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
|
@ -313,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
}
|
||||
|
||||
// Inspect declared methods of defined types.
|
||||
if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok {
|
||||
if T, ok := types.Unalias(o.Type()).(*types.Named); ok {
|
||||
path = append(path, opType)
|
||||
// The method index here is always with respect
|
||||
// to the underlying go/types data structures,
|
||||
|
@ -325,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
|
|||
if m == obj {
|
||||
return Path(path2), nil // found declared method
|
||||
}
|
||||
if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
|
||||
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
|
||||
return Path(r), nil
|
||||
}
|
||||
}
|
||||
|
@ -440,43 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
|
|||
//
|
||||
// The seen map is used to short circuit cycles through type parameters. If
|
||||
// nil, it will be allocated as necessary.
|
||||
func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
|
||||
//
|
||||
// The seenMethods map is used internally to short circuit cycles through
|
||||
// interface methods, such as occur in the following example:
|
||||
//
|
||||
// type I interface { f() interface{I} }
|
||||
//
|
||||
// See golang/go#68046 for details.
|
||||
func find(obj types.Object, T types.Type, path []byte) []byte {
|
||||
return (&finder{obj: obj}).find(T, path)
|
||||
}
|
||||
|
||||
// finder closes over search state for a call to find.
|
||||
type finder struct {
|
||||
obj types.Object // the sought object
|
||||
seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
|
||||
seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces
|
||||
}
|
||||
|
||||
func (f *finder) find(T types.Type, path []byte) []byte {
|
||||
switch T := T.(type) {
|
||||
case *aliases.Alias:
|
||||
return find(obj, aliases.Unalias(T), path, seen)
|
||||
case *types.Alias:
|
||||
return f.find(types.Unalias(T), path)
|
||||
case *types.Basic, *types.Named:
|
||||
// Named types belonging to pkg were handled already,
|
||||
// so T must belong to another package. No path.
|
||||
return nil
|
||||
case *types.Pointer:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Slice:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Array:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Chan:
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Map:
|
||||
if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
|
||||
if r := f.find(T.Key(), append(path, opKey)); r != nil {
|
||||
return r
|
||||
}
|
||||
return find(obj, T.Elem(), append(path, opElem), seen)
|
||||
return f.find(T.Elem(), append(path, opElem))
|
||||
case *types.Signature:
|
||||
if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil {
|
||||
if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
|
||||
return r
|
||||
}
|
||||
if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
|
||||
if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
|
||||
return r
|
||||
}
|
||||
return find(obj, T.Results(), append(path, opResults), seen)
|
||||
if r := f.find(T.Params(), append(path, opParams)); r != nil {
|
||||
return r
|
||||
}
|
||||
return f.find(T.Results(), append(path, opResults))
|
||||
case *types.Struct:
|
||||
for i := 0; i < T.NumFields(); i++ {
|
||||
fld := T.Field(i)
|
||||
path2 := appendOpArg(path, opField, i)
|
||||
if fld == obj {
|
||||
if fld == f.obj {
|
||||
return path2 // found field var
|
||||
}
|
||||
if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
|
||||
if r := f.find(fld.Type(), append(path2, opType)); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
@ -485,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
|
|||
for i := 0; i < T.Len(); i++ {
|
||||
v := T.At(i)
|
||||
path2 := appendOpArg(path, opAt, i)
|
||||
if v == obj {
|
||||
if v == f.obj {
|
||||
return path2 // found param/result var
|
||||
}
|
||||
if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
|
||||
if r := f.find(v.Type(), append(path2, opType)); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
@ -496,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
|
|||
case *types.Interface:
|
||||
for i := 0; i < T.NumMethods(); i++ {
|
||||
m := T.Method(i)
|
||||
if f.seenMethods[m] {
|
||||
return nil
|
||||
}
|
||||
path2 := appendOpArg(path, opMethod, i)
|
||||
if m == obj {
|
||||
if m == f.obj {
|
||||
return path2 // found interface method
|
||||
}
|
||||
if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
|
||||
if f.seenMethods == nil {
|
||||
f.seenMethods = make(map[*types.Func]bool)
|
||||
}
|
||||
f.seenMethods[m] = true
|
||||
if r := f.find(m.Type(), append(path2, opType)); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case *types.TypeParam:
|
||||
name := T.Obj()
|
||||
if name == obj {
|
||||
return append(path, opObj)
|
||||
}
|
||||
if seen[name] {
|
||||
if f.seenTParamNames[name] {
|
||||
return nil
|
||||
}
|
||||
if seen == nil {
|
||||
seen = make(map[*types.TypeName]bool)
|
||||
if name == f.obj {
|
||||
return append(path, opObj)
|
||||
}
|
||||
seen[name] = true
|
||||
if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
|
||||
if f.seenTParamNames == nil {
|
||||
f.seenTParamNames = make(map[*types.TypeName]bool)
|
||||
}
|
||||
f.seenTParamNames[name] = true
|
||||
if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
|
||||
return r
|
||||
}
|
||||
return nil
|
||||
|
@ -525,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
|
|||
panic(T)
|
||||
}
|
||||
|
||||
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
|
||||
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
|
||||
return (&finder{obj: obj}).findTypeParam(list, path, op)
|
||||
}
|
||||
|
||||
func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
|
||||
for i := 0; i < list.Len(); i++ {
|
||||
tparam := list.At(i)
|
||||
path2 := appendOpArg(path, opTypeParam, i)
|
||||
if r := find(obj, tparam, path2, seen); r != nil {
|
||||
path2 := appendOpArg(path, op, i)
|
||||
if r := f.find(tparam, path2); r != nil {
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
@ -580,10 +619,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
code := suffix[0]
|
||||
suffix = suffix[1:]
|
||||
|
||||
// Codes [AFM] have an integer operand.
|
||||
// Codes [AFMTr] have an integer operand.
|
||||
var index int
|
||||
switch code {
|
||||
case opAt, opField, opMethod, opTypeParam:
|
||||
case opAt, opField, opMethod, opTypeParam, opRecvTypeParam:
|
||||
rest := strings.TrimLeft(suffix, "0123456789")
|
||||
numerals := suffix[:len(suffix)-len(rest)]
|
||||
suffix = rest
|
||||
|
@ -616,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
|
||||
// Inv: t != nil, obj == nil
|
||||
|
||||
t = aliases.Unalias(t)
|
||||
t = types.Unalias(t)
|
||||
switch code {
|
||||
case opElem:
|
||||
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
|
||||
|
@ -653,6 +692,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
}
|
||||
t = named.Underlying()
|
||||
|
||||
case opRhs:
|
||||
if alias, ok := t.(*types.Alias); ok {
|
||||
t = aliases.Rhs(alias)
|
||||
} else if false && aliases.Enabled() {
|
||||
// The Enabled check is too expensive, so for now we
|
||||
// simply assume that aliases are not enabled.
|
||||
// TODO(adonovan): replace with "if true {" when go1.24 is assured.
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
|
||||
}
|
||||
|
||||
case opTypeParam:
|
||||
hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
|
||||
if !ok {
|
||||
|
@ -664,6 +713,17 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
}
|
||||
t = tparams.At(index)
|
||||
|
||||
case opRecvTypeParam:
|
||||
sig, ok := t.(*types.Signature) // Signature
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
|
||||
}
|
||||
rtparams := sig.RecvTypeParams()
|
||||
if n := rtparams.Len(); index >= n {
|
||||
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
|
||||
}
|
||||
t = rtparams.At(index)
|
||||
|
||||
case opConstraint:
|
||||
tparam, ok := t.(*types.TypeParam)
|
||||
if !ok {
|
||||
|
@ -725,6 +785,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|||
}
|
||||
}
|
||||
|
||||
if obj == nil {
|
||||
panic(p) // path does not end in an object-valued operator
|
||||
}
|
||||
|
||||
if obj.Pkg() != pkg {
|
||||
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
|
||||
}
|
||||
|
|
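The objectpath encoding gains the Rhs ('a') and RecvTypeParam ('r') operators, but the round-trip contract between For and Object is unchanged. An illustrative sketch of that round trip on a small self-checked package (the source text is hypothetical, not part of the vendored code):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	const src = `package p; type T struct{ F int }`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Encode a path for the package-level type T, then decode it
	// against the same package and check that it round-trips.
	obj := pkg.Scope().Lookup("T")
	path, err := objectpath.For(obj)
	if err != nil {
		log.Fatal(err)
	}
	back, err := objectpath.Object(pkg, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path, back == obj) // e.g. "T true"
}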
68
vendor/golang.org/x/tools/go/types/typeutil/callee.go
generated
vendored
Normal file
68
vendor/golang.org/x/tools/go/types/typeutil/callee.go
generated
vendored
Normal file
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/internal/typeparams"
|
||||
)
|
||||
|
||||
// Callee returns the named target of a function call, if any:
|
||||
// a function, method, builtin, or variable.
|
||||
//
|
||||
// Functions and methods may potentially have type parameters.
|
||||
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
|
||||
fun := ast.Unparen(call.Fun)
|
||||
|
||||
// Look through type instantiation if necessary.
|
||||
isInstance := false
|
||||
switch fun.(type) {
|
||||
case *ast.IndexExpr, *ast.IndexListExpr:
|
||||
// When extracting the callee from an *IndexExpr, we need to check that
|
||||
// it is a *types.Func and not a *types.Var.
|
||||
// Example: Don't match a slice m within the expression `m[0]()`.
|
||||
isInstance = true
|
||||
fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
|
||||
}
|
||||
|
||||
var obj types.Object
|
||||
switch fun := fun.(type) {
|
||||
case *ast.Ident:
|
||||
obj = info.Uses[fun] // type, var, builtin, or declared func
|
||||
case *ast.SelectorExpr:
|
||||
if sel, ok := info.Selections[fun]; ok {
|
||||
obj = sel.Obj() // method or field
|
||||
} else {
|
||||
obj = info.Uses[fun.Sel] // qualified identifier?
|
||||
}
|
||||
}
|
||||
if _, ok := obj.(*types.TypeName); ok {
|
||||
return nil // T(x) is a conversion, not a call
|
||||
}
|
||||
// A Func is required to match instantiations.
|
||||
if _, ok := obj.(*types.Func); isInstance && !ok {
|
||||
return nil // Was not a Func.
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// StaticCallee returns the target (function or method) of a static function
|
||||
// call, if any. It returns nil for calls to builtins.
|
||||
//
|
||||
// Note: for calls of instantiated functions and methods, StaticCallee returns
|
||||
// the corresponding generic function or method on the generic type.
|
||||
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
|
||||
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
|
||||
return f
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func interfaceMethod(f *types.Func) bool {
|
||||
recv := f.Type().(*types.Signature).Recv()
|
||||
return recv != nil && types.IsInterface(recv.Type())
|
||||
}
|
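typeutil.Callee and StaticCallee resolve the target of a call expression from type-checker results. An illustrative sketch against a tiny in-memory package (hypothetical source text, not from this commit):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p
func f() {}
func g() { f() }`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Callee consults Uses and Selections, so both maps are supplied.
	info := &types.Info{
		Uses:       make(map[*ast.Ident]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	if _, err := new(types.Config).Check("p", fset, []*ast.File{file}, info); err != nil {
		log.Fatal(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if fn := typeutil.StaticCallee(info, call); fn != nil {
				fmt.Println("static call to", fn.Name()) // "static call to f"
			}
		}
		return true
	})
}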
30
vendor/golang.org/x/tools/go/types/typeutil/imports.go
generated
vendored
Normal file
30
vendor/golang.org/x/tools/go/types/typeutil/imports.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
import "go/types"
|
||||
|
||||
// Dependencies returns all dependencies of the specified packages.
|
||||
//
|
||||
// Dependent packages appear in topological order: if package P imports
|
||||
// package Q, Q appears earlier than P in the result.
|
||||
// The algorithm follows import statements in the order they
|
||||
// appear in the source code, so the result is a total order.
|
||||
func Dependencies(pkgs ...*types.Package) []*types.Package {
|
||||
var result []*types.Package
|
||||
seen := make(map[*types.Package]bool)
|
||||
var visit func(pkgs []*types.Package)
|
||||
visit = func(pkgs []*types.Package) {
|
||||
for _, p := range pkgs {
|
||||
if !seen[p] {
|
||||
seen[p] = true
|
||||
visit(p.Imports())
|
||||
result = append(result, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
visit(pkgs)
|
||||
return result
|
||||
}
|
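typeutil.Dependencies flattens an import graph into a dependencies-first slice. An illustrative sketch, assuming a toolchain where the standard library can be type-checked from source via go/importer:

package main

import (
	"fmt"
	"go/importer"
	"go/token"
	"log"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Type-check the standard "io" package from source, then list its
	// dependencies with dependencies appearing before dependents.
	fset := token.NewFileSet()
	pkg, err := importer.ForCompiler(fset, "source", nil).Import("io")
	if err != nil {
		log.Fatal(err)
	}
	for _, dep := range typeutil.Dependencies(pkg) {
		fmt.Println(dep.Path())
	}
}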
470
vendor/golang.org/x/tools/go/types/typeutil/map.go
generated
vendored
Normal file
470
vendor/golang.org/x/tools/go/types/typeutil/map.go
generated
vendored
Normal file
|
@ -0,0 +1,470 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package typeutil defines various utilities for types, such as [Map],
|
||||
// a hash table that maps [types.Type] to any value.
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"hash/maphash"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/tools/internal/typeparams"
|
||||
)
|
||||
|
||||
// Map is a hash-table-based mapping from types (types.Type) to
|
||||
// arbitrary values. The concrete types that implement
|
||||
// the Type interface are pointers. Since they are not canonicalized,
|
||||
// == cannot be used to check for equivalence, and thus we cannot
|
||||
// simply use a Go map.
|
||||
//
|
||||
// Just as with map[K]V, a nil *Map is a valid empty map.
|
||||
//
|
||||
// Read-only map operations ([Map.At], [Map.Len], and so on) may
|
||||
// safely be called concurrently.
|
||||
//
|
||||
// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
|
||||
// and 69559, if the latter proposals for a generic hash-map type and
|
||||
// a types.Hash function are accepted.
|
||||
type Map struct {
|
||||
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
|
||||
length int // number of map entries
|
||||
}
|
||||
|
||||
// entry is an entry (key/value association) in a hash bucket.
|
||||
type entry struct {
|
||||
key types.Type
|
||||
value any
|
||||
}
|
||||
|
||||
// SetHasher has no effect.
|
||||
//
|
||||
// It is a relic of an optimization that is no longer profitable. Do
|
||||
// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
|
||||
func (m *Map) SetHasher(Hasher) {}
|
||||
|
||||
// Delete removes the entry with the given key, if any.
|
||||
// It returns true if the entry was found.
|
||||
func (m *Map) Delete(key types.Type) bool {
|
||||
if m != nil && m.table != nil {
|
||||
hash := hash(key)
|
||||
bucket := m.table[hash]
|
||||
for i, e := range bucket {
|
||||
if e.key != nil && types.Identical(key, e.key) {
|
||||
// We can't compact the bucket as it
|
||||
// would disturb iterators.
|
||||
bucket[i] = entry{}
|
||||
m.length--
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// At returns the map entry for the given key.
|
||||
// The result is nil if the entry is not present.
|
||||
func (m *Map) At(key types.Type) any {
|
||||
if m != nil && m.table != nil {
|
||||
for _, e := range m.table[hash(key)] {
|
||||
if e.key != nil && types.Identical(key, e.key) {
|
||||
return e.value
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set sets the map entry for key to val,
|
||||
// and returns the previous entry, if any.
|
||||
func (m *Map) Set(key types.Type, value any) (prev any) {
|
||||
if m.table != nil {
|
||||
hash := hash(key)
|
||||
bucket := m.table[hash]
|
||||
var hole *entry
|
||||
for i, e := range bucket {
|
||||
if e.key == nil {
|
||||
hole = &bucket[i]
|
||||
} else if types.Identical(key, e.key) {
|
||||
prev = e.value
|
||||
bucket[i].value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if hole != nil {
|
||||
*hole = entry{key, value} // overwrite deleted entry
|
||||
} else {
|
||||
m.table[hash] = append(bucket, entry{key, value})
|
||||
}
|
||||
} else {
|
||||
hash := hash(key)
|
||||
m.table = map[uint32][]entry{hash: {entry{key, value}}}
|
||||
}
|
||||
|
||||
m.length++
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of map entries.
|
||||
func (m *Map) Len() int {
|
||||
if m != nil {
|
||||
return m.length
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Iterate calls function f on each entry in the map in unspecified order.
|
||||
//
|
||||
// If f should mutate the map, Iterate provides the same guarantees as
|
||||
// Go maps: if f deletes a map entry that Iterate has not yet reached,
|
||||
// f will not be invoked for it, but if f inserts a map entry that
|
||||
// Iterate has not yet reached, whether or not f will be invoked for
|
||||
// it is unspecified.
|
||||
func (m *Map) Iterate(f func(key types.Type, value any)) {
|
||||
if m != nil {
|
||||
for _, bucket := range m.table {
|
||||
for _, e := range bucket {
|
||||
if e.key != nil {
|
||||
f(e.key, e.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Keys returns a new slice containing the set of map keys.
|
||||
// The order is unspecified.
|
||||
func (m *Map) Keys() []types.Type {
|
||||
keys := make([]types.Type, 0, m.Len())
|
||||
m.Iterate(func(key types.Type, _ any) {
|
||||
keys = append(keys, key)
|
||||
})
|
||||
return keys
|
||||
}
|
||||
|
||||
func (m *Map) toString(values bool) string {
|
||||
if m == nil {
|
||||
return "{}"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, "{")
|
||||
sep := ""
|
||||
m.Iterate(func(key types.Type, value any) {
|
||||
fmt.Fprint(&buf, sep)
|
||||
sep = ", "
|
||||
fmt.Fprint(&buf, key)
|
||||
if values {
|
||||
fmt.Fprintf(&buf, ": %q", value)
|
||||
}
|
||||
})
|
||||
fmt.Fprint(&buf, "}")
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// String returns a string representation of the map's entries.
|
||||
// Values are printed using fmt.Sprintf("%v", v).
|
||||
// Order is unspecified.
|
||||
func (m *Map) String() string {
|
||||
return m.toString(true)
|
||||
}
|
||||
|
||||
// KeysString returns a string representation of the map's key set.
|
||||
// Order is unspecified.
|
||||
func (m *Map) KeysString() string {
|
||||
return m.toString(false)
|
||||
}
|
||||
|
||||
// -- Hasher --
|
||||
|
||||
// hash returns the hash of type t.
|
||||
// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
|
||||
func hash(t types.Type) uint32 {
|
||||
return theHasher.Hash(t)
|
||||
}
|
||||
|
||||
// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
|
||||
// Hashers are stateless, and all are equivalent.
|
||||
type Hasher struct{}
|
||||
|
||||
var theHasher Hasher
|
||||
|
||||
// MakeHasher returns Hasher{}.
|
||||
// Hashers are stateless; all are equivalent.
|
||||
func MakeHasher() Hasher { return theHasher }
|
||||
|
||||
// Hash computes a hash value for the given type t such that
|
||||
// Identical(t, t') => Hash(t) == Hash(t').
|
||||
func (h Hasher) Hash(t types.Type) uint32 {
|
||||
return hasher{inGenericSig: false}.hash(t)
|
||||
}
|
||||
|
||||
// hasher holds the state of a single Hash traversal: whether we are
|
||||
// inside the signature of a generic function; this is used to
|
||||
// optimize [hasher.hashTypeParam].
|
||||
type hasher struct{ inGenericSig bool }
|
||||
|
||||
// hashString computes the Fowler–Noll–Vo hash of s.
|
||||
func hashString(s string) uint32 {
|
||||
var h uint32
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint32(s[i])
|
||||
h *= 16777619
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// hash computes the hash of t.
|
||||
func (h hasher) hash(t types.Type) uint32 {
|
||||
// See Identical for rationale.
|
||||
switch t := t.(type) {
|
||||
case *types.Basic:
|
||||
return uint32(t.Kind())
|
||||
|
||||
case *types.Alias:
|
||||
return h.hash(types.Unalias(t))
|
||||
|
||||
case *types.Array:
|
||||
return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())
|
||||
|
||||
case *types.Slice:
|
||||
return 9049 + 2*h.hash(t.Elem())
|
||||
|
||||
case *types.Struct:
|
||||
var hash uint32 = 9059
|
||||
for i, n := 0, t.NumFields(); i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if f.Anonymous() {
|
||||
hash += 8861
|
||||
}
|
||||
hash += hashString(t.Tag(i))
|
||||
hash += hashString(f.Name()) // (ignore f.Pkg)
|
||||
hash += h.hash(f.Type())
|
||||
}
|
||||
return hash
|
||||
|
||||
case *types.Pointer:
|
||||
return 9067 + 2*h.hash(t.Elem())
|
||||
|
||||
case *types.Signature:
|
||||
var hash uint32 = 9091
|
||||
if t.Variadic() {
|
||||
hash *= 8863
|
||||
}
|
||||
|
||||
tparams := t.TypeParams()
|
||||
if n := tparams.Len(); n > 0 {
|
||||
h.inGenericSig = true // affects constraints, params, and results
|
||||
|
||||
for i := range n {
|
||||
tparam := tparams.At(i)
|
||||
hash += 7 * h.hash(tparam.Constraint())
|
||||
}
|
||||
}
|
||||
|
||||
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
|
||||
|
||||
case *types.Union:
|
||||
return h.hashUnion(t)
|
||||
|
||||
case *types.Interface:
|
||||
// Interfaces are identical if they have the same set of methods, with
|
||||
// identical names and types, and they have the same set of type
|
||||
// restrictions. See go/types.identical for more details.
|
||||
var hash uint32 = 9103
|
||||
|
||||
// Hash methods.
|
||||
for i, n := 0, t.NumMethods(); i < n; i++ {
|
||||
// Method order is not significant.
|
||||
// Ignore m.Pkg().
|
||||
m := t.Method(i)
|
||||
// Use shallow hash on method signature to
|
||||
// avoid anonymous interface cycles.
|
||||
hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
|
||||
}
|
||||
|
||||
// Hash type restrictions.
|
||||
terms, err := typeparams.InterfaceTermSet(t)
|
||||
// if err != nil t has invalid type restrictions.
|
||||
if err == nil {
|
||||
hash += h.hashTermSet(terms)
|
||||
}
|
||||
|
||||
return hash
|
||||
|
||||
case *types.Map:
|
||||
return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())
|
||||
|
||||
case *types.Chan:
|
||||
return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())
|
||||
|
||||
case *types.Named:
|
||||
hash := h.hashTypeName(t.Obj())
|
||||
targs := t.TypeArgs()
|
||||
for i := 0; i < targs.Len(); i++ {
|
||||
targ := targs.At(i)
|
||||
hash += 2 * h.hash(targ)
|
||||
}
|
||||
return hash
|
||||
|
||||
case *types.TypeParam:
|
||||
return h.hashTypeParam(t)
|
||||
|
||||
case *types.Tuple:
|
||||
return h.hashTuple(t)
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("%T: %v", t, t))
|
||||
}
|
||||
|
||||
func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
|
||||
// See go/types.identicalTypes for rationale.
|
||||
n := tuple.Len()
|
||||
hash := 9137 + 2*uint32(n)
|
||||
for i := range n {
|
||||
hash += 3 * h.hash(tuple.At(i).Type())
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
func (h hasher) hashUnion(t *types.Union) uint32 {
|
||||
// Hash type restrictions.
|
||||
terms, err := typeparams.UnionTermSet(t)
|
||||
// if err != nil t has invalid type restrictions. Fall back on a non-zero
|
||||
// hash.
|
||||
if err != nil {
|
||||
return 9151
|
||||
}
|
||||
return h.hashTermSet(terms)
|
||||
}
|
||||
|
||||
func (h hasher) hashTermSet(terms []*types.Term) uint32 {
|
||||
hash := 9157 + 2*uint32(len(terms))
|
||||
for _, term := range terms {
|
||||
// term order is not significant.
|
||||
termHash := h.hash(term.Type())
|
||||
if term.Tilde() {
|
||||
termHash *= 9161
|
||||
}
|
||||
hash += 3 * termHash
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// hashTypeParam returns the hash of a type parameter.
|
||||
func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
|
||||
// Within the signature of a generic function, TypeParams are
|
||||
// identical if they have the same index and constraint, so we
|
||||
// hash them based on index.
|
||||
//
|
||||
// When we are outside a generic function, free TypeParams are
|
||||
// identical iff they are the same object, so we can use a
|
||||
// more discriminating hash consistent with object identity.
|
||||
// This optimization saves [Map] about 4% when hashing all the
|
||||
// types.Info.Types in the forward closure of net/http.
|
||||
if !h.inGenericSig {
|
||||
// Optimization: outside a generic function signature,
|
||||
// use a more discriminating hash consistent with object identity.
|
||||
return h.hashTypeName(t.Obj())
|
||||
}
|
||||
return 9173 + 3*uint32(t.Index())
|
||||
}
|
||||
|
||||
var theSeed = maphash.MakeSeed()
|
||||
|
||||
// hashTypeName hashes the pointer of tname.
|
||||
func (hasher) hashTypeName(tname *types.TypeName) uint32 {
|
||||
// Since types.Identical uses == to compare TypeNames,
|
||||
// the Hash function uses maphash.Comparable.
|
||||
// TODO(adonovan): or will, when it becomes available in go1.24.
|
||||
// In the meantime we use the pointer's numeric value.
|
||||
//
|
||||
// hash := maphash.Comparable(theSeed, tname)
|
||||
//
|
||||
// (Another approach would be to hash the name and package
|
||||
// path, and whether or not it is a package-level typename. It
|
||||
// is rare for a package to define multiple local types with
|
||||
// the same name.)
|
||||
hash := uintptr(unsafe.Pointer(tname))
|
||||
return uint32(hash ^ (hash >> 32))
|
||||
}
|
||||
|
||||
// shallowHash computes a hash of t without looking at any of its
|
||||
// element Types, to avoid potential anonymous cycles in the types of
|
||||
// interface methods.
|
||||
//
|
||||
// When an unnamed non-empty interface type appears anywhere among the
|
||||
// arguments or results of an interface method, there is a potential
|
||||
// for endless recursion. Consider:
|
||||
//
|
||||
// type X interface { m() []*interface { X } }
|
||||
//
|
||||
// The problem is that the Methods of the interface in m's result type
|
||||
// include m itself; there is no mention of the named type X that
|
||||
// might help us break the cycle.
|
||||
// (See comment in go/types.identical, case *Interface, for more.)
|
||||
func (h hasher) shallowHash(t types.Type) uint32 {
|
||||
// t is the type of an interface method (Signature),
|
||||
// its params or results (Tuples), or their immediate
|
||||
// elements (mostly Slice, Pointer, Basic, Named),
|
||||
// so there's no need to optimize anything else.
|
||||
switch t := t.(type) {
|
||||
case *types.Alias:
|
||||
return h.shallowHash(types.Unalias(t))
|
||||
|
||||
case *types.Signature:
|
||||
var hash uint32 = 604171
|
||||
if t.Variadic() {
|
||||
hash *= 971767
|
||||
}
|
||||
// The Signature/Tuple recursion is always finite
|
||||
// and invariably shallow.
|
||||
return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
|
||||
|
||||
case *types.Tuple:
|
||||
n := t.Len()
|
||||
hash := 9137 + 2*uint32(n)
|
||||
for i := range n {
|
||||
hash += 53471161 * h.shallowHash(t.At(i).Type())
|
||||
}
|
||||
return hash
|
||||
|
||||
case *types.Basic:
|
||||
return 45212177 * uint32(t.Kind())
|
||||
|
||||
case *types.Array:
|
||||
return 1524181 + 2*uint32(t.Len())
|
||||
|
||||
case *types.Slice:
|
||||
return 2690201
|
||||
|
||||
case *types.Struct:
|
||||
return 3326489
|
||||
|
||||
case *types.Pointer:
|
||||
return 4393139
|
||||
|
||||
case *types.Union:
|
||||
return 562448657
|
||||
|
||||
case *types.Interface:
|
||||
return 2124679 // no recursion here
|
||||
|
||||
case *types.Map:
|
||||
return 9109
|
||||
|
||||
case *types.Chan:
|
||||
return 9127
|
||||
|
||||
case *types.Named:
|
||||
return h.hashTypeName(t.Obj())
|
||||
|
||||
case *types.TypeParam:
|
||||
return h.hashTypeParam(t)
|
||||
}
|
||||
panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
|
||||
}
|
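typeutil.Map exists because distinct types.Type values can be structurally identical, so a plain Go map keyed on types.Type would miss matches that types.Identical reports. A short illustrative sketch (not part of the vendored file):

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Two distinct but structurally identical types: a plain Go map
	// would treat them as different keys, while typeutil.Map compares
	// keys with types.Identical.
	t1 := types.NewSlice(types.Typ[types.Int])
	t2 := types.NewSlice(types.Typ[types.Int])

	var m typeutil.Map // the zero value is ready to use
	m.Set(t1, "a value for []int")
	fmt.Println(m.At(t2)) // "a value for []int"
	fmt.Println(m.Len())  // 1
}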
71
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
generated
vendored
Normal file
71
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file implements a cache of method sets.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A MethodSetCache records the method set of each type T for which
|
||||
// MethodSet(T) is called so that repeat queries are fast.
|
||||
// The zero value is a ready-to-use cache instance.
|
||||
type MethodSetCache struct {
|
||||
mu sync.Mutex
|
||||
named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
|
||||
others map[types.Type]*types.MethodSet // all other types
|
||||
}
|
||||
|
||||
// MethodSet returns the method set of type T. It is thread-safe.
|
||||
//
|
||||
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
|
||||
// Utility functions can thus expose an optional *MethodSetCache
|
||||
// parameter to clients that care about performance.
|
||||
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
|
||||
if cache == nil {
|
||||
return types.NewMethodSet(T)
|
||||
}
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
|
||||
switch T := types.Unalias(T).(type) {
|
||||
case *types.Named:
|
||||
return cache.lookupNamed(T).value
|
||||
|
||||
case *types.Pointer:
|
||||
if N, ok := types.Unalias(T.Elem()).(*types.Named); ok {
|
||||
return cache.lookupNamed(N).pointer
|
||||
}
|
||||
}
|
||||
|
||||
// all other types
|
||||
// (The map uses pointer equivalence, not type identity.)
|
||||
mset := cache.others[T]
|
||||
if mset == nil {
|
||||
mset = types.NewMethodSet(T)
|
||||
if cache.others == nil {
|
||||
cache.others = make(map[types.Type]*types.MethodSet)
|
||||
}
|
||||
cache.others[T] = mset
|
||||
}
|
||||
return mset
|
||||
}
|
||||
|
||||
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
|
||||
if cache.named == nil {
|
||||
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
|
||||
}
|
||||
// Avoid recomputing mset(*T) for each distinct Pointer
|
||||
// instance whose underlying type is a named type.
|
||||
msets, ok := cache.named[named]
|
||||
if !ok {
|
||||
msets.value = types.NewMethodSet(named)
|
||||
msets.pointer = types.NewMethodSet(types.NewPointer(named))
|
||||
cache.named[named] = msets
|
||||
}
|
||||
return msets
|
||||
}
|
53
vendor/golang.org/x/tools/go/types/typeutil/ui.go
generated
vendored
Normal file
53
vendor/golang.org/x/tools/go/types/typeutil/ui.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
// This file defines utilities for user interfaces that display types.
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// IntuitiveMethodSet returns the intuitive method set of a type T,
|
||||
// which is the set of methods you can call on an addressable value of
|
||||
// that type.
|
||||
//
|
||||
// The result always contains MethodSet(T), and is exactly MethodSet(T)
|
||||
// for interface types and for pointer-to-concrete types.
|
||||
// For all other concrete types T, the result additionally
|
||||
// contains each method belonging to *T if there is no identically
|
||||
// named method on T itself.
|
||||
//
|
||||
// This corresponds to user intuition about method sets;
|
||||
// this function is intended only for user interfaces.
|
||||
//
|
||||
// The order of the result is as for types.MethodSet(T).
|
||||
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
|
||||
isPointerToConcrete := func(T types.Type) bool {
|
||||
ptr, ok := types.Unalias(T).(*types.Pointer)
|
||||
return ok && !types.IsInterface(ptr.Elem())
|
||||
}
|
||||
|
||||
var result []*types.Selection
|
||||
mset := msets.MethodSet(T)
|
||||
if types.IsInterface(T) || isPointerToConcrete(T) {
|
||||
for i, n := 0, mset.Len(); i < n; i++ {
|
||||
result = append(result, mset.At(i))
|
||||
}
|
||||
} else {
|
||||
// T is some other concrete type.
|
||||
// Report methods of T and *T, preferring those of T.
|
||||
pmset := msets.MethodSet(types.NewPointer(T))
|
||||
for i, n := 0, pmset.Len(); i < n; i++ {
|
||||
meth := pmset.At(i)
|
||||
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
|
||||
meth = m
|
||||
}
|
||||
result = append(result, meth)
|
||||
}
|
||||
|
||||
}
|
||||
return result
|
||||
}
|
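IntuitiveMethodSet reports the methods callable on an addressable value of a type, optionally backed by a MethodSetCache so repeated queries stay cheap. An illustrative sketch on a small self-checked package (hypothetical source, not from this commit):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p
type T struct{}
func (T) Value()    {}
func (*T) Pointer() {}`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// For the concrete type T, the intuitive method set includes the
	// pointer-receiver method Pointer as well as Value. The cache's
	// zero value is ready to use.
	var cache typeutil.MethodSetCache
	T := pkg.Scope().Lookup("T").Type()
	for _, sel := range typeutil.IntuitiveMethodSet(T, &cache) {
		fmt.Println(sel.Obj().Name()) // Pointer, Value (order as in types.MethodSet)
	}
}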
10
vendor/golang.org/x/tools/internal/aliases/aliases.go
generated
vendored
10
vendor/golang.org/x/tools/internal/aliases/aliases.go
generated
vendored
|
@ -22,11 +22,17 @@ import (
|
|||
// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
|
||||
// function is expensive and should be called once per task (e.g.
|
||||
// package import), not once per call to NewAlias.
|
||||
func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
|
||||
//
|
||||
// Precondition: enabled || len(tparams)==0.
|
||||
// If materialized aliases are disabled, there must not be any type parameters.
|
||||
func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
|
||||
if enabled {
|
||||
tname := types.NewTypeName(pos, pkg, name, nil)
|
||||
newAlias(tname, rhs)
|
||||
SetTypeParams(types.NewAlias(tname, rhs), tparams)
|
||||
return tname
|
||||
}
|
||||
if len(tparams) > 0 {
|
||||
panic("cannot create an alias with type parameters when gotypesalias is not enabled")
|
||||
}
|
||||
return types.NewTypeName(pos, pkg, name, rhs)
|
||||
}
|
||||
|
|
31
vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
generated
vendored
31
vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
generated
vendored
|
@ -1,31 +0,0 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.22
|
||||
// +build !go1.22
|
||||
|
||||
package aliases
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// Alias is a placeholder for a go/types.Alias for <=1.21.
|
||||
// It will never be created by go/types.
|
||||
type Alias struct{}
|
||||
|
||||
func (*Alias) String() string { panic("unreachable") }
|
||||
func (*Alias) Underlying() types.Type { panic("unreachable") }
|
||||
func (*Alias) Obj() *types.TypeName { panic("unreachable") }
|
||||
func Rhs(alias *Alias) types.Type { panic("unreachable") }
|
||||
|
||||
// Unalias returns the type t for go <=1.21.
|
||||
func Unalias(t types.Type) types.Type { return t }
|
||||
|
||||
func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
|
||||
|
||||
// Enabled reports whether [NewAlias] should create [types.Alias] types.
|
||||
//
|
||||
// Before go1.22, this function always returns false.
|
||||
func Enabled() bool { return false }
|
55
vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
generated
vendored
55
vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
generated
vendored
|
@ -2,9 +2,6 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.22
|
||||
// +build go1.22
|
||||
|
||||
package aliases
|
||||
|
||||
import (
|
||||
|
@ -14,31 +11,51 @@ import (
|
|||
"go/types"
|
||||
)
|
||||
|
||||
// Alias is an alias of types.Alias.
|
||||
type Alias = types.Alias
|
||||
|
||||
// Rhs returns the type on the right-hand side of the alias declaration.
|
||||
func Rhs(alias *Alias) types.Type {
|
||||
func Rhs(alias *types.Alias) types.Type {
|
||||
if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
|
||||
return alias.Rhs() // go1.23+
|
||||
}
|
||||
|
||||
// go1.22's Alias didn't have the Rhs method,
|
||||
// so Unalias is the best we can do.
|
||||
return Unalias(alias)
|
||||
return types.Unalias(alias)
|
||||
}
|
||||
|
||||
// Unalias is a wrapper of types.Unalias.
|
||||
func Unalias(t types.Type) types.Type { return types.Unalias(t) }
|
||||
// TypeParams returns the type parameter list of the alias.
|
||||
func TypeParams(alias *types.Alias) *types.TypeParamList {
|
||||
if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
|
||||
return alias.TypeParams() // go1.23+
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newAlias is an internal alias around types.NewAlias.
|
||||
// Direct usage is discouraged at the moment.
|
||||
// Try to use NewAlias instead.
|
||||
func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
|
||||
a := types.NewAlias(tname, rhs)
|
||||
// TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
|
||||
Unalias(a)
|
||||
return a
|
||||
// SetTypeParams sets the type parameters of the alias type.
|
||||
func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
|
||||
if alias, ok := any(alias).(interface {
|
||||
SetTypeParams(tparams []*types.TypeParam)
|
||||
}); ok {
|
||||
alias.SetTypeParams(tparams) // go1.23+
|
||||
} else if len(tparams) > 0 {
|
||||
panic("cannot set type parameters of an Alias type in go1.22")
|
||||
}
|
||||
}
|
||||
|
||||
// TypeArgs returns the type arguments used to instantiate the Alias type.
|
||||
func TypeArgs(alias *types.Alias) *types.TypeList {
|
||||
if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
|
||||
return alias.TypeArgs() // go1.23+
|
||||
}
|
||||
return nil // empty (go1.22)
|
||||
}
|
||||
|
||||
// Origin returns the generic Alias type of which alias is an instance.
|
||||
// If alias is not an instance of a generic alias, Origin returns alias.
|
||||
func Origin(alias *types.Alias) *types.Alias {
|
||||
if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
|
||||
return alias.Origin() // go1.23+
|
||||
}
|
||||
return alias // not an instance of a generic alias (go1.22)
|
||||
}
|
||||
|
||||
// Enabled reports whether [NewAlias] should create [types.Alias] types.
|
||||
|
@ -56,7 +73,7 @@ func Enabled() bool {
|
|||
// many tests. Therefore any attempt to cache the result
|
||||
// is just incorrect.
|
||||
fset := token.NewFileSet()
|
||||
f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
|
||||
f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
|
||||
pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
|
||||
_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
|
||||
return enabled
|
||||
|
|
295 vendor/golang.org/x/tools/internal/astutil/edge/edge.go generated vendored Normal file
@ -0,0 +1,295 @@
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package edge defines identifiers for each field of an ast.Node
|
||||
// struct type that refers to another Node.
|
||||
package edge
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// A Kind describes a field of an ast.Node struct.
|
||||
type Kind uint8
|
||||
|
||||
// String returns a description of the edge kind.
|
||||
func (k Kind) String() string {
|
||||
if k == Invalid {
|
||||
return "<invalid>"
|
||||
}
|
||||
info := fieldInfos[k]
|
||||
return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name)
|
||||
}
|
||||
|
||||
// NodeType returns the pointer-to-struct type of the ast.Node implementation.
|
||||
func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType }
|
||||
|
||||
// FieldName returns the name of the field.
|
||||
func (k Kind) FieldName() string { return fieldInfos[k].name }
|
||||
|
||||
// FieldType returns the declared type of the field.
|
||||
func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType }
|
||||
|
||||
// Get returns the direct child of n identified by (k, idx).
|
||||
// n's type must match k.NodeType().
|
||||
// idx must be a valid slice index, or -1 for a non-slice.
|
||||
func (k Kind) Get(n ast.Node, idx int) ast.Node {
|
||||
if k.NodeType() != reflect.TypeOf(n) {
|
||||
panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n))
|
||||
}
|
||||
v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index)
|
||||
if idx != -1 {
|
||||
v = v.Index(idx) // asserts valid index
|
||||
} else {
|
||||
// (The type assertion below asserts that v is not a slice.)
|
||||
}
|
||||
return v.Interface().(ast.Node) // may be nil
|
||||
}
|
||||
|
||||
const (
|
||||
Invalid Kind = iota // for nodes at the root of the traversal
|
||||
|
||||
// Kinds are sorted alphabetically.
|
||||
// Numbering is not stable.
|
||||
// Each is named Type_Field, where Type is the
|
||||
// ast.Node struct type and Field is the name of the field
|
||||
|
||||
ArrayType_Elt
|
||||
ArrayType_Len
|
||||
AssignStmt_Lhs
|
||||
AssignStmt_Rhs
|
||||
BinaryExpr_X
|
||||
BinaryExpr_Y
|
||||
BlockStmt_List
|
||||
BranchStmt_Label
|
||||
CallExpr_Args
|
||||
CallExpr_Fun
|
||||
CaseClause_Body
|
||||
CaseClause_List
|
||||
ChanType_Value
|
||||
CommClause_Body
|
||||
CommClause_Comm
|
||||
CommentGroup_List
|
||||
CompositeLit_Elts
|
||||
CompositeLit_Type
|
||||
DeclStmt_Decl
|
||||
DeferStmt_Call
|
||||
Ellipsis_Elt
|
||||
ExprStmt_X
|
||||
FieldList_List
|
||||
Field_Comment
|
||||
Field_Doc
|
||||
Field_Names
|
||||
Field_Tag
|
||||
Field_Type
|
||||
File_Decls
|
||||
File_Doc
|
||||
File_Name
|
||||
ForStmt_Body
|
||||
ForStmt_Cond
|
||||
ForStmt_Init
|
||||
ForStmt_Post
|
||||
FuncDecl_Body
|
||||
FuncDecl_Doc
|
||||
FuncDecl_Name
|
||||
FuncDecl_Recv
|
||||
FuncDecl_Type
|
||||
FuncLit_Body
|
||||
FuncLit_Type
|
||||
FuncType_Params
|
||||
FuncType_Results
|
||||
FuncType_TypeParams
|
||||
GenDecl_Doc
|
||||
GenDecl_Specs
|
||||
GoStmt_Call
|
||||
IfStmt_Body
|
||||
IfStmt_Cond
|
||||
IfStmt_Else
|
||||
IfStmt_Init
|
||||
ImportSpec_Comment
|
||||
ImportSpec_Doc
|
||||
ImportSpec_Name
|
||||
ImportSpec_Path
|
||||
IncDecStmt_X
|
||||
IndexExpr_Index
|
||||
IndexExpr_X
|
||||
IndexListExpr_Indices
|
||||
IndexListExpr_X
|
||||
InterfaceType_Methods
|
||||
KeyValueExpr_Key
|
||||
KeyValueExpr_Value
|
||||
LabeledStmt_Label
|
||||
LabeledStmt_Stmt
|
||||
MapType_Key
|
||||
MapType_Value
|
||||
ParenExpr_X
|
||||
RangeStmt_Body
|
||||
RangeStmt_Key
|
||||
RangeStmt_Value
|
||||
RangeStmt_X
|
||||
ReturnStmt_Results
|
||||
SelectStmt_Body
|
||||
SelectorExpr_Sel
|
||||
SelectorExpr_X
|
||||
SendStmt_Chan
|
||||
SendStmt_Value
|
||||
SliceExpr_High
|
||||
SliceExpr_Low
|
||||
SliceExpr_Max
|
||||
SliceExpr_X
|
||||
StarExpr_X
|
||||
StructType_Fields
|
||||
SwitchStmt_Body
|
||||
SwitchStmt_Init
|
||||
SwitchStmt_Tag
|
||||
TypeAssertExpr_Type
|
||||
TypeAssertExpr_X
|
||||
TypeSpec_Comment
|
||||
TypeSpec_Doc
|
||||
TypeSpec_Name
|
||||
TypeSpec_Type
|
||||
TypeSpec_TypeParams
|
||||
TypeSwitchStmt_Assign
|
||||
TypeSwitchStmt_Body
|
||||
TypeSwitchStmt_Init
|
||||
UnaryExpr_X
|
||||
ValueSpec_Comment
|
||||
ValueSpec_Doc
|
||||
ValueSpec_Names
|
||||
ValueSpec_Type
|
||||
ValueSpec_Values
|
||||
|
||||
maxKind
|
||||
)
|
||||
|
||||
// Assert that the encoding fits in 7 bits,
|
||||
// as the inspector relies on this.
|
||||
// (We are currently at 104.)
|
||||
var _ = [1 << 7]struct{}{}[maxKind]
|
||||
|
||||
type fieldInfo struct {
|
||||
nodeType reflect.Type // pointer-to-struct type of ast.Node implementation
|
||||
name string
|
||||
index int
|
||||
fieldType reflect.Type
|
||||
}
|
||||
|
||||
func info[N ast.Node](fieldName string) fieldInfo {
|
||||
nodePtrType := reflect.TypeFor[N]()
|
||||
f, ok := nodePtrType.Elem().FieldByName(fieldName)
|
||||
if !ok {
|
||||
panic(fieldName)
|
||||
}
|
||||
return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type}
|
||||
}
|
||||
|
||||
var fieldInfos = [...]fieldInfo{
|
||||
Invalid: {},
|
||||
ArrayType_Elt: info[*ast.ArrayType]("Elt"),
|
||||
ArrayType_Len: info[*ast.ArrayType]("Len"),
|
||||
AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"),
|
||||
AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"),
|
||||
BinaryExpr_X: info[*ast.BinaryExpr]("X"),
|
||||
BinaryExpr_Y: info[*ast.BinaryExpr]("Y"),
|
||||
BlockStmt_List: info[*ast.BlockStmt]("List"),
|
||||
BranchStmt_Label: info[*ast.BranchStmt]("Label"),
|
||||
CallExpr_Args: info[*ast.CallExpr]("Args"),
|
||||
CallExpr_Fun: info[*ast.CallExpr]("Fun"),
|
||||
CaseClause_Body: info[*ast.CaseClause]("Body"),
|
||||
CaseClause_List: info[*ast.CaseClause]("List"),
|
||||
ChanType_Value: info[*ast.ChanType]("Value"),
|
||||
CommClause_Body: info[*ast.CommClause]("Body"),
|
||||
CommClause_Comm: info[*ast.CommClause]("Comm"),
|
||||
CommentGroup_List: info[*ast.CommentGroup]("List"),
|
||||
CompositeLit_Elts: info[*ast.CompositeLit]("Elts"),
|
||||
CompositeLit_Type: info[*ast.CompositeLit]("Type"),
|
||||
DeclStmt_Decl: info[*ast.DeclStmt]("Decl"),
|
||||
DeferStmt_Call: info[*ast.DeferStmt]("Call"),
|
||||
Ellipsis_Elt: info[*ast.Ellipsis]("Elt"),
|
||||
ExprStmt_X: info[*ast.ExprStmt]("X"),
|
||||
FieldList_List: info[*ast.FieldList]("List"),
|
||||
Field_Comment: info[*ast.Field]("Comment"),
|
||||
Field_Doc: info[*ast.Field]("Doc"),
|
||||
Field_Names: info[*ast.Field]("Names"),
|
||||
Field_Tag: info[*ast.Field]("Tag"),
|
||||
Field_Type: info[*ast.Field]("Type"),
|
||||
File_Decls: info[*ast.File]("Decls"),
|
||||
File_Doc: info[*ast.File]("Doc"),
|
||||
File_Name: info[*ast.File]("Name"),
|
||||
ForStmt_Body: info[*ast.ForStmt]("Body"),
|
||||
ForStmt_Cond: info[*ast.ForStmt]("Cond"),
|
||||
ForStmt_Init: info[*ast.ForStmt]("Init"),
|
||||
ForStmt_Post: info[*ast.ForStmt]("Post"),
|
||||
FuncDecl_Body: info[*ast.FuncDecl]("Body"),
|
||||
FuncDecl_Doc: info[*ast.FuncDecl]("Doc"),
|
||||
FuncDecl_Name: info[*ast.FuncDecl]("Name"),
|
||||
FuncDecl_Recv: info[*ast.FuncDecl]("Recv"),
|
||||
FuncDecl_Type: info[*ast.FuncDecl]("Type"),
|
||||
FuncLit_Body: info[*ast.FuncLit]("Body"),
|
||||
FuncLit_Type: info[*ast.FuncLit]("Type"),
|
||||
FuncType_Params: info[*ast.FuncType]("Params"),
|
||||
FuncType_Results: info[*ast.FuncType]("Results"),
|
||||
FuncType_TypeParams: info[*ast.FuncType]("TypeParams"),
|
||||
GenDecl_Doc: info[*ast.GenDecl]("Doc"),
|
||||
GenDecl_Specs: info[*ast.GenDecl]("Specs"),
|
||||
GoStmt_Call: info[*ast.GoStmt]("Call"),
|
||||
IfStmt_Body: info[*ast.IfStmt]("Body"),
|
||||
IfStmt_Cond: info[*ast.IfStmt]("Cond"),
|
||||
IfStmt_Else: info[*ast.IfStmt]("Else"),
|
||||
IfStmt_Init: info[*ast.IfStmt]("Init"),
|
||||
ImportSpec_Comment: info[*ast.ImportSpec]("Comment"),
|
||||
ImportSpec_Doc: info[*ast.ImportSpec]("Doc"),
|
||||
ImportSpec_Name: info[*ast.ImportSpec]("Name"),
|
||||
ImportSpec_Path: info[*ast.ImportSpec]("Path"),
|
||||
IncDecStmt_X: info[*ast.IncDecStmt]("X"),
|
||||
IndexExpr_Index: info[*ast.IndexExpr]("Index"),
|
||||
IndexExpr_X: info[*ast.IndexExpr]("X"),
|
||||
IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"),
|
||||
IndexListExpr_X: info[*ast.IndexListExpr]("X"),
|
||||
InterfaceType_Methods: info[*ast.InterfaceType]("Methods"),
|
||||
KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"),
|
||||
KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"),
|
||||
LabeledStmt_Label: info[*ast.LabeledStmt]("Label"),
|
||||
LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"),
|
||||
MapType_Key: info[*ast.MapType]("Key"),
|
||||
MapType_Value: info[*ast.MapType]("Value"),
|
||||
ParenExpr_X: info[*ast.ParenExpr]("X"),
|
||||
RangeStmt_Body: info[*ast.RangeStmt]("Body"),
|
||||
RangeStmt_Key: info[*ast.RangeStmt]("Key"),
|
||||
RangeStmt_Value: info[*ast.RangeStmt]("Value"),
|
||||
RangeStmt_X: info[*ast.RangeStmt]("X"),
|
||||
ReturnStmt_Results: info[*ast.ReturnStmt]("Results"),
|
||||
SelectStmt_Body: info[*ast.SelectStmt]("Body"),
|
||||
SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"),
|
||||
SelectorExpr_X: info[*ast.SelectorExpr]("X"),
|
||||
SendStmt_Chan: info[*ast.SendStmt]("Chan"),
|
||||
SendStmt_Value: info[*ast.SendStmt]("Value"),
|
||||
SliceExpr_High: info[*ast.SliceExpr]("High"),
|
||||
SliceExpr_Low: info[*ast.SliceExpr]("Low"),
|
||||
SliceExpr_Max: info[*ast.SliceExpr]("Max"),
|
||||
SliceExpr_X: info[*ast.SliceExpr]("X"),
|
||||
StarExpr_X: info[*ast.StarExpr]("X"),
|
||||
StructType_Fields: info[*ast.StructType]("Fields"),
|
||||
SwitchStmt_Body: info[*ast.SwitchStmt]("Body"),
|
||||
SwitchStmt_Init: info[*ast.SwitchStmt]("Init"),
|
||||
SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"),
|
||||
TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"),
|
||||
TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"),
|
||||
TypeSpec_Comment: info[*ast.TypeSpec]("Comment"),
|
||||
TypeSpec_Doc: info[*ast.TypeSpec]("Doc"),
|
||||
TypeSpec_Name: info[*ast.TypeSpec]("Name"),
|
||||
TypeSpec_Type: info[*ast.TypeSpec]("Type"),
|
||||
TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"),
|
||||
TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"),
|
||||
TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"),
|
||||
TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"),
|
||||
UnaryExpr_X: info[*ast.UnaryExpr]("X"),
|
||||
ValueSpec_Comment: info[*ast.ValueSpec]("Comment"),
|
||||
ValueSpec_Doc: info[*ast.ValueSpec]("Doc"),
|
||||
ValueSpec_Names: info[*ast.ValueSpec]("Names"),
|
||||
ValueSpec_Type: info[*ast.ValueSpec]("Type"),
|
||||
ValueSpec_Values: info[*ast.ValueSpec]("Values"),
|
||||
}
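A short sketch of how these edge kinds are meant to be used, written as it would appear inside x/tools itself (the package is internal, so it cannot be imported from other modules); it resolves the Fun child of a call expression with Kind.Get:

// Sketch (usable only from within golang.org/x/tools): resolve the Fun child
// of the first call expression in a source string via its edge kind.
package example

import (
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/internal/astutil/edge"
)

func funOfFirstCall(src string) ast.Node {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", src, 0)
	if err != nil {
		return nil
	}
	var fun ast.Node
	ast.Inspect(f, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok && fun == nil {
			// edge.CallExpr_Fun.Get expects a *ast.CallExpr; idx is -1
			// because Fun is not a slice field.
			fun = edge.CallExpr_Fun.Get(call, -1)
		}
		return true
	})
	return fun
}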
|
61 vendor/golang.org/x/tools/internal/gcimporter/bimport.go generated vendored
@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir {
		return 0
	}
}

var predeclOnce sync.Once
var predecl []types.Type // initialized lazily

func predeclared() []types.Type {
	predeclOnce.Do(func() {
		// initialize lazily to be sure that all
		// elements have been initialized before
		predecl = []types.Type{ // basic types
			types.Typ[types.Bool],
			types.Typ[types.Int],
			types.Typ[types.Int8],
			types.Typ[types.Int16],
			types.Typ[types.Int32],
			types.Typ[types.Int64],
			types.Typ[types.Uint],
			types.Typ[types.Uint8],
			types.Typ[types.Uint16],
			types.Typ[types.Uint32],
			types.Typ[types.Uint64],
			types.Typ[types.Uintptr],
			types.Typ[types.Float32],
			types.Typ[types.Float64],
			types.Typ[types.Complex64],
			types.Typ[types.Complex128],
			types.Typ[types.String],

			// basic type aliases
			types.Universe.Lookup("byte").Type(),
			types.Universe.Lookup("rune").Type(),

			// error
			types.Universe.Lookup("error").Type(),

			// untyped types
			types.Typ[types.UntypedBool],
			types.Typ[types.UntypedInt],
			types.Typ[types.UntypedRune],
			types.Typ[types.UntypedFloat],
			types.Typ[types.UntypedComplex],
			types.Typ[types.UntypedString],
			types.Typ[types.UntypedNil],

			// package unsafe
			types.Typ[types.UnsafePointer],

			// invalid type
			types.Typ[types.Invalid], // only appears in packages with errors

			// used internally by gc; never used by this package or in .a files
			anyType{},
		}
		predecl = append(predecl, additionalPredeclared()...)
	})
	return predecl
}

type anyType struct{}

func (t anyType) Underlying() types.Type { return t }
func (t anyType) String() string         { return "any" }
448 vendor/golang.org/x/tools/internal/gcimporter/exportdata.go generated vendored
@ -2,49 +2,183 @@
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
|
||||
|
||||
// This file implements FindExportData.
|
||||
// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
|
||||
// This file also additionally implements FindExportData for gcexportdata.NewReader.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io"
|
||||
"strconv"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
|
||||
// See $GOROOT/include/ar.h.
|
||||
hdr := make([]byte, 16+12+6+6+8+10+2)
|
||||
_, err = io.ReadFull(r, hdr)
|
||||
// FindExportData positions the reader r at the beginning of the
|
||||
// export data section of an underlying cmd/compile created archive
|
||||
// file by reading from it. The reader must be positioned at the
|
||||
// start of the file before calling this function.
|
||||
// This returns the length of the export data in bytes.
|
||||
//
|
||||
// This function is needed by [gcexportdata.Read], which must
|
||||
// accept inputs produced by the last two releases of cmd/compile,
|
||||
// plus tip.
|
||||
func FindExportData(r *bufio.Reader) (size int64, err error) {
|
||||
arsize, err := FindPackageDefinition(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// leave for debugging
|
||||
if false {
|
||||
fmt.Printf("header: %s", hdr)
|
||||
}
|
||||
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
|
||||
length, err := strconv.Atoi(s)
|
||||
size = int64(length)
|
||||
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
|
||||
err = fmt.Errorf("invalid archive header")
|
||||
size = int64(arsize)
|
||||
|
||||
objapi, headers, err := ReadObjectHeaders(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
name = strings.TrimSpace(string(hdr[:16]))
|
||||
size -= int64(len(objapi))
|
||||
for _, h := range headers {
|
||||
size -= int64(len(h))
|
||||
}
|
||||
|
||||
// Check for the binary export data section header "$$B\n".
|
||||
// TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
|
||||
line, err := r.ReadSlice('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
hdr := string(line)
|
||||
if hdr != "$$B\n" {
|
||||
err = fmt.Errorf("unknown export data header: %q", hdr)
|
||||
return
|
||||
}
|
||||
size -= int64(len(hdr))
|
||||
|
||||
// For files with a binary export data header "$$B\n",
|
||||
// these are always terminated by an end-of-section marker "\n$$\n".
|
||||
// So the last bytes must always be this constant.
|
||||
//
|
||||
// The end-of-section marker is not a part of the export data itself.
|
||||
// Do not include these in size.
|
||||
//
|
||||
// It would be nice to have sanity check that the final bytes after
|
||||
// the export data are indeed the end-of-section marker. The split
|
||||
// of gcexportdata.NewReader and gcexportdata.Read make checking this
|
||||
// ugly so gcimporter gives up enforcing this. The compiler and go/types
|
||||
// importer do enforce this, which seems good enough.
|
||||
const endofsection = "\n$$\n"
|
||||
size -= int64(len(endofsection))
|
||||
|
||||
if size < 0 {
|
||||
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// FindExportData positions the reader r at the beginning of the
|
||||
// export data section of an underlying GC-created object/archive
|
||||
// file by reading from it. The reader must be positioned at the
|
||||
// start of the file before calling this function. The hdr result
|
||||
// is the string before the export data, either "$$" or "$$B".
|
||||
// The size result is the length of the export data in bytes, or -1 if not known.
|
||||
func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
|
||||
// ReadUnified reads the contents of the unified export data from a reader r
|
||||
// that contains the contents of a GC-created archive file.
|
||||
//
|
||||
// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
|
||||
//
|
||||
// Supported GC-created archive files have 4 layers of nesting:
|
||||
// - An archive file containing a package definition file.
|
||||
// - The package definition file contains headers followed by a data section.
|
||||
// Headers are lines (≤ 4kb) that do not start with "$$".
|
||||
// - The data section starts with "$$B\n" followed by export data followed
|
||||
// by an end of section marker "\n$$\n". (The section start "$$\n" is no
|
||||
// longer supported.)
|
||||
// - The export data starts with a format byte ('u') followed by the <data> in
|
||||
// the given format. (See ReadExportDataHeader for older formats.)
|
||||
//
|
||||
// Putting this together, the bytes in a GC-created archive files are expected
|
||||
// to look like the following.
|
||||
// See cmd/internal/archive for more details on ar file headers.
|
||||
//
|
||||
// | <!arch>\n | ar file signature
|
||||
// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
|
||||
// | go object <...>\n | objabi header
|
||||
// | <optional headers>\n | other headers such as build id
|
||||
// | $$B\n | binary format marker
|
||||
// | u<data>\n | unified export <data>
|
||||
// | $$\n | end-of-section marker
|
||||
// | [optional padding] | padding byte (0x0A) if size is odd
|
||||
// | [ar file header] | other ar files
|
||||
// | [ar file data] |
|
||||
func ReadUnified(r *bufio.Reader) (data []byte, err error) {
|
||||
// We historically guaranteed headers at the default buffer size (4096) work.
|
||||
// This ensures we can use ReadSlice throughout.
|
||||
const minBufferSize = 4096
|
||||
r = bufio.NewReaderSize(r, minBufferSize)
|
||||
|
||||
size, err := FindPackageDefinition(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n := size
|
||||
|
||||
objapi, headers, err := ReadObjectHeaders(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n -= len(objapi)
|
||||
for _, h := range headers {
|
||||
n -= len(h)
|
||||
}
|
||||
|
||||
hdrlen, err := ReadExportDataHeader(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n -= hdrlen
|
||||
|
||||
// size also includes the end of section marker. Remove that many bytes from the end.
|
||||
const marker = "\n$$\n"
|
||||
n -= len(marker)
|
||||
|
||||
if n < 0 {
|
||||
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
|
||||
return
|
||||
}
|
||||
|
||||
// Read n bytes from buf.
|
||||
data = make([]byte, n)
|
||||
_, err = io.ReadFull(r, data)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Check for marker at the end.
|
||||
var suffix [len(marker)]byte
|
||||
_, err = io.ReadFull(r, suffix[:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if s := string(suffix[:]); s != marker {
|
||||
err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// FindPackageDefinition positions the reader r at the beginning of a package
|
||||
// definition file ("__.PKGDEF") within a GC-created archive by reading
|
||||
// from it, and returns the size of the package definition file in the archive.
|
||||
//
|
||||
// The reader must be positioned at the start of the archive file before calling
|
||||
// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
|
||||
//
|
||||
// See cmd/internal/archive for details on the archive format.
|
||||
func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
|
||||
// Uses ReadSlice to limit risk of malformed inputs.
|
||||
|
||||
// Read first line to make sure this is an object file.
|
||||
line, err := r.ReadSlice('\n')
|
||||
if err != nil {
|
||||
|
@ -52,48 +186,236 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
if string(line) == "!<arch>\n" {
|
||||
// Archive file. Scan to __.PKGDEF.
|
||||
var name string
|
||||
if name, size, err = readGopackHeader(r); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// First entry should be __.PKGDEF.
|
||||
if name != "__.PKGDEF" {
|
||||
err = fmt.Errorf("go archive is missing __.PKGDEF")
|
||||
return
|
||||
}
|
||||
|
||||
// Read first line of __.PKGDEF data, so that line
|
||||
// is once again the first line of the input.
|
||||
if line, err = r.ReadSlice('\n'); err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
size -= int64(len(line))
|
||||
}
|
||||
|
||||
// Now at __.PKGDEF in archive or still at beginning of file.
|
||||
// Either way, line should begin with "go object ".
|
||||
if !strings.HasPrefix(string(line), "go object ") {
|
||||
err = fmt.Errorf("not a Go object file")
|
||||
// Is the first line an archive file signature?
|
||||
if string(line) != "!<arch>\n" {
|
||||
err = fmt.Errorf("not the start of an archive file (%q)", line)
|
||||
return
|
||||
}
|
||||
|
||||
// Skip over object header to export data.
|
||||
// Begins after first line starting with $$.
|
||||
for line[0] != '$' {
|
||||
if line, err = r.ReadSlice('\n'); err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
size -= int64(len(line))
|
||||
}
|
||||
hdr = string(line)
|
||||
if size < 0 {
|
||||
size = -1
|
||||
// package export block should be first
|
||||
size = readArchiveHeader(r, "__.PKGDEF")
|
||||
if size <= 0 {
|
||||
err = fmt.Errorf("not a package file")
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ReadObjectHeaders reads object headers from the reader. Object headers are
|
||||
// lines that do not start with an end-of-section marker "$$". The first header
|
||||
// is the objabi header. On success, the reader will be positioned at the beginning
|
||||
// of the end-of-section marker.
|
||||
//
|
||||
// It returns an error if any header does not fit in r.Size() bytes.
|
||||
func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
|
||||
// line is a temporary buffer for headers.
|
||||
// Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
|
||||
var line []byte
|
||||
|
||||
// objapi header should be the first line
|
||||
if line, err = r.ReadSlice('\n'); err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
objapi = string(line)
|
||||
|
||||
// objapi header begins with "go object ".
|
||||
if !strings.HasPrefix(objapi, "go object ") {
|
||||
err = fmt.Errorf("not a go object file: %s", objapi)
|
||||
return
|
||||
}
|
||||
|
||||
// process remaining object header lines
|
||||
for {
|
||||
// check for an end of section marker "$$"
|
||||
line, err = r.Peek(2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if string(line) == "$$" {
|
||||
return // stop
|
||||
}
|
||||
|
||||
// read next header
|
||||
line, err = r.ReadSlice('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
headers = append(headers, string(line))
|
||||
}
|
||||
}
|
||||
|
||||
// ReadExportDataHeader reads the export data header and format from r.
|
||||
// It returns the number of bytes read, or an error if the format is no longer
|
||||
// supported or it failed to read.
|
||||
//
|
||||
// The only currently supported format is binary export data in the
|
||||
// unified export format.
|
||||
func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
|
||||
// Read export data header.
|
||||
line, err := r.ReadSlice('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hdr := string(line)
|
||||
switch hdr {
|
||||
case "$$\n":
|
||||
err = fmt.Errorf("old textual export format no longer supported (recompile package)")
|
||||
return
|
||||
|
||||
case "$$B\n":
|
||||
var format byte
|
||||
format, err = r.ReadByte()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// The unified export format starts with a 'u'.
|
||||
switch format {
|
||||
case 'u':
|
||||
default:
|
||||
// Older no longer supported export formats include:
|
||||
// indexed export format which started with an 'i'; and
|
||||
// the older binary export format which started with a 'c',
|
||||
// 'd', or 'v' (from "version").
|
||||
err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
|
||||
return
|
||||
}
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("unknown export data header: %q", hdr)
|
||||
return
|
||||
}
|
||||
|
||||
n = len(hdr) + 1 // + 1 is for 'u'
|
||||
return
|
||||
}
|
||||
|
||||
// FindPkg returns the filename and unique package id for an import
|
||||
// path based on package information provided by build.Import (using
|
||||
// the build.Default build.Context). A relative srcDir is interpreted
|
||||
// relative to the current working directory.
|
||||
//
|
||||
// FindPkg is only used in tests within x/tools.
|
||||
func FindPkg(path, srcDir string) (filename, id string, err error) {
|
||||
// TODO(taking): Move internal/exportdata.FindPkg into its own file,
|
||||
// and then this copy into a _test package.
|
||||
if path == "" {
|
||||
return "", "", errors.New("path is empty")
|
||||
}
|
||||
|
||||
var noext string
|
||||
switch {
|
||||
default:
|
||||
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
|
||||
// Don't require the source files to be present.
|
||||
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
|
||||
srcDir = abs
|
||||
}
|
||||
var bp *build.Package
|
||||
bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
|
||||
if bp.PkgObj == "" {
|
||||
if bp.Goroot && bp.Dir != "" {
|
||||
filename, err = lookupGorootExport(bp.Dir)
|
||||
if err == nil {
|
||||
_, err = os.Stat(filename)
|
||||
}
|
||||
if err == nil {
|
||||
return filename, bp.ImportPath, nil
|
||||
}
|
||||
}
|
||||
goto notfound
|
||||
} else {
|
||||
noext = strings.TrimSuffix(bp.PkgObj, ".a")
|
||||
}
|
||||
id = bp.ImportPath
|
||||
|
||||
case build.IsLocalImport(path):
|
||||
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
|
||||
noext = filepath.Join(srcDir, path)
|
||||
id = noext
|
||||
|
||||
case filepath.IsAbs(path):
|
||||
// for completeness only - go/build.Import
|
||||
// does not support absolute imports
|
||||
// "/x" -> "/x.ext", "/x"
|
||||
noext = path
|
||||
id = path
|
||||
}
|
||||
|
||||
if false { // for debugging
|
||||
if path != id {
|
||||
fmt.Printf("%s -> %s\n", path, id)
|
||||
}
|
||||
}
|
||||
|
||||
// try extensions
|
||||
for _, ext := range pkgExts {
|
||||
filename = noext + ext
|
||||
f, statErr := os.Stat(filename)
|
||||
if statErr == nil && !f.IsDir() {
|
||||
return filename, id, nil
|
||||
}
|
||||
if err == nil {
|
||||
err = statErr
|
||||
}
|
||||
}
|
||||
|
||||
notfound:
|
||||
if err == nil {
|
||||
return "", path, fmt.Errorf("can't find import: %q", path)
|
||||
}
|
||||
return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
|
||||
}
|
||||
|
||||
var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
|
||||
|
||||
var exportMap sync.Map // package dir → func() (string, error)
|
||||
|
||||
// lookupGorootExport returns the location of the export data
|
||||
// (normally found in the build cache, but located in GOROOT/pkg
|
||||
// in prior Go releases) for the package located in pkgDir.
|
||||
//
|
||||
// (We use the package's directory instead of its import path
|
||||
// mainly to simplify handling of the packages in src/vendor
|
||||
// and cmd/vendor.)
|
||||
//
|
||||
// lookupGorootExport is only used in tests within x/tools.
|
||||
func lookupGorootExport(pkgDir string) (string, error) {
|
||||
f, ok := exportMap.Load(pkgDir)
|
||||
if !ok {
|
||||
var (
|
||||
listOnce sync.Once
|
||||
exportPath string
|
||||
err error
|
||||
)
|
||||
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
|
||||
listOnce.Do(func() {
|
||||
cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
|
||||
cmd.Dir = build.Default.GOROOT
|
||||
cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
|
||||
var output []byte
|
||||
output, err = cmd.Output()
|
||||
if err != nil {
|
||||
if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
|
||||
err = errors.New(string(ee.Stderr))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
|
||||
if len(exports) != 1 {
|
||||
err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
|
||||
return
|
||||
}
|
||||
|
||||
exportPath = exports[0]
|
||||
})
|
||||
|
||||
return exportPath, err
|
||||
})
|
||||
}
|
||||
|
||||
return f.(func() (string, error))()
|
||||
}
|
||||
|
|
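For orientation, a standalone sketch (not part of the vendored code) that reads only the archive signature and the first ar member header of a compiler-produced .a file, using the field widths documented above (16-byte name, 12+6+6+8 metadata, 10-byte size, 2-byte terminator); real callers should go through FindPackageDefinition/ReadUnified, which also validate the export data markers:

// Standalone sketch: print the first ar member of a Go .a archive, which is
// expected to be "__.PKGDEF", the package definition carrying export data.
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"
)

func main() {
	f, err := os.Open(os.Args[1]) // path to a .a archive
	if err != nil {
		panic(err)
	}
	defer f.Close()
	r := bufio.NewReader(f)

	sig, _ := r.ReadString('\n')
	if sig != "!<arch>\n" {
		panic("not an archive file")
	}

	// Classic ar header: name(16) mtime(12) uid(6) gid(6) mode(8) size(10) term(2).
	hdr := make([]byte, 16+12+6+6+8+10+2)
	if _, err := io.ReadFull(r, hdr); err != nil {
		panic(err)
	}
	name := strings.TrimSpace(string(hdr[:16]))
	size, _ := strconv.Atoi(strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])))
	fmt.Printf("first member %q, %d bytes of package definition data\n", name, size)
}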
182 vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go generated vendored
@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter"
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -45,125 +39,14 @@ const (
|
|||
trace = false
|
||||
)
|
||||
|
||||
var exportMap sync.Map // package dir → func() (string, bool)
|
||||
|
||||
// lookupGorootExport returns the location of the export data
|
||||
// (normally found in the build cache, but located in GOROOT/pkg
|
||||
// in prior Go releases) for the package located in pkgDir.
|
||||
//
|
||||
// (We use the package's directory instead of its import path
|
||||
// mainly to simplify handling of the packages in src/vendor
|
||||
// and cmd/vendor.)
|
||||
func lookupGorootExport(pkgDir string) (string, bool) {
|
||||
f, ok := exportMap.Load(pkgDir)
|
||||
if !ok {
|
||||
var (
|
||||
listOnce sync.Once
|
||||
exportPath string
|
||||
)
|
||||
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
|
||||
listOnce.Do(func() {
|
||||
cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
|
||||
cmd.Dir = build.Default.GOROOT
|
||||
var output []byte
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
|
||||
if len(exports) != 1 {
|
||||
return
|
||||
}
|
||||
|
||||
exportPath = exports[0]
|
||||
})
|
||||
|
||||
return exportPath, exportPath != ""
|
||||
})
|
||||
}
|
||||
|
||||
return f.(func() (string, bool))()
|
||||
}
|
||||
|
||||
var pkgExts = [...]string{".a", ".o"}
|
||||
|
||||
// FindPkg returns the filename and unique package id for an import
|
||||
// path based on package information provided by build.Import (using
|
||||
// the build.Default build.Context). A relative srcDir is interpreted
|
||||
// relative to the current working directory.
|
||||
// If no file was found, an empty filename is returned.
|
||||
func FindPkg(path, srcDir string) (filename, id string) {
|
||||
if path == "" {
|
||||
return
|
||||
}
|
||||
|
||||
var noext string
|
||||
switch {
|
||||
default:
|
||||
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
|
||||
// Don't require the source files to be present.
|
||||
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
|
||||
srcDir = abs
|
||||
}
|
||||
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
|
||||
if bp.PkgObj == "" {
|
||||
var ok bool
|
||||
if bp.Goroot && bp.Dir != "" {
|
||||
filename, ok = lookupGorootExport(bp.Dir)
|
||||
}
|
||||
if !ok {
|
||||
id = path // make sure we have an id to print in error message
|
||||
return
|
||||
}
|
||||
} else {
|
||||
noext = strings.TrimSuffix(bp.PkgObj, ".a")
|
||||
id = bp.ImportPath
|
||||
}
|
||||
|
||||
case build.IsLocalImport(path):
|
||||
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
|
||||
noext = filepath.Join(srcDir, path)
|
||||
id = noext
|
||||
|
||||
case filepath.IsAbs(path):
|
||||
// for completeness only - go/build.Import
|
||||
// does not support absolute imports
|
||||
// "/x" -> "/x.ext", "/x"
|
||||
noext = path
|
||||
id = path
|
||||
}
|
||||
|
||||
if false { // for debugging
|
||||
if path != id {
|
||||
fmt.Printf("%s -> %s\n", path, id)
|
||||
}
|
||||
}
|
||||
|
||||
if filename != "" {
|
||||
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// try extensions
|
||||
for _, ext := range pkgExts {
|
||||
filename = noext + ext
|
||||
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
filename = "" // not found
|
||||
return
|
||||
}
|
||||
|
||||
// Import imports a gc-generated package given its import path and srcDir, adds
|
||||
// the corresponding package object to the packages map, and returns the object.
|
||||
// The packages map must contain all packages already imported.
|
||||
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
|
||||
//
|
||||
// Import is only used in tests.
|
||||
func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
|
||||
var rc io.ReadCloser
|
||||
var filename, id string
|
||||
var id string
|
||||
if lookup != nil {
|
||||
// With custom lookup specified, assume that caller has
|
||||
// converted path to a canonical import path for use in the map.
|
||||
|
@ -182,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
|
|||
}
|
||||
rc = f
|
||||
} else {
|
||||
filename, id = FindPkg(path, srcDir)
|
||||
var filename string
|
||||
filename, id, err = FindPkg(path, srcDir)
|
||||
if filename == "" {
|
||||
if path == "unsafe" {
|
||||
return types.Unsafe, nil
|
||||
}
|
||||
return nil, fmt.Errorf("can't find import: %q", id)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// no need to re-import if the package was imported completely before
|
||||
|
@ -210,57 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
|
|||
}
|
||||
defer rc.Close()
|
||||
|
||||
var hdr string
|
||||
var size int64
|
||||
buf := bufio.NewReader(rc)
|
||||
if hdr, size, err = FindExportData(buf); err != nil {
|
||||
data, err := ReadUnified(buf)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("import %q: %v", path, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch hdr {
|
||||
case "$$B\n":
|
||||
var data []byte
|
||||
data, err = io.ReadAll(buf)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// TODO(gri): allow clients of go/importer to provide a FileSet.
|
||||
// Or, define a new standard go/types/gcexportdata package.
|
||||
fset := token.NewFileSet()
|
||||
|
||||
// Select appropriate importer.
|
||||
if len(data) > 0 {
|
||||
switch data[0] {
|
||||
case 'v', 'c', 'd': // binary, till go1.10
|
||||
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
|
||||
|
||||
case 'i': // indexed, till go1.19
|
||||
_, pkg, err := IImportData(fset, packages, data[1:], id)
|
||||
return pkg, err
|
||||
|
||||
case 'u': // unified, from go1.20
|
||||
_, pkg, err := UImportData(fset, packages, data[1:size], id)
|
||||
return pkg, err
|
||||
|
||||
default:
|
||||
l := len(data)
|
||||
if l > 10 {
|
||||
l = 10
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("unknown export data header: %q", hdr)
|
||||
}
|
||||
// unified: emitted by cmd/compile since go1.20.
|
||||
_, pkg, err = UImportData(fset, packages, data, id)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type byPath []*types.Package
|
||||
|
||||
func (a byPath) Len() int { return len(a) }
|
||||
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
|
||||
|
|
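A hedged sketch of the new call shape for Import after this change (the caller now supplies the token.FileSet, and FindPkg reports an error instead of an empty filename); the package is internal to x/tools and, per the comment above, Import is only used in tests there:

// Sketch (usable only from within golang.org/x/tools): import a package's
// export data with the updated Import signature.
package example

import (
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/gcimporter"
)

func importForTest(path, srcDir string) (*types.Package, error) {
	fset := token.NewFileSet()
	packages := make(map[string]*types.Package)
	// A nil lookup means: locate export data on disk via FindPkg.
	return gcimporter.Import(fset, packages, path, srcDir, nil)
}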
284 vendor/golang.org/x/tools/internal/gcimporter/iexport.go generated vendored
@ -2,9 +2,227 @@
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Indexed binary package export.
|
||||
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
|
||||
// see that file for specification of the format.
|
||||
// Indexed package export.
|
||||
//
|
||||
// The indexed export data format is an evolution of the previous
|
||||
// binary export data format. Its chief contribution is introducing an
|
||||
// index table, which allows efficient random access of individual
|
||||
// declarations and inline function bodies. In turn, this allows
|
||||
// avoiding unnecessary work for compilation units that import large
|
||||
// packages.
|
||||
//
|
||||
//
|
||||
// The top-level data format is structured as:
|
||||
//
|
||||
// Header struct {
|
||||
// Tag byte // 'i'
|
||||
// Version uvarint
|
||||
// StringSize uvarint
|
||||
// DataSize uvarint
|
||||
// }
|
||||
//
|
||||
// Strings [StringSize]byte
|
||||
// Data [DataSize]byte
|
||||
//
|
||||
// MainIndex []struct{
|
||||
// PkgPath stringOff
|
||||
// PkgName stringOff
|
||||
// PkgHeight uvarint
|
||||
//
|
||||
// Decls []struct{
|
||||
// Name stringOff
|
||||
// Offset declOff
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Fingerprint [8]byte
|
||||
//
|
||||
// uvarint means a uint64 written out using uvarint encoding.
|
||||
//
|
||||
// []T means a uvarint followed by that many T objects. In other
|
||||
// words:
|
||||
//
|
||||
// Len uvarint
|
||||
// Elems [Len]T
|
||||
//
|
||||
// stringOff means a uvarint that indicates an offset within the
|
||||
// Strings section. At that offset is another uvarint, followed by
|
||||
// that many bytes, which form the string value.
|
||||
//
|
||||
// declOff means a uvarint that indicates an offset within the Data
|
||||
// section where the associated declaration can be found.
|
||||
//
|
||||
//
|
||||
// There are five kinds of declarations, distinguished by their first
|
||||
// byte:
|
||||
//
|
||||
// type Var struct {
|
||||
// Tag byte // 'V'
|
||||
// Pos Pos
|
||||
// Type typeOff
|
||||
// }
|
||||
//
|
||||
// type Func struct {
|
||||
// Tag byte // 'F' or 'G'
|
||||
// Pos Pos
|
||||
// TypeParams []typeOff // only present if Tag == 'G'
|
||||
// Signature Signature
|
||||
// }
|
||||
//
|
||||
// type Const struct {
|
||||
// Tag byte // 'C'
|
||||
// Pos Pos
|
||||
// Value Value
|
||||
// }
|
||||
//
|
||||
// type Type struct {
|
||||
// Tag byte // 'T' or 'U'
|
||||
// Pos Pos
|
||||
// TypeParams []typeOff // only present if Tag == 'U'
|
||||
// Underlying typeOff
|
||||
//
|
||||
// Methods []struct{ // omitted if Underlying is an interface type
|
||||
// Pos Pos
|
||||
// Name stringOff
|
||||
// Recv Param
|
||||
// Signature Signature
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// type Alias struct {
|
||||
// Tag byte // 'A' or 'B'
|
||||
// Pos Pos
|
||||
// TypeParams []typeOff // only present if Tag == 'B'
|
||||
// Type typeOff
|
||||
// }
|
||||
//
|
||||
// // "Automatic" declaration of each typeparam
|
||||
// type TypeParam struct {
|
||||
// Tag byte // 'P'
|
||||
// Pos Pos
|
||||
// Implicit bool
|
||||
// Constraint typeOff
|
||||
// }
|
||||
//
|
||||
// typeOff means a uvarint that either indicates a predeclared type,
|
||||
// or an offset into the Data section. If the uvarint is less than
|
||||
// predeclReserved, then it indicates the index into the predeclared
|
||||
// types list (see predeclared in bexport.go for order). Otherwise,
|
||||
// subtracting predeclReserved yields the offset of a type descriptor.
|
||||
//
|
||||
// Value means a type, kind, and type-specific value. See
|
||||
// (*exportWriter).value for details.
|
||||
//
|
||||
//
|
||||
// There are twelve kinds of type descriptors, distinguished by an itag:
|
||||
//
|
||||
// type DefinedType struct {
|
||||
// Tag itag // definedType
|
||||
// Name stringOff
|
||||
// PkgPath stringOff
|
||||
// }
|
||||
//
|
||||
// type PointerType struct {
|
||||
// Tag itag // pointerType
|
||||
// Elem typeOff
|
||||
// }
|
||||
//
|
||||
// type SliceType struct {
|
||||
// Tag itag // sliceType
|
||||
// Elem typeOff
|
||||
// }
|
||||
//
|
||||
// type ArrayType struct {
|
||||
// Tag itag // arrayType
|
||||
// Len uint64
|
||||
// Elem typeOff
|
||||
// }
|
||||
//
|
||||
// type ChanType struct {
|
||||
// Tag itag // chanType
|
||||
// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
|
||||
// Elem typeOff
|
||||
// }
|
||||
//
|
||||
// type MapType struct {
|
||||
// Tag itag // mapType
|
||||
// Key typeOff
|
||||
// Elem typeOff
|
||||
// }
|
||||
//
|
||||
// type FuncType struct {
|
||||
// Tag itag // signatureType
|
||||
// PkgPath stringOff
|
||||
// Signature Signature
|
||||
// }
|
||||
//
|
||||
// type StructType struct {
|
||||
// Tag itag // structType
|
||||
// PkgPath stringOff
|
||||
// Fields []struct {
|
||||
// Pos Pos
|
||||
// Name stringOff
|
||||
// Type typeOff
|
||||
// Embedded bool
|
||||
// Note stringOff
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// type InterfaceType struct {
|
||||
// Tag itag // interfaceType
|
||||
// PkgPath stringOff
|
||||
// Embeddeds []struct {
|
||||
// Pos Pos
|
||||
// Type typeOff
|
||||
// }
|
||||
// Methods []struct {
|
||||
// Pos Pos
|
||||
// Name stringOff
|
||||
// Signature Signature
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // Reference to a type param declaration
|
||||
// type TypeParamType struct {
|
||||
// Tag itag // typeParamType
|
||||
// Name stringOff
|
||||
// PkgPath stringOff
|
||||
// }
|
||||
//
|
||||
// // Instantiation of a generic type (like List[T2] or List[int])
|
||||
// type InstanceType struct {
|
||||
// Tag itag // instanceType
|
||||
// Pos pos
|
||||
// TypeArgs []typeOff
|
||||
// BaseType typeOff
|
||||
// }
|
||||
//
|
||||
// type UnionType struct {
|
||||
// Tag itag // interfaceType
|
||||
// Terms []struct {
|
||||
// tilde bool
|
||||
// Type typeOff
|
||||
// }
|
||||
// }
|
||||
//
|
||||
//
|
||||
//
|
||||
// type Signature struct {
|
||||
// Params []Param
|
||||
// Results []Param
|
||||
// Variadic bool // omitted if Results is empty
|
||||
// }
|
||||
//
|
||||
// type Param struct {
|
||||
// Pos Pos
|
||||
// Name stringOff
|
||||
// Type typOff
|
||||
// }
|
||||
//
|
||||
//
|
||||
// Pos encodes a file:line:column triple, incorporating a simple delta
|
||||
// encoding scheme within a data object. See exportWriter.pos for
|
||||
// details.
|
||||
|
||||
package gcimporter
|
||||
|
||||
|
@ -24,11 +242,30 @@ import (
|
|||
|
||||
"golang.org/x/tools/go/types/objectpath"
|
||||
"golang.org/x/tools/internal/aliases"
|
||||
"golang.org/x/tools/internal/tokeninternal"
|
||||
)
|
||||
|
||||
// IExportShallow encodes "shallow" export data for the specified package.
|
||||
//
|
||||
// For types, we use "shallow" export data. Historically, the Go
|
||||
// compiler always produced a summary of the types for a given package
|
||||
// that included types from other packages that it indirectly
|
||||
// referenced: "deep" export data. This had the advantage that the
|
||||
// compiler (and analogous tools such as gopls) need only load one
|
||||
// file per direct import. However, it meant that the files tended to
|
||||
// get larger based on the level of the package in the import
|
||||
// graph. For example, higher-level packages in the kubernetes module
|
||||
// have over 1MB of "deep" export data, even when they have almost no
|
||||
// content of their own, merely because they mention a major type that
|
||||
// references many others. In pathological cases the export data was
|
||||
// 300x larger than the source for a package due to this quadratic
|
||||
// growth.
|
||||
//
|
||||
// "Shallow" export data means that the serialized types describe only
|
||||
// a single package. If those types mention types from other packages,
|
||||
// the type checker may need to request additional packages beyond
|
||||
// just the direct imports. Type information for the entire transitive
|
||||
// closure of imports is provided (lazily) by the DAG.
|
||||
//
|
||||
// No promises are made about the encoding other than that it can be decoded by
|
||||
// the same version of IIExportShallow. If you plan to save export data in the
|
||||
// file system, be sure to include a cryptographic digest of the executable in
|
||||
|
@ -51,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc)
|
|||
}
|
||||
|
||||
// IImportShallow decodes "shallow" types.Package data encoded by
|
||||
// IExportShallow in the same executable. This function cannot import data from
|
||||
// cmd/compile or gcexportdata.Write.
|
||||
// [IExportShallow] in the same executable. This function cannot import data
|
||||
// from cmd/compile or gcexportdata.Write.
|
||||
//
|
||||
// The importer calls getPackages to obtain package symbols for all
|
||||
// packages mentioned in the export data, including the one being
|
||||
|
@ -223,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64)
|
|||
// Sort the set of needed offsets. Duplicates are harmless.
|
||||
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
|
||||
|
||||
lines := tokeninternal.GetLines(file) // byte offset of each line start
|
||||
lines := file.Lines() // byte offset of each line start
|
||||
w.uint64(uint64(len(lines)))
|
||||
|
||||
// Rather than record the entire array of line start offsets,
|
||||
|
@ -507,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) {
|
|||
case *types.TypeName:
|
||||
t := obj.Type()
|
||||
|
||||
if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok {
|
||||
if tparam, ok := types.Unalias(t).(*types.TypeParam); ok {
|
||||
w.tag(typeParamTag)
|
||||
w.pos(obj.Pos())
|
||||
constraint := tparam.Constraint()
|
||||
if p.version >= iexportVersionGo1_18 {
|
||||
implicit := false
|
||||
if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil {
|
||||
if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil {
|
||||
implicit = iface.IsImplicit()
|
||||
}
|
||||
w.bool(implicit)
|
||||
|
@ -523,9 +760,22 @@ func (p *iexporter) doDecl(obj types.Object) {
|
|||
}
|
||||
|
||||
if obj.IsAlias() {
|
||||
w.tag(aliasTag)
|
||||
alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled
|
||||
|
||||
var tparams *types.TypeParamList
|
||||
if materialized {
|
||||
tparams = aliases.TypeParams(alias)
|
||||
}
|
||||
if tparams.Len() == 0 {
|
||||
w.tag(aliasTag)
|
||||
} else {
|
||||
w.tag(genericAliasTag)
|
||||
}
|
||||
w.pos(obj.Pos())
|
||||
if alias, ok := t.(*aliases.Alias); ok {
|
||||
if tparams.Len() > 0 {
|
||||
w.tparamList(obj.Name(), tparams, obj.Pkg())
|
||||
}
|
||||
if materialized {
|
||||
// Preserve materialized aliases,
|
||||
// even of non-exported types.
|
||||
t = aliases.Rhs(alias)
|
||||
|
@ -744,8 +994,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
|
|||
}()
|
||||
}
|
||||
switch t := t.(type) {
|
||||
case *aliases.Alias:
|
||||
// TODO(adonovan): support parameterized aliases, following *types.Named.
|
||||
case *types.Alias:
|
||||
if targs := aliases.TypeArgs(t); targs.Len() > 0 {
|
||||
w.startType(instanceType)
|
||||
w.pos(t.Obj().Pos())
|
||||
w.typeList(targs, pkg)
|
||||
w.typ(aliases.Origin(t), pkg)
|
||||
return
|
||||
}
|
||||
w.startType(aliasType)
|
||||
w.qualifiedType(t.Obj())
|
||||
|
||||
|
@ -854,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
|
|||
for i := 0; i < n; i++ {
|
||||
ft := t.EmbeddedType(i)
|
||||
tPkg := pkg
|
||||
if named, _ := aliases.Unalias(ft).(*types.Named); named != nil {
|
||||
if named, _ := types.Unalias(ft).(*types.Named); named != nil {
|
||||
w.pos(named.Obj().Pos())
|
||||
} else {
|
||||
w.pos(token.NoPos)
|
||||
|
|
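The sizes and offsets in the indexed format described above are uvarints; a tiny standalone sketch (not part of the vendored code) of that encoding using encoding/binary:

// Standalone sketch: encode and decode a uvarint, the variable-length integer
// used for StringSize, DataSize, and the various offsets in the format above.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], 300) // e.g. a StringSize or DataSize field
	v, read := binary.Uvarint(buf[:n])
	fmt.Printf("encoded in %d bytes, decoded %d (consumed %d)\n", n, v, read)
}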
55 vendor/golang.org/x/tools/internal/gcimporter/iimport.go generated vendored
@ -3,9 +3,7 @@
// license that can be found in the LICENSE file.
|
||||
|
||||
// Indexed package import.
|
||||
// See cmd/compile/internal/gc/iexport.go for the export data format.
|
||||
|
||||
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
|
||||
// See iexport.go for the export data format.
|
||||
|
||||
package gcimporter
|
||||
|
||||
|
@ -53,6 +51,7 @@ const (
|
|||
iexportVersionPosCol = 1
|
||||
iexportVersionGo1_18 = 2
|
||||
iexportVersionGenerics = 2
|
||||
iexportVersion = iexportVersionGenerics
|
||||
|
||||
iexportVersionCurrent = 2
|
||||
)
|
||||
|
@ -540,7 +539,7 @@ func canReuse(def *types.Named, rhs types.Type) bool {
|
|||
if def == nil {
|
||||
return true
|
||||
}
|
||||
iface, _ := aliases.Unalias(rhs).(*types.Interface)
|
||||
iface, _ := types.Unalias(rhs).(*types.Interface)
|
||||
if iface == nil {
|
||||
return true
|
||||
}
|
||||
|
@ -557,19 +556,28 @@ type importReader struct {
|
|||
prevColumn int64
|
||||
}
|
||||
|
||||
// markBlack is redefined in iimport_go123.go, to work around golang/go#69912.
|
||||
//
|
||||
// If TypeNames are not marked black (in the sense of go/types cycle
|
||||
// detection), they may be mutated when dot-imported. Fix this by punching a
|
||||
// hole through the type, when compiling with Go 1.23. (The bug has been fixed
|
||||
// for 1.24, but the fix was not worth back-porting).
|
||||
var markBlack = func(name *types.TypeName) {}
|
||||
|
||||
func (r *importReader) obj(name string) {
|
||||
tag := r.byte()
|
||||
pos := r.pos()
|
||||
|
||||
switch tag {
|
||||
case aliasTag:
|
||||
case aliasTag, genericAliasTag:
|
||||
var tparams []*types.TypeParam
|
||||
if tag == genericAliasTag {
|
||||
tparams = r.tparamList()
|
||||
}
|
||||
typ := r.typ()
|
||||
// TODO(adonovan): support generic aliases:
|
||||
// if tag == genericAliasTag {
|
||||
// tparams := r.tparamList()
|
||||
// alias.SetTypeParams(tparams)
|
||||
// }
|
||||
r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ))
|
||||
obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
|
||||
markBlack(obj) // workaround for golang/go#69912
|
||||
r.declare(obj)
|
||||
|
||||
case constTag:
|
||||
typ, val := r.value()
|
||||
|
@ -589,6 +597,9 @@ func (r *importReader) obj(name string) {
|
|||
// declaration before recursing.
|
||||
obj := types.NewTypeName(pos, r.currPkg, name, nil)
|
||||
named := types.NewNamed(obj, nil, nil)
|
||||
|
||||
markBlack(obj) // workaround for golang/go#69912
|
||||
|
||||
// Declare obj before calling r.tparamList, so the new type name is recognized
|
||||
// if used in the constraint of one of its own typeparams (see #48280).
|
||||
r.declare(obj)
|
||||
|
@ -615,7 +626,7 @@ func (r *importReader) obj(name string) {
|
|||
if targs.Len() > 0 {
|
||||
rparams = make([]*types.TypeParam, targs.Len())
|
||||
for i := range rparams {
|
||||
rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam)
|
||||
rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam)
|
||||
}
|
||||
}
|
||||
msig := r.signature(recv, rparams, nil)
|
||||
|
@ -645,7 +656,7 @@ func (r *importReader) obj(name string) {
|
|||
}
|
||||
constraint := r.typ()
|
||||
if implicit {
|
||||
iface, _ := aliases.Unalias(constraint).(*types.Interface)
|
||||
iface, _ := types.Unalias(constraint).(*types.Interface)
|
||||
if iface == nil {
|
||||
errorf("non-interface constraint marked implicit")
|
||||
}
|
||||
|
@ -660,7 +671,9 @@ func (r *importReader) obj(name string) {
|
|||
case varTag:
|
||||
typ := r.typ()
|
||||
|
||||
r.declare(types.NewVar(pos, r.currPkg, name, typ))
|
||||
v := types.NewVar(pos, r.currPkg, name, typ)
|
||||
typesinternal.SetVarKind(v, typesinternal.PackageVar)
|
||||
r.declare(v)
|
||||
|
||||
default:
|
||||
errorf("unexpected tag: %v", tag)
|
||||
|
@ -852,7 +865,7 @@ func (r *importReader) typ() types.Type {
|
|||
}
|
||||
|
||||
func isInterface(t types.Type) bool {
|
||||
_, ok := aliases.Unalias(t).(*types.Interface)
|
||||
_, ok := types.Unalias(t).(*types.Interface)
|
||||
return ok
|
||||
}
|
||||
|
||||
|
@ -862,7 +875,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
|
|||
func (r *importReader) doType(base *types.Named) (res types.Type) {
|
||||
k := r.kind()
|
||||
if debug {
|
||||
r.p.trace("importing type %d (base: %s)", k, base)
|
||||
r.p.trace("importing type %d (base: %v)", k, base)
|
||||
r.p.indent++
|
||||
defer func() {
|
||||
r.p.indent--
|
||||
|
@ -959,7 +972,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {
|
|||
methods[i] = method
|
||||
}
|
||||
|
||||
typ := newInterface(methods, embeddeds)
|
||||
typ := types.NewInterfaceType(methods, embeddeds)
|
||||
r.p.interfaceList = append(r.p.interfaceList, typ)
|
||||
return typ
|
||||
|
||||
|
@ -1051,7 +1064,7 @@ func (r *importReader) tparamList() []*types.TypeParam {
|
|||
for i := range xs {
|
||||
// Note: the standard library importer is tolerant of nil types here,
|
||||
// though would panic in SetTypeParams.
|
||||
xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam)
|
||||
xs[i] = types.Unalias(r.typ()).(*types.TypeParam)
|
||||
}
|
||||
return xs
|
||||
}
|
||||
|
@ -1098,3 +1111,9 @@ func (r *importReader) byte() byte {
|
|||
}
|
||||
return x
|
||||
}
|
||||
|
||||
type byPath []*types.Package
|
||||
|
||||
func (a byPath) Len() int { return len(a) }
|
||||
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
|
||||
|
|
53 vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go generated vendored Normal file
@ -0,0 +1,53 @@
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.22 && !go1.24
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"go/token"
|
||||
"go/types"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// TODO(rfindley): delete this workaround once go1.24 is assured.
|
||||
|
||||
func init() {
|
||||
// Update markBlack so that it correctly sets the color
|
||||
// of imported TypeNames.
|
||||
//
|
||||
// See the doc comment for markBlack for details.
|
||||
|
||||
type color uint32
|
||||
const (
|
||||
white color = iota
|
||||
black
|
||||
grey
|
||||
)
|
||||
type object struct {
|
||||
_ *types.Scope
|
||||
_ token.Pos
|
||||
_ *types.Package
|
||||
_ string
|
||||
_ types.Type
|
||||
_ uint32
|
||||
color_ color
|
||||
_ token.Pos
|
||||
}
|
||||
type typeName struct {
|
||||
object
|
||||
}
|
||||
|
||||
// If the size of types.TypeName changes, this will fail to compile.
|
||||
const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{}))
|
||||
var _ [-delta * delta]int
|
||||
|
||||
markBlack = func(obj *types.TypeName) {
|
||||
type uP = unsafe.Pointer
|
||||
var ptr *typeName
|
||||
*(*uP)(uP(&ptr)) = uP(obj)
|
||||
ptr.color_ = black
|
||||
}
|
||||
}
|
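Note: the new iimport_go122.go above mirrors the layout of types.TypeName in a shadow struct and breaks the build if the two sizes ever drift, before poking the color field through unsafe. Here is the same compile-time size assertion in isolation; the upstream/mirror structs are stand-ins, not the real go/types layout.

package main

import (
	"fmt"
	"unsafe"
)

type upstream struct{ a, b uint64 } // stand-in for the library's struct
type mirror struct{ a, b uint64 }   // local copy that must stay layout-compatible

// If the sizes ever differ, delta is non-zero, the array length below is
// negative, and the package stops compiling.
const delta = int64(unsafe.Sizeof(mirror{})) - int64(unsafe.Sizeof(upstream{}))

var _ [-delta * delta]int

func main() {
	fmt.Println("sizes match:", unsafe.Sizeof(upstream{}) == unsafe.Sizeof(mirror{}))
}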
vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go (generated, vendored, deleted, 22 lines)
|
@ -1,22 +0,0 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.11
|
||||
// +build !go1.11
|
||||
|
||||
package gcimporter
|
||||
|
||||
import "go/types"
|
||||
|
||||
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
|
||||
named := make([]*types.Named, len(embeddeds))
|
||||
for i, e := range embeddeds {
|
||||
var ok bool
|
||||
named[i], ok = e.(*types.Named)
|
||||
if !ok {
|
||||
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
|
||||
}
|
||||
}
|
||||
return types.NewInterface(methods, named)
|
||||
}
|
vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go (generated, vendored, deleted, 14 lines)
|
@ -1,14 +0,0 @@
|
|||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.11
|
||||
// +build go1.11
|
||||
|
||||
package gcimporter
|
||||
|
||||
import "go/types"
|
||||
|
||||
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
|
||||
return types.NewInterfaceType(methods, embeddeds)
|
||||
}
|
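Note: both newInterface shim files are deleted because every supported Go version now has types.NewInterfaceType, which the importer calls directly (see the iimport.go hunk earlier in this diff). A small sketch of that constructor; the Name method and string result are invented for the example.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	str := types.Typ[types.String]
	sig := types.NewSignatureType(nil, nil, nil, nil,
		types.NewTuple(types.NewVar(token.NoPos, nil, "", str)), false)
	m := types.NewFunc(token.NoPos, nil, "Name", sig)

	iface := types.NewInterfaceType([]*types.Func{m}, nil)
	iface.Complete() // must be completed before use
	fmt.Println(iface) // interface{Name() string}
}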
vendor/golang.org/x/tools/internal/gcimporter/predeclared.go (generated, vendored, new file, 91 lines)
|
@ -0,0 +1,91 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// predecl is a cache for the predeclared types in types.Universe.
|
||||
//
|
||||
// Cache a distinct result based on the runtime value of any.
|
||||
// The pointer value of the any type varies based on GODEBUG settings.
|
||||
var predeclMu sync.Mutex
|
||||
var predecl map[types.Type][]types.Type
|
||||
|
||||
func predeclared() []types.Type {
|
||||
anyt := types.Universe.Lookup("any").Type()
|
||||
|
||||
predeclMu.Lock()
|
||||
defer predeclMu.Unlock()
|
||||
|
||||
if pre, ok := predecl[anyt]; ok {
|
||||
return pre
|
||||
}
|
||||
|
||||
if predecl == nil {
|
||||
predecl = make(map[types.Type][]types.Type)
|
||||
}
|
||||
|
||||
decls := []types.Type{ // basic types
|
||||
types.Typ[types.Bool],
|
||||
types.Typ[types.Int],
|
||||
types.Typ[types.Int8],
|
||||
types.Typ[types.Int16],
|
||||
types.Typ[types.Int32],
|
||||
types.Typ[types.Int64],
|
||||
types.Typ[types.Uint],
|
||||
types.Typ[types.Uint8],
|
||||
types.Typ[types.Uint16],
|
||||
types.Typ[types.Uint32],
|
||||
types.Typ[types.Uint64],
|
||||
types.Typ[types.Uintptr],
|
||||
types.Typ[types.Float32],
|
||||
types.Typ[types.Float64],
|
||||
types.Typ[types.Complex64],
|
||||
types.Typ[types.Complex128],
|
||||
types.Typ[types.String],
|
||||
|
||||
// basic type aliases
|
||||
types.Universe.Lookup("byte").Type(),
|
||||
types.Universe.Lookup("rune").Type(),
|
||||
|
||||
// error
|
||||
types.Universe.Lookup("error").Type(),
|
||||
|
||||
// untyped types
|
||||
types.Typ[types.UntypedBool],
|
||||
types.Typ[types.UntypedInt],
|
||||
types.Typ[types.UntypedRune],
|
||||
types.Typ[types.UntypedFloat],
|
||||
types.Typ[types.UntypedComplex],
|
||||
types.Typ[types.UntypedString],
|
||||
types.Typ[types.UntypedNil],
|
||||
|
||||
// package unsafe
|
||||
types.Typ[types.UnsafePointer],
|
||||
|
||||
// invalid type
|
||||
types.Typ[types.Invalid], // only appears in packages with errors
|
||||
|
||||
// used internally by gc; never used by this package or in .a files
|
||||
anyType{},
|
||||
|
||||
// comparable
|
||||
types.Universe.Lookup("comparable").Type(),
|
||||
|
||||
// any
|
||||
anyt,
|
||||
}
|
||||
|
||||
predecl[anyt] = decls
|
||||
return decls
|
||||
}
|
||||
|
||||
type anyType struct{}
|
||||
|
||||
func (t anyType) Underlying() types.Type { return t }
|
||||
func (t anyType) String() string { return "any" }
|
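Note: predeclared.go above keys its cache on the identity of the universe "any" type because, as its comment says, that value varies with GODEBUG settings (the gotypesalias setting in current Go releases). A quick way to see which representation a given process gets; the output shown in the comment is only indicative.

package main

import (
	"fmt"
	"go/types"
)

func main() {
	anyt := types.Universe.Lookup("any").Type()
	// The dynamic type (and therefore the identity) of "any" depends on the
	// process's GODEBUG gotypesalias setting: *types.Alias or *types.Interface.
	fmt.Printf("%T: %v\n", anyt, anyt)
}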
vendor/golang.org/x/tools/internal/gcimporter/support.go (generated, vendored, new file, 30 lines)
|
@ -0,0 +1,30 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader.
|
||||
func readArchiveHeader(b *bufio.Reader, name string) int {
|
||||
// architecture-independent object file output
|
||||
const HeaderSize = 60
|
||||
|
||||
var buf [HeaderSize]byte
|
||||
if _, err := io.ReadFull(b, buf[:]); err != nil {
|
||||
return -1
|
||||
}
|
||||
aname := strings.Trim(string(buf[0:16]), " ")
|
||||
if !strings.HasPrefix(aname, name) {
|
||||
return -1
|
||||
}
|
||||
asize := strings.Trim(string(buf[48:58]), " ")
|
||||
i, _ := strconv.Atoi(asize)
|
||||
return i
|
||||
}
|
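Note: readArchiveHeader in the new support.go parses the fixed 60-byte ar entry header, with the name in bytes 0:16 and the decimal size in bytes 48:58. A standalone sketch with a hand-built header; the field values ("__.PKGDEF", size 42, and so on) are invented for illustration.

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

func main() {
	// A fake 60-byte ar entry header: name(16) mtime(12) uid(6) gid(6)
	// mode(8) size(10) magic(2). Values are illustrative only.
	hdr := fmt.Sprintf("%-16s%-12s%-6s%-6s%-8s%-10s%-2s",
		"__.PKGDEF", "0", "0", "0", "644", "42", "`\n")

	r := bufio.NewReader(strings.NewReader(hdr))
	var buf [60]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		panic(err)
	}
	name := strings.Trim(string(buf[0:16]), " ")
	size := strings.Trim(string(buf[48:58]), " ")
	fmt.Printf("entry %q, size %s bytes\n", name, size) // entry "__.PKGDEF", size 42 bytes
}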
vendor/golang.org/x/tools/internal/gcimporter/support_go118.go (generated, vendored, deleted, 34 lines)
|
@ -1,34 +0,0 @@
|
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import "go/types"
|
||||
|
||||
const iexportVersion = iexportVersionGenerics
|
||||
|
||||
// additionalPredeclared returns additional predeclared types in go.1.18.
|
||||
func additionalPredeclared() []types.Type {
|
||||
return []types.Type{
|
||||
// comparable
|
||||
types.Universe.Lookup("comparable").Type(),
|
||||
|
||||
// any
|
||||
types.Universe.Lookup("any").Type(),
|
||||
}
|
||||
}
|
||||
|
||||
// See cmd/compile/internal/types.SplitVargenSuffix.
|
||||
func splitVargenSuffix(name string) (base, suffix string) {
|
||||
i := len(name)
|
||||
for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
|
||||
i--
|
||||
}
|
||||
const dot = "·"
|
||||
if i >= len(dot) && name[i-len(dot):i] == dot {
|
||||
i -= len(dot)
|
||||
return name[:i], name[i:]
|
||||
}
|
||||
return name, ""
|
||||
}
|
vendor/golang.org/x/tools/internal/gcimporter/unified_no.go (generated, vendored, deleted, 10 lines)
|
@ -1,10 +0,0 @@
|
|||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !goexperiment.unified
|
||||
// +build !goexperiment.unified
|
||||
|
||||
package gcimporter
|
||||
|
||||
const unifiedIR = false
|
vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go (generated, vendored, deleted, 10 lines)
|
@ -1,10 +0,0 @@
|
|||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build goexperiment.unified
|
||||
// +build goexperiment.unified
|
||||
|
||||
package gcimporter
|
||||
|
||||
const unifiedIR = true
|
vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go (generated, vendored, 59 lines changed)
|
@ -11,10 +11,10 @@ import (
|
|||
"go/token"
|
||||
"go/types"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/internal/aliases"
|
||||
"golang.org/x/tools/internal/pkgbits"
|
||||
"golang.org/x/tools/internal/typesinternal"
|
||||
)
|
||||
|
||||
// A pkgReader holds the shared state for reading a unified IR package
|
||||
|
@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) {
|
|||
|
||||
// See cmd/compile/internal/noder.derivedInfo.
|
||||
type derivedInfo struct {
|
||||
idx pkgbits.Index
|
||||
needed bool
|
||||
idx pkgbits.Index
|
||||
}
|
||||
|
||||
// See cmd/compile/internal/noder.typeInfo.
|
||||
|
@ -72,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []
|
|||
}
|
||||
|
||||
s := string(data)
|
||||
s = s[:strings.LastIndex(s, "\n$$\n")]
|
||||
input := pkgbits.NewPkgDecoder(path, s)
|
||||
pkg = readUnifiedPackage(fset, nil, imports, input)
|
||||
return
|
||||
|
@ -110,13 +108,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
|
|||
|
||||
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
|
||||
pkg := r.pkg()
|
||||
r.Bool() // has init
|
||||
if r.Version().Has(pkgbits.HasInit) {
|
||||
r.Bool()
|
||||
}
|
||||
|
||||
for i, n := 0, r.Len(); i < n; i++ {
|
||||
// As if r.obj(), but avoiding the Scope.Lookup call,
|
||||
// to avoid eager loading of imports.
|
||||
r.Sync(pkgbits.SyncObject)
|
||||
assert(!r.Bool())
|
||||
if r.Version().Has(pkgbits.DerivedFuncInstance) {
|
||||
assert(!r.Bool())
|
||||
}
|
||||
r.p.objIdx(r.Reloc(pkgbits.RelocObj))
|
||||
assert(r.Len() == 0)
|
||||
}
|
||||
|
@ -165,7 +167,7 @@ type readerDict struct {
|
|||
// tparams is a slice of the constructed TypeParams for the element.
|
||||
tparams []*types.TypeParam
|
||||
|
||||
// devived is a slice of types derived from tparams, which may be
|
||||
// derived is a slice of types derived from tparams, which may be
|
||||
// instantiated while reading the current element.
|
||||
derived []derivedInfo
|
||||
derivedTypes []types.Type // lazily instantiated from derived
|
||||
|
@ -263,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
|
|||
func (r *reader) doPkg() *types.Package {
|
||||
path := r.String()
|
||||
switch path {
|
||||
case "":
|
||||
// cmd/compile emits path="main" for main packages because
|
||||
// that's the linker symbol prefix it used; but we need
|
||||
// the package's path as it would be reported by go list,
|
||||
// hence "main" below.
|
||||
// See test at go/packages.TestMainPackagePathInModeTypes.
|
||||
case "", "main":
|
||||
path = r.p.PkgPath()
|
||||
case "builtin":
|
||||
return nil // universe
|
||||
|
@ -471,7 +478,9 @@ func (r *reader) param() *types.Var {
|
|||
func (r *reader) obj() (types.Object, []types.Type) {
|
||||
r.Sync(pkgbits.SyncObject)
|
||||
|
||||
assert(!r.Bool())
|
||||
if r.Version().Has(pkgbits.DerivedFuncInstance) {
|
||||
assert(!r.Bool())
|
||||
}
|
||||
|
||||
pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
|
||||
obj := pkgScope(pkg).Lookup(name)
|
||||
|
@ -525,8 +534,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
|
|||
|
||||
case pkgbits.ObjAlias:
|
||||
pos := r.pos()
|
||||
var tparams []*types.TypeParam
|
||||
if r.Version().Has(pkgbits.AliasTypeParamNames) {
|
||||
tparams = r.typeParamNames()
|
||||
}
|
||||
typ := r.typ()
|
||||
declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ))
|
||||
declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams))
|
||||
|
||||
case pkgbits.ObjConst:
|
||||
pos := r.pos()
|
||||
|
@ -553,13 +566,14 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
|
|||
// If the underlying type is an interface, we need to
|
||||
// duplicate its methods so we can replace the receiver
|
||||
// parameter's type (#49906).
|
||||
if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
|
||||
if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
|
||||
methods := make([]*types.Func, iface.NumExplicitMethods())
|
||||
for i := range methods {
|
||||
fn := iface.ExplicitMethod(i)
|
||||
sig := fn.Type().(*types.Signature)
|
||||
|
||||
recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
|
||||
typesinternal.SetVarKind(recv, typesinternal.RecvVar)
|
||||
methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
|
||||
}
|
||||
|
||||
|
@ -607,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
|
|||
case pkgbits.ObjVar:
|
||||
pos := r.pos()
|
||||
typ := r.typ()
|
||||
declare(types.NewVar(pos, objPkg, objName, typ))
|
||||
v := types.NewVar(pos, objPkg, objName, typ)
|
||||
typesinternal.SetVarKind(v, typesinternal.PackageVar)
|
||||
declare(v)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -632,7 +648,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
|
|||
dict.derived = make([]derivedInfo, r.Len())
|
||||
dict.derivedTypes = make([]types.Type, len(dict.derived))
|
||||
for i := range dict.derived {
|
||||
dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
|
||||
dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
|
||||
if r.Version().Has(pkgbits.DerivedInfoNeeded) {
|
||||
assert(!r.Bool())
|
||||
}
|
||||
}
|
||||
|
||||
pr.retireReader(r)
|
||||
|
@ -726,3 +745,17 @@ func pkgScope(pkg *types.Package) *types.Scope {
|
|||
}
|
||||
return types.Universe
|
||||
}
|
||||
|
||||
// See cmd/compile/internal/types.SplitVargenSuffix.
|
||||
func splitVargenSuffix(name string) (base, suffix string) {
|
||||
i := len(name)
|
||||
for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
|
||||
i--
|
||||
}
|
||||
const dot = "·"
|
||||
if i >= len(dot) && name[i-len(dot):i] == dot {
|
||||
i -= len(dot)
|
||||
return name[:i], name[i:]
|
||||
}
|
||||
return name, ""
|
||||
}
|
||||
|
|
vendor/golang.org/x/tools/internal/gocommand/invoke.go (generated, vendored, 79 lines changed)
|
@ -16,7 +16,6 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
|
@ -29,7 +28,7 @@ import (
|
|||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// An Runner will run go command invocations and serialize
|
||||
// A Runner will run go command invocations and serialize
|
||||
// them if it sees a concurrency error.
|
||||
type Runner struct {
|
||||
// once guards the runner initialization.
|
||||
|
@ -180,7 +179,7 @@ type Invocation struct {
|
|||
CleanEnv bool
|
||||
Env []string
|
||||
WorkingDir string
|
||||
Logf func(format string, args ...interface{})
|
||||
Logf func(format string, args ...any)
|
||||
}
|
||||
|
||||
// Postcondition: both error results have same nilness.
|
||||
|
@ -200,12 +199,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io
|
|||
return
|
||||
}
|
||||
|
||||
func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
|
||||
log := i.Logf
|
||||
if log == nil {
|
||||
log = func(string, ...interface{}) {}
|
||||
// logf logs if i.Logf is non-nil.
|
||||
func (i *Invocation) logf(format string, args ...any) {
|
||||
if i.Logf != nil {
|
||||
i.Logf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
|
||||
goArgs := []string{i.Verb}
|
||||
|
||||
appendModFile := func() {
|
||||
|
@ -248,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
|
|||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
|
||||
// cmd.WaitDelay was added only in go1.20 (see #50436).
|
||||
if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
|
||||
// https://go.dev/issue/59541: don't wait forever copying stderr
|
||||
// after the command has exited.
|
||||
// After CL 484741 we copy stdout manually, so we we'll stop reading that as
|
||||
// soon as ctx is done. However, we also don't want to wait around forever
|
||||
// for stderr. Give a much-longer-than-reasonable delay and then assume that
|
||||
// something has wedged in the kernel or runtime.
|
||||
waitDelay.Set(reflect.ValueOf(30 * time.Second))
|
||||
}
|
||||
// https://go.dev/issue/59541: don't wait forever copying stderr
|
||||
// after the command has exited.
|
||||
// After CL 484741 we copy stdout manually, so we we'll stop reading that as
|
||||
// soon as ctx is done. However, we also don't want to wait around forever
|
||||
// for stderr. Give a much-longer-than-reasonable delay and then assume that
|
||||
// something has wedged in the kernel or runtime.
|
||||
cmd.WaitDelay = 30 * time.Second
|
||||
|
||||
// The cwd gets resolved to the real path. On Darwin, where
|
||||
// /tmp is a symlink, this breaks anything that expects the
|
||||
|
@ -277,7 +275,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
|
|||
cmd.Dir = i.WorkingDir
|
||||
}
|
||||
|
||||
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
|
||||
debugStr := cmdDebugStr(cmd)
|
||||
i.logf("starting %v", debugStr)
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
i.logf("%s for %v", time.Since(start), debugStr)
|
||||
}()
|
||||
|
||||
return runCmdContext(ctx, cmd)
|
||||
}
|
||||
|
@ -385,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
|
|||
case err := <-resChan:
|
||||
return err
|
||||
case <-timer.C:
|
||||
HandleHangingGoCommand(startTime, cmd)
|
||||
// HandleHangingGoCommand terminates this process.
|
||||
// Pass off resChan in case we can collect the command error.
|
||||
handleHangingGoCommand(startTime, cmd, resChan)
|
||||
case <-ctx.Done():
|
||||
}
|
||||
} else {
|
||||
|
@ -410,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
|
|||
}
|
||||
|
||||
// Didn't shut down in response to interrupt. Kill it hard.
|
||||
// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
|
||||
// on certain platforms, such as unix.
|
||||
if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
|
||||
log.Printf("error killing the Go command: %v", err)
|
||||
}
|
||||
|
@ -419,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
|
|||
return <-resChan
|
||||
}
|
||||
|
||||
func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) {
|
||||
// handleHangingGoCommand outputs debugging information to help diagnose the
|
||||
// cause of a hanging Go command, and then exits with log.Fatalf.
|
||||
func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) {
|
||||
switch runtime.GOOS {
|
||||
case "linux", "darwin", "freebsd", "netbsd":
|
||||
case "linux", "darwin", "freebsd", "netbsd", "openbsd":
|
||||
fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
|
||||
|
||||
The gopls test runner has detected a hanging go command. In order to debug
|
||||
this, the output of ps and lsof/fstat is printed below.
|
||||
The gopls test runner has detected a hanging go command. In order to debug
|
||||
this, the output of ps and lsof/fstat is printed below.
|
||||
|
||||
See golang/go#54461 for more details.`)
|
||||
See golang/go#54461 for more details.`)
|
||||
|
||||
fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
|
||||
fmt.Fprintln(os.Stderr, "-------------------------")
|
||||
|
@ -435,7 +440,7 @@ See golang/go#54461 for more details.`)
|
|||
psCmd.Stdout = os.Stderr
|
||||
psCmd.Stderr = os.Stderr
|
||||
if err := psCmd.Run(); err != nil {
|
||||
panic(fmt.Sprintf("running ps: %v", err))
|
||||
log.Printf("Handling hanging Go command: running ps: %v", err)
|
||||
}
|
||||
|
||||
listFiles := "lsof"
|
||||
|
@ -449,10 +454,24 @@ See golang/go#54461 for more details.`)
|
|||
listFilesCmd.Stdout = os.Stderr
|
||||
listFilesCmd.Stderr = os.Stderr
|
||||
if err := listFilesCmd.Run(); err != nil {
|
||||
panic(fmt.Sprintf("running %s: %v", listFiles, err))
|
||||
log.Printf("Handling hanging Go command: running %s: %v", listFiles, err)
|
||||
}
|
||||
// Try to extract information about the slow go process by issuing a SIGQUIT.
|
||||
if err := cmd.Process.Signal(sigStuckProcess); err == nil {
|
||||
select {
|
||||
case err := <-resChan:
|
||||
stderr := "not a bytes.Buffer"
|
||||
if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil {
|
||||
stderr = buf.String()
|
||||
}
|
||||
log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr)
|
||||
case <-time.After(5 * time.Second):
|
||||
}
|
||||
} else {
|
||||
log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err)
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid))
|
||||
log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)
|
||||
}
|
||||
|
||||
func cmdDebugStr(cmd *exec.Cmd) string {
|
||||
|
@ -514,7 +533,7 @@ func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(),
|
|||
for k, v := range overlay {
|
||||
// Use a unique basename for each file (001-foo.go),
|
||||
// to avoid creating nested directories.
|
||||
base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k))
|
||||
base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k))
|
||||
filename := filepath.Join(dir, base)
|
||||
err := os.WriteFile(filename, v, 0666)
|
||||
if err != nil {
|
||||
|
|
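Note: invoke.go above drops the reflection workaround for cmd.WaitDelay now that exec.Cmd.WaitDelay (added in go1.20) can be assumed. A small standalone sketch of the same setting; the command and timeout values are arbitrary.

package main

import (
	"context"
	"log"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "go", "version")
	// Don't wait forever for stderr to drain after the child exits or the
	// context is cancelled; this field is available directly since go1.20.
	cmd.WaitDelay = 30 * time.Second

	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", out)
}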
vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go (generated, vendored, new file, 13 lines)
|
@ -0,0 +1,13 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !unix
|
||||
|
||||
package gocommand
|
||||
|
||||
import "os"
|
||||
|
||||
// sigStuckProcess is the signal to send to kill a hanging subprocess.
|
||||
// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
|
||||
var sigStuckProcess = os.Kill
|
vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go (generated, vendored, new file, 13 lines)
|
@ -0,0 +1,13 @@
|
|||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build unix
|
||||
|
||||
package gocommand
|
||||
|
||||
import "syscall"
|
||||
|
||||
// Sigstuckprocess is the signal to send to kill a hanging subprocess.
|
||||
// Send SIGQUIT to get a stack trace.
|
||||
var sigStuckProcess = syscall.SIGQUIT
|
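Note: the two new files above select the "nudge a stuck subprocess" signal per platform with build tags: SIGQUIT on Unix, where it makes a Go child dump its goroutine stacks, and os.Kill elsewhere. Below is a Unix-only sketch of the calling pattern used by handleHangingGoCommand; sleep stands in for a wedged go invocation and the timeouts are made up.

//go:build unix

package main

import (
	"log"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "30") // stand-in for a wedged `go` invocation
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case err := <-done:
		log.Printf("exited early: %v", err)
	case <-time.After(2 * time.Second):
		// A Go subprocess responds to SIGQUIT with a stack dump; sleep just dies.
		_ = cmd.Process.Signal(syscall.SIGQUIT)
		log.Printf("after SIGQUIT: %v", <-done)
	}
}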
vendor/golang.org/x/tools/internal/imports/fix.go (generated, vendored, 536 lines changed)
|
@ -90,18 +90,6 @@ type ImportFix struct {
|
|||
Relevance float64 // see pkg
|
||||
}
|
||||
|
||||
// An ImportInfo represents a single import statement.
|
||||
type ImportInfo struct {
|
||||
ImportPath string // import path, e.g. "crypto/rand".
|
||||
Name string // import name, e.g. "crand", or "" if none.
|
||||
}
|
||||
|
||||
// A packageInfo represents what's known about a package.
|
||||
type packageInfo struct {
|
||||
name string // real package name, if known.
|
||||
exports map[string]bool // known exports.
|
||||
}
|
||||
|
||||
// parseOtherFiles parses all the Go files in srcDir except filename, including
|
||||
// test files if filename looks like a test.
|
||||
//
|
||||
|
@ -130,7 +118,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename
|
|||
continue
|
||||
}
|
||||
|
||||
f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0)
|
||||
f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
@ -161,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) {
|
|||
|
||||
// collectReferences builds a map of selector expressions, from
|
||||
// left hand side (X) to a set of right hand sides (Sel).
|
||||
func collectReferences(f *ast.File) references {
|
||||
refs := references{}
|
||||
func collectReferences(f *ast.File) References {
|
||||
refs := References{}
|
||||
|
||||
var visitor visitFn
|
||||
visitor = func(node ast.Node) ast.Visitor {
|
||||
|
@ -232,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
|
|||
|
||||
allFound := true
|
||||
for right := range syms {
|
||||
if !pkgInfo.exports[right] {
|
||||
if !pkgInfo.Exports[right] {
|
||||
allFound = false
|
||||
break
|
||||
}
|
||||
|
@ -245,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
|
|||
return nil
|
||||
}
|
||||
|
||||
// references is set of references found in a Go file. The first map key is the
|
||||
// left hand side of a selector expression, the second key is the right hand
|
||||
// side, and the value should always be true.
|
||||
type references map[string]map[string]bool
|
||||
|
||||
// A pass contains all the inputs and state necessary to fix a file's imports.
|
||||
// It can be modified in some ways during use; see comments below.
|
||||
type pass struct {
|
||||
|
@ -257,27 +240,29 @@ type pass struct {
|
|||
fset *token.FileSet // fset used to parse f and its siblings.
|
||||
f *ast.File // the file being fixed.
|
||||
srcDir string // the directory containing f.
|
||||
env *ProcessEnv // the environment to use for go commands, etc.
|
||||
loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
|
||||
otherFiles []*ast.File // sibling files.
|
||||
logf func(string, ...any)
|
||||
source Source // the environment to use for go commands, etc.
|
||||
loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
|
||||
otherFiles []*ast.File // sibling files.
|
||||
goroot string
|
||||
|
||||
// Intermediate state, generated by load.
|
||||
existingImports map[string][]*ImportInfo
|
||||
allRefs references
|
||||
missingRefs references
|
||||
allRefs References
|
||||
missingRefs References
|
||||
|
||||
// Inputs to fix. These can be augmented between successive fix calls.
|
||||
lastTry bool // indicates that this is the last call and fix should clean up as best it can.
|
||||
candidates []*ImportInfo // candidate imports in priority order.
|
||||
knownPackages map[string]*packageInfo // information about all known packages.
|
||||
knownPackages map[string]*PackageInfo // information about all known packages.
|
||||
}
|
||||
|
||||
// loadPackageNames saves the package names for everything referenced by imports.
|
||||
func (p *pass) loadPackageNames(imports []*ImportInfo) error {
|
||||
if p.env.Logf != nil {
|
||||
p.env.Logf("loading package names for %v packages", len(imports))
|
||||
func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error {
|
||||
if p.logf != nil {
|
||||
p.logf("loading package names for %v packages", len(imports))
|
||||
defer func() {
|
||||
p.env.Logf("done loading package names for %v packages", len(imports))
|
||||
p.logf("done loading package names for %v packages", len(imports))
|
||||
}()
|
||||
}
|
||||
var unknown []string
|
||||
|
@ -288,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
|
|||
unknown = append(unknown, imp.ImportPath)
|
||||
}
|
||||
|
||||
resolver, err := p.env.GetResolver()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
names, err := resolver.loadPackageNames(unknown, p.srcDir)
|
||||
names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(rfindley): revisit this. Why do we need to store known packages with
|
||||
// no exports? The inconsistent data is confusing.
|
||||
for path, name := range names {
|
||||
p.knownPackages[path] = &packageInfo{
|
||||
name: name,
|
||||
exports: map[string]bool{},
|
||||
p.knownPackages[path] = &PackageInfo{
|
||||
Name: name,
|
||||
Exports: map[string]bool{},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -329,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
|
|||
return imp.Name
|
||||
}
|
||||
known := p.knownPackages[imp.ImportPath]
|
||||
if known != nil && known.name != "" {
|
||||
return withoutVersion(known.name)
|
||||
if known != nil && known.Name != "" {
|
||||
return withoutVersion(known.Name)
|
||||
}
|
||||
return ImportPathToAssumedName(imp.ImportPath)
|
||||
}
|
||||
|
@ -338,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
|
|||
// load reads in everything necessary to run a pass, and reports whether the
|
||||
// file already has all the imports it needs. It fills in p.missingRefs with the
|
||||
// file's missing symbols, if any, or removes unused imports if not.
|
||||
func (p *pass) load() ([]*ImportFix, bool) {
|
||||
p.knownPackages = map[string]*packageInfo{}
|
||||
p.missingRefs = references{}
|
||||
func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) {
|
||||
p.knownPackages = map[string]*PackageInfo{}
|
||||
p.missingRefs = References{}
|
||||
p.existingImports = map[string][]*ImportInfo{}
|
||||
|
||||
// Load basic information about the file in question.
|
||||
|
@ -363,10 +345,10 @@ func (p *pass) load() ([]*ImportFix, bool) {
|
|||
// f's imports by the identifier they introduce.
|
||||
imports := collectImports(p.f)
|
||||
if p.loadRealPackageNames {
|
||||
err := p.loadPackageNames(append(imports, p.candidates...))
|
||||
err := p.loadPackageNames(ctx, append(imports, p.candidates...))
|
||||
if err != nil {
|
||||
if p.env.Logf != nil {
|
||||
p.env.Logf("loading package names: %v", err)
|
||||
if p.logf != nil {
|
||||
p.logf("loading package names: %v", err)
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
@ -536,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() {
|
|||
// We have the stdlib in memory; no need to guess.
|
||||
rights = symbolNameSet(m)
|
||||
}
|
||||
p.addCandidate(imp, &packageInfo{
|
||||
// TODO(rfindley): we should set package name here, for consistency.
|
||||
p.addCandidate(imp, &PackageInfo{
|
||||
// no name; we already know it.
|
||||
exports: rights,
|
||||
Exports: rights,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -547,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() {
|
|||
|
||||
// addCandidate adds a candidate import to p, and merges in the information
|
||||
// in pkg.
|
||||
func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
|
||||
func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) {
|
||||
p.candidates = append(p.candidates, imp)
|
||||
if existing, ok := p.knownPackages[imp.ImportPath]; ok {
|
||||
if existing.name == "" {
|
||||
existing.name = pkg.name
|
||||
if existing.Name == "" {
|
||||
existing.Name = pkg.Name
|
||||
}
|
||||
for export := range pkg.exports {
|
||||
existing.exports[export] = true
|
||||
for export := range pkg.Exports {
|
||||
existing.Exports[export] = true
|
||||
}
|
||||
} else {
|
||||
p.knownPackages[imp.ImportPath] = pkg
|
||||
|
@ -563,7 +546,14 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
|
|||
|
||||
// fixImports adds and removes imports from f so that all its references are
|
||||
// satisfied and there are no unused imports.
|
||||
func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
|
||||
//
|
||||
// This is declared as a variable rather than a function so goimports can
|
||||
// easily be extended by adding a file with an init function.
|
||||
//
|
||||
// DO NOT REMOVE: used internally at Google.
|
||||
var fixImports = fixImportsDefault
|
||||
|
||||
func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
|
||||
fixes, err := getFixes(context.Background(), fset, f, filename, env)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -575,21 +565,42 @@ func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessE
|
|||
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
|
||||
// It does not modify the ast.
|
||||
func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
|
||||
source, err := NewProcessEnvSource(env, filename, f.Name.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
goEnv, err := env.goEnv()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source)
|
||||
}
|
||||
|
||||
func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) {
|
||||
// This logic is defensively duplicated from getFixes.
|
||||
abs, err := filepath.Abs(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcDir := filepath.Dir(abs)
|
||||
if env.Logf != nil {
|
||||
env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
|
||||
|
||||
if logf != nil {
|
||||
logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir)
|
||||
}
|
||||
|
||||
// First pass: looking only at f, and using the naive algorithm to
|
||||
// derive package names from import paths, see if the file is already
|
||||
// complete. We can't add any imports yet, because we don't know
|
||||
// if missing references are actually package vars.
|
||||
p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
|
||||
if fixes, done := p.load(); done {
|
||||
p := &pass{
|
||||
fset: fset,
|
||||
f: f,
|
||||
srcDir: srcDir,
|
||||
logf: logf,
|
||||
goroot: goroot,
|
||||
source: source,
|
||||
}
|
||||
if fixes, done := p.load(ctx); done {
|
||||
return fixes, nil
|
||||
}
|
||||
|
||||
|
@ -601,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
|
|||
// Second pass: add information from other files in the same package,
|
||||
// like their package vars and imports.
|
||||
p.otherFiles = otherFiles
|
||||
if fixes, done := p.load(); done {
|
||||
if fixes, done := p.load(ctx); done {
|
||||
return fixes, nil
|
||||
}
|
||||
|
||||
|
@ -614,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
|
|||
|
||||
// Third pass: get real package names where we had previously used
|
||||
// the naive algorithm.
|
||||
p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
|
||||
p = &pass{
|
||||
fset: fset,
|
||||
f: f,
|
||||
srcDir: srcDir,
|
||||
logf: logf,
|
||||
goroot: goroot,
|
||||
source: p.source, // safe to reuse, as it's just a wrapper around env
|
||||
}
|
||||
p.loadRealPackageNames = true
|
||||
p.otherFiles = otherFiles
|
||||
if fixes, done := p.load(); done {
|
||||
if fixes, done := p.load(ctx); done {
|
||||
return fixes, nil
|
||||
}
|
||||
|
||||
|
@ -762,7 +780,7 @@ func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix
|
|||
return true
|
||||
},
|
||||
dirFound: func(pkg *pkg) bool {
|
||||
if !canUse(filename, pkg.dir) {
|
||||
if !CanUse(filename, pkg.dir) {
|
||||
return false
|
||||
}
|
||||
// Try the assumed package name first, then a simpler path match
|
||||
|
@ -797,7 +815,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix,
|
|||
return true
|
||||
},
|
||||
dirFound: func(pkg *pkg) bool {
|
||||
if !canUse(filename, pkg.dir) {
|
||||
if !CanUse(filename, pkg.dir) {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(pkg.importPathShort, searchPrefix)
|
||||
|
@ -831,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
|
|||
return true
|
||||
},
|
||||
dirFound: func(pkg *pkg) bool {
|
||||
return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
|
||||
return pkgIsCandidate(filename, References{searchPkg: nil}, pkg)
|
||||
},
|
||||
packageNameLoaded: func(pkg *pkg) bool {
|
||||
return pkg.packageName == searchPkg
|
||||
|
@ -909,7 +927,7 @@ type ProcessEnv struct {
|
|||
WorkingDir string
|
||||
|
||||
// If Logf is non-nil, debug logging is enabled through this function.
|
||||
Logf func(format string, args ...interface{})
|
||||
Logf func(format string, args ...any)
|
||||
|
||||
// If set, ModCache holds a shared cache of directory info to use across
|
||||
// multiple ProcessEnvs.
|
||||
|
@ -1014,16 +1032,26 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
|
|||
// already know the view type.
|
||||
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
|
||||
e.resolver = newGopathResolver(e)
|
||||
e.logf("created gopath resolver")
|
||||
} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
|
||||
e.resolverErr = err
|
||||
e.logf("failed to create module resolver: %v", err)
|
||||
} else {
|
||||
e.resolver = Resolver(r)
|
||||
e.logf("created module resolver")
|
||||
}
|
||||
}
|
||||
|
||||
return e.resolver, e.resolverErr
|
||||
}
|
||||
|
||||
// logf logs if e.Logf is non-nil.
|
||||
func (e *ProcessEnv) logf(format string, args ...any) {
|
||||
if e.Logf != nil {
|
||||
e.Logf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// buildContext returns the build.Context to use for matching files.
|
||||
//
|
||||
// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
|
||||
|
@ -1072,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
|
|||
return e.GocmdRunner.Run(ctx, inv)
|
||||
}
|
||||
|
||||
func addStdlibCandidates(pass *pass, refs references) error {
|
||||
goenv, err := pass.env.goEnv()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func addStdlibCandidates(pass *pass, refs References) error {
|
||||
localbase := func(nm string) string {
|
||||
ans := path.Base(nm)
|
||||
if ans[0] == 'v' {
|
||||
|
@ -1091,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error {
|
|||
}
|
||||
add := func(pkg string) {
|
||||
// Prevent self-imports.
|
||||
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
|
||||
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir {
|
||||
return
|
||||
}
|
||||
exports := symbolNameSet(stdlib.PackageSymbols[pkg])
|
||||
pass.addCandidate(
|
||||
&ImportInfo{ImportPath: pkg},
|
||||
&packageInfo{name: localbase(pkg), exports: exports})
|
||||
&PackageInfo{Name: localbase(pkg), Exports: exports})
|
||||
}
|
||||
for left := range refs {
|
||||
if left == "rand" {
|
||||
|
@ -1108,6 +1132,9 @@ func addStdlibCandidates(pass *pass, refs references) error {
|
|||
// but we have no way of figuring out what the user is using
|
||||
// TODO: investigate using the toolchain version to disambiguate in the stdlib
|
||||
add("math/rand/v2")
|
||||
// math/rand has an overlapping API
|
||||
// TestIssue66407 fails without this
|
||||
add("math/rand")
|
||||
continue
|
||||
}
|
||||
for importPath := range stdlib.PackageSymbols {
|
||||
|
@ -1127,8 +1154,8 @@ type Resolver interface {
|
|||
// scan works with callback to search for packages. See scanCallback for details.
|
||||
scan(ctx context.Context, callback *scanCallback) error
|
||||
|
||||
// loadExports returns the set of exported symbols in the package at dir.
|
||||
// loadExports may be called concurrently.
|
||||
// loadExports returns the package name and set of exported symbols in the
|
||||
// package at dir. loadExports may be called concurrently.
|
||||
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
|
||||
|
||||
// scoreImportPath returns the relevance for an import path.
|
||||
|
@ -1161,101 +1188,22 @@ type scanCallback struct {
|
|||
exportsLoaded func(pkg *pkg, exports []stdlib.Symbol)
|
||||
}
|
||||
|
||||
func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
|
||||
func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error {
|
||||
ctx, done := event.Start(ctx, "imports.addExternalCandidates")
|
||||
defer done()
|
||||
|
||||
var mu sync.Mutex
|
||||
found := make(map[string][]pkgDistance)
|
||||
callback := &scanCallback{
|
||||
rootFound: func(gopathwalk.Root) bool {
|
||||
return true // We want everything.
|
||||
},
|
||||
dirFound: func(pkg *pkg) bool {
|
||||
return pkgIsCandidate(filename, refs, pkg)
|
||||
},
|
||||
packageNameLoaded: func(pkg *pkg) bool {
|
||||
if _, want := refs[pkg.packageName]; !want {
|
||||
return false
|
||||
}
|
||||
if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
|
||||
// The candidate is in the same directory and has the
|
||||
// same package name. Don't try to import ourselves.
|
||||
return false
|
||||
}
|
||||
if !canUse(filename, pkg.dir) {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
|
||||
return false // We'll do our own loading after we sort.
|
||||
},
|
||||
}
|
||||
resolver, err := pass.env.GetResolver()
|
||||
results, err := pass.source.ResolveReferences(ctx, filename, refs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = resolver.scan(ctx, callback); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Search for imports matching potential package references.
|
||||
type result struct {
|
||||
imp *ImportInfo
|
||||
pkg *packageInfo
|
||||
}
|
||||
results := make(chan result, len(refs))
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
var wg sync.WaitGroup
|
||||
defer func() {
|
||||
cancel()
|
||||
wg.Wait()
|
||||
}()
|
||||
var (
|
||||
firstErr error
|
||||
firstErrOnce sync.Once
|
||||
)
|
||||
for pkgName, symbols := range refs {
|
||||
wg.Add(1)
|
||||
go func(pkgName string, symbols map[string]bool) {
|
||||
defer wg.Done()
|
||||
|
||||
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)
|
||||
|
||||
if err != nil {
|
||||
firstErrOnce.Do(func() {
|
||||
firstErr = err
|
||||
cancel()
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if found == nil {
|
||||
return // No matching package.
|
||||
}
|
||||
|
||||
imp := &ImportInfo{
|
||||
ImportPath: found.importPathShort,
|
||||
}
|
||||
|
||||
pkg := &packageInfo{
|
||||
name: pkgName,
|
||||
exports: symbols,
|
||||
}
|
||||
results <- result{imp, pkg}
|
||||
}(pkgName, symbols)
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(results)
|
||||
}()
|
||||
|
||||
for result := range results {
|
||||
for _, result := range results {
|
||||
if result == nil {
|
||||
continue
|
||||
}
|
||||
// Don't offer completions that would shadow predeclared
|
||||
// names, such as github.com/coreos/etcd/error.
|
||||
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
|
||||
if types.Universe.Lookup(result.Package.Name) != nil { // predeclared
|
||||
// Ideally we would skip this candidate only
|
||||
// if the predeclared name is actually
|
||||
// referenced by the file, but that's a lot
|
||||
|
@ -1264,9 +1212,9 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
|
|||
// user before long.
|
||||
continue
|
||||
}
|
||||
pass.addCandidate(result.imp, result.pkg)
|
||||
pass.addCandidate(result.Import, result.Package)
|
||||
}
|
||||
return firstErr
|
||||
return nil
|
||||
}
|
||||
|
||||
// notIdentifier reports whether ch is an invalid identifier character.
|
||||
|
@ -1608,11 +1556,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
|
|||
}
|
||||
|
||||
fullFile := filepath.Join(dir, fi.Name())
|
||||
// Legacy ast.Object resolution is needed here.
|
||||
f, err := parser.ParseFile(fset, fullFile, nil, 0)
|
||||
if err != nil {
|
||||
if env.Logf != nil {
|
||||
env.Logf("error parsing %v: %v", fullFile, err)
|
||||
}
|
||||
env.logf("error parsing %v: %v", fullFile, err)
|
||||
continue
|
||||
}
|
||||
if f.Name.Name == "documentation" {
|
||||
|
@ -1648,9 +1595,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
|
|||
}
|
||||
sortSymbols(exports)
|
||||
|
||||
if env.Logf != nil {
|
||||
env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
|
||||
}
|
||||
env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
|
||||
return pkgName, exports, nil
|
||||
}
|
||||
|
||||
|
@ -1660,25 +1605,39 @@ func sortSymbols(syms []stdlib.Symbol) {
|
|||
})
|
||||
}
|
||||
|
||||
// findImport searches for a package with the given symbols.
|
||||
// If no package is found, findImport returns ("", false, nil)
|
||||
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
|
||||
// A symbolSearcher searches for a package with a set of symbols, among a set
|
||||
// of candidates. See [symbolSearcher.search].
|
||||
//
|
||||
// The search occurs within the scope of a single file, with context captured
|
||||
// in srcDir and xtest.
|
||||
type symbolSearcher struct {
|
||||
logf func(string, ...any)
|
||||
srcDir string // directory containing the file
|
||||
xtest bool // if set, the file containing is an x_test file
|
||||
loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
|
||||
}
|
||||
|
||||
// search searches the provided candidates for a package containing all
|
||||
// exported symbols.
|
||||
//
|
||||
// If successful, returns the resulting package.
|
||||
func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
|
||||
// Sort the candidates by their import package length,
|
||||
// assuming that shorter package names are better than long
|
||||
// ones. Note that this sorts by the de-vendored name, so
|
||||
// there's no "penalty" for vendoring.
|
||||
sort.Sort(byDistanceOrImportPathShortLength(candidates))
|
||||
if pass.env.Logf != nil {
|
||||
if s.logf != nil {
|
||||
for i, c := range candidates {
|
||||
pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
|
||||
s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
|
||||
}
|
||||
}
|
||||
resolver, err := pass.env.GetResolver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Collect exports for packages with matching names.
|
||||
// Arrange rescv so that we can we can await results in order of relevance
|
||||
// and exit as soon as we find the first match.
|
||||
//
|
||||
// Search with bounded concurrency, returning as soon as the first result
|
||||
// among rescv is non-nil.
|
||||
rescv := make([]chan *pkg, len(candidates))
|
||||
for i := range candidates {
|
||||
rescv[i] = make(chan *pkg, 1)
|
||||
|
@ -1686,6 +1645,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
|
|||
const maxConcurrentPackageImport = 4
|
||||
loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)
|
||||
|
||||
// Ensure that all work is completed at exit.
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
var wg sync.WaitGroup
|
||||
defer func() {
|
||||
|
@ -1693,6 +1653,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
|
|||
wg.Wait()
|
||||
}()
|
||||
|
||||
// Start the search.
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
@ -1703,55 +1664,67 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
|
|||
return
|
||||
}
|
||||
|
||||
i := i
|
||||
c := c
|
||||
wg.Add(1)
|
||||
go func(c pkgDistance, resc chan<- *pkg) {
|
||||
go func() {
|
||||
defer func() {
|
||||
<-loadExportsSem
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
if pass.env.Logf != nil {
|
||||
pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
|
||||
if s.logf != nil {
|
||||
s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
|
||||
}
|
||||
// If we're an x_test, load the package under test's test variant.
|
||||
includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
|
||||
_, exports, err := resolver.loadExports(ctx, c.pkg, includeTest)
|
||||
pkg, err := s.searchOne(ctx, c, symbols)
|
||||
if err != nil {
|
||||
if pass.env.Logf != nil {
|
||||
pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
|
||||
if s.logf != nil && ctx.Err() == nil {
|
||||
s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
|
||||
}
|
||||
resc <- nil
|
||||
return
|
||||
pkg = nil
|
||||
}
|
||||
|
||||
exportsMap := make(map[string]bool, len(exports))
|
||||
for _, sym := range exports {
|
||||
exportsMap[sym.Name] = true
|
||||
}
|
||||
|
||||
// If it doesn't have the right
|
||||
// symbols, send nil to mean no match.
|
||||
for symbol := range symbols {
|
||||
if !exportsMap[symbol] {
|
||||
resc <- nil
|
||||
return
|
||||
}
|
||||
}
|
||||
resc <- c.pkg
|
||||
}(c, rescv[i])
|
||||
rescv[i] <- pkg // may be nil
|
||||
}()
|
||||
}
|
||||
}()
|
||||
|
||||
// Await the first (best) result.
|
||||
for _, resc := range rescv {
|
||||
pkg := <-resc
|
||||
if pkg == nil {
|
||||
continue
|
||||
select {
|
||||
case r := <-resc:
|
||||
if r != nil {
|
||||
return r, nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
return pkg, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
// If we're considering the package under test from an x_test, load the
|
||||
// test variant.
|
||||
includeTest := s.xtest && c.pkg.dir == s.srcDir
|
||||
_, exports, err := s.loadExports(ctx, c.pkg, includeTest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
exportsMap := make(map[string]bool, len(exports))
|
||||
for _, sym := range exports {
|
||||
exportsMap[sym.Name] = true
|
||||
}
|
||||
for symbol := range symbols {
|
||||
if !exportsMap[symbol] {
|
||||
return nil, nil // no match
|
||||
}
|
||||
}
|
||||
return c.pkg, nil
|
||||
}
|
||||
|
||||
// pkgIsCandidate reports whether pkg is a candidate for satisfying the
|
||||
// finding which package pkgIdent in the file named by filename is trying
|
||||
// to refer to.
|
||||
|
@ -1764,68 +1737,34 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
|
|||
// filename is the file being formatted.
|
||||
// pkgIdent is the package being searched for, like "client" (if
|
||||
// searching for "client.New")
|
||||
func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
|
||||
func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
|
||||
// Check "internal" and "vendor" visibility:
|
||||
if !canUse(filename, pkg.dir) {
|
||||
if !CanUse(filename, pkg.dir) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Speed optimization to minimize disk I/O:
|
||||
// the last two components on disk must contain the
|
||||
// package name somewhere.
|
||||
//
|
||||
// This permits mismatch naming like directory
|
||||
// "go-foo" being package "foo", or "pkg.v3" being "pkg",
|
||||
// or directory "google.golang.org/api/cloudbilling/v1"
|
||||
// being package "cloudbilling", but doesn't
|
||||
// permit a directory "foo" to be package
|
||||
// "bar", which is strongly discouraged
|
||||
// anyway. There's no reason goimports needs
|
||||
// to be slow just to accommodate that.
|
||||
// Use the matchesPath heuristic to filter to package paths that could
|
||||
// reasonably match a dangling reference.
|
||||
//
|
||||
// This permits mismatch naming like directory "go-foo" being package "foo",
|
||||
// or "pkg.v3" being "pkg", or directory
|
||||
// "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but
|
||||
// doesn't permit a directory "foo" to be package "bar", which is strongly
|
||||
// discouraged anyway. There's no reason goimports needs to be slow just to
|
||||
// accommodate that.
|
||||
for pkgIdent := range refs {
|
||||
lastTwo := lastTwoComponents(pkg.importPathShort)
|
||||
if strings.Contains(lastTwo, pkgIdent) {
|
||||
return true
|
||||
}
|
||||
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
|
||||
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
|
||||
if strings.Contains(lastTwo, pkgIdent) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func hasHyphenOrUpperASCII(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
b := s[i]
|
||||
if b == '-' || ('A' <= b && b <= 'Z') {
|
||||
if matchesPath(pkgIdent, pkg.importPathShort) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
|
||||
buf := make([]byte, 0, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case b == '-':
|
||||
continue
|
||||
case 'A' <= b && b <= 'Z':
|
||||
buf = append(buf, b+('a'-'A'))
|
||||
default:
|
||||
buf = append(buf, b)
|
||||
}
|
||||
}
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
// canUse reports whether the package in dir is usable from filename,
|
||||
// CanUse reports whether the package in dir is usable from filename,
|
||||
// respecting the Go "internal" and "vendor" visibility rules.
|
||||
func canUse(filename, dir string) bool {
|
||||
func CanUse(filename, dir string) bool {
|
||||
// Fast path check, before any allocations. If it doesn't contain vendor
|
||||
// or internal, it's not tricky:
|
||||
// Note that this can false-negative on directories like "notinternal",
|
||||
|
@ -1863,19 +1802,84 @@ func canUse(filename, dir string) bool {
|
|||
return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal")
|
||||
}
|
||||
|
||||
// lastTwoComponents returns at most the last two path components
|
||||
// of v, using either / or \ as the path separator.
|
||||
func lastTwoComponents(v string) string {
|
||||
// matchesPath reports whether ident may match a potential package name
|
||||
// referred to by path, using heuristics to filter out unidiomatic package
|
||||
// names.
|
||||
//
|
||||
// Specifically, it checks whether either of the last two '/'- or '\'-delimited
|
||||
// path segments matches the identifier. The segment-matching heuristic must
|
||||
// allow for various conventions around segment naming, including go-foo,
|
||||
// foo-go, and foo.v3. To handle all of these, matching considers both (1) the
|
||||
// entire segment, ignoring '-' and '.', as well as (2) the last subsegment
|
||||
// separated by '-' or '.'. So the segment foo-go matches all of the following
|
||||
// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII
|
||||
// identifiers).
|
||||
//
|
||||
// See the docstring for [pkgIsCandidate] for an explanation of how this
|
||||
// heuristic filters potential candidate packages.
|
||||
func matchesPath(ident, path string) bool {
|
||||
// Ignore case, for ASCII.
|
||||
lowerIfASCII := func(b byte) byte {
|
||||
if 'A' <= b && b <= 'Z' {
|
||||
return b + ('a' - 'A')
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// match reports whether path[start:end] matches ident, ignoring [.-].
|
||||
match := func(start, end int) bool {
|
||||
ii := len(ident) - 1 // current byte in ident
|
||||
pi := end - 1 // current byte in path
|
||||
for ; pi >= start && ii >= 0; pi-- {
|
||||
pb := path[pi]
|
||||
if pb == '-' || pb == '.' {
|
||||
continue
|
||||
}
|
||||
pb = lowerIfASCII(pb)
|
||||
ib := lowerIfASCII(ident[ii])
|
||||
if pb != ib {
|
||||
return false
|
||||
}
|
||||
ii--
|
||||
}
|
||||
return ii < 0 && pi < start // all bytes matched
|
||||
}
|
||||
|
||||
// segmentEnd and subsegmentEnd hold the end points of the current segment
|
||||
// and subsegment intervals.
|
||||
segmentEnd := len(path)
|
||||
subsegmentEnd := len(path)
|
||||
|
||||
// Count slashes; we only care about the last two segments.
|
||||
nslash := 0
|
||||
for i := len(v) - 1; i >= 0; i-- {
|
||||
if v[i] == '/' || v[i] == '\\' {
|
||||
|
||||
for i := len(path) - 1; i >= 0; i-- {
|
||||
switch b := path[i]; b {
|
||||
// TODO(rfindley): we handle backslashes here only because the previous
|
||||
// heuristic handled backslashes. This is perhaps overly defensive, but is
|
||||
// the result of many lessons regarding Chesterton's fence and the
|
||||
// goimports codebase.
|
||||
//
|
||||
// However, this function is only ever called with something called an
|
||||
// 'importPath'. Is it possible that this is a real import path, and
|
||||
// therefore we need only consider forward slashes?
|
||||
case '/', '\\':
|
||||
if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) {
|
||||
return true
|
||||
}
|
||||
nslash++
|
||||
if nslash == 2 {
|
||||
return v[i:]
|
||||
return false // did not match above
|
||||
}
|
||||
segmentEnd, subsegmentEnd = i, i // reset
|
||||
case '-', '.':
|
||||
if match(i+1, subsegmentEnd) {
|
||||
return true
|
||||
}
|
||||
subsegmentEnd = i
|
||||
}
|
||||
}
|
||||
return v
|
||||
return match(0, segmentEnd) || match(0, subsegmentEnd)
|
||||
}
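To make the segment/subsegment heuristic concrete, here is a standalone sketch that lists the candidate strings an identifier is compared against (hypothetical import path; it mirrors the idea rather than reusing the function above):

package main

import (
	"fmt"
	"strings"
)

// lastSegments returns the strings the heuristic would match an identifier
// against: each of the last two '/'-delimited segments, taken both whole
// (with '-' and '.' stripped) and as its final '-'/'.'-delimited part.
func lastSegments(path string) []string {
	segs := strings.Split(path, "/")
	if len(segs) > 2 {
		segs = segs[len(segs)-2:]
	}
	var out []string
	for _, s := range segs {
		out = append(out, strings.NewReplacer("-", "", ".", "").Replace(s))
		if i := strings.LastIndexAny(s, "-."); i >= 0 {
			out = append(out, s[i+1:])
		}
	}
	return out
}

func main() {
	// "go-yaml" yields "goyaml" and "yaml"; "yaml.v3" yields "yamlv3" and "v3",
	// so the identifiers yaml, v3, yamlv3 and goyaml would all be accepted.
	fmt.Println(lastSegments("gopkg.in/go-yaml/yaml.v3"))
}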
|
||||
|
||||
type visitFn func(node ast.Node) ast.Visitor
|
||||
|
|
33
vendor/golang.org/x/tools/internal/imports/imports.go
generated
vendored
|
@ -47,7 +47,14 @@ type Options struct {
|
|||
// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
|
||||
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
|
||||
fileSet := token.NewFileSet()
|
||||
file, adjust, err := parse(fileSet, filename, src, opt)
|
||||
var parserMode parser.Mode
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
if opt.AllErrors {
|
||||
parserMode |= parser.AllErrors
|
||||
}
|
||||
file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
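For reference, the same flag mapping can be exercised directly against go/parser (a standalone sketch with a made-up source snippet):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	// Mirror of the mapping above: Comments and AllErrors each contribute one
	// bit to the parser mode used for the whole-file parse.
	comments, allErrors := true, false
	var mode parser.Mode
	if comments {
		mode |= parser.ParseComments
	}
	if allErrors {
		mode |= parser.AllErrors
	}

	src := "package demo\n\n// Hello is a sample function.\nfunc Hello() {}\n"
	f, err := parser.ParseFile(token.NewFileSet(), "demo.go", src, mode)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name.Name, "comment groups:", len(f.Comments))
}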
|
||||
|
@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
|
|||
//
|
||||
// Note that filename's directory influences which imports can be chosen,
|
||||
// so it is important that filename be accurate.
|
||||
func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
|
||||
func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) {
|
||||
ctx, done := event.Start(ctx, "imports.FixImports")
|
||||
defer done()
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
file, _, err := parse(fileSet, filename, src, opt)
|
||||
// TODO(rfindley): these default values for ParseComments and AllErrors were
|
||||
// extracted from gopls, but are they even needed?
|
||||
file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getFixes(ctx, fileSet, file, filename, opt.Env)
|
||||
return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source)
|
||||
}
|
||||
|
||||
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
|
||||
|
@ -86,7 +95,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
|
|||
// Don't use parse() -- we don't care about fragments or statement lists
|
||||
// here, and we need to work with unparseable files.
|
||||
fileSet := token.NewFileSet()
|
||||
parserMode := parser.Mode(0)
|
||||
parserMode := parser.SkipObjectResolution
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
|
@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
|
|||
// formatted file, and returns the postprocessed result.
|
||||
func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
|
||||
mergeImports(file)
|
||||
sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
|
||||
sortImports(opt.LocalPrefix, fset.File(file.FileStart), file)
|
||||
var spacesBefore []string // import paths we need spaces before
|
||||
for _, impSection := range astutil.Imports(fset, file) {
|
||||
// Within each block of contiguous imports, see if any
|
||||
|
@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori
|
|||
|
||||
// parse parses src, which was read from filename,
|
||||
// as a Go source file or statement list.
|
||||
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
|
||||
parserMode := parser.Mode(0)
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
if opt.AllErrors {
|
||||
parserMode |= parser.AllErrors
|
||||
func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) {
|
||||
if parserMode&parser.SkipObjectResolution != 0 {
|
||||
panic("legacy ast.Object resolution is required")
|
||||
}
|
||||
|
||||
// Try as whole source file.
|
||||
|
@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
|
|||
// If the error is that the source file didn't begin with a
|
||||
// package line and we accept fragmented input, fall through to
|
||||
// try as a source fragment. Stop and return on any other error.
|
||||
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
|
||||
if !fragment || !strings.Contains(err.Error(), "expected 'package'") {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
|
|
17
vendor/golang.org/x/tools/internal/imports/mod.go
generated
vendored
|
@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe
|
|||
// 2. Use this to separate module cache scanning from other scanning.
|
||||
func gomodcacheForEnv(goenv map[string]string) string {
|
||||
if gmc := goenv["GOMODCACHE"]; gmc != "" {
|
||||
return gmc
|
||||
// golang/go#67156: ensure that the module cache is clean, since it is
|
||||
// assumed as a prefix to directories scanned by gopathwalk, which are
|
||||
// themselves clean.
|
||||
return filepath.Clean(gmc)
|
||||
}
|
||||
gopaths := filepath.SplitList(goenv["GOPATH"])
|
||||
if len(gopaths) == 0 {
|
||||
|
@ -265,9 +268,7 @@ func (r *ModuleResolver) initAllMods() error {
|
|||
return err
|
||||
}
|
||||
if mod.Dir == "" {
|
||||
if r.env.Logf != nil {
|
||||
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
|
||||
}
|
||||
r.env.logf("module %v has not been downloaded and will be ignored", mod.Path)
|
||||
// Can't do anything with a module that's not downloaded.
|
||||
continue
|
||||
}
|
||||
|
@ -742,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest
|
|||
|
||||
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
|
||||
subdir := ""
|
||||
if dir != root.Path {
|
||||
subdir = dir[len(root.Path)+len("/"):]
|
||||
if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) {
|
||||
subdir = dir[len(prefix):]
|
||||
}
|
||||
importPath := filepath.ToSlash(subdir)
|
||||
if strings.HasPrefix(importPath, "vendor/") {
|
||||
|
@ -766,9 +767,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
|
|||
}
|
||||
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
|
||||
if err != nil {
|
||||
if r.env.Logf != nil {
|
||||
r.env.Logf("decoding module cache path %q: %v", subdir, err)
|
||||
}
|
||||
r.env.logf("decoding module cache path %q: %v", subdir, err)
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
|
||||
|
|
63
vendor/golang.org/x/tools/internal/imports/source.go
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package imports
|
||||
|
||||
import "context"
|
||||
|
||||
// These types document the APIs below.
|
||||
//
|
||||
// TODO(rfindley): consider making these defined types rather than aliases.
|
||||
type (
|
||||
ImportPath = string
|
||||
PackageName = string
|
||||
Symbol = string
|
||||
|
||||
// References is a set of References found in a Go file. The first map key is the
|
||||
// left hand side of a selector expression, the second key is the right hand
|
||||
// side, and the value should always be true.
|
||||
References = map[PackageName]map[Symbol]bool
|
||||
)
|
||||
|
||||
// A Result satisfies a missing import.
|
||||
//
|
||||
// The Import field describes the missing import spec, and the Package field
|
||||
// summarizes the package exports.
|
||||
type Result struct {
|
||||
Import *ImportInfo
|
||||
Package *PackageInfo
|
||||
}
|
||||
|
||||
// An ImportInfo represents a single import statement.
|
||||
type ImportInfo struct {
|
||||
ImportPath string // import path, e.g. "crypto/rand".
|
||||
Name string // import name, e.g. "crand", or "" if none.
|
||||
}
|
||||
|
||||
// A PackageInfo represents what's known about a package.
|
||||
type PackageInfo struct {
|
||||
Name string // package name in the package declaration, if known
|
||||
Exports map[string]bool // set of names of known package level sortSymbols
|
||||
}
|
||||
|
||||
// A Source provides imports to satisfy unresolved references in the file being
|
||||
// fixed.
|
||||
type Source interface {
|
||||
// LoadPackageNames queries PackageName information for the requested import
|
||||
// paths, when operating from the provided srcDir.
|
||||
//
|
||||
// TODO(rfindley): try to refactor to remove this operation.
|
||||
LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error)
|
||||
|
||||
// ResolveReferences asks the Source for the best package name to satisfy
|
||||
// each of the missing references, in the context of fixing the given
|
||||
// filename.
|
||||
//
|
||||
// Returns a map from package name to a [Result] for that package name that
|
||||
// provides the required symbols. Keys may be omitted in the map if no
|
||||
// candidates satisfy all missing references for that package name. It is up
|
||||
// to each data source to select the best result for each entry in the
|
||||
// missing map.
|
||||
ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error)
|
||||
}
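A minimal in-memory implementation of this contract might look as follows (a standalone sketch; the table and data are hypothetical, and the types are mirrored locally so the example compiles on its own):

package main

import (
	"context"
	"fmt"
)

// Local mirrors of the aliases above, so the sketch is self-contained.
type (
	ImportPath  = string
	PackageName = string
	Symbol      = string
	References  = map[PackageName]map[Symbol]bool
)

type ImportInfo struct{ ImportPath, Name string }

type PackageInfo struct {
	Name    string
	Exports map[string]bool
}

type Result struct {
	Import  *ImportInfo
	Package *PackageInfo
}

// fixedSource resolves every missing package name from a static table.
type fixedSource struct{ table map[PackageName]ImportPath }

func (s fixedSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
	return nil, nil // not needed for this sketch
}

func (s fixedSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
	var results []*Result
	for pkg, syms := range missing {
		path, ok := s.table[pkg]
		if !ok {
			continue // no candidate satisfies this package name: omit it
		}
		results = append(results, &Result{
			Import:  &ImportInfo{ImportPath: path},
			Package: &PackageInfo{Name: pkg, Exports: syms},
		})
	}
	return results, nil
}

func main() {
	src := fixedSource{table: map[PackageName]ImportPath{"rand": "crypto/rand"}}
	missing := References{"rand": {"Read": true}}
	res, _ := src.ResolveReferences(context.Background(), "main.go", missing)
	fmt.Println(res[0].Import.ImportPath) // crypto/rand
}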
|
129
vendor/golang.org/x/tools/internal/imports/source_env.go
generated
vendored
Normal file
|
@ -0,0 +1,129 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package imports
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
// ProcessEnvSource implements the [Source] interface using the legacy
|
||||
// [ProcessEnv] abstraction.
|
||||
type ProcessEnvSource struct {
|
||||
env *ProcessEnv
|
||||
srcDir string
|
||||
filename string
|
||||
pkgName string
|
||||
}
|
||||
|
||||
// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given
|
||||
// env, to be used for fixing imports in the file with name filename in package
|
||||
// named pkgName.
|
||||
func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) {
|
||||
abs, err := filepath.Abs(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcDir := filepath.Dir(abs)
|
||||
return &ProcessEnvSource{
|
||||
env: env,
|
||||
srcDir: srcDir,
|
||||
filename: filename,
|
||||
pkgName: pkgName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) {
|
||||
r, err := s.env.GetResolver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.loadPackageNames(unknown, srcDir)
|
||||
}
|
||||
|
||||
func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) {
|
||||
var mu sync.Mutex
|
||||
found := make(map[string][]pkgDistance)
|
||||
callback := &scanCallback{
|
||||
rootFound: func(gopathwalk.Root) bool {
|
||||
return true // We want everything.
|
||||
},
|
||||
dirFound: func(pkg *pkg) bool {
|
||||
return pkgIsCandidate(filename, refs, pkg)
|
||||
},
|
||||
packageNameLoaded: func(pkg *pkg) bool {
|
||||
if _, want := refs[pkg.packageName]; !want {
|
||||
return false
|
||||
}
|
||||
if pkg.dir == s.srcDir && s.pkgName == pkg.packageName {
|
||||
// The candidate is in the same directory and has the
|
||||
// same package name. Don't try to import ourselves.
|
||||
return false
|
||||
}
|
||||
if !CanUse(filename, pkg.dir) {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)})
|
||||
return false // We'll do our own loading after we sort.
|
||||
},
|
||||
}
|
||||
resolver, err := s.env.GetResolver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := resolver.scan(ctx, callback); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
searcher := symbolSearcher{
|
||||
logf: s.env.logf,
|
||||
srcDir: s.srcDir,
|
||||
xtest: strings.HasSuffix(s.pkgName, "_test"),
|
||||
loadExports: resolver.loadExports,
|
||||
}
|
||||
|
||||
var resultMu sync.Mutex
|
||||
results := make(map[string]*Result, len(refs))
|
||||
for pkgName, symbols := range refs {
|
||||
g.Go(func() error {
|
||||
found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if found == nil {
|
||||
return nil // No matching package.
|
||||
}
|
||||
|
||||
imp := &ImportInfo{
|
||||
ImportPath: found.importPathShort,
|
||||
}
|
||||
pkg := &PackageInfo{
|
||||
Name: pkgName,
|
||||
Exports: symbols,
|
||||
}
|
||||
resultMu.Lock()
|
||||
results[pkgName] = &Result{Import: imp, Package: pkg}
|
||||
resultMu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var ans []*Result
|
||||
for _, x := range results {
|
||||
ans = append(ans, x)
|
||||
}
|
||||
return ans, nil
|
||||
}
|
103
vendor/golang.org/x/tools/internal/imports/source_modindex.go
generated
vendored
Normal file
|
@ -0,0 +1,103 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package imports
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/modindex"
|
||||
)
|
||||
|
||||
// This code is here rather than in the modindex package
|
||||
// to avoid import loops
|
||||
|
||||
// implements Source using modindex, so only for module cache.
|
||||
//
|
||||
// this is perhaps over-engineered. A new Index is read at first use.
|
||||
// And then Update is called after every 15 minutes, and a new Index
|
||||
// is read if the index changed. It is not clear the Mutex is needed.
|
||||
type IndexSource struct {
|
||||
modcachedir string
|
||||
mutex sync.Mutex
|
||||
ix *modindex.Index
|
||||
expires time.Time
|
||||
}
|
||||
|
||||
// create a new Source. Called from NewView in cache/session.go.
|
||||
func NewIndexSource(cachedir string) *IndexSource {
|
||||
return &IndexSource{modcachedir: cachedir}
|
||||
}
|
||||
|
||||
func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
|
||||
/// This is used by goimports to resolve the package names of imports of the
|
||||
// current package, which is irrelevant for the module cache.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
|
||||
if err := s.maybeReadIndex(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var cs []modindex.Candidate
|
||||
for pkg, nms := range missing {
|
||||
for nm := range nms {
|
||||
x := s.ix.Lookup(pkg, nm, false)
|
||||
cs = append(cs, x...)
|
||||
}
|
||||
}
|
||||
found := make(map[string]*Result)
|
||||
for _, c := range cs {
|
||||
var x *Result
|
||||
if x = found[c.ImportPath]; x == nil {
|
||||
x = &Result{
|
||||
Import: &ImportInfo{
|
||||
ImportPath: c.ImportPath,
|
||||
Name: "",
|
||||
},
|
||||
Package: &PackageInfo{
|
||||
Name: c.PkgName,
|
||||
Exports: make(map[string]bool),
|
||||
},
|
||||
}
|
||||
found[c.ImportPath] = x
|
||||
}
|
||||
x.Package.Exports[c.Name] = true
|
||||
}
|
||||
var ans []*Result
|
||||
for _, x := range found {
|
||||
ans = append(ans, x)
|
||||
}
|
||||
return ans, nil
|
||||
}
|
||||
|
||||
func (s *IndexSource) maybeReadIndex() error {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
|
||||
var readIndex bool
|
||||
if time.Now().After(s.expires) {
|
||||
ok, err := modindex.Update(s.modcachedir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
readIndex = true
|
||||
}
|
||||
}
|
||||
|
||||
if readIndex || s.ix == nil {
|
||||
ix, err := modindex.ReadIndex(s.modcachedir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.ix = ix
|
||||
// for now refresh every 15 minutes
|
||||
s.expires = time.Now().Add(time.Minute * 15)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
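The refresh behaviour described above reduces to a familiar caching pattern, sketched here in isolation (names and intervals are illustrative, not the actual implementation):

package main

import (
	"fmt"
	"sync"
	"time"
)

// cache loads a value at first use and reloads it once the refresh interval
// has elapsed and update() reports that the underlying data changed.
type cache struct {
	mu      sync.Mutex
	value   string
	expires time.Time
}

func (c *cache) get(update func() bool, load func() string) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	refresh := time.Now().After(c.expires) && update()
	if refresh || c.value == "" {
		c.value = load()
		c.expires = time.Now().Add(15 * time.Minute)
	}
	return c.value
}

func main() {
	c := &cache{}
	get := func() string {
		return c.get(func() bool { return false }, func() string { return "index-v1" })
	}
	fmt.Println(get()) // first use: loads the value
	fmt.Println(get()) // within the interval and unchanged: served from cache
}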
|
135
vendor/golang.org/x/tools/internal/modindex/directories.go
generated
vendored
Normal file
|
@ -0,0 +1,135 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package modindex
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/mod/semver"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
type directory struct {
|
||||
path Relpath
|
||||
importPath string
|
||||
version string // semantic version
|
||||
syms []symbol
|
||||
}
|
||||
|
||||
// filterDirs groups the directories by import path,
|
||||
// sorting the ones with the same import path by semantic version,
|
||||
// most recent first.
|
||||
func byImportPath(dirs []Relpath) (map[string][]*directory, error) {
|
||||
ans := make(map[string][]*directory) // key is import path
|
||||
for _, d := range dirs {
|
||||
ip, sv, err := DirToImportPathVersion(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ans[ip] = append(ans[ip], &directory{
|
||||
path: d,
|
||||
importPath: ip,
|
||||
version: sv,
|
||||
})
|
||||
}
|
||||
for k, v := range ans {
|
||||
semanticSort(v)
|
||||
ans[k] = v
|
||||
}
|
||||
return ans, nil
|
||||
}
|
||||
|
||||
// sort the directories by semantic version, latest first
|
||||
func semanticSort(v []*directory) {
|
||||
slices.SortFunc(v, func(l, r *directory) int {
|
||||
if n := semver.Compare(l.version, r.version); n != 0 {
|
||||
return -n // latest first
|
||||
}
|
||||
return strings.Compare(string(l.path), string(r.path))
|
||||
})
|
||||
}
|
||||
|
||||
// modCacheRegexp splits a relative path into module, module version, and package.
|
||||
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
|
||||
|
||||
// DirToImportPathVersion computes import path and semantic version
|
||||
func DirToImportPathVersion(dir Relpath) (string, string, error) {
|
||||
m := modCacheRegexp.FindStringSubmatch(string(dir))
|
||||
// m[1] is the module path
|
||||
// m[2] is the version major.minor.patch(-<pre-release identifier>)
|
||||
// m[3] is the rest of the package path
|
||||
if len(m) != 4 {
|
||||
return "", "", fmt.Errorf("bad dir %s", dir)
|
||||
}
|
||||
if !semver.IsValid(m[2]) {
|
||||
return "", "", fmt.Errorf("bad semantic version %s", m[2])
|
||||
}
|
||||
// ToSlash is required for Windows.
|
||||
return filepath.ToSlash(m[1] + m[3]), m[2], nil
|
||||
}
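Applied to a hypothetical module-cache directory, the same split looks like this (a standalone sketch that skips the module path unescaping step):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as modCacheRegexp above: module path, "@", version, then the
// remaining package subpath.
var modCacheRe = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)

func main() {
	dir := "gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0/sub/pkg"
	m := modCacheRe.FindStringSubmatch(dir)
	fmt.Println("import path:", m[1]+m[3]) // gopkg.in/yaml.v1/sub/pkg
	fmt.Println("version:    ", m[2])      // v1.0.0-20140924161607-9f9df34309c0
}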
|
||||
|
||||
// a region controls what directories to look at, for
|
||||
// updating the index incrementally, and for testing that.
|
||||
// (for testing one builds an index as of A, incrementally
|
||||
// updates it to B, and compares the result to an index build
|
||||
// as of B.)
|
||||
type region struct {
|
||||
onlyAfter, onlyBefore time.Time
|
||||
sync.Mutex
|
||||
ans []Relpath
|
||||
}
|
||||
|
||||
func findDirs(root string, onlyAfter, onlyBefore time.Time) []Relpath {
|
||||
roots := []gopathwalk.Root{{Path: root, Type: gopathwalk.RootModuleCache}}
|
||||
// TODO(PJW): adjust concurrency
|
||||
opts := gopathwalk.Options{ModulesEnabled: true, Concurrency: 1 /* ,Logf: log.Printf*/}
|
||||
betw := ®ion{
|
||||
onlyAfter: onlyAfter,
|
||||
onlyBefore: onlyBefore,
|
||||
}
|
||||
gopathwalk.WalkSkip(roots, betw.addDir, betw.skipDir, opts)
|
||||
return betw.ans
|
||||
}
|
||||
|
||||
func (r *region) addDir(rt gopathwalk.Root, dir string) {
|
||||
// do we need to check times?
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
x := filepath.ToSlash(string(toRelpath(Abspath(rt.Path), dir)))
|
||||
r.ans = append(r.ans, toRelpath(Abspath(rt.Path), x))
|
||||
}
|
||||
|
||||
func (r *region) skipDir(_ gopathwalk.Root, dir string) bool {
|
||||
// The cache directory is already ignored in gopathwalk.
|
||||
if filepath.Base(dir) == "internal" {
|
||||
return true
|
||||
}
|
||||
if strings.Contains(dir, "toolchain@") {
|
||||
return true
|
||||
}
|
||||
// don't look inside @ directories that are too old
|
||||
if strings.Contains(filepath.Base(dir), "@") {
|
||||
st, err := os.Stat(dir)
|
||||
if err != nil {
|
||||
log.Printf("can't stat dir %s %v", dir, err)
|
||||
return true
|
||||
}
|
||||
if st.ModTime().Before(r.onlyAfter) {
|
||||
return true
|
||||
}
|
||||
if st.ModTime().After(r.onlyBefore) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
266
vendor/golang.org/x/tools/internal/modindex/index.go
generated
vendored
Normal file
|
@ -0,0 +1,266 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package modindex
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/csv"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
The on-disk index is a text file.
|
||||
The first 3 lines are header information containing CurrentVersion,
|
||||
the value of GOMODCACHE, and the validity date of the index.
|
||||
(This is when the code started building the index.)
|
||||
Following the header are sections of lines, one section for each
|
||||
import path. These sections are sorted by package name.
|
||||
The first line of each section, marked by a leading :, contains
|
||||
the package name, the import path, the name of the directory relative
|
||||
to GOMODCACHE, and its semantic version.
|
||||
The rest of each section consists of one line per exported symbol.
|
||||
The lines are sorted by the symbol's name and contain the name,
|
||||
an indication of its lexical type (C, T, V, F), and if it is the
|
||||
name of a function, information about the signature.
|
||||
|
||||
The fields in the section header lines are separated by commas, and
|
||||
in the unlikely event this would be confusing, the csv package is used
|
||||
to write (and read) them.
|
||||
|
||||
In the lines containing exported names, C=const, V=var, T=type, F=func.
|
||||
If it is a func, the next field is the number of returned values,
|
||||
followed by pairs consisting of formal parameter names and types.
|
||||
All these fields are separated by spaces. Any spaces in a type
|
||||
(e.g., chan struct{}) are replaced by $s on the disk. The $s are
|
||||
turned back into spaces when read.
|
||||
|
||||
Here is an index header (the comments are not part of the index):
|
||||
0 // version (of the index format)
|
||||
/usr/local/google/home/pjw/go/pkg/mod // GOMODCACHE
|
||||
2024-09-11 18:55:09 // validity date of the index
|
||||
|
||||
Here is an index section:
|
||||
:yaml,gopkg.in/yaml.v1,gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0,v1.0.0-20140924161607-9f9df34309c0
|
||||
Getter T
|
||||
Marshal F 2 in interface{}
|
||||
Setter T
|
||||
Unmarshal F 1 in []byte out interface{}
|
||||
|
||||
The package name is yaml, the import path is gopkg.in/yaml.v1.
|
||||
Getter and Setter are types, and Marshal and Unmarshal are functions.
|
||||
The latter returns one value and has two arguments, 'in' and 'out'
|
||||
whose types are []byte and interface{}.
|
||||
*/
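Given that layout, a single symbol line can be decoded with a few string operations (a standalone sketch over the sample line above):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// "Marshal F 2 in interface{}" from the sample section: name, lexical kind,
	// number of results, then (argument name, type) pairs.
	line := "Marshal F 2 in interface{}"
	fields := strings.SplitN(line, " ", 4)
	name, kind, results := fields[0], fields[1], fields[2]
	var sig []string
	if len(fields) == 4 {
		for _, f := range strings.Split(fields[3], " ") {
			// "$" stands in for spaces inside a type, e.g. chan$struct{}.
			sig = append(sig, strings.ReplaceAll(f, "$", " "))
		}
	}
	fmt.Println(name, kind, results, sig) // Marshal F 2 [in interface{}]
}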
|
||||
|
||||
// CurrentVersion tells readers about the format of the index.
|
||||
const CurrentVersion int = 0
|
||||
|
||||
// Index is returned by ReadIndex().
|
||||
type Index struct {
|
||||
Version int
|
||||
Cachedir Abspath // The directory containing the module cache
|
||||
Changed time.Time // The index is up to date as of Changed
|
||||
Entries []Entry
|
||||
}
|
||||
|
||||
// An Entry contains information for an import path.
|
||||
type Entry struct {
|
||||
Dir Relpath // directory in modcache
|
||||
ImportPath string
|
||||
PkgName string
|
||||
Version string
|
||||
//ModTime STime // is this useful?
|
||||
Names []string // exported names and information
|
||||
}
|
||||
|
||||
// IndexDir is where the module index is stored.
|
||||
var IndexDir string
|
||||
|
||||
// Set IndexDir
|
||||
func init() {
|
||||
var dir string
|
||||
var err error
|
||||
if testing.Testing() {
|
||||
dir = os.TempDir()
|
||||
} else {
|
||||
dir, err = os.UserCacheDir()
|
||||
// shouldn't happen, but TempDir is better than
|
||||
// creating ./go/imports
|
||||
if err != nil {
|
||||
dir = os.TempDir()
|
||||
}
|
||||
}
|
||||
dir = filepath.Join(dir, "go", "imports")
|
||||
os.MkdirAll(dir, 0777)
|
||||
IndexDir = dir
|
||||
}
|
||||
|
||||
// ReadIndex reads the latest version of the on-disk index
|
||||
// for the cache directory cd.
|
||||
// It returns (nil, nil) if there is no index, but returns
|
||||
// a non-nil error if the index exists but could not be read.
|
||||
func ReadIndex(cachedir string) (*Index, error) {
|
||||
cachedir, err := filepath.Abs(cachedir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cd := Abspath(cachedir)
|
||||
dir := IndexDir
|
||||
base := indexNameBase(cd)
|
||||
iname := filepath.Join(dir, base)
|
||||
buf, err := os.ReadFile(iname)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("cannot read %s: %w", iname, err)
|
||||
}
|
||||
fname := filepath.Join(dir, string(buf))
|
||||
fd, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer fd.Close()
|
||||
r := bufio.NewReader(fd)
|
||||
ix, err := readIndexFrom(cd, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ix, nil
|
||||
}
|
||||
|
||||
func readIndexFrom(cd Abspath, bx io.Reader) (*Index, error) {
|
||||
b := bufio.NewScanner(bx)
|
||||
var ans Index
|
||||
// header
|
||||
ok := b.Scan()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected scan error")
|
||||
}
|
||||
l := b.Text()
|
||||
var err error
|
||||
ans.Version, err = strconv.Atoi(l)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ans.Version != CurrentVersion {
|
||||
return nil, fmt.Errorf("got version %d, expected %d", ans.Version, CurrentVersion)
|
||||
}
|
||||
if ok := b.Scan(); !ok {
|
||||
return nil, fmt.Errorf("scanner error reading cachedir")
|
||||
}
|
||||
ans.Cachedir = Abspath(b.Text())
|
||||
if ok := b.Scan(); !ok {
|
||||
return nil, fmt.Errorf("scanner error reading index creation time")
|
||||
}
|
||||
// TODO(pjw): need to check that this is the expected cachedir
|
||||
// so the tag should be passed in to this function
|
||||
ans.Changed, err = time.ParseInLocation(time.DateTime, b.Text(), time.Local)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var curEntry *Entry
|
||||
for b.Scan() {
|
||||
v := b.Text()
|
||||
if v[0] == ':' {
|
||||
if curEntry != nil {
|
||||
ans.Entries = append(ans.Entries, *curEntry)
|
||||
}
|
||||
// as directories may contain commas and quotes, they need to be read as csv.
|
||||
rdr := strings.NewReader(v[1:])
|
||||
cs := csv.NewReader(rdr)
|
||||
flds, err := cs.Read()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(flds) != 4 {
|
||||
return nil, fmt.Errorf("header contains %d fields, not 4: %q", len(v), v)
|
||||
}
|
||||
curEntry = &Entry{PkgName: flds[0], ImportPath: flds[1], Dir: toRelpath(cd, flds[2]), Version: flds[3]}
|
||||
continue
|
||||
}
|
||||
curEntry.Names = append(curEntry.Names, v)
|
||||
}
|
||||
if curEntry != nil {
|
||||
ans.Entries = append(ans.Entries, *curEntry)
|
||||
}
|
||||
if err := b.Err(); err != nil {
|
||||
return nil, fmt.Errorf("scanner failed %v", err)
|
||||
}
|
||||
return &ans, nil
|
||||
}
|
||||
|
||||
// write the index as a text file
|
||||
func writeIndex(cachedir Abspath, ix *Index) error {
|
||||
ipat := fmt.Sprintf("index-%d-*", CurrentVersion)
|
||||
fd, err := os.CreateTemp(IndexDir, ipat)
|
||||
if err != nil {
|
||||
return err // can this happen?
|
||||
}
|
||||
defer fd.Close()
|
||||
if err := writeIndexToFile(ix, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
content := fd.Name()
|
||||
content = filepath.Base(content)
|
||||
base := indexNameBase(cachedir)
|
||||
nm := filepath.Join(IndexDir, base)
|
||||
err = os.WriteFile(nm, []byte(content), 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeIndexToFile(x *Index, fd *os.File) error {
|
||||
cnt := 0
|
||||
w := bufio.NewWriter(fd)
|
||||
fmt.Fprintf(w, "%d\n", x.Version)
|
||||
fmt.Fprintf(w, "%s\n", x.Cachedir)
|
||||
// round the time down
|
||||
tm := x.Changed.Add(-time.Second / 2)
|
||||
fmt.Fprintf(w, "%s\n", tm.Format(time.DateTime))
|
||||
for _, e := range x.Entries {
|
||||
if e.ImportPath == "" {
|
||||
continue // shouldn't happen
|
||||
}
|
||||
// PJW: maybe always write these headers as csv?
|
||||
if strings.ContainsAny(string(e.Dir), ",\"") {
|
||||
log.Printf("DIR: %s", e.Dir)
|
||||
cw := csv.NewWriter(w)
|
||||
cw.Write([]string{":" + e.PkgName, e.ImportPath, string(e.Dir), e.Version})
|
||||
cw.Flush()
|
||||
} else {
|
||||
fmt.Fprintf(w, ":%s,%s,%s,%s\n", e.PkgName, e.ImportPath, e.Dir, e.Version)
|
||||
}
|
||||
for _, x := range e.Names {
|
||||
fmt.Fprintf(w, "%s\n", x)
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
if err := w.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// return the base name of the file containing the name of the current index
|
||||
func indexNameBase(cachedir Abspath) string {
|
||||
// crc64 is a way to convert path names into 16 hex digits.
|
||||
h := crc64.Checksum([]byte(cachedir), crc64.MakeTable(crc64.ECMA))
|
||||
fname := fmt.Sprintf("index-name-%d-%016x", CurrentVersion, h)
|
||||
return fname
|
||||
}
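For a hypothetical cache directory, the resulting file name can be reproduced directly (standalone sketch; the version prefix 0 matches CurrentVersion above):

package main

import (
	"fmt"
	"hash/crc64"
)

func main() {
	// The cache directory path is reduced to 16 hex digits so the file name
	// stays short, stable, and unique per GOMODCACHE.
	cachedir := "/home/user/go/pkg/mod"
	h := crc64.Checksum([]byte(cachedir), crc64.MakeTable(crc64.ECMA))
	fmt.Printf("index-name-%d-%016x\n", 0, h)
}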
|
178
vendor/golang.org/x/tools/internal/modindex/lookup.go
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package modindex
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Candidate struct {
|
||||
PkgName string
|
||||
Name string
|
||||
Dir string
|
||||
ImportPath string
|
||||
Type LexType
|
||||
Deprecated bool
|
||||
// information for Funcs
|
||||
Results int16 // how many results
|
||||
Sig []Field // arg names and types
|
||||
}
|
||||
|
||||
type Field struct {
|
||||
Arg, Type string
|
||||
}
|
||||
|
||||
type LexType int8
|
||||
|
||||
const (
|
||||
Const LexType = iota
|
||||
Var
|
||||
Type
|
||||
Func
|
||||
)
|
||||
|
||||
// LookupAll only returns those Candidates whose import path
|
||||
// finds all the nms.
|
||||
func (ix *Index) LookupAll(pkg string, names ...string) map[string][]Candidate {
|
||||
// this can be made faster when benchmarks show that it needs to be
|
||||
names = uniquify(names)
|
||||
byImpPath := make(map[string][]Candidate)
|
||||
for _, nm := range names {
|
||||
cands := ix.Lookup(pkg, nm, false)
|
||||
for _, c := range cands {
|
||||
byImpPath[c.ImportPath] = append(byImpPath[c.ImportPath], c)
|
||||
}
|
||||
}
|
||||
for k, v := range byImpPath {
|
||||
if len(v) != len(names) {
|
||||
delete(byImpPath, k)
|
||||
}
|
||||
}
|
||||
return byImpPath
|
||||
}
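The filtering step, keeping only import paths that provided a candidate for every requested name, can be sketched in isolation (hypothetical candidates):

package main

import "fmt"

// keepComplete drops import paths that did not supply all of the wanted names.
func keepComplete(byPath map[string][]string, want int) map[string][]string {
	for path, names := range byPath {
		if len(names) != want {
			delete(byPath, path)
		}
	}
	return byPath
}

func main() {
	candidates := map[string][]string{
		"gopkg.in/yaml.v3": {"Marshal", "Unmarshal"},
		"encoding/json":    {"Marshal"},
	}
	fmt.Println(keepComplete(candidates, 2)) // only gopkg.in/yaml.v3 remains
}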
|
||||
|
||||
// remove duplicates
|
||||
func uniquify(in []string) []string {
|
||||
if len(in) == 0 {
|
||||
return in
|
||||
}
|
||||
in = slices.Clone(in)
|
||||
slices.Sort(in)
|
||||
return slices.Compact(in)
|
||||
}
|
||||
|
||||
// Lookup finds all the symbols in the index with the given PkgName and name.
|
||||
// If prefix is true, it finds all of these with name as a prefix.
|
||||
func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
|
||||
loc, ok := slices.BinarySearchFunc(ix.Entries, pkg, func(e Entry, pkg string) int {
|
||||
return strings.Compare(e.PkgName, pkg)
|
||||
})
|
||||
if !ok {
|
||||
return nil // didn't find the package
|
||||
}
|
||||
var ans []Candidate
|
||||
// loc is the first entry for this package name, but there may be several
|
||||
for i := loc; i < len(ix.Entries); i++ {
|
||||
e := ix.Entries[i]
|
||||
if e.PkgName != pkg {
|
||||
break // end of sorted package names
|
||||
}
|
||||
nloc, ok := slices.BinarySearchFunc(e.Names, name, func(s string, name string) int {
|
||||
if strings.HasPrefix(s, name) {
|
||||
return 0
|
||||
}
|
||||
if s < name {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
})
|
||||
if !ok {
|
||||
continue // didn't find the name, nor any symbols with name as a prefix
|
||||
}
|
||||
for j := nloc; j < len(e.Names); j++ {
|
||||
nstr := e.Names[j]
|
||||
// benchmarks show this makes a difference when there are a lot of Possibilities
|
||||
flds := fastSplit(nstr)
|
||||
if !(flds[0] == name || prefix && strings.HasPrefix(flds[0], name)) {
|
||||
// past range of matching Names
|
||||
break
|
||||
}
|
||||
if len(flds) < 2 {
|
||||
continue // should never happen
|
||||
}
|
||||
px := Candidate{
|
||||
PkgName: pkg,
|
||||
Name: flds[0],
|
||||
Dir: string(e.Dir),
|
||||
ImportPath: e.ImportPath,
|
||||
Type: asLexType(flds[1][0]),
|
||||
Deprecated: len(flds[1]) > 1 && flds[1][1] == 'D',
|
||||
}
|
||||
if px.Type == Func {
|
||||
n, err := strconv.Atoi(flds[2])
|
||||
if err != nil {
|
||||
continue // should never happen
|
||||
}
|
||||
px.Results = int16(n)
|
||||
if len(flds) >= 4 {
|
||||
sig := strings.Split(flds[3], " ")
|
||||
for i := 0; i < len(sig); i++ {
|
||||
// $ cannot otherwise occur. removing the spaces
|
||||
// almost works, but for chan struct{}, e.g.
|
||||
sig[i] = strings.Replace(sig[i], "$", " ", -1)
|
||||
}
|
||||
px.Sig = toFields(sig)
|
||||
}
|
||||
}
|
||||
ans = append(ans, px)
|
||||
}
|
||||
}
|
||||
return ans
|
||||
}
|
||||
|
||||
func toFields(sig []string) []Field {
|
||||
ans := make([]Field, len(sig)/2)
|
||||
for i := 0; i < len(ans); i++ {
|
||||
ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
|
||||
}
|
||||
return ans
|
||||
}
|
||||
|
||||
// benchmarks show this is measurably better than strings.Split
|
||||
// split into first 4 fields separated by single space
|
||||
func fastSplit(x string) []string {
|
||||
ans := make([]string, 0, 4)
|
||||
nxt := 0
|
||||
start := 0
|
||||
for i := 0; i < len(x); i++ {
|
||||
if x[i] != ' ' {
|
||||
continue
|
||||
}
|
||||
ans = append(ans, x[start:i])
|
||||
nxt++
|
||||
start = i + 1
|
||||
if nxt >= 3 {
|
||||
break
|
||||
}
|
||||
}
|
||||
ans = append(ans, x[start:])
|
||||
return ans
|
||||
}
|
||||
|
||||
func asLexType(c byte) LexType {
|
||||
switch c {
|
||||
case 'C':
|
||||
return Const
|
||||
case 'V':
|
||||
return Var
|
||||
case 'T':
|
||||
return Type
|
||||
case 'F':
|
||||
return Func
|
||||
}
|
||||
return -1
|
||||
}
|
164
vendor/golang.org/x/tools/internal/modindex/modindex.go
generated
vendored
Normal file
|
@ -0,0 +1,164 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package modindex contains code for building and searching an index to
|
||||
// the Go module cache. The directory containing the index, returned by
|
||||
// IndexDir(), contains a file index-name-<ver> that contains the name
|
||||
// of the current index. We believe writing that short file is atomic.
|
||||
// ReadIndex reads that file to get the file name of the index.
|
||||
// WriteIndex writes an index with a unique name and then
|
||||
// writes that name into a new version of index-name-<ver>.
|
||||
// (<ver> stands for the CurrentVersion of the index format.)
|
||||
package modindex
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
// Create always creates a new index for the go module cache that is in cachedir.
|
||||
func Create(cachedir string) error {
|
||||
_, err := indexModCache(cachedir, true)
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the index for the go module cache that is in cachedir,
|
||||
// If there is no existing index it will build one.
|
||||
// If there are changed directories since the last index, it will
|
||||
// write a new one and return true. Otherwise it returns false.
|
||||
func Update(cachedir string) (bool, error) {
|
||||
return indexModCache(cachedir, false)
|
||||
}
|
||||
|
||||
// indexModCache writes an index current as of when it is called.
|
||||
// If clear is true the index is constructed from all of GOMODCACHE
|
||||
// otherwise the index is constructed from the last previous index
|
||||
// and the updates to the cache. It returns true if it wrote an index,
|
||||
// false otherwise.
|
||||
func indexModCache(cachedir string, clear bool) (bool, error) {
|
||||
cachedir, err := filepath.Abs(cachedir)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
cd := Abspath(cachedir)
|
||||
future := time.Now().Add(24 * time.Hour) // safely in the future
|
||||
ok, err := modindexTimed(future, cd, clear)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
// modindexTimed writes an index current as of onlyBefore.
|
||||
// If clear is true the index is constructed from all of GOMODCACHE
|
||||
// otherwise the index is constructed from the last previous index
|
||||
// and all the updates to the cache before onlyBefore.
|
||||
// It returns true if it wrote a new index, false if it wrote nothing.
|
||||
func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
|
||||
var curIndex *Index
|
||||
if !clear {
|
||||
var err error
|
||||
curIndex, err = ReadIndex(string(cachedir))
|
||||
if clear && err != nil {
|
||||
return false, err
|
||||
}
|
||||
// TODO(pjw): check that most of those directories still exist
|
||||
}
|
||||
cfg := &work{
|
||||
onlyBefore: onlyBefore,
|
||||
oldIndex: curIndex,
|
||||
cacheDir: cachedir,
|
||||
}
|
||||
if curIndex != nil {
|
||||
cfg.onlyAfter = curIndex.Changed
|
||||
}
|
||||
if err := cfg.buildIndex(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
|
||||
// no changes from existing curIndex, don't write a new index
|
||||
return false, nil
|
||||
}
|
||||
if err := cfg.writeIndex(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type work struct {
|
||||
onlyBefore time.Time // do not use directories later than this
|
||||
onlyAfter time.Time // only interested in directories after this
|
||||
// directories from before onlyAfter come from oldIndex
|
||||
oldIndex *Index
|
||||
newIndex *Index
|
||||
cacheDir Abspath
|
||||
}
|
||||
|
||||
func (w *work) buildIndex() error {
|
||||
// The effective date of the new index should be at least
|
||||
// slightly earlier than when the directories are scanned
|
||||
// so set it now.
|
||||
w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
|
||||
dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
|
||||
if len(dirs) == 0 {
|
||||
return nil
|
||||
}
|
||||
newdirs, err := byImportPath(dirs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// for each import path it might occur only in newdirs,
|
||||
// only in w.oldIndex, or in both.
|
||||
// If it occurs in both, use the semantically later one
|
||||
if w.oldIndex != nil {
|
||||
for _, e := range w.oldIndex.Entries {
|
||||
found, ok := newdirs[e.ImportPath]
|
||||
if !ok {
|
||||
w.newIndex.Entries = append(w.newIndex.Entries, e)
|
||||
continue // use this one, there is no new one
|
||||
}
|
||||
if semver.Compare(found[0].version, e.Version) > 0 {
|
||||
// use the new one
|
||||
} else {
|
||||
// use the old one, forget the new one
|
||||
w.newIndex.Entries = append(w.newIndex.Entries, e)
|
||||
delete(newdirs, e.ImportPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
// get symbol information for all the new directories
|
||||
getSymbols(w.cacheDir, newdirs)
|
||||
// assemble the new index entries
|
||||
for k, v := range newdirs {
|
||||
d := v[0]
|
||||
pkg, names := processSyms(d.syms)
|
||||
if pkg == "" {
|
||||
continue // PJW: does this ever happen?
|
||||
}
|
||||
entry := Entry{
|
||||
PkgName: pkg,
|
||||
Dir: d.path,
|
||||
ImportPath: k,
|
||||
Version: d.version,
|
||||
Names: names,
|
||||
}
|
||||
w.newIndex.Entries = append(w.newIndex.Entries, entry)
|
||||
}
|
||||
// sort the entries in the new index
|
||||
slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
|
||||
if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
|
||||
return n
|
||||
}
|
||||
return strings.Compare(l.ImportPath, r.ImportPath)
|
||||
})
|
||||
return nil
|
||||
}
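The merge rule for an import path present in both the old index and the new scan can be sketched with golang.org/x/mod/semver (hypothetical versions):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// Mirrors the choice above: the semantically later version wins, so the
	// freshly scanned entry replaces the old one only if its version is newer.
	oldVersion, newVersion := "v1.2.0", "v1.10.0"
	if semver.Compare(newVersion, oldVersion) > 0 {
		fmt.Println("use the new entry:", newVersion)
	} else {
		fmt.Println("keep the old entry:", oldVersion)
	}
}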
|
||||
|
||||
func (w *work) writeIndex() error {
|
||||
return writeIndex(w.cacheDir, w.newIndex)
|
||||
}
|
218
vendor/golang.org/x/tools/internal/modindex/symbols.go
generated
vendored
Normal file
|
@ -0,0 +1,218 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package modindex
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// The name of a symbol contains information about the symbol:
|
||||
// <name> T for types, TD if the type is deprecated
|
||||
// <name> C for consts, CD if the const is deprecated
|
||||
// <name> V for vars, VD if the var is deprecated
|
||||
// and for funcs: <name> F <num of return values> (<arg-name> <arg-type>)*
|
||||
// any spaces in <arg-type> are replaced by $s so that the fields
|
||||
// of the name are space separated. F is replaced by FD if the func
|
||||
// is deprecated.
|
||||
type symbol struct {
|
||||
pkg string // name of the symbol's package
|
||||
name string // declared name
|
||||
kind string // T, C, V, or F, followed by D if deprecated
|
||||
sig string // signature information, for F
|
||||
}
|
||||
|
||||
// find the symbols for the best directories
|
||||
func getSymbols(cd Abspath, dirs map[string][]*directory) {
|
||||
var g errgroup.Group
|
||||
g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
|
||||
for _, vv := range dirs {
|
||||
// throttling some day?
|
||||
d := vv[0]
|
||||
g.Go(func() error {
|
||||
thedir := filepath.Join(string(cd), string(d.path))
|
||||
mode := parser.SkipObjectResolution | parser.ParseComments
|
||||
|
||||
fi, err := os.ReadDir(thedir)
|
||||
if err != nil {
|
||||
return nil // log this someday?
|
||||
}
|
||||
for _, fx := range fi {
|
||||
if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
|
||||
continue
|
||||
}
|
||||
fname := filepath.Join(thedir, fx.Name())
|
||||
tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
|
||||
if err != nil {
|
||||
continue // ignore errors, someday log them?
|
||||
}
|
||||
d.syms = append(d.syms, getFileExports(tr)...)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
g.Wait()
|
||||
}
|
||||
|
||||
func getFileExports(f *ast.File) []symbol {
|
||||
pkg := f.Name.Name
|
||||
if pkg == "main" {
|
||||
return nil
|
||||
}
|
||||
var ans []symbol
|
||||
// should we look for //go:build ignore?
|
||||
for _, decl := range f.Decls {
|
||||
switch decl := decl.(type) {
|
||||
case *ast.FuncDecl:
|
||||
if decl.Recv != nil {
|
||||
// ignore methods, as we are completing package selections
|
||||
continue
|
||||
}
|
||||
name := decl.Name.Name
|
||||
dtype := decl.Type
|
||||
// not looking at dtype.TypeParams. That is, treating
|
||||
// generic functions just like non-generic ones.
|
||||
sig := dtype.Params
|
||||
kind := "F"
|
||||
if isDeprecated(decl.Doc) {
|
||||
kind += "D"
|
||||
}
|
||||
result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
|
||||
for _, x := range sig.List {
|
||||
// This code creates a string representing the type.
|
||||
// TODO(pjw): it may be fragile:
|
||||
// 1. x.Type could be nil, perhaps in ill-formed code
|
||||
// 2. ExprString might someday change incompatibly to
|
||||
// include struct tags, which can be arbitrary strings
|
||||
if x.Type == nil {
|
||||
// Can this happen without a parse error? (Files with parse
|
||||
// errors are ignored in getSymbols)
|
||||
continue // maybe report this someday
|
||||
}
|
||||
tp := types.ExprString(x.Type)
|
||||
if len(tp) == 0 {
|
||||
// Can this happen?
|
||||
continue // maybe report this someday
|
||||
}
|
||||
// This is only safe if ExprString never returns anything with a $
|
||||
// The only place a $ can occur seems to be in a struct tag, which
|
||||
// can be an arbitrary string literal, and ExprString does not presently
|
||||
// print struct tags. So for this to happen the type of a formal parameter
|
||||
// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
|
||||
// would have to show the struct tag. Even testing for this case seems
|
||||
// a waste of effort, but let's remember the possibility
|
||||
if strings.Contains(tp, "$") {
|
||||
continue
|
||||
}
|
||||
tp = strings.Replace(tp, " ", "$", -1)
|
||||
if len(x.Names) == 0 {
|
||||
result = append(result, "_")
|
||||
result = append(result, tp)
|
||||
} else {
|
||||
for _, y := range x.Names {
|
||||
result = append(result, y.Name)
|
||||
result = append(result, tp)
|
||||
}
|
||||
}
|
||||
}
|
||||
sigs := strings.Join(result, " ")
|
||||
if s := newsym(pkg, name, kind, sigs); s != nil {
|
||||
ans = append(ans, *s)
|
||||
}
|
||||
case *ast.GenDecl:
|
||||
depr := isDeprecated(decl.Doc)
|
||||
switch decl.Tok {
|
||||
case token.CONST, token.VAR:
|
||||
tp := "V"
|
||||
if decl.Tok == token.CONST {
|
||||
tp = "C"
|
||||
}
|
||||
if depr {
|
||||
tp += "D"
|
||||
}
|
||||
for _, sp := range decl.Specs {
|
||||
for _, x := range sp.(*ast.ValueSpec).Names {
|
||||
if s := newsym(pkg, x.Name, tp, ""); s != nil {
|
||||
ans = append(ans, *s)
|
||||
}
|
||||
}
|
||||
}
|
||||
case token.TYPE:
|
||||
tp := "T"
|
||||
if depr {
|
||||
tp += "D"
|
||||
}
|
||||
for _, sp := range decl.Specs {
|
||||
if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
|
||||
ans = append(ans, *s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ans
|
||||
}
|
||||
|
||||
func newsym(pkg, name, kind, sig string) *symbol {
|
||||
if len(name) == 0 || !ast.IsExported(name) {
|
||||
return nil
|
||||
}
|
||||
sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
|
||||
return &sym
|
||||
}
|
||||
|
||||
func isDeprecated(doc *ast.CommentGroup) bool {
|
||||
if doc == nil {
|
||||
return false
|
||||
}
|
||||
// go.dev/wiki/Deprecated Paragraph starting 'Deprecated:'
|
||||
// This code fails for /* Deprecated: */, but it's the code from
|
||||
// gopls/internal/analysis/deprecated
|
||||
lines := strings.Split(doc.Text(), "\n\n")
|
||||
for _, line := range lines {
|
||||
if strings.HasPrefix(line, "Deprecated:") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// return the package name and the value for the symbols.
|
||||
// if there are multiple packages, choose one arbitrarily
|
||||
// the returned slice is sorted lexicographically
|
||||
func processSyms(syms []symbol) (string, []string) {
|
||||
if len(syms) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
slices.SortFunc(syms, func(l, r symbol) int {
|
||||
return strings.Compare(l.name, r.name)
|
||||
})
|
||||
pkg := syms[0].pkg
|
||||
var names []string
|
||||
for _, s := range syms {
|
||||
var nx string
|
||||
if s.pkg == pkg {
|
||||
if s.sig != "" {
|
||||
nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
|
||||
} else {
|
||||
nx = fmt.Sprintf("%s %s", s.name, s.kind)
|
||||
}
|
||||
names = append(names, nx)
|
||||
} else {
|
||||
continue // PJW: do we want to keep track of these?
|
||||
}
|
||||
}
|
||||
return pkg, names
|
||||
}
|
25
vendor/golang.org/x/tools/internal/modindex/types.go
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package modindex
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// some special types to avoid confusions
|
||||
|
||||
// distinguish various types of directory names. It's easy to get confused.
|
||||
type Abspath string // absolute paths
|
||||
type Relpath string // paths with GOMODCACHE prefix removed
|
||||
|
||||
func toRelpath(cachedir Abspath, s string) Relpath {
|
||||
if strings.HasPrefix(s, string(cachedir)) {
|
||||
if s == string(cachedir) {
|
||||
return Relpath("")
|
||||
}
|
||||
return Relpath(s[len(cachedir)+1:])
|
||||
}
|
||||
return Relpath(s)
|
||||
}
|
6
vendor/golang.org/x/tools/internal/packagesinternal/packages.go
generated
vendored
|
@ -5,8 +5,7 @@
|
|||
// Package packagesinternal exposes internal-only fields from go/packages.
|
||||
package packagesinternal
|
||||
|
||||
var GetForTest = func(p interface{}) string { return "" }
|
||||
var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
|
||||
var GetDepsErrors = func(p any) []*PackageError { return nil }
|
||||
|
||||
type PackageError struct {
|
||||
ImportStack []string // shortest path from package named on command line to this one
|
||||
|
@ -16,7 +15,6 @@ type PackageError struct {
|
|||
|
||||
var TypecheckCgo int
|
||||
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
|
||||
var ForTest int // must be set as a LoadMode to call GetForTest
|
||||
|
||||
var SetModFlag = func(config interface{}, value string) {}
|
||||
var SetModFlag = func(config any, value string) {}
|
||||
var SetModFile = func(config interface{}, value string) {}
|
||||
|
|
38
vendor/golang.org/x/tools/internal/pkgbits/decoder.go
generated
vendored
38
vendor/golang.org/x/tools/internal/pkgbits/decoder.go
generated
vendored
|
@ -21,10 +21,7 @@ import (
|
|||
// export data.
|
||||
type PkgDecoder struct {
|
||||
// version is the file format version.
|
||||
version uint32
|
||||
|
||||
// aliases determines whether types.Aliases should be created
|
||||
aliases bool
|
||||
version Version
|
||||
|
||||
// sync indicates whether the file uses sync markers.
|
||||
sync bool
|
||||
|
@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
|
|||
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
|
||||
// IR export data from input. pkgPath is the package path for the
|
||||
// compilation unit that produced the export data.
|
||||
//
|
||||
// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
|
||||
func NewPkgDecoder(pkgPath, input string) PkgDecoder {
|
||||
pr := PkgDecoder{
|
||||
pkgPath: pkgPath,
|
||||
//aliases: aliases.Enabled(),
|
||||
}
|
||||
|
||||
// TODO(mdempsky): Implement direct indexing of input string to
|
||||
|
@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
|
|||
|
||||
r := strings.NewReader(input)
|
||||
|
||||
assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
|
||||
var ver uint32
|
||||
assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
|
||||
pr.version = Version(ver)
|
||||
|
||||
switch pr.version {
|
||||
default:
|
||||
panic(fmt.Errorf("unsupported version: %v", pr.version))
|
||||
case 0:
|
||||
// no flags
|
||||
case 1:
|
||||
if pr.version >= numVersions {
|
||||
panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
|
||||
}
|
||||
|
||||
if pr.version.Has(Flags) {
|
||||
var flags uint32
|
||||
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
|
||||
pr.sync = flags&flagSyncMarkers != 0
|
||||
|
@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
|
|||
assert(err == nil)
|
||||
|
||||
pr.elemData = input[pos:]
|
||||
assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
|
||||
|
||||
const fingerprintSize = 8
|
||||
assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
|
||||
|
||||
return pr
|
||||
}
|
||||
|
@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
|
|||
absIdx += int(pr.elemEndsEnds[k-1])
|
||||
}
|
||||
if absIdx >= int(pr.elemEndsEnds[k]) {
|
||||
errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
|
||||
panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
|
||||
}
|
||||
return absIdx
|
||||
}
|
||||
|
@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
|
|||
Idx: idx,
|
||||
}
|
||||
|
||||
// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
|
||||
r.Data = *strings.NewReader(pr.DataIdx(k, idx))
|
||||
|
||||
r.Data.Reset(pr.DataIdx(k, idx))
|
||||
r.Sync(SyncRelocs)
|
||||
r.Relocs = make([]RelocEnt, r.Len())
|
||||
for i := range r.Relocs {
|
||||
|
@ -248,7 +243,7 @@ type Decoder struct {
|
|||
|
||||
func (r *Decoder) checkErr(err error) {
|
||||
if err != nil {
|
||||
errorf("unexpected decoding error: %w", err)
|
||||
panicf("unexpected decoding error: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
|
|||
|
||||
return path, name, tag
|
||||
}
|
||||
|
||||
// Version reports the version of the bitstream.
|
||||
func (w *Decoder) Version() Version { return w.common.version }
|
||||
|
|
43
vendor/golang.org/x/tools/internal/pkgbits/encoder.go
generated
vendored
|
@ -12,18 +12,15 @@ import (
|
|||
"io"
|
||||
"math/big"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// currentVersion is the current version number.
|
||||
//
|
||||
// - v0: initial prototype
|
||||
//
|
||||
// - v1: adds the flags uint32 word
|
||||
const currentVersion uint32 = 1
|
||||
|
||||
// A PkgEncoder provides methods for encoding a package's Unified IR
|
||||
// export data.
|
||||
type PkgEncoder struct {
|
||||
// version of the bitstream.
|
||||
version Version
|
||||
|
||||
// elems holds the bitstream for previously encoded elements.
|
||||
elems [numRelocs][]string
|
||||
|
||||
|
@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
|
|||
// export data files, but can help diagnosing desync errors in
|
||||
// higher-level Unified IR reader/writer code. If syncFrames is
|
||||
// negative, then sync markers are omitted entirely.
|
||||
func NewPkgEncoder(syncFrames int) PkgEncoder {
|
||||
func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
|
||||
return PkgEncoder{
|
||||
version: version,
|
||||
stringsIdx: make(map[string]Index),
|
||||
syncFrames: syncFrames,
|
||||
}
|
||||
|
@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
|
|||
assert(binary.Write(out, binary.LittleEndian, x) == nil)
|
||||
}
|
||||
|
||||
writeUint32(currentVersion)
|
||||
writeUint32(uint32(pw.version))
|
||||
|
||||
var flags uint32
|
||||
if pw.SyncMarkers() {
|
||||
flags |= flagSyncMarkers
|
||||
if pw.version.Has(Flags) {
|
||||
var flags uint32
|
||||
if pw.SyncMarkers() {
|
||||
flags |= flagSyncMarkers
|
||||
}
|
||||
writeUint32(flags)
|
||||
}
|
||||
writeUint32(flags)
|
||||
|
||||
// Write elemEndsEnds.
|
||||
var sum uint32
|
||||
|
@ -159,7 +159,7 @@ type Encoder struct {
|
|||
|
||||
// Flush finalizes the element's bitstream and returns its Index.
|
||||
func (w *Encoder) Flush() Index {
|
||||
var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
|
||||
var sb strings.Builder
|
||||
|
||||
// Backup the data so we write the relocations at the front.
|
||||
var tmp bytes.Buffer
|
||||
|
@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
|
|||
|
||||
func (w *Encoder) checkErr(err error) {
|
||||
if err != nil {
|
||||
errorf("unexpected encoding error: %v", err)
|
||||
panicf("unexpected encoding error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) {
|
|||
// section (if not already present), and then writing a relocation
|
||||
// into the element bitstream.
|
||||
func (w *Encoder) String(s string) {
|
||||
w.StringRef(w.p.StringIdx(s))
|
||||
}
|
||||
|
||||
// StringRef writes a reference to the given index, which must be a
|
||||
// previously encoded string value.
|
||||
func (w *Encoder) StringRef(idx Index) {
|
||||
w.Sync(SyncString)
|
||||
w.Reloc(RelocString, w.p.StringIdx(s))
|
||||
w.Reloc(RelocString, idx)
|
||||
}
|
||||
|
||||
// Strings encodes and writes a variable-length slice of strings into
|
||||
|
@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
|
|||
func (w *Encoder) scalar(val constant.Value) {
|
||||
switch v := constant.Val(val).(type) {
|
||||
default:
|
||||
errorf("unhandled %v (%v)", val, val.Kind())
|
||||
panicf("unhandled %v (%v)", val, val.Kind())
|
||||
case bool:
|
||||
w.Code(ValBool)
|
||||
w.Bool(v)
|
||||
|
@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
|
|||
b := v.Append(nil, 'p', -1)
|
||||
w.String(string(b)) // TODO: More efficient encoding.
|
||||
}
|
||||
|
||||
// Version reports the version of the bitstream.
|
||||
func (w *Encoder) Version() Version { return w.p.version }
|
||||
|
|
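For illustration, the encoder and decoder hunks above move the flags word behind a version check: the header now starts with a uint32 version, and a uint32 flags word follows only for versions that carry the Flags field. Below is a minimal standalone sketch of that header layout using only encoding/binary; flagSyncMarkers, writeHeader and readHeader are illustrative names and this is not the vendored package itself.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const flagSyncMarkers = 0x1 // mirrors the flag bit used in the diff

// writeHeader emulates the layout DumpTo produces: a uint32 version,
// followed by a uint32 flags word only for versions that carry flags (>= 1).
func writeHeader(version uint32, sync bool) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.LittleEndian, version)
	if version >= 1 {
		var flags uint32
		if sync {
			flags |= flagSyncMarkers
		}
		binary.Write(&buf, binary.LittleEndian, flags)
	}
	return buf.Bytes()
}

// readHeader emulates the corresponding NewPkgDecoder logic.
func readHeader(data []byte) (version uint32, sync bool, err error) {
	r := bytes.NewReader(data)
	if err = binary.Read(r, binary.LittleEndian, &version); err != nil {
		return
	}
	if version >= 1 {
		var flags uint32
		if err = binary.Read(r, binary.LittleEndian, &flags); err != nil {
			return
		}
		sync = flags&flagSyncMarkers != 0
	}
	return
}

func main() {
	hdr := writeHeader(2, true)
	v, sync, err := readHeader(hdr)
	fmt.Println(v, sync, err) // 2 true <nil>
}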
21 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go generated vendored
|
@ -1,21 +0,0 @@
|
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
// TODO(mdempsky): Remove after #44505 is resolved
|
||||
|
||||
package pkgbits
|
||||
|
||||
import "runtime"
|
||||
|
||||
func walkFrames(pcs []uintptr, visit frameVisitor) {
|
||||
for _, pc := range pcs {
|
||||
fn := runtime.FuncForPC(pc)
|
||||
file, line := fn.FileLine(pc)
|
||||
|
||||
visit(file, line, fn.Name(), pc-fn.Entry())
|
||||
}
|
||||
}
|
28 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go generated vendored
|
@ -1,28 +0,0 @@
|
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.7
|
||||
// +build go1.7
|
||||
|
||||
package pkgbits
|
||||
|
||||
import "runtime"
|
||||
|
||||
// walkFrames calls visit for each call frame represented by pcs.
|
||||
//
|
||||
// pcs should be a slice of PCs, as returned by runtime.Callers.
|
||||
func walkFrames(pcs []uintptr, visit frameVisitor) {
|
||||
if len(pcs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
frames := runtime.CallersFrames(pcs)
|
||||
for {
|
||||
frame, more := frames.Next()
|
||||
visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
|
||||
if !more {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
2 vendor/golang.org/x/tools/internal/pkgbits/support.go generated vendored
|
@ -12,6 +12,6 @@ func assert(b bool) {
|
|||
}
|
||||
}
|
||||
|
||||
func errorf(format string, args ...interface{}) {
|
||||
func panicf(format string, args ...any) {
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
|
23 vendor/golang.org/x/tools/internal/pkgbits/sync.go generated vendored
|
@ -6,6 +6,7 @@ package pkgbits
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string {
|
|||
|
||||
type frameVisitor func(file string, line int, name string, offset uintptr)
|
||||
|
||||
// walkFrames calls visit for each call frame represented by pcs.
|
||||
//
|
||||
// pcs should be a slice of PCs, as returned by runtime.Callers.
|
||||
func walkFrames(pcs []uintptr, visit frameVisitor) {
|
||||
if len(pcs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
frames := runtime.CallersFrames(pcs)
|
||||
for {
|
||||
frame, more := frames.Next()
|
||||
visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
|
||||
if !more {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SyncMarker is an enum type that represents markers that may be
|
||||
// written to export data to ensure the reader and writer stay
|
||||
// synchronized.
|
||||
|
@ -110,4 +129,8 @@ const (
|
|||
SyncStmtsEnd
|
||||
SyncLabel
|
||||
SyncOptLabel
|
||||
|
||||
SyncMultiExpr
|
||||
SyncRType
|
||||
SyncConvRTTI
|
||||
)
|
||||
|
|
7 vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go generated vendored
|
@ -74,11 +74,14 @@ func _() {
|
|||
_ = x[SyncStmtsEnd-64]
|
||||
_ = x[SyncLabel-65]
|
||||
_ = x[SyncOptLabel-66]
|
||||
_ = x[SyncMultiExpr-67]
|
||||
_ = x[SyncRType-68]
|
||||
_ = x[SyncConvRTTI-69]
|
||||
}
|
||||
|
||||
const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
|
||||
const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
|
||||
|
||||
var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
|
||||
var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
|
||||
|
||||
func (i SyncMarker) String() string {
|
||||
i -= 1
|
||||
|
|
85 vendor/golang.org/x/tools/internal/pkgbits/version.go generated vendored Normal file
|
@ -0,0 +1,85 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkgbits
|
||||
|
||||
// Version indicates a version of a unified IR bitstream.
|
||||
// Each Version indicates the addition, removal, or change of
|
||||
// new data in the bitstream.
|
||||
//
|
||||
// These are serialized to disk and the interpretation remains fixed.
|
||||
type Version uint32
|
||||
|
||||
const (
|
||||
// V0: initial prototype.
|
||||
//
|
||||
// All data that is not assigned a Field is in version V0
|
||||
// and has not been deprecated.
|
||||
V0 Version = iota
|
||||
|
||||
// V1: adds the Flags uint32 word
|
||||
V1
|
||||
|
||||
// V2: removes unused legacy fields and supports type parameters for aliases.
|
||||
// - remove the legacy "has init" bool from the public root
|
||||
// - remove obj's "derived func instance" bool
|
||||
// - add a TypeParamNames field to ObjAlias
|
||||
// - remove derived info "needed" bool
|
||||
V2
|
||||
|
||||
numVersions = iota
|
||||
)
|
||||
|
||||
// Field denotes a unit of data in the serialized unified IR bitstream.
|
||||
// It is conceptually a like field in a structure.
|
||||
//
|
||||
// We only really need Fields when the data may or may not be present
|
||||
// in a stream based on the Version of the bitstream.
|
||||
//
|
||||
// Unlike much of pkgbits, Fields are not serialized and
|
||||
// can change values as needed.
|
||||
type Field int
|
||||
|
||||
const (
|
||||
// Flags in a uint32 in the header of a bitstream
|
||||
// that is used to indicate whether optional features are enabled.
|
||||
Flags Field = iota
|
||||
|
||||
// Deprecated: HasInit was a bool indicating whether a package
|
||||
// has any init functions.
|
||||
HasInit
|
||||
|
||||
// Deprecated: DerivedFuncInstance was a bool indicating
|
||||
// whether an object was a function instance.
|
||||
DerivedFuncInstance
|
||||
|
||||
// ObjAlias has a list of TypeParamNames.
|
||||
AliasTypeParamNames
|
||||
|
||||
// Deprecated: DerivedInfoNeeded was a bool indicating
|
||||
// whether a type was a derived type.
|
||||
DerivedInfoNeeded
|
||||
|
||||
numFields = iota
|
||||
)
|
||||
|
||||
// introduced is the version a field was added.
|
||||
var introduced = [numFields]Version{
|
||||
Flags: V1,
|
||||
AliasTypeParamNames: V2,
|
||||
}
|
||||
|
||||
// removed is the version a field was removed in or 0 for fields
|
||||
// that have not yet been deprecated.
|
||||
// (So removed[f]-1 is the last version it is included in.)
|
||||
var removed = [numFields]Version{
|
||||
HasInit: V2,
|
||||
DerivedFuncInstance: V2,
|
||||
DerivedInfoNeeded: V2,
|
||||
}
|
||||
|
||||
// Has reports whether field f is present in a bitstream at version v.
|
||||
func (v Version) Has(f Field) bool {
|
||||
return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
|
||||
}
|
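For illustration, the gating above can be restated as a small standalone program: which fields a given bitstream version carries follows directly from the introduced and removed tables. The sketch below mirrors version.go for a subset of the fields; it does not import the vendored package.

package main

import "fmt"

type Version uint32
type Field int

const (
	V0 Version = iota
	V1
	V2
)

const (
	Flags Field = iota
	HasInit
	AliasTypeParamNames
	numFields = iota
)

// introduced/removed mirror the tables in version.go above (subset only).
var introduced = [numFields]Version{Flags: V1, AliasTypeParamNames: V2}
var removed = [numFields]Version{HasInit: V2}

// Has mirrors Version.Has: a field is present once introduced and until
// (exclusively) the version that removed it; a zero entry means "never removed".
func (v Version) Has(f Field) bool {
	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
}

func main() {
	for v := V0; v <= V2; v++ {
		fmt.Printf("V%d: Flags=%v HasInit=%v AliasTypeParamNames=%v\n",
			v, v.Has(Flags), v.Has(HasInit), v.Has(AliasTypeParamNames))
	}
	// V0: Flags=false HasInit=true  AliasTypeParamNames=false
	// V1: Flags=true  HasInit=true  AliasTypeParamNames=false
	// V2: Flags=true  HasInit=false AliasTypeParamNames=true
}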
330 vendor/golang.org/x/tools/internal/stdlib/manifest.go generated vendored
File diff suppressed because it is too large
137 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go generated vendored
|
@ -1,137 +0,0 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// package tokeninternal provides access to some internal features of the token
|
||||
// package.
|
||||
package tokeninternal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"sort"
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// GetLines returns the table of line-start offsets from a token.File.
|
||||
func GetLines(file *token.File) []int {
|
||||
// token.File has a Lines method on Go 1.21 and later.
|
||||
if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
|
||||
return file.Lines()
|
||||
}
|
||||
|
||||
// This declaration must match that of token.File.
|
||||
// This creates a risk of dependency skew.
|
||||
// For now we check that the size of the two
|
||||
// declarations is the same, on the (fragile) assumption
|
||||
// that future changes would add fields.
|
||||
type tokenFile119 struct {
|
||||
_ string
|
||||
_ int
|
||||
_ int
|
||||
mu sync.Mutex // we're not complete monsters
|
||||
lines []int
|
||||
_ []struct{}
|
||||
}
|
||||
|
||||
if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
|
||||
panic("unexpected token.File size")
|
||||
}
|
||||
var ptr *tokenFile119
|
||||
type uP = unsafe.Pointer
|
||||
*(*uP)(uP(&ptr)) = uP(file)
|
||||
ptr.mu.Lock()
|
||||
defer ptr.mu.Unlock()
|
||||
return ptr.lines
|
||||
}
|
||||
|
||||
// AddExistingFiles adds the specified files to the FileSet if they
|
||||
// are not already present. It panics if any pair of files in the
|
||||
// resulting FileSet would overlap.
|
||||
func AddExistingFiles(fset *token.FileSet, files []*token.File) {
|
||||
// Punch through the FileSet encapsulation.
|
||||
type tokenFileSet struct {
|
||||
// This type remained essentially consistent from go1.16 to go1.21.
|
||||
mutex sync.RWMutex
|
||||
base int
|
||||
files []*token.File
|
||||
_ *token.File // changed to atomic.Pointer[token.File] in go1.19
|
||||
}
|
||||
|
||||
// If the size of token.FileSet changes, this will fail to compile.
|
||||
const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
|
||||
var _ [-delta * delta]int
|
||||
|
||||
type uP = unsafe.Pointer
|
||||
var ptr *tokenFileSet
|
||||
*(*uP)(uP(&ptr)) = uP(fset)
|
||||
ptr.mutex.Lock()
|
||||
defer ptr.mutex.Unlock()
|
||||
|
||||
// Merge and sort.
|
||||
newFiles := append(ptr.files, files...)
|
||||
sort.Slice(newFiles, func(i, j int) bool {
|
||||
return newFiles[i].Base() < newFiles[j].Base()
|
||||
})
|
||||
|
||||
// Reject overlapping files.
|
||||
// Discard adjacent identical files.
|
||||
out := newFiles[:0]
|
||||
for i, file := range newFiles {
|
||||
if i > 0 {
|
||||
prev := newFiles[i-1]
|
||||
if file == prev {
|
||||
continue
|
||||
}
|
||||
if prev.Base()+prev.Size()+1 > file.Base() {
|
||||
panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
|
||||
prev.Name(), prev.Base(), prev.Base()+prev.Size(),
|
||||
file.Name(), file.Base(), file.Base()+file.Size()))
|
||||
}
|
||||
}
|
||||
out = append(out, file)
|
||||
}
|
||||
newFiles = out
|
||||
|
||||
ptr.files = newFiles
|
||||
|
||||
// Advance FileSet.Base().
|
||||
if len(newFiles) > 0 {
|
||||
last := newFiles[len(newFiles)-1]
|
||||
newBase := last.Base() + last.Size() + 1
|
||||
if ptr.base < newBase {
|
||||
ptr.base = newBase
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FileSetFor returns a new FileSet containing a sequence of new Files with
|
||||
// the same base, size, and line as the input files, for use in APIs that
|
||||
// require a FileSet.
|
||||
//
|
||||
// Precondition: the input files must be non-overlapping, and sorted in order
|
||||
// of their Base.
|
||||
func FileSetFor(files ...*token.File) *token.FileSet {
|
||||
fset := token.NewFileSet()
|
||||
for _, f := range files {
|
||||
f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
|
||||
lines := GetLines(f)
|
||||
f2.SetLines(lines)
|
||||
}
|
||||
return fset
|
||||
}
|
||||
|
||||
// CloneFileSet creates a new FileSet holding all files in fset. It does not
|
||||
// create copies of the token.Files in fset: they are added to the resulting
|
||||
// FileSet unmodified.
|
||||
func CloneFileSet(fset *token.FileSet) *token.FileSet {
|
||||
var files []*token.File
|
||||
fset.Iterate(func(f *token.File) bool {
|
||||
files = append(files, f)
|
||||
return true
|
||||
})
|
||||
newFileSet := token.NewFileSet()
|
||||
AddExistingFiles(newFileSet, files)
|
||||
return newFileSet
|
||||
}
|
68 vendor/golang.org/x/tools/internal/typeparams/common.go generated vendored Normal file
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package typeparams contains common utilities for writing tools that
|
||||
// interact with generic Go code, as introduced with Go 1.18. It
|
||||
// supplements the standard library APIs. Notably, the StructuralTerms
|
||||
// API computes a minimal representation of the structural
|
||||
// restrictions on a type parameter.
|
||||
//
|
||||
// An external version of these APIs is available in the
|
||||
// golang.org/x/exp/typeparams module.
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// UnpackIndexExpr extracts data from AST nodes that represent index
|
||||
// expressions.
|
||||
//
|
||||
// For an ast.IndexExpr, the resulting indices slice will contain exactly one
|
||||
// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
|
||||
// number of index expressions.
|
||||
//
|
||||
// For nodes that don't represent index expressions, the first return value of
|
||||
// UnpackIndexExpr will be nil.
|
||||
func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
|
||||
switch e := n.(type) {
|
||||
case *ast.IndexExpr:
|
||||
return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
|
||||
case *ast.IndexListExpr:
|
||||
return e.X, e.Lbrack, e.Indices, e.Rbrack
|
||||
}
|
||||
return nil, token.NoPos, nil, token.NoPos
|
||||
}
|
||||
|
||||
// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
|
||||
// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
|
||||
// will panic.
|
||||
func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
|
||||
switch len(indices) {
|
||||
case 0:
|
||||
panic("empty indices")
|
||||
case 1:
|
||||
return &ast.IndexExpr{
|
||||
X: x,
|
||||
Lbrack: lbrack,
|
||||
Index: indices[0],
|
||||
Rbrack: rbrack,
|
||||
}
|
||||
default:
|
||||
return &ast.IndexListExpr{
|
||||
X: x,
|
||||
Lbrack: lbrack,
|
||||
Indices: indices,
|
||||
Rbrack: rbrack,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IsTypeParam reports whether t is a type parameter (or an alias of one).
|
||||
func IsTypeParam(t types.Type) bool {
|
||||
_, ok := types.Unalias(t).(*types.TypeParam)
|
||||
return ok
|
||||
}
|
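For illustration, a standalone sketch of the shape distinction UnpackIndexExpr makes, using only the standard library: a single index parses as *ast.IndexExpr, while several indices (for example a generic instantiation) parse as *ast.IndexListExpr. Assumes Go 1.18+; the describe helper is made up for the example and does not use the vendored package.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/types"
)

// describe performs the same shape distinction UnpackIndexExpr makes:
// one index for *ast.IndexExpr, several for *ast.IndexListExpr.
func describe(src string) {
	e, err := parser.ParseExpr(src)
	if err != nil {
		panic(err)
	}
	switch e := e.(type) {
	case *ast.IndexExpr:
		fmt.Printf("%s: one index (%s)\n", src, types.ExprString(e.Index))
	case *ast.IndexListExpr:
		fmt.Printf("%s: %d indices\n", src, len(e.Indices))
	default:
		fmt.Printf("%s: not an index expression (%T)\n", src, e)
	}
}

func main() {
	describe("a[i]")         // *ast.IndexExpr
	describe("m[int, bool]") // *ast.IndexListExpr (e.g. a generic instantiation)
	describe("x + y")        // neither
}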
155 vendor/golang.org/x/tools/internal/typeparams/coretype.go generated vendored Normal file
|
@ -0,0 +1,155 @@
|
|||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// CoreType returns the core type of T or nil if T does not have a core type.
|
||||
//
|
||||
// See https://go.dev/ref/spec#Core_types for the definition of a core type.
|
||||
func CoreType(T types.Type) types.Type {
|
||||
U := T.Underlying()
|
||||
if _, ok := U.(*types.Interface); !ok {
|
||||
return U // for non-interface types,
|
||||
}
|
||||
|
||||
terms, err := NormalTerms(U)
|
||||
if len(terms) == 0 || err != nil {
|
||||
// len(terms) -> empty type set of interface.
|
||||
// err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
|
||||
return nil // no core type.
|
||||
}
|
||||
|
||||
U = terms[0].Type().Underlying()
|
||||
var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
|
||||
for identical = 1; identical < len(terms); identical++ {
|
||||
if !types.Identical(U, terms[identical].Type().Underlying()) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if identical == len(terms) {
|
||||
// https://go.dev/ref/spec#Core_types
|
||||
// "There is a single type U which is the underlying type of all types in the type set of T"
|
||||
return U
|
||||
}
|
||||
ch, ok := U.(*types.Chan)
|
||||
if !ok {
|
||||
return nil // no core type as identical < len(terms) and U is not a channel.
|
||||
}
|
||||
// https://go.dev/ref/spec#Core_types
|
||||
// "the type chan E if T contains only bidirectional channels, or the type chan<- E or
|
||||
// <-chan E depending on the direction of the directional channels present."
|
||||
for chans := identical; chans < len(terms); chans++ {
|
||||
curr, ok := terms[chans].Type().Underlying().(*types.Chan)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if !types.Identical(ch.Elem(), curr.Elem()) {
|
||||
return nil // channel elements are not identical.
|
||||
}
|
||||
if ch.Dir() == types.SendRecv {
|
||||
// ch is bidirectional. We can safely always use curr's direction.
|
||||
ch = curr
|
||||
} else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
|
||||
// ch and curr are not bidirectional and not the same direction.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// NormalTerms returns a slice of terms representing the normalized structural
|
||||
// type restrictions of a type, if any.
|
||||
//
|
||||
// For all types other than *types.TypeParam, *types.Interface, and
|
||||
// *types.Union, this is just a single term with Tilde() == false and
|
||||
// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
|
||||
// below.
|
||||
//
|
||||
// Structural type restrictions of a type parameter are created via
|
||||
// non-interface types embedded in its constraint interface (directly, or via a
|
||||
// chain of interface embeddings). For example, in the declaration type
|
||||
// T[P interface{~int; m()}] int the structural restriction of the type
|
||||
// parameter P is ~int.
|
||||
//
|
||||
// With interface embedding and unions, the specification of structural type
|
||||
// restrictions may be arbitrarily complex. For example, consider the
|
||||
// following:
|
||||
//
|
||||
// type A interface{ ~string|~[]byte }
|
||||
//
|
||||
// type B interface{ int|string }
|
||||
//
|
||||
// type C interface { ~string|~int }
|
||||
//
|
||||
// type T[P interface{ A|B; C }] int
|
||||
//
|
||||
// In this example, the structural type restriction of P is ~string|int: A|B
|
||||
// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
|
||||
// which when intersected with C (~string|~int) yields ~string|int.
|
||||
//
|
||||
// NormalTerms computes these expansions and reductions, producing a
|
||||
// "normalized" form of the embeddings. A structural restriction is normalized
|
||||
// if it is a single union containing no interface terms, and is minimal in the
|
||||
// sense that removing any term changes the set of types satisfying the
|
||||
// constraint. It is left as a proof for the reader that, modulo sorting, there
|
||||
// is exactly one such normalized form.
|
||||
//
|
||||
// Because the minimal representation always takes this form, NormalTerms
|
||||
// returns a slice of tilde terms corresponding to the terms of the union in
|
||||
// the normalized structural restriction. An error is returned if the type is
|
||||
// invalid, exceeds complexity bounds, or has an empty type set. In the latter
|
||||
// case, NormalTerms returns ErrEmptyTypeSet.
|
||||
//
|
||||
// NormalTerms makes no guarantees about the order of terms, except that it
|
||||
// is deterministic.
|
||||
func NormalTerms(T types.Type) ([]*types.Term, error) {
|
||||
// typeSetOf(T) == typeSetOf(Unalias(T))
|
||||
typ := types.Unalias(T)
|
||||
if named, ok := typ.(*types.Named); ok {
|
||||
typ = named.Underlying()
|
||||
}
|
||||
switch typ := typ.(type) {
|
||||
case *types.TypeParam:
|
||||
return StructuralTerms(typ)
|
||||
case *types.Union:
|
||||
return UnionTermSet(typ)
|
||||
case *types.Interface:
|
||||
return InterfaceTermSet(typ)
|
||||
default:
|
||||
return []*types.Term{types.NewTerm(false, T)}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Deref returns the type of the variable pointed to by t,
|
||||
// if t's core type is a pointer; otherwise it returns t.
|
||||
//
|
||||
// Do not assume that Deref(T)==T implies T is not a pointer:
|
||||
// consider "type T *T", for example.
|
||||
//
|
||||
// TODO(adonovan): ideally this would live in typesinternal, but that
|
||||
// creates an import cycle. Move there when we melt this package down.
|
||||
func Deref(t types.Type) types.Type {
|
||||
if ptr, ok := CoreType(t).(*types.Pointer); ok {
|
||||
return ptr.Elem()
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// MustDeref returns the type of the variable pointed to by t.
|
||||
// It panics if t's core type is not a pointer.
|
||||
//
|
||||
// TODO(adonovan): ideally this would live in typesinternal, but that
|
||||
// creates an import cycle. Move there when we melt this package down.
|
||||
func MustDeref(t types.Type) types.Type {
|
||||
if ptr, ok := CoreType(t).(*types.Pointer); ok {
|
||||
return ptr.Elem()
|
||||
}
|
||||
panic(fmt.Sprintf("%v is not a pointer", t))
|
||||
}
|
131 vendor/golang.org/x/tools/internal/typeparams/free.go generated vendored Normal file
|
@ -0,0 +1,131 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/internal/aliases"
|
||||
)
|
||||
|
||||
// Free is a memoization of the set of free type parameters within a
|
||||
// type. It makes a sequence of calls to [Free.Has] for overlapping
|
||||
// types more efficient. The zero value is ready for use.
|
||||
//
|
||||
// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
|
||||
type Free struct {
|
||||
seen map[types.Type]bool
|
||||
}
|
||||
|
||||
// Has reports whether the specified type has a free type parameter.
|
||||
func (w *Free) Has(typ types.Type) (res bool) {
|
||||
// detect cycles
|
||||
if x, ok := w.seen[typ]; ok {
|
||||
return x
|
||||
}
|
||||
if w.seen == nil {
|
||||
w.seen = make(map[types.Type]bool)
|
||||
}
|
||||
w.seen[typ] = false
|
||||
defer func() {
|
||||
w.seen[typ] = res
|
||||
}()
|
||||
|
||||
switch t := typ.(type) {
|
||||
case nil, *types.Basic: // TODO(gri) should nil be handled here?
|
||||
break
|
||||
|
||||
case *types.Alias:
|
||||
if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
|
||||
return true // This is an uninstantiated Alias.
|
||||
}
|
||||
// The expansion of an alias can have free type parameters,
|
||||
// whether or not the alias itself has type parameters:
|
||||
//
|
||||
// func _[K comparable]() {
|
||||
// type Set = map[K]bool // free(Set) = {K}
|
||||
// type MapTo[V] = map[K]V // free(Map[foo]) = {V}
|
||||
// }
|
||||
//
|
||||
// So, we must Unalias.
|
||||
return w.Has(types.Unalias(t))
|
||||
|
||||
case *types.Array:
|
||||
return w.Has(t.Elem())
|
||||
|
||||
case *types.Slice:
|
||||
return w.Has(t.Elem())
|
||||
|
||||
case *types.Struct:
|
||||
for i, n := 0, t.NumFields(); i < n; i++ {
|
||||
if w.Has(t.Field(i).Type()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
case *types.Pointer:
|
||||
return w.Has(t.Elem())
|
||||
|
||||
case *types.Tuple:
|
||||
n := t.Len()
|
||||
for i := 0; i < n; i++ {
|
||||
if w.Has(t.At(i).Type()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
case *types.Signature:
|
||||
// t.tparams may not be nil if we are looking at a signature
|
||||
// of a generic function type (or an interface method) that is
|
||||
// part of the type we're testing. We don't care about these type
|
||||
// parameters.
|
||||
// Similarly, the receiver of a method may declare (rather than
|
||||
// use) type parameters, we don't care about those either.
|
||||
// Thus, we only need to look at the input and result parameters.
|
||||
return w.Has(t.Params()) || w.Has(t.Results())
|
||||
|
||||
case *types.Interface:
|
||||
for i, n := 0, t.NumMethods(); i < n; i++ {
|
||||
if w.Has(t.Method(i).Type()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
terms, err := InterfaceTermSet(t)
|
||||
if err != nil {
|
||||
return false // ill typed
|
||||
}
|
||||
for _, term := range terms {
|
||||
if w.Has(term.Type()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
case *types.Map:
|
||||
return w.Has(t.Key()) || w.Has(t.Elem())
|
||||
|
||||
case *types.Chan:
|
||||
return w.Has(t.Elem())
|
||||
|
||||
case *types.Named:
|
||||
args := t.TypeArgs()
|
||||
if params := t.TypeParams(); params.Len() > args.Len() {
|
||||
return true // this is an uninstantiated named type.
|
||||
}
|
||||
for i, n := 0, args.Len(); i < n; i++ {
|
||||
if w.Has(args.At(i)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return w.Has(t.Underlying()) // recurse for types local to parameterized functions
|
||||
|
||||
case *types.TypeParam:
|
||||
return true
|
||||
|
||||
default:
|
||||
panic(t) // unreachable
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
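For illustration, a standalone sketch of the question Free.Has answers: does a type mention a free type parameter? It type-checks a tiny generic function with go/types and walks the parameter types with a simplified, non-memoized recursion that only handles the constructors used in this example (the full implementation above covers them all). Assumes Go 1.18+.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

func F[T any](xs []T, n int, m map[string]*T) {}
`

// mentionsTypeParam is a tiny stand-in for Free.Has that only handles
// the type constructors appearing in this example.
func mentionsTypeParam(t types.Type) bool {
	switch t := t.(type) {
	case *types.TypeParam:
		return true
	case *types.Slice:
		return mentionsTypeParam(t.Elem())
	case *types.Pointer:
		return mentionsTypeParam(t.Elem())
	case *types.Map:
		return mentionsTypeParam(t.Key()) || mentionsTypeParam(t.Elem())
	}
	return false
}

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	sig := pkg.Scope().Lookup("F").Type().(*types.Signature)
	for i := 0; i < sig.Params().Len(); i++ {
		t := sig.Params().At(i).Type()
		fmt.Printf("%-16s free type parameter: %v\n", t, mentionsTypeParam(t))
	}
	// []T              free type parameter: true
	// int              free type parameter: false
	// map[string]*T    free type parameter: true
}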
218 vendor/golang.org/x/tools/internal/typeparams/normalize.go generated vendored Normal file
|
@ -0,0 +1,218 @@
|
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//go:generate go run copytermlist.go
|
||||
|
||||
const debug = false
|
||||
|
||||
var ErrEmptyTypeSet = errors.New("empty type set")
|
||||
|
||||
// StructuralTerms returns a slice of terms representing the normalized
|
||||
// structural type restrictions of a type parameter, if any.
|
||||
//
|
||||
// Structural type restrictions of a type parameter are created via
|
||||
// non-interface types embedded in its constraint interface (directly, or via a
|
||||
// chain of interface embeddings). For example, in the declaration
|
||||
//
|
||||
// type T[P interface{~int; m()}] int
|
||||
//
|
||||
// the structural restriction of the type parameter P is ~int.
|
||||
//
|
||||
// With interface embedding and unions, the specification of structural type
|
||||
// restrictions may be arbitrarily complex. For example, consider the
|
||||
// following:
|
||||
//
|
||||
// type A interface{ ~string|~[]byte }
|
||||
//
|
||||
// type B interface{ int|string }
|
||||
//
|
||||
// type C interface { ~string|~int }
|
||||
//
|
||||
// type T[P interface{ A|B; C }] int
|
||||
//
|
||||
// In this example, the structural type restriction of P is ~string|int: A|B
|
||||
// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
|
||||
// which when intersected with C (~string|~int) yields ~string|int.
|
||||
//
|
||||
// StructuralTerms computes these expansions and reductions, producing a
|
||||
// "normalized" form of the embeddings. A structural restriction is normalized
|
||||
// if it is a single union containing no interface terms, and is minimal in the
|
||||
// sense that removing any term changes the set of types satisfying the
|
||||
// constraint. It is left as a proof for the reader that, modulo sorting, there
|
||||
// is exactly one such normalized form.
|
||||
//
|
||||
// Because the minimal representation always takes this form, StructuralTerms
|
||||
// returns a slice of tilde terms corresponding to the terms of the union in
|
||||
// the normalized structural restriction. An error is returned if the
|
||||
// constraint interface is invalid, exceeds complexity bounds, or has an empty
|
||||
// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
|
||||
//
|
||||
// StructuralTerms makes no guarantees about the order of terms, except that it
|
||||
// is deterministic.
|
||||
func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
|
||||
constraint := tparam.Constraint()
|
||||
if constraint == nil {
|
||||
return nil, fmt.Errorf("%s has nil constraint", tparam)
|
||||
}
|
||||
iface, _ := constraint.Underlying().(*types.Interface)
|
||||
if iface == nil {
|
||||
return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
|
||||
}
|
||||
return InterfaceTermSet(iface)
|
||||
}
|
||||
|
||||
// InterfaceTermSet computes the normalized terms for a constraint interface,
|
||||
// returning an error if the term set cannot be computed or is empty. In the
|
||||
// latter case, the error will be ErrEmptyTypeSet.
|
||||
//
|
||||
// See the documentation of StructuralTerms for more information on
|
||||
// normalization.
|
||||
func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
|
||||
return computeTermSet(iface)
|
||||
}
|
||||
|
||||
// UnionTermSet computes the normalized terms for a union, returning an error
|
||||
// if the term set cannot be computed or is empty. In the latter case, the
|
||||
// error will be ErrEmptyTypeSet.
|
||||
//
|
||||
// See the documentation of StructuralTerms for more information on
|
||||
// normalization.
|
||||
func UnionTermSet(union *types.Union) ([]*types.Term, error) {
|
||||
return computeTermSet(union)
|
||||
}
|
||||
|
||||
func computeTermSet(typ types.Type) ([]*types.Term, error) {
|
||||
tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tset.terms.isEmpty() {
|
||||
return nil, ErrEmptyTypeSet
|
||||
}
|
||||
if tset.terms.isAll() {
|
||||
return nil, nil
|
||||
}
|
||||
var terms []*types.Term
|
||||
for _, term := range tset.terms {
|
||||
terms = append(terms, types.NewTerm(term.tilde, term.typ))
|
||||
}
|
||||
return terms, nil
|
||||
}
|
||||
|
||||
// A termSet holds the normalized set of terms for a given type.
|
||||
//
|
||||
// The name termSet is intentionally distinct from 'type set': a type set is
|
||||
// all types that implement a type (and includes method restrictions), whereas
|
||||
// a term set just represents the structural restrictions on a type.
|
||||
type termSet struct {
|
||||
complete bool
|
||||
terms termlist
|
||||
}
|
||||
|
||||
func indentf(depth int, format string, args ...interface{}) {
|
||||
fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
|
||||
}
|
||||
|
||||
func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
|
||||
if t == nil {
|
||||
panic("nil type")
|
||||
}
|
||||
|
||||
if debug {
|
||||
indentf(depth, "%s", t.String())
|
||||
defer func() {
|
||||
if err != nil {
|
||||
indentf(depth, "=> %s", err)
|
||||
} else {
|
||||
indentf(depth, "=> %s", res.terms.String())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
const maxTermCount = 100
|
||||
if tset, ok := seen[t]; ok {
|
||||
if !tset.complete {
|
||||
return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
|
||||
}
|
||||
return tset, nil
|
||||
}
|
||||
|
||||
// Mark the current type as seen to avoid infinite recursion.
|
||||
tset := new(termSet)
|
||||
defer func() {
|
||||
tset.complete = true
|
||||
}()
|
||||
seen[t] = tset
|
||||
|
||||
switch u := t.Underlying().(type) {
|
||||
case *types.Interface:
|
||||
// The term set of an interface is the intersection of the term sets of its
|
||||
// embedded types.
|
||||
tset.terms = allTermlist
|
||||
for i := 0; i < u.NumEmbeddeds(); i++ {
|
||||
embedded := u.EmbeddedType(i)
|
||||
if _, ok := embedded.Underlying().(*types.TypeParam); ok {
|
||||
return nil, fmt.Errorf("invalid embedded type %T", embedded)
|
||||
}
|
||||
tset2, err := computeTermSetInternal(embedded, seen, depth+1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tset.terms = tset.terms.intersect(tset2.terms)
|
||||
}
|
||||
case *types.Union:
|
||||
// The term set of a union is the union of term sets of its terms.
|
||||
tset.terms = nil
|
||||
for i := 0; i < u.Len(); i++ {
|
||||
t := u.Term(i)
|
||||
var terms termlist
|
||||
switch t.Type().Underlying().(type) {
|
||||
case *types.Interface:
|
||||
tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
terms = tset2.terms
|
||||
case *types.TypeParam, *types.Union:
|
||||
// A stand-alone type parameter or union is not permitted as union
|
||||
// term.
|
||||
return nil, fmt.Errorf("invalid union term %T", t)
|
||||
default:
|
||||
if t.Type() == types.Typ[types.Invalid] {
|
||||
continue
|
||||
}
|
||||
terms = termlist{{t.Tilde(), t.Type()}}
|
||||
}
|
||||
tset.terms = tset.terms.union(terms)
|
||||
if len(tset.terms) > maxTermCount {
|
||||
return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
|
||||
}
|
||||
}
|
||||
case *types.TypeParam:
|
||||
panic("unreachable")
|
||||
default:
|
||||
// For all other types, the term set is just a single non-tilde term
|
||||
// holding the type itself.
|
||||
if u != types.Typ[types.Invalid] {
|
||||
tset.terms = termlist{{false, t}}
|
||||
}
|
||||
}
|
||||
return tset, nil
|
||||
}
|
||||
|
||||
// under is a facade for the go/types internal function of the same name. It is
|
||||
// used by typeterm.go.
|
||||
func under(t types.Type) types.Type {
|
||||
return t.Underlying()
|
||||
}
|
163 vendor/golang.org/x/tools/internal/typeparams/termlist.go generated vendored Normal file
|
@ -0,0 +1,163 @@
|
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Code generated by copytermlist.go DO NOT EDIT.
|
||||
|
||||
package typeparams
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// A termlist represents the type set represented by the union
|
||||
// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn.
|
||||
// A termlist is in normal form if all terms are disjoint.
|
||||
// termlist operations don't require the operands to be in
|
||||
// normal form.
|
||||
type termlist []*term
|
||||
|
||||
// allTermlist represents the set of all types.
|
||||
// It is in normal form.
|
||||
var allTermlist = termlist{new(term)}
|
||||
|
||||
// String prints the termlist exactly (without normalization).
|
||||
func (xl termlist) String() string {
|
||||
if len(xl) == 0 {
|
||||
return "∅"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for i, x := range xl {
|
||||
if i > 0 {
|
||||
buf.WriteString(" | ")
|
||||
}
|
||||
buf.WriteString(x.String())
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// isEmpty reports whether the termlist xl represents the empty set of types.
|
||||
func (xl termlist) isEmpty() bool {
|
||||
// If there's a non-nil term, the entire list is not empty.
|
||||
// If the termlist is in normal form, this requires at most
|
||||
// one iteration.
|
||||
for _, x := range xl {
|
||||
if x != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isAll reports whether the termlist xl represents the set of all types.
|
||||
func (xl termlist) isAll() bool {
|
||||
// If there's a 𝓤 term, the entire list is 𝓤.
|
||||
// If the termlist is in normal form, this requires at most
|
||||
// one iteration.
|
||||
for _, x := range xl {
|
||||
if x != nil && x.typ == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// norm returns the normal form of xl.
|
||||
func (xl termlist) norm() termlist {
|
||||
// Quadratic algorithm, but good enough for now.
|
||||
// TODO(gri) fix asymptotic performance
|
||||
used := make([]bool, len(xl))
|
||||
var rl termlist
|
||||
for i, xi := range xl {
|
||||
if xi == nil || used[i] {
|
||||
continue
|
||||
}
|
||||
for j := i + 1; j < len(xl); j++ {
|
||||
xj := xl[j]
|
||||
if xj == nil || used[j] {
|
||||
continue
|
||||
}
|
||||
if u1, u2 := xi.union(xj); u2 == nil {
|
||||
// If we encounter a 𝓤 term, the entire list is 𝓤.
|
||||
// Exit early.
|
||||
// (Note that this is not just an optimization;
|
||||
// if we continue, we may end up with a 𝓤 term
|
||||
// and other terms and the result would not be
|
||||
// in normal form.)
|
||||
if u1.typ == nil {
|
||||
return allTermlist
|
||||
}
|
||||
xi = u1
|
||||
used[j] = true // xj is now unioned into xi - ignore it in future iterations
|
||||
}
|
||||
}
|
||||
rl = append(rl, xi)
|
||||
}
|
||||
return rl
|
||||
}
|
||||
|
||||
// union returns the union xl ∪ yl.
|
||||
func (xl termlist) union(yl termlist) termlist {
|
||||
return append(xl, yl...).norm()
|
||||
}
|
||||
|
||||
// intersect returns the intersection xl ∩ yl.
|
||||
func (xl termlist) intersect(yl termlist) termlist {
|
||||
if xl.isEmpty() || yl.isEmpty() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Quadratic algorithm, but good enough for now.
|
||||
// TODO(gri) fix asymptotic performance
|
||||
var rl termlist
|
||||
for _, x := range xl {
|
||||
for _, y := range yl {
|
||||
if r := x.intersect(y); r != nil {
|
||||
rl = append(rl, r)
|
||||
}
|
||||
}
|
||||
}
|
||||
return rl.norm()
|
||||
}
|
||||
|
||||
// equal reports whether xl and yl represent the same type set.
|
||||
func (xl termlist) equal(yl termlist) bool {
|
||||
// TODO(gri) this should be more efficient
|
||||
return xl.subsetOf(yl) && yl.subsetOf(xl)
|
||||
}
|
||||
|
||||
// includes reports whether t ∈ xl.
|
||||
func (xl termlist) includes(t types.Type) bool {
|
||||
for _, x := range xl {
|
||||
if x.includes(t) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// supersetOf reports whether y ⊆ xl.
|
||||
func (xl termlist) supersetOf(y *term) bool {
|
||||
for _, x := range xl {
|
||||
if y.subsetOf(x) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// subsetOf reports whether xl ⊆ yl.
|
||||
func (xl termlist) subsetOf(yl termlist) bool {
|
||||
if yl.isEmpty() {
|
||||
return xl.isEmpty()
|
||||
}
|
||||
|
||||
// each term x of xl must be a subset of yl
|
||||
for _, x := range xl {
|
||||
if !yl.supersetOf(x) {
|
||||
return false // x is not a subset yl
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
169 vendor/golang.org/x/tools/internal/typeparams/typeterm.go generated vendored Normal file
|
@ -0,0 +1,169 @@
|
|||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Code generated by copytermlist.go DO NOT EDIT.
|
||||
|
||||
package typeparams
|
||||
|
||||
import "go/types"
|
||||
|
||||
// A term describes elementary type sets:
|
||||
//
|
||||
// ∅: (*term)(nil) == ∅ // set of no types (empty set)
|
||||
// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
|
||||
// T: &term{false, T} == {T} // set of type T
|
||||
// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
|
||||
type term struct {
|
||||
tilde bool // valid if typ != nil
|
||||
typ types.Type
|
||||
}
|
||||
|
||||
func (x *term) String() string {
|
||||
switch {
|
||||
case x == nil:
|
||||
return "∅"
|
||||
case x.typ == nil:
|
||||
return "𝓤"
|
||||
case x.tilde:
|
||||
return "~" + x.typ.String()
|
||||
default:
|
||||
return x.typ.String()
|
||||
}
|
||||
}
|
||||
|
||||
// equal reports whether x and y represent the same type set.
|
||||
func (x *term) equal(y *term) bool {
|
||||
// easy cases
|
||||
switch {
|
||||
case x == nil || y == nil:
|
||||
return x == y
|
||||
case x.typ == nil || y.typ == nil:
|
||||
return x.typ == y.typ
|
||||
}
|
||||
// ∅ ⊂ x, y ⊂ 𝓤
|
||||
|
||||
return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
|
||||
}
|
||||
|
||||
// union returns the union x ∪ y: zero, one, or two non-nil terms.
|
||||
func (x *term) union(y *term) (_, _ *term) {
|
||||
// easy cases
|
||||
switch {
|
||||
case x == nil && y == nil:
|
||||
return nil, nil // ∅ ∪ ∅ == ∅
|
||||
case x == nil:
|
||||
return y, nil // ∅ ∪ y == y
|
||||
case y == nil:
|
||||
return x, nil // x ∪ ∅ == x
|
||||
case x.typ == nil:
|
||||
return x, nil // 𝓤 ∪ y == 𝓤
|
||||
case y.typ == nil:
|
||||
return y, nil // x ∪ 𝓤 == 𝓤
|
||||
}
|
||||
// ∅ ⊂ x, y ⊂ 𝓤
|
||||
|
||||
if x.disjoint(y) {
|
||||
return x, y // x ∪ y == (x, y) if x ∩ y == ∅
|
||||
}
|
||||
// x.typ == y.typ
|
||||
|
||||
// ~t ∪ ~t == ~t
|
||||
// ~t ∪ T == ~t
|
||||
// T ∪ ~t == ~t
|
||||
// T ∪ T == T
|
||||
if x.tilde || !y.tilde {
|
||||
return x, nil
|
||||
}
|
||||
return y, nil
|
||||
}
|
||||
|
||||
// intersect returns the intersection x ∩ y.
|
||||
func (x *term) intersect(y *term) *term {
|
||||
// easy cases
|
||||
switch {
|
||||
case x == nil || y == nil:
|
||||
return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅
|
||||
case x.typ == nil:
|
||||
return y // 𝓤 ∩ y == y
|
||||
case y.typ == nil:
|
||||
return x // x ∩ 𝓤 == x
|
||||
}
|
||||
// ∅ ⊂ x, y ⊂ 𝓤
|
||||
|
||||
if x.disjoint(y) {
|
||||
return nil // x ∩ y == ∅ if x ∩ y == ∅
|
||||
}
|
||||
// x.typ == y.typ
|
||||
|
||||
// ~t ∩ ~t == ~t
|
||||
// ~t ∩ T == T
|
||||
// T ∩ ~t == T
|
||||
// T ∩ T == T
|
||||
if !x.tilde || y.tilde {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
// includes reports whether t ∈ x.
|
||||
func (x *term) includes(t types.Type) bool {
|
||||
// easy cases
|
||||
switch {
|
||||
case x == nil:
|
||||
return false // t ∈ ∅ == false
|
||||
case x.typ == nil:
|
||||
return true // t ∈ 𝓤 == true
|
||||
}
|
||||
// ∅ ⊂ x ⊂ 𝓤
|
||||
|
||||
u := t
|
||||
if x.tilde {
|
||||
u = under(u)
|
||||
}
|
||||
return types.Identical(x.typ, u)
|
||||
}
|
||||
|
||||
// subsetOf reports whether x ⊆ y.
|
||||
func (x *term) subsetOf(y *term) bool {
|
||||
// easy cases
|
||||
switch {
|
||||
case x == nil:
|
||||
return true // ∅ ⊆ y == true
|
||||
case y == nil:
|
||||
return false // x ⊆ ∅ == false since x != ∅
|
||||
case y.typ == nil:
|
||||
return true // x ⊆ 𝓤 == true
|
||||
case x.typ == nil:
|
||||
return false // 𝓤 ⊆ y == false since y != 𝓤
|
||||
}
|
||||
// ∅ ⊂ x, y ⊂ 𝓤
|
||||
|
||||
if x.disjoint(y) {
|
||||
return false // x ⊆ y == false if x ∩ y == ∅
|
||||
}
|
||||
// x.typ == y.typ
|
||||
|
||||
// ~t ⊆ ~t == true
|
||||
// ~t ⊆ T == false
|
||||
// T ⊆ ~t == true
|
||||
// T ⊆ T == true
|
||||
return !x.tilde || y.tilde
|
||||
}
|
||||
|
||||
// disjoint reports whether x ∩ y == ∅.
|
||||
// x.typ and y.typ must not be nil.
|
||||
func (x *term) disjoint(y *term) bool {
|
||||
if debug && (x.typ == nil || y.typ == nil) {
|
||||
panic("invalid argument(s)")
|
||||
}
|
||||
ux := x.typ
|
||||
if y.tilde {
|
||||
ux = under(ux)
|
||||
}
|
||||
uy := y.typ
|
||||
if x.tilde {
|
||||
uy = under(uy)
|
||||
}
|
||||
return !types.Identical(ux, uy)
|
||||
}
|
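For illustration, a small standalone sketch of the intersection rules documented above (∅ for disjoint terms, otherwise the narrower of ~t and T), applied to basic types. The term type and intersect function below mirror the diff but are independent of the vendored package; the universe and empty-set cases are omitted.

package main

import (
	"fmt"
	"go/types"
)

// term mirrors the term type above: a tilde flag plus a type.
type term struct {
	tilde bool
	typ   types.Type
}

func (x term) String() string {
	if x.tilde {
		return "~" + x.typ.String()
	}
	return x.typ.String()
}

// intersect applies the non-trivial rules: disjoint terms intersect to ∅
// (nil); otherwise ~t ∩ T == T, ~t ∩ ~t == ~t, and T ∩ T == T.
func intersect(x, y term) *term {
	ux, uy := x.typ, y.typ
	if y.tilde {
		ux = ux.Underlying()
	}
	if x.tilde {
		uy = uy.Underlying()
	}
	if !types.Identical(ux, uy) {
		return nil // disjoint: x ∩ y == ∅
	}
	if !x.tilde || y.tilde {
		return &x
	}
	return &y
}

func main() {
	intT := term{typ: types.Typ[types.Int]}
	tildeInt := term{tilde: true, typ: types.Typ[types.Int]}
	strT := term{typ: types.Typ[types.String]}

	show := func(x, y term) {
		if r := intersect(x, y); r != nil {
			fmt.Printf("%s ∩ %s = %s\n", x, y, *r)
		} else {
			fmt.Printf("%s ∩ %s = ∅\n", x, y)
		}
	}
	show(tildeInt, intT) // ~int ∩ int = int
	show(intT, strT)     // int ∩ string = ∅
}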
133 vendor/golang.org/x/tools/internal/typesinternal/element.go generated vendored Normal file
|
@ -0,0 +1,133 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typesinternal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
// ForEachElement calls f for type T and each type reachable from its
|
||||
// type through reflection. It does this by recursively stripping off
|
||||
// type constructors; in addition, for each named type N, the type *N
|
||||
// is added to the result as it may have additional methods.
|
||||
//
|
||||
// The caller must provide an initially empty set used to de-duplicate
|
||||
// identical types, potentially across multiple calls to ForEachElement.
|
||||
// (Its final value holds all the elements seen, matching the arguments
|
||||
// passed to f.)
|
||||
//
|
||||
// TODO(adonovan): share/harmonize with go/callgraph/rta.
|
||||
func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
|
||||
var visit func(T types.Type, skip bool)
|
||||
visit = func(T types.Type, skip bool) {
|
||||
if !skip {
|
||||
if seen, _ := rtypes.Set(T, true).(bool); seen {
|
||||
return // de-dup
|
||||
}
|
||||
|
||||
f(T) // notify caller of new element type
|
||||
}
|
||||
|
||||
// Recursion over signatures of each method.
|
||||
tmset := msets.MethodSet(T)
|
||||
for i := 0; i < tmset.Len(); i++ {
|
||||
sig := tmset.At(i).Type().(*types.Signature)
|
||||
// It is tempting to call visit(sig, false)
|
||||
// but, as noted in golang.org/cl/65450043,
|
||||
// the Signature.Recv field is ignored by
|
||||
// types.Identical and typeutil.Map, which
|
||||
// is confusing at best.
|
||||
//
|
||||
// More importantly, the true signature rtype
|
||||
// reachable from a method using reflection
|
||||
// has no receiver but an extra ordinary parameter.
|
||||
// For the Read method of io.Reader we want:
|
||||
// func(Reader, []byte) (int, error)
|
||||
// but here sig is:
|
||||
// func([]byte) (int, error)
|
||||
// with .Recv = Reader (though it is hard to
|
||||
// notice because it doesn't affect Signature.String
|
||||
// or types.Identical).
|
||||
//
|
||||
// TODO(adonovan): construct and visit the correct
|
||||
// non-method signature with an extra parameter
|
||||
// (though since unnamed func types have no methods
|
||||
// there is essentially no actual demand for this).
|
||||
//
|
||||
// TODO(adonovan): document whether or not it is
|
||||
// safe to skip non-exported methods (as RTA does).
|
||||
visit(sig.Params(), true) // skip the Tuple
|
||||
visit(sig.Results(), true) // skip the Tuple
|
||||
}
|
||||
|
||||
switch T := T.(type) {
|
||||
case *types.Alias:
|
||||
visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
|
||||
|
||||
case *types.Basic:
|
||||
// nop
|
||||
|
||||
case *types.Interface:
|
||||
// nop---handled by recursion over method set.
|
||||
|
||||
case *types.Pointer:
|
||||
visit(T.Elem(), false)
|
||||
|
||||
case *types.Slice:
|
||||
visit(T.Elem(), false)
|
||||
|
||||
case *types.Chan:
|
||||
visit(T.Elem(), false)
|
||||
|
||||
case *types.Map:
|
||||
visit(T.Key(), false)
|
||||
visit(T.Elem(), false)
|
||||
|
||||
case *types.Signature:
|
||||
if T.Recv() != nil {
|
||||
panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
|
||||
}
|
||||
visit(T.Params(), true) // skip the Tuple
|
||||
visit(T.Results(), true) // skip the Tuple
|
||||
|
||||
case *types.Named:
|
||||
// A pointer-to-named type can be derived from a named
|
||||
// type via reflection. It may have methods too.
|
||||
visit(types.NewPointer(T), false)
|
||||
|
||||
// Consider 'type T struct{S}' where S has methods.
|
||||
// Reflection provides no way to get from T to struct{S},
|
||||
// only to S, so the method set of struct{S} is unwanted,
|
||||
// so set 'skip' flag during recursion.
|
||||
visit(T.Underlying(), true) // skip the unnamed type
|
||||
|
||||
case *types.Array:
|
||||
visit(T.Elem(), false)
|
||||
|
||||
case *types.Struct:
|
||||
for i, n := 0, T.NumFields(); i < n; i++ {
|
||||
// TODO(adonovan): document whether or not
|
||||
// it is safe to skip non-exported fields.
|
||||
visit(T.Field(i).Type(), false)
|
||||
}
|
||||
|
||||
case *types.Tuple:
|
||||
for i, n := 0, T.Len(); i < n; i++ {
|
||||
visit(T.At(i).Type(), false)
|
||||
}
|
||||
|
||||
case *types.TypeParam, *types.Union:
|
||||
// forEachReachable must not be called on parameterized types.
|
||||
panic(T)
|
||||
|
||||
default:
|
||||
panic(T)
|
||||
}
|
||||
}
|
||||
visit(T, false)
|
||||
}
|
10 vendor/golang.org/x/tools/internal/typesinternal/errorcode.go generated vendored
|
@ -838,7 +838,7 @@ const (
|
|||
// InvalidCap occurs when an argument to the cap built-in function is not of
|
||||
// supported type.
|
||||
//
|
||||
// See https://golang.org/ref/spec#Lengthand_capacity for information on
|
||||
// See https://golang.org/ref/spec#Length_and_capacity for information on
|
||||
// which underlying types are supported as arguments to cap and len.
|
||||
//
|
||||
// Example:
|
||||
|
@ -859,7 +859,7 @@ const (
|
|||
// InvalidCopy occurs when the arguments are not of slice type or do not
|
||||
// have compatible type.
|
||||
//
|
||||
// See https://golang.org/ref/spec#Appendingand_copying_slices for more
|
||||
// See https://golang.org/ref/spec#Appending_and_copying_slices for more
|
||||
// information on the type requirements for the copy built-in.
|
||||
//
|
||||
// Example:
|
||||
|
@ -897,7 +897,7 @@ const (
|
|||
// InvalidLen occurs when an argument to the len built-in function is not of
|
||||
// supported type.
|
||||
//
|
||||
// See https://golang.org/ref/spec#Lengthand_capacity for information on
|
||||
// See https://golang.org/ref/spec#Length_and_capacity for information on
|
||||
// which underlying types are supported as arguments to cap and len.
|
||||
//
|
||||
// Example:
|
||||
|
@ -914,7 +914,7 @@ const (
|
|||
|
||||
// InvalidMake occurs when make is called with an unsupported type argument.
|
||||
//
|
||||
// See https://golang.org/ref/spec#Makingslices_maps_and_channels for
|
||||
// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
|
||||
// information on the types that may be created using make.
|
||||
//
|
||||
// Example:
|
||||
|
@ -966,7 +966,7 @@ const (
|
|||
// var _ = string(x)
|
||||
InvalidConversion
|
||||
|
||||
// InvalidUntypedConversion occurs when an there is no valid implicit
|
||||
// InvalidUntypedConversion occurs when there is no valid implicit
|
||||
// conversion from an untyped value satisfying the type constraints of the
|
||||
// context in which it is used.
|
||||
//
|
||||
|
|
46 vendor/golang.org/x/tools/internal/typesinternal/qualifier.go generated vendored Normal file
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typesinternal
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/types"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// FileQualifier returns a [types.Qualifier] function that qualifies
|
||||
// imported symbols appropriately based on the import environment of a given
|
||||
// file.
|
||||
// If the same package is imported multiple times, the last appearance is
|
||||
// recorded.
|
||||
func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
|
||||
// Construct mapping of import paths to their defined names.
|
||||
// It is only necessary to look at renaming imports.
|
||||
imports := make(map[string]string)
|
||||
for _, imp := range f.Imports {
|
||||
if imp.Name != nil && imp.Name.Name != "_" {
|
||||
path, _ := strconv.Unquote(imp.Path.Value)
|
||||
imports[path] = imp.Name.Name
|
||||
}
|
||||
}
|
||||
|
||||
// Define qualifier to replace full package paths with names of the imports.
|
||||
return func(p *types.Package) string {
|
||||
if p == nil || p == pkg {
|
||||
return ""
|
||||
}
|
||||
|
||||
if name, ok := imports[p.Path()]; ok {
|
||||
if name == "." {
|
||||
return ""
|
||||
} else {
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
// If there is no local renaming, fall back to the package name.
|
||||
return p.Name()
|
||||
}
|
||||
}
|
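For illustration, a standalone sketch of what a file-based qualifier does: it maps import paths to the names the file imports them under, drops the qualifier for dot imports, and falls back to the package name otherwise. The source text and package paths below are made up for the example; the qualifier closure mirrors FileQualifier but does not import the vendored package.

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"go/types"
	"strconv"
)

const src = `package p

import (
	foo "example.com/bar"
	. "example.com/dot"
)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}

	// Build path -> local name from the file's (renaming) imports.
	imports := make(map[string]string)
	for _, imp := range f.Imports {
		if imp.Name != nil && imp.Name.Name != "_" {
			path, _ := strconv.Unquote(imp.Path.Value)
			imports[path] = imp.Name.Name
		}
	}

	qual := func(p *types.Package) string {
		if name, ok := imports[p.Path()]; ok {
			if name == "." {
				return "" // dot import: no qualifier
			}
			return name
		}
		return p.Name() // no local renaming: fall back to the package name
	}

	bar := types.NewPackage("example.com/bar", "bar")
	dot := types.NewPackage("example.com/dot", "dot")
	other := types.NewPackage("example.com/other", "other")
	fmt.Println(qual(bar), qual(dot) == "", qual(other)) // foo true other
}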
11 vendor/golang.org/x/tools/internal/typesinternal/recv.go generated vendored
|
@ -6,20 +6,21 @@ package typesinternal
|
|||
|
||||
import (
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/internal/aliases"
|
||||
)
|
||||
|
||||
// ReceiverNamed returns the named type (if any) associated with the
|
||||
// type of recv, which may be of the form N or *N, or aliases thereof.
|
||||
// It also reports whether a Pointer was present.
|
||||
//
|
||||
// The named result may be nil if recv is from a method on an
|
||||
// anonymous interface or struct types or in ill-typed code.
|
||||
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
|
||||
t := recv.Type()
|
||||
if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
|
||||
if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
|
||||
isPtr = true
|
||||
t = ptr.Elem()
|
||||
}
|
||||
named, _ = aliases.Unalias(t).(*types.Named)
|
||||
named, _ = types.Unalias(t).(*types.Named)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -36,7 +37,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
|
|||
// indirection from the type, regardless of named types (analogous to
|
||||
// a LOAD instruction).
|
||||
func Unpointer(t types.Type) types.Type {
|
||||
if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
|
||||
if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
|
||||
return ptr.Elem()
|
||||
}
|
||||
return t
|
||||
|
|
Some files were not shown because too many files have changed in this diff.