From 6df2cb97ad25ecf52636037913bc71b782e127b0 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 00:47:14 +0100 Subject: [PATCH 01/48] Add builds for windows-arm --- .ci/ci-build.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.ci/ci-build.sh b/.ci/ci-build.sh index 3fac07a6..709f119a 100755 --- a/.ci/ci-build.sh +++ b/.ci/ci-build.sh @@ -22,6 +22,15 @@ for i in win64/LICENSE win64/*.toml win64/*.txt; do ex -bsc '%!awk "{sub(/$/,\"\ ln ../windows/* win64/ zip -9 -r dnscrypt-proxy-win64-${PACKAGE_VERSION:-dev}.zip win64 +go clean +env GOOS=windows GOARCH=arm64 go build -mod vendor -ldflags="-s -w" +mkdir winarm +ln dnscrypt-proxy.exe winarm/ +cp ../LICENSE example-dnscrypt-proxy.toml localhost.pem example-*.txt winarm/ +for i in winarm/LICENSE winarm/*.toml winarm/*.txt; do ex -bsc '%!awk "{sub(/$/,\"\r\")}1"' -cx "$i"; done +ln ../windows/* winarm/ +zip -9 -r dnscrypt-proxy-winarm-${PACKAGE_VERSION:-dev}.zip winarm + go clean env GO386=softfloat GOOS=openbsd GOARCH=386 go build -mod vendor -ldflags="-s -w" mkdir openbsd-i386 From eccca90bbcdb7c7857e649bf16dbf2ab7c2125aa Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 00:48:46 +0100 Subject: [PATCH 02/48] Typo --- ChangeLog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index c876c2f6..354eabac 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,5 @@ # Version 2.1.6 - - Forwarding: in the list of servers for a zone, the `$BOOTSTRAP` keyword can be included as a shortcut to forward to the bootstrap servers. And the `$DHCP` keyword can be included to forward to the DNS resolvers provided by the local DHCP server. Based on work by YX Hao, thanks! DHCP forwarding should be considered experimental and my not work on all operating systems. A rule for a zone can mix and match multiple forwarder types, such as `10.0.0.1,10.0.0.254,$DHCP,192.168.1.1,$BOOTSTRAP`. Note that this is not implemented for captive portals yet. + - Forwarding: in the list of servers for a zone, the `$BOOTSTRAP` keyword can be included as a shortcut to forward to the bootstrap servers. And the `$DHCP` keyword can be included to forward to the DNS resolvers provided by the local DHCP server. Based on work by YX Hao, thanks! DHCP forwarding should be considered experimental and may not work on all operating systems. A rule for a zone can mix and match multiple forwarder types, such as `10.0.0.1,10.0.0.254,$DHCP,192.168.1.1,$BOOTSTRAP`. Note that this is not implemented for captive portals yet. - Lying resolvers are now skipped, instead of just printing an error. This doesn't apply to captive portal and forwarding entries, which are the only reasonable use case for lying resolvers. - Support for XSalsa20 in DNSCrypt has been removed. This was not documented, and was supserseded by XChaCha20 in 2016. - Source files are now fetched with compression. From 14af44d78a23612d5cbb49e01d7e6b9c7c3f15bf Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 01:03:32 +0100 Subject: [PATCH 03/48] Add Windows ARM --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f54b4ca2..c0986ab1 100644 --- a/README.md +++ b/README.md @@ -68,6 +68,7 @@ Up-to-date, pre-built binaries are available for: * OpenBSD/x86_64 * Windows * Windows 64 bit +* Windows ARM How to use these files, as well as how to verify their signatures, are documented in the [installation instructions](https://github.com/dnscrypt/dnscrypt-proxy/wiki/installation). 
From 062dc53971c198b47eb5beab172edad1f3013def Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 02:13:31 +0100 Subject: [PATCH 04/48] Revert "Remove support for xsalsapoly" Apparently, a bunch of popular resolvers such as adguard, cleanbrowsing and comodo still only support xsalsapoly o_O Add a lying resolver check for old DNSCrypt servers. --- dnscrypt-proxy/common.go | 1 + dnscrypt-proxy/config.go | 1 + dnscrypt-proxy/crypto.go | 28 +- dnscrypt-proxy/dnscrypt_certs.go | 8 +- dnscrypt-proxy/example-dnscrypt-proxy.toml | 6 +- dnscrypt-proxy/serversInfo.go | 34 + vendor/golang.org/x/crypto/nacl/box/box.go | 182 ++++ .../x/crypto/nacl/secretbox/secretbox.go | 173 ++++ .../x/crypto/salsa20/salsa/hsalsa20.go | 146 +++ .../x/crypto/salsa20/salsa/salsa208.go | 201 ++++ .../x/crypto/salsa20/salsa/salsa20_amd64.go | 23 + .../x/crypto/salsa20/salsa/salsa20_amd64.s | 880 ++++++++++++++++++ .../x/crypto/salsa20/salsa/salsa20_noasm.go | 14 + .../x/crypto/salsa20/salsa/salsa20_ref.go | 233 +++++ vendor/modules.txt | 3 + 15 files changed, 1924 insertions(+), 9 deletions(-) create mode 100644 vendor/golang.org/x/crypto/nacl/box/box.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go diff --git a/dnscrypt-proxy/common.go b/dnscrypt-proxy/common.go index 8496b395..647ccfac 100644 --- a/dnscrypt-proxy/common.go +++ b/dnscrypt-proxy/common.go @@ -18,6 +18,7 @@ type CryptoConstruction uint16 const ( UndefinedConstruction CryptoConstruction = iota + XSalsa20Poly1305 XChacha20Poly1305 ) diff --git a/dnscrypt-proxy/config.go b/dnscrypt-proxy/config.go index 98314591..a7ceb5c5 100644 --- a/dnscrypt-proxy/config.go +++ b/dnscrypt-proxy/config.go @@ -154,6 +154,7 @@ func newConfig() Config { BlockedQueryResponse: "hinfo", BrokenImplementations: BrokenImplementationsConfig{ FragmentsBlocked: []string{ + "cisco", "cisco-ipv6", "cisco-familyshield", "cisco-familyshield-ipv6", "cleanbrowsing-adult", "cleanbrowsing-adult-ipv6", "cleanbrowsing-family", "cleanbrowsing-family-ipv6", "cleanbrowsing-security", "cleanbrowsing-security-ipv6", }, }, diff --git a/dnscrypt-proxy/crypto.go b/dnscrypt-proxy/crypto.go index fc9aaa71..ed08f3cc 100644 --- a/dnscrypt-proxy/crypto.go +++ b/dnscrypt-proxy/crypto.go @@ -9,6 +9,8 @@ import ( "github.com/jedisct1/dlog" "github.com/jedisct1/xsecretbox" "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/nacl/box" + "golang.org/x/crypto/nacl/secretbox" ) const ( @@ -55,9 +57,19 @@ func ComputeSharedKey( dlog.Criticalf("[%v] Weak XChaCha20 public key", providerName) } } else { - dlog.Criticalf("[%v] Unsupported encryption system", providerName) + box.Precompute(&sharedKey, serverPk, secretKey) + c := byte(0) + for i := 0; i < 32; i++ { + c |= sharedKey[i] + } + if c == 0 { + dlog.Criticalf("[%v] Weak XSalsa20 public key", providerName) + if _, err := crypto_rand.Read(sharedKey[:]); err != nil { + dlog.Fatal(err) + } + } } - return sharedKey + return } func (proxy *Proxy) Encrypt( @@ -112,7 +124,9 @@ func (proxy *Proxy) Encrypt( if serverInfo.CryptoConstruction == XChacha20Poly1305 { encrypted = 
xsecretbox.Seal(encrypted, nonce, padded, sharedKey[:]) } else { - err = errors.New("Unsupported encryption system") + var xsalsaNonce [24]byte + copy(xsalsaNonce[:], nonce) + encrypted = secretbox.Seal(encrypted, padded, &xsalsaNonce, sharedKey) } return } @@ -139,7 +153,13 @@ func (proxy *Proxy) Decrypt( if serverInfo.CryptoConstruction == XChacha20Poly1305 { packet, err = xsecretbox.Open(nil, serverNonce, encrypted[responseHeaderLen:], sharedKey[:]) } else { - err = errors.New("Unsupported encryption system") + var xsalsaServerNonce [24]byte + copy(xsalsaServerNonce[:], serverNonce) + var ok bool + packet, ok = secretbox.Open(nil, encrypted[responseHeaderLen:], &xsalsaServerNonce, sharedKey) + if !ok { + err = errors.New("Incorrect tag") + } } if err != nil { return encrypted, err diff --git a/dnscrypt-proxy/dnscrypt_certs.go b/dnscrypt-proxy/dnscrypt_certs.go index c6e07a3a..850b2f4b 100644 --- a/dnscrypt-proxy/dnscrypt_certs.go +++ b/dnscrypt-proxy/dnscrypt_certs.go @@ -95,12 +95,12 @@ func FetchCurrentDNSCryptCert( cryptoConstruction := CryptoConstruction(0) switch esVersion := binary.BigEndian.Uint16(binCert[4:6]); esVersion { case 0x0001: - dlog.Noticef("[%v] Deprecated, now unsupported encryption system", *serverName) - continue + cryptoConstruction = XSalsa20Poly1305 + dlog.Noticef("[%v] should upgrade to XChaCha20 for encryption", *serverName) case 0x0002: cryptoConstruction = XChacha20Poly1305 default: - dlog.Noticef("[%v] Unsupported encryption system", *serverName) + dlog.Debugf("[%v] uses an unsupported encryption system", *serverName) continue } signature := binCert[8:72] @@ -164,7 +164,7 @@ func FetchCurrentDNSCryptCert( dlog.Debugf("[%v] Upgrading the construction from %v to %v", *serverName, certInfo.CryptoConstruction, cryptoConstruction) } } - if cryptoConstruction != XChacha20Poly1305 { + if cryptoConstruction != XChacha20Poly1305 && cryptoConstruction != XSalsa20Poly1305 { dlog.Noticef("[%v] Cryptographic construction %v not supported", *serverName, cryptoConstruction) continue } diff --git a/dnscrypt-proxy/example-dnscrypt-proxy.toml b/dnscrypt-proxy/example-dnscrypt-proxy.toml index 96b3bec4..6ecd9152 100644 --- a/dnscrypt-proxy/example-dnscrypt-proxy.toml +++ b/dnscrypt-proxy/example-dnscrypt-proxy.toml @@ -774,6 +774,10 @@ format = 'tsv' [broken_implementations] +## Cisco servers currently cannot handle queries larger than 1472 bytes, and don't +## truncate responses larger than questions as expected by the DNSCrypt protocol. +## This prevents large responses from being received over UDP and over relays. +## ## Older versions of the `dnsdist` server software had a bug with queries larger ## than 1500 bytes. This is fixed since `dnsdist` version 1.5.0, but ## some server may still run an outdated version. @@ -781,7 +785,7 @@ format = 'tsv' ## The list below enables workarounds to make non-relayed usage more reliable ## until the servers are fixed. 
-fragments_blocked = ['cleanbrowsing-adult', 'cleanbrowsing-adult-ipv6', 'cleanbrowsing-family', 'cleanbrowsing-family-ipv6', 'cleanbrowsing-security', 'cleanbrowsing-security-ipv6'] +fragments_blocked = ['cisco', 'cisco-ipv6', 'cisco-familyshield', 'cisco-familyshield-ipv6', 'cisco-sandbox', 'cleanbrowsing-adult', 'cleanbrowsing-adult-ipv6', 'cleanbrowsing-family', 'cleanbrowsing-family-ipv6', 'cleanbrowsing-security', 'cleanbrowsing-security-ipv6'] diff --git a/dnscrypt-proxy/serversInfo.go b/dnscrypt-proxy/serversInfo.go index 52fa8015..3b63eb0b 100644 --- a/dnscrypt-proxy/serversInfo.go +++ b/dnscrypt-proxy/serversInfo.go @@ -608,6 +608,27 @@ func fetchDNSCryptServerInfo(proxy *Proxy, name string, stamp stamps.ServerStamp if err != nil { return ServerInfo{}, err } + + if certInfo.CryptoConstruction == XSalsa20Poly1305 { + query := plainNXTestPacket(0xcafe) + msg, _, _, err := DNSExchange( + proxy, + proxy.mainProto, + &query, + stamp.ServerAddrStr, + dnscryptRelay, + &name, + false, + ) + if err == nil { + if msg.Rcode != dns.RcodeNameError && msg.Id == 0xcafe { + dlog.Warnf("[%s] may be a lying resolver -- skipping", name) + return ServerInfo{}, fmt.Errorf("[%s] unexpected catchall response", name) + } + dlog.Debugf("[%s] seems to be also accessible over plain DNS", name) + } + } + return ServerInfo{ Proto: stamps.StampProtoTypeDNSCrypt, MagicQuery: certInfo.MagicQuery, @@ -665,6 +686,19 @@ func dohNXTestPacket(msgID uint16) []byte { return body } +func plainNXTestPacket(msgID uint16) dns.Msg { + msg := dns.Msg{} + qName := make([]byte, 16) + charset := "abcdefghijklmnopqrstuvwxyz" + for i := range qName { + qName[i] = charset[rand.Intn(len(charset))] + } + msg.SetQuestion(string(qName)+".test.dnscrypt.", dns.TypeNS) + msg.Id = msgID + msg.MsgHdr.RecursionDesired = true + return msg +} + func fetchDoHServerInfo(proxy *Proxy, name string, stamp stamps.ServerStamp, isNew bool) (ServerInfo, error) { // If an IP has been provided, use it forever. // Or else, if the fallback server and the DoH server are operated diff --git a/vendor/golang.org/x/crypto/nacl/box/box.go b/vendor/golang.org/x/crypto/nacl/box/box.go new file mode 100644 index 00000000..357bdc77 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/box/box.go @@ -0,0 +1,182 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package box authenticates and encrypts small messages using public-key cryptography. + +Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate +messages. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. 
+(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html. +Anonymous sealing/opening is an extension of NaCl defined by and interoperable +with libsodium: +https://libsodium.gitbook.io/doc/public-key_cryptography/sealed_boxes. +*/ +package box + +import ( + cryptorand "crypto/rand" + "io" + + "golang.org/x/crypto/blake2b" + "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/nacl/secretbox" + "golang.org/x/crypto/salsa20/salsa" +) + +const ( + // Overhead is the number of bytes of overhead when boxing a message. + Overhead = secretbox.Overhead + + // AnonymousOverhead is the number of bytes of overhead when using anonymous + // sealed boxes. + AnonymousOverhead = Overhead + 32 +) + +// GenerateKey generates a new public/private key pair suitable for use with +// Seal and Open. +func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) { + publicKey = new([32]byte) + privateKey = new([32]byte) + _, err = io.ReadFull(rand, privateKey[:]) + if err != nil { + publicKey = nil + privateKey = nil + return + } + + curve25519.ScalarBaseMult(publicKey, privateKey) + return +} + +var zeros [16]byte + +// Precompute calculates the shared key between peersPublicKey and privateKey +// and writes it to sharedKey. The shared key can be used with +// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing +// when using the same pair of keys repeatedly. +func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) { + curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey) + salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma) +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// will be Overhead bytes longer than the original and must not overlap it. The +// nonce must be unique for each distinct message for a given pair of keys. +func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte { + var sharedKey [32]byte + Precompute(&sharedKey, peersPublicKey, privateKey) + return secretbox.Seal(out, message, nonce, &sharedKey) +} + +// SealAfterPrecomputation performs the same actions as Seal, but takes a +// shared key as generated by Precompute. +func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte { + return secretbox.Seal(out, message, nonce, sharedKey) +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) { + var sharedKey [32]byte + Precompute(&sharedKey, peersPublicKey, privateKey) + return secretbox.Open(out, box, nonce, &sharedKey) +} + +// OpenAfterPrecomputation performs the same actions as Open, but takes a +// shared key as generated by Precompute. +func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) { + return secretbox.Open(out, box, nonce, sharedKey) +} + +// SealAnonymous appends an encrypted and authenticated copy of message to out, +// which will be AnonymousOverhead bytes longer than the original and must not +// overlap it. This differs from Seal in that the sender is not required to +// provide a private key. 
+func SealAnonymous(out, message []byte, recipient *[32]byte, rand io.Reader) ([]byte, error) { + if rand == nil { + rand = cryptorand.Reader + } + ephemeralPub, ephemeralPriv, err := GenerateKey(rand) + if err != nil { + return nil, err + } + + var nonce [24]byte + if err := sealNonce(ephemeralPub, recipient, &nonce); err != nil { + return nil, err + } + + if total := len(out) + AnonymousOverhead + len(message); cap(out) < total { + original := out + out = make([]byte, 0, total) + out = append(out, original...) + } + out = append(out, ephemeralPub[:]...) + + return Seal(out, message, &nonce, recipient, ephemeralPriv), nil +} + +// OpenAnonymous authenticates and decrypts a box produced by SealAnonymous and +// appends the message to out, which must not overlap box. The output will be +// AnonymousOverhead bytes smaller than box. +func OpenAnonymous(out, box []byte, publicKey, privateKey *[32]byte) (message []byte, ok bool) { + if len(box) < AnonymousOverhead { + return nil, false + } + + var ephemeralPub [32]byte + copy(ephemeralPub[:], box[:32]) + + var nonce [24]byte + if err := sealNonce(&ephemeralPub, publicKey, &nonce); err != nil { + return nil, false + } + + return Open(out, box[32:], &nonce, &ephemeralPub, privateKey) +} + +// sealNonce generates a 24 byte nonce that is a blake2b digest of the +// ephemeral public key and the receiver's public key. +func sealNonce(ephemeralPub, peersPublicKey *[32]byte, nonce *[24]byte) error { + h, err := blake2b.New(24, nil) + if err != nil { + return err + } + + if _, err = h.Write(ephemeralPub[:]); err != nil { + return err + } + + if _, err = h.Write(peersPublicKey[:]); err != nil { + return err + } + + h.Sum(nonce[:0]) + + return nil +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go new file mode 100644 index 00000000..1fe600ad --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -0,0 +1,173 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package secretbox encrypts and authenticates small messages. + +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with +secret-key cryptography. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. 
+*/ +package secretbox + +import ( + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = poly1305.TagSize + +// setup produces a sub-key and Salsa20 counter given a nonce and key. +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { + // We use XSalsa20 for encryption so first we need to generate a + // key and nonce with HSalsa20. + var hNonce [16]byte + copy(hNonce[:], nonce[:]) + salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) + + // The final 8 bytes of the original nonce form the new nonce. + copy(counter[:], nonce[16:]) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// must not overlap message. The key and nonce pair must be unique for each +// distinct message and the output will be Overhead bytes longer than message. +func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + + ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) + if alias.AnyOverlap(out, message) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of message with the keystream generated from + // the first block. + firstMessageBlock := message + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + + tagOut := out + out = out[poly1305.TagSize:] + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + message = message[len(firstMessageBlock):] + ciphertext := out + out = out[len(firstMessageBlock):] + + // Now encrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, message, &counter, &subKey) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, ciphertext, &poly1305Key) + copy(tagOut, tag[:]) + + return ret +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { + if len(box) < Overhead { + return nil, false + } + + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. 
+ var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + var tag [poly1305.TagSize]byte + copy(tag[:], box) + + if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { + return nil, false + } + + ret, out := sliceForAppend(out, len(box)-Overhead) + if alias.AnyOverlap(out, box) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of box with the keystream generated from + // the first block. + box = box[Overhead:] + firstMessageBlock := box + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + + box = box[len(firstMessageBlock):] + out = out[len(firstMessageBlock):] + + // Now decrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, box, &counter, &subKey) + + return ret, true +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 00000000..3685b344 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,146 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +package salsa + +import "math/bits" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. 
+func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + 
out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 00000000..7ec7bb39 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,201 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "math/bits" + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. +func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= 
bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 00000000..e76b44fe --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +package salsa + +//go:noescape + +// salsa2020XORKeyStream is implemented in salsa20_amd64.s. +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). 
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s new file mode 100644 index 00000000..3883e0ec --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -0,0 +1,880 @@ +// Code generated by command: go run salsa20_amd64_asm.go -out ../salsa20_amd64.s -pkg salsa. DO NOT EDIT. + +//go:build amd64 && !purego && gc + +// func salsa2020XORKeyStream(out *byte, in *byte, n uint64, nonce *byte, key *byte) +// Requires: SSE2 +TEXT ·salsa2020XORKeyStream(SB), $456-40 + // This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + MOVQ n+16(FP), DX + MOVQ nonce+24(FP), CX + MOVQ key+32(FP), R8 + MOVQ SP, R12 + ADDQ $0x1f, R12 + ANDQ $-32, R12 + MOVQ DX, R9 + MOVQ CX, DX + MOVQ R8, R10 + CMPQ R9, $0x00 + JBE DONE + MOVL 20(R10), CX + MOVL (R10), R8 + MOVL (DX), AX + MOVL 16(R10), R11 + MOVL CX, (R12) + MOVL R8, 4(R12) + MOVL AX, 8(R12) + MOVL R11, 12(R12) + MOVL 8(DX), CX + MOVL 24(R10), R8 + MOVL 4(R10), AX + MOVL 4(DX), R11 + MOVL CX, 16(R12) + MOVL R8, 20(R12) + MOVL AX, 24(R12) + MOVL R11, 28(R12) + MOVL 12(DX), CX + MOVL 12(R10), DX + MOVL 28(R10), R8 + MOVL 8(R10), AX + MOVL DX, 32(R12) + MOVL CX, 36(R12) + MOVL R8, 40(R12) + MOVL AX, 44(R12) + MOVQ $0x61707865, DX + MOVQ $0x3320646e, CX + MOVQ $0x79622d32, R8 + MOVQ $0x6b206574, AX + MOVL DX, 48(R12) + MOVL CX, 52(R12) + MOVL R8, 56(R12) + MOVL AX, 60(R12) + CMPQ R9, $0x00000100 + JB BYTESBETWEEN1AND255 + MOVOA 48(R12), X0 + PSHUFL $0x55, X0, X1 + PSHUFL $0xaa, X0, X2 + PSHUFL $0xff, X0, X3 + PSHUFL $0x00, X0, X0 + MOVOA X1, 64(R12) + MOVOA X2, 80(R12) + MOVOA X3, 96(R12) + MOVOA X0, 112(R12) + MOVOA (R12), X0 + PSHUFL $0xaa, X0, X1 + PSHUFL $0xff, X0, X2 + PSHUFL $0x00, X0, X3 + PSHUFL $0x55, X0, X0 + MOVOA X1, 128(R12) + MOVOA X2, 144(R12) + MOVOA X3, 160(R12) + MOVOA X0, 176(R12) + MOVOA 16(R12), X0 + PSHUFL $0xff, X0, X1 + PSHUFL $0x55, X0, X2 + PSHUFL $0xaa, X0, X0 + MOVOA X1, 192(R12) + MOVOA X2, 208(R12) + MOVOA X0, 224(R12) + MOVOA 32(R12), X0 + PSHUFL $0x00, X0, X1 + PSHUFL $0xaa, X0, X2 + PSHUFL $0xff, X0, X0 + MOVOA X1, 240(R12) + MOVOA X2, 256(R12) + MOVOA X0, 272(R12) + +BYTESATLEAST256: + MOVL 16(R12), DX + MOVL 36(R12), CX + MOVL DX, 288(R12) + MOVL CX, 304(R12) + SHLQ $0x20, CX + ADDQ CX, DX + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 292(R12) + MOVL CX, 308(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 296(R12) + MOVL CX, 312(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 300(R12) + MOVL CX, 316(R12) + ADDQ $0x01, DX + MOVQ DX, CX + SHRQ $0x20, CX + MOVL DX, 16(R12) + MOVL CX, 36(R12) + MOVQ R9, 352(R12) + MOVQ $0x00000014, DX + MOVOA 64(R12), X0 + MOVOA 80(R12), X1 + MOVOA 96(R12), X2 + MOVOA 256(R12), X3 + MOVOA 272(R12), X4 + MOVOA 128(R12), X5 + MOVOA 144(R12), X6 + MOVOA 176(R12), X7 + MOVOA 192(R12), X8 + MOVOA 208(R12), X9 + MOVOA 224(R12), X10 + MOVOA 304(R12), X11 + MOVOA 112(R12), X12 + MOVOA 160(R12), X13 + MOVOA 240(R12), X14 + MOVOA 288(R12), X15 + +MAINLOOP1: + MOVOA X1, 320(R12) + MOVOA X2, 336(R12) + MOVOA X13, X1 + PADDL X12, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X14 + PSRLL $0x19, X2 + PXOR X2, X14 + MOVOA X7, X1 + PADDL X0, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X11 + PSRLL $0x19, X2 
+ PXOR X2, X11 + MOVOA X12, X1 + PADDL X14, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X15 + PSRLL $0x17, X2 + PXOR X2, X15 + MOVOA X0, X1 + PADDL X11, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X9 + PSRLL $0x17, X2 + PXOR X2, X9 + MOVOA X14, X1 + PADDL X15, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X13 + PSRLL $0x13, X2 + PXOR X2, X13 + MOVOA X11, X1 + PADDL X9, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X7 + PSRLL $0x13, X2 + PXOR X2, X7 + MOVOA X15, X1 + PADDL X13, X1 + MOVOA X1, X2 + PSLLL $0x12, X1 + PXOR X1, X12 + PSRLL $0x0e, X2 + PXOR X2, X12 + MOVOA 320(R12), X1 + MOVOA X12, 320(R12) + MOVOA X9, X2 + PADDL X7, X2 + MOVOA X2, X12 + PSLLL $0x12, X2 + PXOR X2, X0 + PSRLL $0x0e, X12 + PXOR X12, X0 + MOVOA X5, X2 + PADDL X1, X2 + MOVOA X2, X12 + PSLLL $0x07, X2 + PXOR X2, X3 + PSRLL $0x19, X12 + PXOR X12, X3 + MOVOA 336(R12), X2 + MOVOA X0, 336(R12) + MOVOA X6, X0 + PADDL X2, X0 + MOVOA X0, X12 + PSLLL $0x07, X0 + PXOR X0, X4 + PSRLL $0x19, X12 + PXOR X12, X4 + MOVOA X1, X0 + PADDL X3, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X10 + PSRLL $0x17, X12 + PXOR X12, X10 + MOVOA X2, X0 + PADDL X4, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X8 + PSRLL $0x17, X12 + PXOR X12, X8 + MOVOA X3, X0 + PADDL X10, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X5 + PSRLL $0x13, X12 + PXOR X12, X5 + MOVOA X4, X0 + PADDL X8, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X6 + PSRLL $0x13, X12 + PXOR X12, X6 + MOVOA X10, X0 + PADDL X5, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X1 + PSRLL $0x0e, X12 + PXOR X12, X1 + MOVOA 320(R12), X0 + MOVOA X1, 320(R12) + MOVOA X4, X1 + PADDL X0, X1 + MOVOA X1, X12 + PSLLL $0x07, X1 + PXOR X1, X7 + PSRLL $0x19, X12 + PXOR X12, X7 + MOVOA X8, X1 + PADDL X6, X1 + MOVOA X1, X12 + PSLLL $0x12, X1 + PXOR X1, X2 + PSRLL $0x0e, X12 + PXOR X12, X2 + MOVOA 336(R12), X12 + MOVOA X2, 336(R12) + MOVOA X14, X1 + PADDL X12, X1 + MOVOA X1, X2 + PSLLL $0x07, X1 + PXOR X1, X5 + PSRLL $0x19, X2 + PXOR X2, X5 + MOVOA X0, X1 + PADDL X7, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X10 + PSRLL $0x17, X2 + PXOR X2, X10 + MOVOA X12, X1 + PADDL X5, X1 + MOVOA X1, X2 + PSLLL $0x09, X1 + PXOR X1, X8 + PSRLL $0x17, X2 + PXOR X2, X8 + MOVOA X7, X1 + PADDL X10, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X4 + PSRLL $0x13, X2 + PXOR X2, X4 + MOVOA X5, X1 + PADDL X8, X1 + MOVOA X1, X2 + PSLLL $0x0d, X1 + PXOR X1, X14 + PSRLL $0x13, X2 + PXOR X2, X14 + MOVOA X10, X1 + PADDL X4, X1 + MOVOA X1, X2 + PSLLL $0x12, X1 + PXOR X1, X0 + PSRLL $0x0e, X2 + PXOR X2, X0 + MOVOA 320(R12), X1 + MOVOA X0, 320(R12) + MOVOA X8, X0 + PADDL X14, X0 + MOVOA X0, X2 + PSLLL $0x12, X0 + PXOR X0, X12 + PSRLL $0x0e, X2 + PXOR X2, X12 + MOVOA X11, X0 + PADDL X1, X0 + MOVOA X0, X2 + PSLLL $0x07, X0 + PXOR X0, X6 + PSRLL $0x19, X2 + PXOR X2, X6 + MOVOA 336(R12), X2 + MOVOA X12, 336(R12) + MOVOA X3, X0 + PADDL X2, X0 + MOVOA X0, X12 + PSLLL $0x07, X0 + PXOR X0, X13 + PSRLL $0x19, X12 + PXOR X12, X13 + MOVOA X1, X0 + PADDL X6, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X15 + PSRLL $0x17, X12 + PXOR X12, X15 + MOVOA X2, X0 + PADDL X13, X0 + MOVOA X0, X12 + PSLLL $0x09, X0 + PXOR X0, X9 + PSRLL $0x17, X12 + PXOR X12, X9 + MOVOA X6, X0 + PADDL X15, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X11 + PSRLL $0x13, X12 + PXOR X12, X11 + MOVOA X13, X0 + PADDL X9, X0 + MOVOA X0, X12 + PSLLL $0x0d, X0 + PXOR X0, X3 + PSRLL $0x13, X12 + PXOR X12, X3 + MOVOA X15, X0 + PADDL X11, X0 + MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X1 + PSRLL $0x0e, X12 + PXOR X12, X1 + MOVOA X9, X0 + PADDL X3, X0 + 
MOVOA X0, X12 + PSLLL $0x12, X0 + PXOR X0, X2 + PSRLL $0x0e, X12 + PXOR X12, X2 + MOVOA 320(R12), X12 + MOVOA 336(R12), X0 + SUBQ $0x02, DX + JA MAINLOOP1 + PADDL 112(R12), X12 + PADDL 176(R12), X7 + PADDL 224(R12), X10 + PADDL 272(R12), X4 + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL (SI), DX + XORL 4(SI), CX + XORL 8(SI), R8 + XORL 12(SI), R9 + MOVL DX, (DI) + MOVL CX, 4(DI) + MOVL R8, 8(DI) + MOVL R9, 12(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL 64(SI), DX + XORL 68(SI), CX + XORL 72(SI), R8 + XORL 76(SI), R9 + MOVL DX, 64(DI) + MOVL CX, 68(DI) + MOVL R8, 72(DI) + MOVL R9, 76(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + PSHUFL $0x39, X12, X12 + PSHUFL $0x39, X7, X7 + PSHUFL $0x39, X10, X10 + PSHUFL $0x39, X4, X4 + XORL 128(SI), DX + XORL 132(SI), CX + XORL 136(SI), R8 + XORL 140(SI), R9 + MOVL DX, 128(DI) + MOVL CX, 132(DI) + MOVL R8, 136(DI) + MOVL R9, 140(DI) + MOVD X12, DX + MOVD X7, CX + MOVD X10, R8 + MOVD X4, R9 + XORL 192(SI), DX + XORL 196(SI), CX + XORL 200(SI), R8 + XORL 204(SI), R9 + MOVL DX, 192(DI) + MOVL CX, 196(DI) + MOVL R8, 200(DI) + MOVL R9, 204(DI) + PADDL 240(R12), X14 + PADDL 64(R12), X0 + PADDL 128(R12), X5 + PADDL 192(R12), X8 + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 16(SI), DX + XORL 20(SI), CX + XORL 24(SI), R8 + XORL 28(SI), R9 + MOVL DX, 16(DI) + MOVL CX, 20(DI) + MOVL R8, 24(DI) + MOVL R9, 28(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 80(SI), DX + XORL 84(SI), CX + XORL 88(SI), R8 + XORL 92(SI), R9 + MOVL DX, 80(DI) + MOVL CX, 84(DI) + MOVL R8, 88(DI) + MOVL R9, 92(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + PSHUFL $0x39, X14, X14 + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X5, X5 + PSHUFL $0x39, X8, X8 + XORL 144(SI), DX + XORL 148(SI), CX + XORL 152(SI), R8 + XORL 156(SI), R9 + MOVL DX, 144(DI) + MOVL CX, 148(DI) + MOVL R8, 152(DI) + MOVL R9, 156(DI) + MOVD X14, DX + MOVD X0, CX + MOVD X5, R8 + MOVD X8, R9 + XORL 208(SI), DX + XORL 212(SI), CX + XORL 216(SI), R8 + XORL 220(SI), R9 + MOVL DX, 208(DI) + MOVL CX, 212(DI) + MOVL R8, 216(DI) + MOVL R9, 220(DI) + PADDL 288(R12), X15 + PADDL 304(R12), X11 + PADDL 80(R12), X1 + PADDL 144(R12), X6 + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 32(SI), DX + XORL 36(SI), CX + XORL 40(SI), R8 + XORL 44(SI), R9 + MOVL DX, 32(DI) + MOVL CX, 36(DI) + MOVL R8, 40(DI) + MOVL R9, 44(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 96(SI), DX + XORL 100(SI), CX + XORL 104(SI), R8 + XORL 108(SI), R9 + MOVL DX, 96(DI) + MOVL CX, 100(DI) + MOVL R8, 104(DI) + MOVL R9, 108(DI) + MOVD X15, DX + MOVD X11, CX + MOVD X1, R8 + MOVD X6, R9 + PSHUFL $0x39, X15, X15 + PSHUFL $0x39, X11, X11 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X6, X6 + XORL 160(SI), DX + XORL 164(SI), CX + XORL 168(SI), R8 + XORL 172(SI), R9 + MOVL DX, 160(DI) + MOVL CX, 164(DI) + MOVL R8, 168(DI) + MOVL R9, 172(DI) + MOVD X15, DX + MOVD X11, CX 
+ MOVD X1, R8 + MOVD X6, R9 + XORL 224(SI), DX + XORL 228(SI), CX + XORL 232(SI), R8 + XORL 236(SI), R9 + MOVL DX, 224(DI) + MOVL CX, 228(DI) + MOVL R8, 232(DI) + MOVL R9, 236(DI) + PADDL 160(R12), X13 + PADDL 208(R12), X9 + PADDL 256(R12), X3 + PADDL 96(R12), X2 + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 48(SI), DX + XORL 52(SI), CX + XORL 56(SI), R8 + XORL 60(SI), R9 + MOVL DX, 48(DI) + MOVL CX, 52(DI) + MOVL R8, 56(DI) + MOVL R9, 60(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 112(SI), DX + XORL 116(SI), CX + XORL 120(SI), R8 + XORL 124(SI), R9 + MOVL DX, 112(DI) + MOVL CX, 116(DI) + MOVL R8, 120(DI) + MOVL R9, 124(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + PSHUFL $0x39, X13, X13 + PSHUFL $0x39, X9, X9 + PSHUFL $0x39, X3, X3 + PSHUFL $0x39, X2, X2 + XORL 176(SI), DX + XORL 180(SI), CX + XORL 184(SI), R8 + XORL 188(SI), R9 + MOVL DX, 176(DI) + MOVL CX, 180(DI) + MOVL R8, 184(DI) + MOVL R9, 188(DI) + MOVD X13, DX + MOVD X9, CX + MOVD X3, R8 + MOVD X2, R9 + XORL 240(SI), DX + XORL 244(SI), CX + XORL 248(SI), R8 + XORL 252(SI), R9 + MOVL DX, 240(DI) + MOVL CX, 244(DI) + MOVL R8, 248(DI) + MOVL R9, 252(DI) + MOVQ 352(R12), R9 + SUBQ $0x00000100, R9 + ADDQ $0x00000100, SI + ADDQ $0x00000100, DI + CMPQ R9, $0x00000100 + JAE BYTESATLEAST256 + CMPQ R9, $0x00 + JBE DONE + +BYTESBETWEEN1AND255: + CMPQ R9, $0x40 + JAE NOCOPY + MOVQ DI, DX + LEAQ 360(R12), DI + MOVQ R9, CX + REP; MOVSB + LEAQ 360(R12), DI + LEAQ 360(R12), SI + +NOCOPY: + MOVQ R9, 352(R12) + MOVOA 48(R12), X0 + MOVOA (R12), X1 + MOVOA 16(R12), X2 + MOVOA 32(R12), X3 + MOVOA X1, X4 + MOVQ $0x00000014, CX + +MAINLOOP2: + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X3 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X3, X3 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X1 + PSHUFL $0x4e, X2, X2 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X1, X1 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X1 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X1, X1 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X3 + PSHUFL $0x4e, X2, X2 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X3, X3 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X3 + PXOR X6, X3 + PADDL X3, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, X6 + PXOR X5, X2 + PSHUFL $0x93, X3, X3 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X1 + PSHUFL $0x4e, X2, X2 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X3, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X1, X1 + PXOR X6, X0 + PADDL X0, X4 + MOVOA X0, X5 + MOVOA X4, X6 + PSLLL $0x07, X4 + PSRLL $0x19, X6 + PXOR X4, X1 + PXOR X6, X1 + PADDL X1, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x09, X5 + PSRLL $0x17, 
X6 + PXOR X5, X2 + PSHUFL $0x93, X1, X1 + PXOR X6, X2 + PADDL X2, X4 + MOVOA X2, X5 + MOVOA X4, X6 + PSLLL $0x0d, X4 + PSRLL $0x13, X6 + PXOR X4, X3 + PSHUFL $0x4e, X2, X2 + PXOR X6, X3 + SUBQ $0x04, CX + PADDL X3, X5 + MOVOA X1, X4 + MOVOA X5, X6 + PSLLL $0x12, X5 + PXOR X7, X7 + PSRLL $0x0e, X6 + PXOR X5, X0 + PSHUFL $0x39, X3, X3 + PXOR X6, X0 + JA MAINLOOP2 + PADDL 48(R12), X0 + PADDL (R12), X1 + PADDL 16(R12), X2 + PADDL 32(R12), X3 + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL (SI), CX + XORL 48(SI), R8 + XORL 32(SI), R9 + XORL 16(SI), AX + MOVL CX, (DI) + MOVL R8, 48(DI) + MOVL R9, 32(DI) + MOVL AX, 16(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL 20(SI), CX + XORL 4(SI), R8 + XORL 52(SI), R9 + XORL 36(SI), AX + MOVL CX, 20(DI) + MOVL R8, 4(DI) + MOVL R9, 52(DI) + MOVL AX, 36(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + PSHUFL $0x39, X0, X0 + PSHUFL $0x39, X1, X1 + PSHUFL $0x39, X2, X2 + PSHUFL $0x39, X3, X3 + XORL 40(SI), CX + XORL 24(SI), R8 + XORL 8(SI), R9 + XORL 56(SI), AX + MOVL CX, 40(DI) + MOVL R8, 24(DI) + MOVL R9, 8(DI) + MOVL AX, 56(DI) + MOVD X0, CX + MOVD X1, R8 + MOVD X2, R9 + MOVD X3, AX + XORL 60(SI), CX + XORL 44(SI), R8 + XORL 28(SI), R9 + XORL 12(SI), AX + MOVL CX, 60(DI) + MOVL R8, 44(DI) + MOVL R9, 28(DI) + MOVL AX, 12(DI) + MOVQ 352(R12), R9 + MOVL 16(R12), CX + MOVL 36(R12), R8 + ADDQ $0x01, CX + SHLQ $0x20, R8 + ADDQ R8, CX + MOVQ CX, R8 + SHRQ $0x20, R8 + MOVL CX, 16(R12) + MOVL R8, 36(R12) + CMPQ R9, $0x40 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI, SI + MOVQ DX, DI + MOVQ R9, CX + REP; MOVSB + +BYTESATLEAST64: +DONE: + RET + +BYTESATLEAST65: + SUBQ $0x40, R9 + ADDQ $0x40, DI + ADDQ $0x40, SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go new file mode 100644 index 00000000..9448760f --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package salsa + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + genericXORKeyStream(out, in, counter, key) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 00000000..e5cdb9a2 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,233 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "math/bits" + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. 
+func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] 
= byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// genericXORKeyStream is the generic implementation of XORKeyStream to be used +// when no assembly implementation is available. +func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key, &Sigma) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 8; i < 16; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key, &Sigma) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4058f8c8..be0766c7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -142,7 +142,10 @@ golang.org/x/crypto/ed25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 +golang.org/x/crypto/nacl/box +golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/poly1305 +golang.org/x/crypto/salsa20/salsa # golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 ## explicit; go 1.22.0 golang.org/x/exp/rand From e0ecfcf822281e56b1e29a97182e49f868c38abc Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 13:15:04 +0100 Subject: [PATCH 05/48] Update the ODoH relay used for test CI tests --- .ci/test-odoh-proxied.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/test-odoh-proxied.toml b/.ci/test-odoh-proxied.toml index 64a4428c..a175469e 100644 --- a/.ci/test-odoh-proxied.toml +++ b/.ci/test-odoh-proxied.toml @@ -9,7 +9,7 @@ file = 'query.log' stamp = 'sdns://BQcAAAAAAAAADm9kb2guY3J5cHRvLnN4Ci9kbnMtcXVlcnk' [static.'odohrelay'] - stamp = 'sdns://hQcAAAAAAAAADDg5LjM4LjEzMS4zOAAYb2RvaC1ubC5hbGVrYmVyZy5uZXQ6NDQzBi9wcm94eQ' + stamp = 'sdns://hQcAAAAAAAAAAAAab2RvaC1yZWxheS5lZGdlY29tcHV0ZS5hcHABLw' [anonymized_dns] routes = [ From d469ad13962fc11b0b0ad8f032aa4425c008638f Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 13:23:00 +0100 Subject: [PATCH 06/48] Update ChangeLog, bump --- ChangeLog | 32 
+++++++++++++++++++++++++++-----
 dnscrypt-proxy/main.go | 2 +-
 2 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 354eabac..a961dea6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,14 +1,36 @@
+# Version 2.1.7
+ - This version reintroduces support for XSalsa20 encryption in DNSCrypt,
+which was removed in 2.1.6. Unfortunately, a bunch of servers still
+only support that encryption system.
+ - A check for lying resolvers was added for DNSCrypt, similar to the one
+that was already present for DoH and ODoH.
+ - Binary packages for Windows/ARM are now available, albeit not in MSI
+format yet.
+
 # Version 2.1.6
- - Forwarding: in the list of servers for a zone, the `$BOOTSTRAP` keyword can be included as a shortcut to forward to the bootstrap servers. And the `$DHCP` keyword can be included to forward to the DNS resolvers provided by the local DHCP server. Based on work by YX Hao, thanks! DHCP forwarding should be considered experimental and may not work on all operating systems. A rule for a zone can mix and match multiple forwarder types, such as `10.0.0.1,10.0.0.254,$DHCP,192.168.1.1,$BOOTSTRAP`. Note that this is not implemented for captive portals yet.
- - Lying resolvers are now skipped, instead of just printing an error. This doesn't apply to captive portal and forwarding entries, which are the only reasonable use case for lying resolvers.
- - Support for XSalsa20 in DNSCrypt has been removed. This was not documented, and was supserseded by XChaCha20 in 2016.
+ - Forwarding: in the list of servers for a zone, the `$BOOTSTRAP` keyword
+ can be included as a shortcut to forward to the bootstrap servers.
+ And the `$DHCP` keyword can be included to forward to the DNS resolvers
+ provided by the local DHCP server. Based on work by YX Hao, thanks!
+ DHCP forwarding should be considered experimental and may not work on all
+ operating systems. A rule for a zone can mix and match multiple forwarder
+ types, such as `10.0.0.1,10.0.0.254,$DHCP,192.168.1.1,$BOOTSTRAP`.
+ Note that this is not implemented for captive portals yet.
+ - Lying resolvers are now skipped, instead of just printing an error.
+ This doesn't apply to captive portal and forwarding entries, which are the
+ only reasonable use case for lying resolvers.
+ - Support for XSalsa20 in DNSCrypt has been removed. This was not documented,
+ and was superseded by XChaCha20 in 2016.
 - Source files are now fetched with compression.
 - DNS64: compatibility has been improved.
 - Forwarding: the root domain (`.`) can now be forwarded.
 - The ARC caching algorithm has been replaced by the SIEVE algorithm.
- - Properties of multiple servers are now updated simultaneously. The concurrency level can be adjusted with the new `cert_refresh_concurrency` setting. Contributed by YX Hao.
+ - Properties of multiple servers are now updated simultaneously.
+ The concurrency level can be adjusted with the new `cert_refresh_concurrency`
+ setting. Contributed by YX Hao.
 - MSI packages for DNSCrypt can now easily be built.
- - New command-line flag: `-include-relays` to include relays in `-list` and `-list-all`.
+ - New command-line flag: `-include-relays` to include relays in `-list` and
+ `-list-all`.
 - Support for DNS extended error codes has been added.
 - Documentation updates, bug fixes, dependency updates.
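The XSalsa20 entry above refers to the classic NaCl construction (Curve25519 key agreement plus the XSalsa20-Poly1305 secretbox) exposed by the golang.org/x/crypto/nacl packages re-vendored earlier in this series. The following stand-alone Go sketch is illustrative only and is not part of any patch in this series: the key pairs, nonce handling and message are placeholders rather than dnscrypt-proxy code, but they show how box.Precompute and secretbox.Seal/Open are typically combined.

package main

import (
	crypto_rand "crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	// Two X25519 key pairs; in DNSCrypt these would correspond to the
	// client's ephemeral key and the resolver's published key.
	clientPub, clientSec, err := box.GenerateKey(crypto_rand.Reader)
	if err != nil {
		panic(err)
	}
	serverPub, serverSec, err := box.GenerateKey(crypto_rand.Reader)
	if err != nil {
		panic(err)
	}

	// Derive the shared key once (X25519 followed by HSalsa20), then reuse it.
	var sharedKey [32]byte
	box.Precompute(&sharedKey, serverPub, clientSec)

	// XSalsa20-Poly1305 uses a 24-byte nonce; it must never repeat for a given key.
	var nonce [24]byte
	if _, err := crypto_rand.Read(nonce[:]); err != nil {
		panic(err)
	}
	sealed := secretbox.Seal(nil, []byte("placeholder DNS query"), &nonce, &sharedKey)

	// The other side derives the same shared key from its own secret key and
	// the peer's public key, then authenticates and decrypts in one step.
	var peerShared [32]byte
	box.Precompute(&peerShared, clientPub, serverSec)
	plaintext, ok := secretbox.Open(nil, sealed, &nonce, &peerShared)
	fmt.Println(ok, string(plaintext))
}

box.Precompute performs the key exchange and key derivation once, so the per-message cost is reduced to a single secretbox call with a fresh 24-byte nonce.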
diff --git a/dnscrypt-proxy/main.go b/dnscrypt-proxy/main.go index 7ff9fa1c..3121bcd0 100644 --- a/dnscrypt-proxy/main.go +++ b/dnscrypt-proxy/main.go @@ -15,7 +15,7 @@ import ( ) const ( - AppVersion = "2.1.6" + AppVersion = "2.1.7" DefaultConfigFileName = "dnscrypt-proxy.toml" ) From b74b46c6cda4e970b0009ed4e727a266e82b1d50 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 14:24:18 +0100 Subject: [PATCH 07/48] Indent generate-domains-blocklist.py --- .../generate-domains-blocklist.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/utils/generate-domains-blocklist/generate-domains-blocklist.py b/utils/generate-domains-blocklist/generate-domains-blocklist.py index a78a542d..380b27c2 100755 --- a/utils/generate-domains-blocklist/generate-domains-blocklist.py +++ b/utils/generate-domains-blocklist/generate-domains-blocklist.py @@ -93,8 +93,7 @@ def parse_list(content, trusted=False): def print_restricted_name(output_fd, name, time_restrictions): if name in time_restrictions: - print("{}\t{}".format( - name, time_restrictions[name]), file=output_fd, end="\n") + print("{}\t{}".format(name, time_restrictions[name]), file=output_fd, end="\n") else: print( "# ignored: [{}] was in the time-restricted list, " @@ -122,8 +121,7 @@ def load_from_url(url): except urllib.URLError as err: raise Exception("[{}] could not be loaded: {}\n".format(url, err)) if trusted is False and response.getcode() != 200: - raise Exception("[{}] returned HTTP code {}\n".format( - url, response.getcode())) + raise Exception("[{}] returned HTTP code {}\n".format(url, response.getcode())) content = response.read() if URLLIB_NEW: content = content.decode("utf-8", errors="replace") @@ -265,12 +263,10 @@ def blocklists_from_config_file( list_names.sort(key=name_cmp) if ignored: - print("# Ignored duplicates: {}".format( - ignored), file=output_fd, end="\n") + print("# Ignored duplicates: {}".format(ignored), file=output_fd, end="\n") if glob_ignored: print( - "# Ignored due to overlapping local patterns: {}".format( - glob_ignored), + "# Ignored due to overlapping local patterns: {}".format(glob_ignored), file=output_fd, end="\n", ) From f332394c4b60709d6ec0c6d07fc2cf967f71f7d1 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 15:17:11 +0100 Subject: [PATCH 08/48] More heuristics to detect lying resolvers --- dnscrypt-proxy/serversInfo.go | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/dnscrypt-proxy/serversInfo.go b/dnscrypt-proxy/serversInfo.go index 3b63eb0b..442000c3 100644 --- a/dnscrypt-proxy/serversInfo.go +++ b/dnscrypt-proxy/serversInfo.go @@ -621,11 +621,26 @@ func fetchDNSCryptServerInfo(proxy *Proxy, name string, stamp stamps.ServerStamp false, ) if err == nil { - if msg.Rcode != dns.RcodeNameError && msg.Id == 0xcafe { - dlog.Warnf("[%s] may be a lying resolver -- skipping", name) - return ServerInfo{}, fmt.Errorf("[%s] unexpected catchall response", name) + if msg.Id != 0xcafe { + dlog.Infof("[%s] handling of DNS message identifiers is broken", name) + } + for _, rr := range msg.Answer { + if rr.Header().Rrtype == dns.TypeA || rr.Header().Rrtype == dns.TypeAAAA { + dlog.Warnf("[%s] may be a lying resolver -- skipping", name) + return ServerInfo{}, fmt.Errorf("[%s] unexpected record: [%s]", name, rr.String()) + } + } + for _, rr := range msg.Extra { + if rr.Header().Rrtype == dns.TypeTXT { + dlog.Warnf("[%s] may be a dummy resolver -- skipping", name) + txts := rr.(*dns.TXT).Txt + cause := "" + if len(txts) > 0 
{ + cause = txts[0] + } + return ServerInfo{}, fmt.Errorf("[%s] unexpected record: [%s]", name, cause) + } } - dlog.Debugf("[%s] seems to be also accessible over plain DNS", name) } } From 6dd22becac486a5abeee6e1ddfd54c6f966f2e3f Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sat, 11 Jan 2025 15:29:49 +0100 Subject: [PATCH 09/48] More heuristics to detect valid plain DNS responses --- dnscrypt-proxy/serversInfo.go | 38 +++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/dnscrypt-proxy/serversInfo.go b/dnscrypt-proxy/serversInfo.go index 442000c3..5f9161b7 100644 --- a/dnscrypt-proxy/serversInfo.go +++ b/dnscrypt-proxy/serversInfo.go @@ -620,25 +620,29 @@ func fetchDNSCryptServerInfo(proxy *Proxy, name string, stamp stamps.ServerStamp &name, false, ) - if err == nil { - if msg.Id != 0xcafe { - dlog.Infof("[%s] handling of DNS message identifiers is broken", name) - } - for _, rr := range msg.Answer { - if rr.Header().Rrtype == dns.TypeA || rr.Header().Rrtype == dns.TypeAAAA { - dlog.Warnf("[%s] may be a lying resolver -- skipping", name) - return ServerInfo{}, fmt.Errorf("[%s] unexpected record: [%s]", name, rr.String()) + if err == nil && len(msg.Question) > 0 { + question := msg.Question[0] + if question.Qtype == query.Question[0].Qtype && strings.EqualFold(question.Name, query.Question[0].Name) { + dlog.Debugf("[%s] also serves plaintext DNS", name) + if msg.Id != 0xcafe { + dlog.Infof("[%s] handling of DNS message identifiers is broken", name) } - } - for _, rr := range msg.Extra { - if rr.Header().Rrtype == dns.TypeTXT { - dlog.Warnf("[%s] may be a dummy resolver -- skipping", name) - txts := rr.(*dns.TXT).Txt - cause := "" - if len(txts) > 0 { - cause = txts[0] + for _, rr := range msg.Answer { + if rr.Header().Rrtype == dns.TypeA || rr.Header().Rrtype == dns.TypeAAAA { + dlog.Warnf("[%s] may be a lying resolver -- skipping", name) + return ServerInfo{}, fmt.Errorf("[%s] unexpected record: [%s]", name, rr.String()) + } + } + for _, rr := range msg.Extra { + if rr.Header().Rrtype == dns.TypeTXT { + dlog.Warnf("[%s] may be a dummy resolver -- skipping", name) + txts := rr.(*dns.TXT).Txt + cause := "" + if len(txts) > 0 { + cause = txts[0] + } + return ServerInfo{}, fmt.Errorf("[%s] unexpected record: [%s]", name, cause) } - return ServerInfo{}, fmt.Errorf("[%s] unexpected record: [%s]", name, cause) } } } From 31e9a7d251213164212a7b90c3e3ccea897109b5 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Fri, 17 Jan 2025 16:32:33 +0100 Subject: [PATCH 10/48] Remove obsolete badges --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index c0986ab1..0969f0d6 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,6 @@ [![Financial Contributors on Open Collective](https://opencollective.com/dnscrypt/all/badge.svg?label=financial+contributors)](https://opencollective.com/dnscrypt) [![DNSCrypt-Proxy Release](https://img.shields.io/github/release/dnscrypt/dnscrypt-proxy.svg?label=Latest%20Release&style=popout)](https://github.com/dnscrypt/dnscrypt-proxy/releases/latest) [![Build Status](https://github.com/DNSCrypt/dnscrypt-proxy/actions/workflows/releases.yml/badge.svg)](https://github.com/DNSCrypt/dnscrypt-proxy/actions/workflows/releases.yml) -![CodeQL scan](https://github.com/DNSCrypt/dnscrypt-proxy/workflows/CodeQL%20scan/badge.svg) -![ShiftLeft Scan](https://github.com/DNSCrypt/dnscrypt-proxy/workflows/ShiftLeft%20Scan/badge.svg) 
-[![#dnscrypt-proxy:matrix.org](https://img.shields.io/matrix/dnscrypt-proxy:matrix.org.svg?label=DNSCrypt-Proxy%20Matrix%20Chat&server_fqdn=matrix.org&style=popout)](https://matrix.to/#/#dnscrypt-proxy:matrix.org) ## Overview From 8f2be59a821d94fdbd2b50469450c94ffc8aef34 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Fri, 17 Jan 2025 23:01:07 +0100 Subject: [PATCH 11/48] Error out on domain names with wildcards in captive portals --- dnscrypt-proxy/coldstart.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/dnscrypt-proxy/coldstart.go b/dnscrypt-proxy/coldstart.go index 81bccb7b..0f0b73ac 100644 --- a/dnscrypt-proxy/coldstart.go +++ b/dnscrypt-proxy/coldstart.go @@ -170,6 +170,12 @@ func ColdStart(proxy *Proxy) (*CaptivePortalHandler, error) { if err != nil { continue } + if strings.Index(ipsStr, "*") != -1 { + return nil, fmt.Errorf( + "A captive portal rule must use an exact host name at line %d", + 1+lineNo, + ) + } var ips []net.IP for _, ip := range strings.Split(ipsStr, ",") { ipStr := strings.TrimSpace(ip) From eb2c1dc6b36f01af3641cc0c9053c1d66a6a5807 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Fri, 17 Jan 2025 23:01:40 +0100 Subject: [PATCH 12/48] plugin_forward: silently skip '*.' prefixes --- dnscrypt-proxy/plugin_forward.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/dnscrypt-proxy/plugin_forward.go b/dnscrypt-proxy/plugin_forward.go index 08003299..12e52928 100644 --- a/dnscrypt-proxy/plugin_forward.go +++ b/dnscrypt-proxy/plugin_forward.go @@ -60,6 +60,12 @@ func (plugin *PluginForward) Init(proxy *Proxy) error { continue } domain, serversStr, ok := StringTwoFields(line) + if strings.HasPrefix(domain, "*.") { + domain = domain[2:] + } + if strings.Index(domain, "*") != -1 { + ok = false + } if !ok { return fmt.Errorf( "Syntax error for a forwarding rule at line %d. Expected syntax: example.com 9.9.9.9,8.8.8.8", From 8727250618b9cf86ebc6345829f26053c5755665 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 03:40:10 +0000 Subject: [PATCH 13/48] Bump softprops/action-gh-release Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from c43d7637b9b9ce3e953168c325d27253a5d48d8e to 1fd6f3f2b1b859145679a1892abda586ef982a84. - [Release notes](https://github.com/softprops/action-gh-release/releases) - [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md) - [Commits](https://github.com/softprops/action-gh-release/compare/c43d7637b9b9ce3e953168c325d27253a5d48d8e...1fd6f3f2b1b859145679a1892abda586ef982a84) --- updated-dependencies: - dependency-name: softprops/action-gh-release dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- .github/workflows/releases.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/releases.yml b/.github/workflows/releases.yml index 6ad2925b..a0d8959b 100644 --- a/.github/workflows/releases.yml +++ b/.github/workflows/releases.yml @@ -83,7 +83,7 @@ jobs: prerelease: false - name: Upload release assets - uses: softprops/action-gh-release@c43d7637b9b9ce3e953168c325d27253a5d48d8e + uses: softprops/action-gh-release@1fd6f3f2b1b859145679a1892abda586ef982a84 if: startsWith(github.ref, 'refs/tags/') env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 3b75a4c6ac1013d1b11dad376e02fd7b1d5fd0a2 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sun, 26 Jan 2025 23:18:03 +0100 Subject: [PATCH 14/48] Fix undefined vs empty confusion for tls_cipher_suite The documentation refers to tls_cipher_suite being empty in order to use the default parameters, not undefined. However, configuring an empty set of cipher suites did just that: no cipher suites could be used, which is not very useful. Fix the documentation: in order to use the default suites, the parameter must be undefined, not empty. And in code, make an empty set equivalent to the parameter being undefined. --- dnscrypt-proxy/example-dnscrypt-proxy.toml | 4 ++-- dnscrypt-proxy/xtransport.go | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/dnscrypt-proxy/example-dnscrypt-proxy.toml b/dnscrypt-proxy/example-dnscrypt-proxy.toml index 6ecd9152..a2412672 100644 --- a/dnscrypt-proxy/example-dnscrypt-proxy.toml +++ b/dnscrypt-proxy/example-dnscrypt-proxy.toml @@ -223,9 +223,9 @@ cert_refresh_delay = 240 ## On non-Intel CPUs such as MIPS routers and ARM systems (Android, Raspberry Pi...), ## the following suite improves performance. ## This may also help on Intel CPUs running 32-bit operating systems. +## However, this can cause issues fetching sources or connecting to some HTTP servers. ## -## Keep tls_cipher_suite empty if you have issues fetching sources or -## connecting to some DoH servers. 
+## Keep tls_cipher_suite undefined to let the app automatically choose secure parameters # tls_cipher_suite = [52392, 49199] diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go index 249a514e..39186f8f 100644 --- a/dnscrypt-proxy/xtransport.go +++ b/dnscrypt-proxy/xtransport.go @@ -217,12 +217,13 @@ func (xTransport *XTransport) rebuildTransport() { tlsClientConfig.Certificates = []tls.Certificate{cert} } - if xTransport.tlsDisableSessionTickets || xTransport.tlsCipherSuite != nil { + overrideCipherSuite := xTransport.tlsCipherSuite != nil && len(xTransport.tlsCipherSuite) > 0 + if xTransport.tlsDisableSessionTickets || overrideCipherSuite { tlsClientConfig.SessionTicketsDisabled = xTransport.tlsDisableSessionTickets if !xTransport.tlsDisableSessionTickets { tlsClientConfig.ClientSessionCache = tls.NewLRUClientSessionCache(10) } - if xTransport.tlsCipherSuite != nil { + if overrideCipherSuite { tlsClientConfig.PreferServerCipherSuites = false tlsClientConfig.CipherSuites = xTransport.tlsCipherSuite From 0ba23128cc7e448c6b09685f14db4ce180a38c58 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sun, 26 Jan 2025 23:28:21 +0100 Subject: [PATCH 15/48] Improve comment --- dnscrypt-proxy/example-dnscrypt-proxy.toml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/dnscrypt-proxy/example-dnscrypt-proxy.toml b/dnscrypt-proxy/example-dnscrypt-proxy.toml index a2412672..9bb4819a 100644 --- a/dnscrypt-proxy/example-dnscrypt-proxy.toml +++ b/dnscrypt-proxy/example-dnscrypt-proxy.toml @@ -221,11 +221,12 @@ cert_refresh_delay = 240 ## 52393 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 ## ## On non-Intel CPUs such as MIPS routers and ARM systems (Android, Raspberry Pi...), -## the following suite improves performance. +## uncommenting the following line may improve performance. ## This may also help on Intel CPUs running 32-bit operating systems. -## However, this can cause issues fetching sources or connecting to some HTTP servers. +## However, this can cause issues fetching sources or connecting to some HTTP servers, +## and should not be set on regular CPUs. ## -## Keep tls_cipher_suite undefined to let the app automatically choose secure parameters +## Keep tls_cipher_suite undefined to let the app automatically choose secure parameters. 
# tls_cipher_suite = [52392, 49199] From 4c659acad9ac5278259e0c849e766bb7aa948bc8 Mon Sep 17 00:00:00 2001 From: Frank Denis Date: Sun, 26 Jan 2025 23:29:11 +0100 Subject: [PATCH 16/48] Update quic-go --- go.mod | 22 +- go.sum | 57 +- .../go-task/slim-sprig/{v3 => }/.editorconfig | 0 .../slim-sprig/{v3 => }/.gitattributes | 0 .../go-task/slim-sprig/{v3 => }/.gitignore | 0 .../go-task/slim-sprig/{v3 => }/CHANGELOG.md | 19 - .../go-task/slim-sprig/{v3 => }/LICENSE.txt | 0 .../go-task/slim-sprig/{v3 => }/README.md | 2 +- .../go-task/slim-sprig/{v3 => }/Taskfile.yml | 2 +- .../go-task/slim-sprig/{v3 => }/crypto.go | 0 .../go-task/slim-sprig/{v3 => }/date.go | 0 .../go-task/slim-sprig/{v3 => }/defaults.go | 0 .../go-task/slim-sprig/{v3 => }/dict.go | 0 .../go-task/slim-sprig/{v3 => }/doc.go | 0 .../go-task/slim-sprig/{v3 => }/functions.go | 0 .../go-task/slim-sprig/{v3 => }/list.go | 0 .../go-task/slim-sprig/{v3 => }/network.go | 0 .../go-task/slim-sprig/{v3 => }/numeric.go | 0 .../go-task/slim-sprig/{v3 => }/reflect.go | 0 .../go-task/slim-sprig/{v3 => }/regex.go | 0 .../go-task/slim-sprig/{v3 => }/strings.go | 0 .../go-task/slim-sprig/{v3 => }/url.go | 0 .../github.com/google/pprof/profile/encode.go | 97 +- .../github.com/google/pprof/profile/filter.go | 4 - .../pprof/profile/legacy_java_profile.go | 4 +- .../google/pprof/profile/legacy_profile.go | 31 +- .../github.com/google/pprof/profile/merge.go | 287 +- .../google/pprof/profile/profile.go | 84 +- .../github.com/google/pprof/profile/proto.go | 19 +- .../github.com/google/pprof/profile/prune.go | 26 +- .../github.com/hashicorp/golang-lru/LICENSE | 2 - .../hashicorp/golang-lru/simplelru/lru.go | 22 +- .../golang-lru/simplelru/lru_interface.go | 8 +- vendor/github.com/miekg/dns/README.md | 1 + vendor/github.com/miekg/dns/dnssec.go | 42 +- vendor/github.com/miekg/dns/edns.go | 36 +- ...useport.go => listen_no_socket_options.go} | 22 +- ..._reuseport.go => listen_socket_options.go} | 31 + vendor/github.com/miekg/dns/server.go | 1 + vendor/github.com/miekg/dns/sig0.go | 3 +- vendor/github.com/miekg/dns/version.go | 2 +- .../onsi/ginkgo/v2/formatter/formatter.go | 4 - .../ginkgo/v2/ginkgo/build/build_command.go | 15 +- .../v2/ginkgo/generators/bootstrap_command.go | 2 +- .../v2/ginkgo/generators/generate_command.go | 8 +- .../ginkgo/generators/generate_templates.go | 6 +- .../v2/ginkgo/generators/generators_common.go | 12 - .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 14 +- .../ginkgo/v2/ginkgo/internal/gocovmerge.go | 129 - .../ginkgo/internal/profiles_and_reports.go | 46 +- .../ginkgo/v2/ginkgo/internal/test_suite.go | 9 +- .../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 7 +- .../onsi/ginkgo/v2/ginkgo/outline/import.go | 9 +- .../onsi/ginkgo/v2/ginkgo/outline/outline.go | 26 +- .../ginkgo/v2/ginkgo/watch/dependencies.go | 2 +- .../ginkgo/v2/ginkgo/watch/package_hash.go | 13 +- .../ginkgo/v2/reporters/default_reporter.go | 61 +- .../onsi/ginkgo/v2/reporters/json_report.go | 19 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 21 - .../ginkgo/v2/reporters/teamcity_report.go | 4 - .../onsi/ginkgo/v2/types/code_location.go | 2 +- .../github.com/onsi/ginkgo/v2/types/config.go | 29 +- .../github.com/onsi/ginkgo/v2/types/errors.go | 13 +- .../github.com/onsi/ginkgo/v2/types/flags.go | 15 +- .../onsi/ginkgo/v2/types/label_filter.go | 229 +- .../github.com/onsi/ginkgo/v2/types/types.go | 14 +- .../onsi/ginkgo/v2/types/version.go | 2 +- .../github.com/quic-go/quic-go/.golangci.yml | 7 +- vendor/github.com/quic-go/quic-go/README.md | 4 +- 
vendor/github.com/quic-go/quic-go/client.go | 147 - vendor/github.com/quic-go/quic-go/codecov.yml | 2 + .../quic-go/quic-go/conn_id_generator.go | 32 +- .../quic-go/quic-go/conn_id_manager.go | 22 + .../github.com/quic-go/quic-go/connection.go | 230 +- .../quic-go/quic-go/connection_logging.go | 7 +- vendor/github.com/quic-go/quic-go/errors.go | 8 +- vendor/github.com/quic-go/quic-go/framer.go | 151 +- .../github.com/quic-go/quic-go/http3/body.go | 13 +- .../quic-go/quic-go/http3/client.go | 15 +- .../github.com/quic-go/quic-go/http3/conn.go | 4 +- .../github.com/quic-go/quic-go/http3/error.go | 5 + .../quic-go/quic-go/http3/http_stream.go | 26 +- .../quic-go/quic-go/http3/ip_addr.go | 48 + .../quic-go/quic-go/http3/mockgen.go | 4 +- .../quic-go/quic-go/http3/request_writer.go | 19 +- .../github.com/quic-go/quic-go/http3/trace.go | 105 + .../quic-go/quic-go/http3/transport.go | 124 +- .../github.com/quic-go/quic-go/interface.go | 9 +- .../quic-go/internal/ackhandler/interfaces.go | 13 +- .../ackhandler/received_packet_handler.go | 6 +- .../ackhandler/received_packet_tracker.go | 3 +- .../ackhandler/sent_packet_handler.go | 161 +- .../flowcontrol/base_flow_controller.go | 16 +- .../flowcontrol/connection_flow_controller.go | 51 +- .../quic-go/internal/flowcontrol/interface.go | 18 +- .../flowcontrol/stream_flow_controller.go | 35 +- .../quic-go/internal/handshake/retry.go | 13 +- .../quic-go/internal/protocol/params.go | 4 - .../quic-go/quic-go/internal/qerr/errors.go | 51 +- .../quic-go/internal/utils/rtt_stats.go | 2 +- .../quic-go/quic-go/internal/wire/frame.go | 21 + .../quic-go/internal/wire/interface.go | 11 - .../quic-go/internal/wire/stream_frame.go | 7 +- .../quic-go/logging/connection_tracer.go | 267 +- .../logging/connection_tracer_multiplexer.go | 236 ++ .../quic-go/logging/generate_multiplexer.go | 161 ++ .../quic-go/quic-go/logging/multiplexer.tmpl | 21 + .../quic-go/quic-go/logging/tracer.go | 55 +- .../quic-go/logging/tracer_multiplexer.go | 51 + vendor/github.com/quic-go/quic-go/mockgen.go | 5 - .../quic-go/quic-go/mtu_discoverer.go | 16 +- .../github.com/quic-go/quic-go/multiplexer.go | 75 - .../quic-go/quic-go/packet_handler_map.go | 29 +- .../quic-go/quic-go/packet_packer.go | 137 +- .../quic-go/quic-go/receive_stream.go | 67 +- .../github.com/quic-go/quic-go/send_stream.go | 164 +- vendor/github.com/quic-go/quic-go/server.go | 85 +- .../quic-go/quic-go/stateless_reset.go | 42 + vendor/github.com/quic-go/quic-go/stream.go | 13 +- vendor/github.com/quic-go/quic-go/sys_conn.go | 2 +- .../quic-go/quic-go/sys_conn_df_darwin.go | 90 +- .../quic-go/quic-go/sys_conn_df_windows.go | 12 +- .../quic-go/quic-go/sys_conn_oob.go | 2 +- .../github.com/quic-go/quic-go/transport.go | 220 +- vendor/golang.org/x/exp/LICENSE | 4 +- vendor/golang.org/x/mod/LICENSE | 4 +- vendor/golang.org/x/mod/modfile/read.go | 7 +- vendor/golang.org/x/mod/modfile/rule.go | 80 +- vendor/golang.org/x/mod/modfile/work.go | 2 +- vendor/golang.org/x/mod/module/module.go | 2 + vendor/golang.org/x/tools/LICENSE | 4 +- vendor/golang.org/x/tools/cover/profile.go | 266 -- .../x/tools/go/ast/astutil/enclosing.go | 24 +- .../x/tools/go/ast/astutil/imports.go | 5 - .../golang.org/x/tools/go/ast/astutil/util.go | 11 +- .../x/tools/go/ast/inspector/inspector.go | 50 +- .../x/tools/go/ast/inspector/iter.go | 85 - .../x/tools/go/ast/inspector/typeof.go | 3 - .../x/tools/go/gcexportdata/gcexportdata.go | 117 +- .../tools/go/internal/packagesdriver/sizes.go | 54 + vendor/golang.org/x/tools/go/packages/doc.go | 15 +- 
.../x/tools/go/packages/external.go | 15 +- .../golang.org/x/tools/go/packages/golist.go | 90 +- .../x/tools/go/packages/loadmode_string.go | 73 +- .../x/tools/go/packages/packages.go | 413 ++- .../golang.org/x/tools/go/packages/visit.go | 9 - .../x/tools/go/types/objectpath/objectpath.go | 186 +- .../x/tools/go/types/typeutil/callee.go | 68 - .../x/tools/go/types/typeutil/imports.go | 30 - .../x/tools/go/types/typeutil/map.go | 467 --- .../tools/go/types/typeutil/methodsetcache.go | 71 - .../x/tools/go/types/typeutil/ui.go | 53 - .../x/tools/internal/aliases/aliases.go | 10 +- .../x/tools/internal/aliases/aliases_go121.go | 31 + .../x/tools/internal/aliases/aliases_go122.go | 55 +- .../x/tools/internal/gcimporter/bimport.go | 61 + .../x/tools/internal/gcimporter/exportdata.go | 460 +-- .../x/tools/internal/gcimporter/gcimporter.go | 182 +- .../x/tools/internal/gcimporter/iexport.go | 284 +- .../x/tools/internal/gcimporter/iimport.go | 51 +- .../internal/gcimporter/iimport_go122.go | 53 - .../internal/gcimporter/newInterface10.go | 22 + .../internal/gcimporter/newInterface11.go | 14 + .../tools/internal/gcimporter/predeclared.go | 91 - .../x/tools/internal/gcimporter/support.go | 30 - .../internal/gcimporter/support_go118.go | 34 + .../x/tools/internal/gcimporter/unified_no.go | 10 + .../tools/internal/gcimporter/unified_yes.go | 10 + .../tools/internal/gcimporter/ureader_yes.go | 53 +- .../x/tools/internal/gocommand/invoke.go | 39 +- .../x/tools/internal/imports/fix.go | 521 ++-- .../x/tools/internal/imports/imports.go | 33 +- .../x/tools/internal/imports/mod.go | 17 +- .../x/tools/internal/imports/source.go | 63 - .../x/tools/internal/imports/source_env.go | 129 - .../tools/internal/imports/source_modindex.go | 103 - .../x/tools/internal/modindex/directories.go | 135 - .../x/tools/internal/modindex/index.go | 266 -- .../x/tools/internal/modindex/lookup.go | 148 - .../x/tools/internal/modindex/modindex.go | 164 -- .../x/tools/internal/modindex/symbols.go | 217 -- .../x/tools/internal/modindex/types.go | 25 - .../internal/packagesinternal/packages.go | 2 + .../x/tools/internal/pkgbits/decoder.go | 38 +- .../x/tools/internal/pkgbits/encoder.go | 43 +- .../x/tools/internal/pkgbits/frames_go1.go | 21 + .../x/tools/internal/pkgbits/frames_go17.go | 28 + .../x/tools/internal/pkgbits/support.go | 2 +- .../x/tools/internal/pkgbits/sync.go | 23 - .../internal/pkgbits/syncmarker_string.go | 7 +- .../x/tools/internal/pkgbits/version.go | 85 - .../x/tools/internal/stdlib/manifest.go | 330 --- .../internal/tokeninternal/tokeninternal.go | 137 + .../x/tools/internal/typeparams/common.go | 68 - .../x/tools/internal/typeparams/coretype.go | 150 - .../x/tools/internal/typeparams/free.go | 131 - .../x/tools/internal/typeparams/normalize.go | 218 -- .../x/tools/internal/typeparams/termlist.go | 163 -- .../x/tools/internal/typeparams/typeterm.go | 169 -- .../x/tools/internal/typesinternal/element.go | 133 - .../tools/internal/typesinternal/errorcode.go | 8 +- .../tools/internal/typesinternal/qualifier.go | 46 - .../x/tools/internal/typesinternal/recv.go | 10 +- .../x/tools/internal/typesinternal/types.go | 72 - .../tools/internal/typesinternal/zerovalue.go | 392 --- .../x/tools/internal/versions/toolchain.go | 14 + .../internal/versions/toolchain_go119.go} | 14 +- .../internal/versions/toolchain_go120.go | 14 + .../internal/versions/toolchain_go121.go | 14 + .../x/tools/internal/versions/types.go | 28 +- .../x/tools/internal/versions/types_go121.go | 30 + .../x/tools/internal/versions/types_go122.go | 41 + 
.../protobuf/internal/descopts/options.go | 20 +- .../editiondefaults/editions_defaults.binpb | Bin 138 -> 93 bytes .../internal/editionssupport/editions.go | 7 +- .../protobuf/internal/errors/is_go112.go | 40 + .../protobuf/internal/errors/is_go113.go | 13 + .../protobuf/internal/filedesc/desc.go | 22 - .../protobuf/internal/filedesc/desc_init.go | 2 - .../protobuf/internal/filedesc/desc_lazy.go | 2 - .../protobuf/internal/filedesc/editions.go | 10 +- .../protobuf/internal/genid/doc.go | 2 +- .../internal/genid/go_features_gen.go | 49 +- .../protobuf/internal/genid/map_entry.go | 2 +- .../protobuf/internal/genid/wrappers.go | 2 +- .../internal/impl/api_export_opaque.go | 128 - .../protobuf/internal/impl/bitmap.go | 34 - .../protobuf/internal/impl/bitmap_race.go | 126 - .../protobuf/internal/impl/checkinit.go | 33 - .../protobuf/internal/impl/codec_extension.go | 11 +- .../protobuf/internal/impl/codec_field.go | 3 - .../internal/impl/codec_field_opaque.go | 264 -- .../protobuf/internal/impl/codec_message.go | 16 - .../internal/impl/codec_message_opaque.go | 156 - .../protobuf/internal/impl/codec_reflect.go | 210 ++ .../protobuf/internal/impl/codec_unsafe.go | 3 + .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/decode.go | 56 +- .../protobuf/internal/impl/encode.go | 80 +- .../protobuf/internal/impl/equal.go | 224 -- .../protobuf/internal/impl/lazy.go | 433 --- .../internal/impl/legacy_extension.go | 1 - .../protobuf/internal/impl/merge.go | 27 - .../protobuf/internal/impl/message.go | 16 +- .../protobuf/internal/impl/message_opaque.go | 614 ---- .../internal/impl/message_opaque_gen.go | 132 - .../protobuf/internal/impl/message_reflect.go | 5 - .../internal/impl/message_reflect_field.go | 34 +- .../impl/message_reflect_field_gen.go | 273 -- .../protobuf/internal/impl/pointer_reflect.go | 215 ++ .../protobuf/internal/impl/pointer_unsafe.go | 12 +- .../internal/impl/pointer_unsafe_opaque.go | 42 - .../protobuf/internal/impl/presence.go | 142 - .../protobuf/internal/impl/validate.go | 16 - .../internal/protolazy/bufferreader.go | 364 --- .../protobuf/internal/protolazy/lazy.go | 359 --- .../internal/protolazy/pointer_unsafe.go | 17 - .../protobuf/internal/strs/strings_pure.go | 28 + .../internal/strs/strings_unsafe_go120.go | 3 +- .../internal/strs/strings_unsafe_go121.go | 3 +- .../protobuf/internal/version/version.go | 4 +- .../protobuf/proto/decode.go | 16 - .../protobuf/proto/encode.go | 3 +- .../google.golang.org/protobuf/proto/equal.go | 9 - .../protobuf/proto/extension.go | 71 - .../google.golang.org/protobuf/proto/size.go | 8 - .../protobuf/proto/wrapperopaque.go | 80 - .../protobuf/reflect/protodesc/desc.go | 12 +- .../protobuf/reflect/protodesc/desc_init.go | 4 - .../protobuf/reflect/protodesc/editions.go | 10 +- .../protobuf/reflect/protoreflect/methods.go | 10 - .../protobuf/reflect/protoreflect/value.go | 2 +- .../reflect/protoreflect/value_pure.go | 60 + .../protoreflect/value_unsafe_go120.go | 3 +- .../protoreflect/value_unsafe_go121.go | 3 +- .../protobuf/runtime/protoiface/methods.go | 34 - .../protobuf/runtime/protoimpl/impl.go | 4 - .../types/descriptorpb/descriptor.pb.go | 2535 ++++++++++------- .../types/gofeaturespb/go_features.pb.go | 250 +- .../protobuf/types/known/anypb/any.pb.go | 33 +- .../types/known/durationpb/duration.pb.go | 33 +- .../types/known/timestamppb/timestamp.pb.go | 33 +- vendor/modules.txt | 45 +- 283 files changed, 6124 insertions(+), 14020 deletions(-) rename vendor/github.com/go-task/slim-sprig/{v3 => }/.editorconfig (100%) 
rename vendor/github.com/go-task/slim-sprig/{v3 => }/.gitattributes (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/.gitignore (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/CHANGELOG.md (95%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/LICENSE.txt (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/README.md (88%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/Taskfile.yml (89%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/crypto.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/date.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/defaults.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/dict.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/doc.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/functions.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/list.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/network.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/numeric.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/reflect.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/regex.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/strings.go (100%) rename vendor/github.com/go-task/slim-sprig/{v3 => }/url.go (100%) rename vendor/github.com/miekg/dns/{listen_no_reuseport.go => listen_no_socket_options.go} (61%) rename vendor/github.com/miekg/dns/{listen_reuseport.go => listen_socket_options.go} (66%) delete mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go create mode 100644 vendor/github.com/quic-go/quic-go/http3/ip_addr.go create mode 100644 vendor/github.com/quic-go/quic-go/http3/trace.go create mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/frame.go delete mode 100644 vendor/github.com/quic-go/quic-go/internal/wire/interface.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/connection_tracer_multiplexer.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/generate_multiplexer.go create mode 100644 vendor/github.com/quic-go/quic-go/logging/multiplexer.tmpl create mode 100644 vendor/github.com/quic-go/quic-go/logging/tracer_multiplexer.go delete mode 100644 vendor/github.com/quic-go/quic-go/multiplexer.go create mode 100644 vendor/github.com/quic-go/quic-go/stateless_reset.go delete mode 100644 vendor/golang.org/x/tools/cover/profile.go delete mode 100644 vendor/golang.org/x/tools/go/ast/inspector/iter.go create mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go delete mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui.go create mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases_go121.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/predeclared.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go118.go create mode 100644 
vendor/golang.org/x/tools/internal/gcimporter/unified_no.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go delete mode 100644 vendor/golang.org/x/tools/internal/imports/source.go delete mode 100644 vendor/golang.org/x/tools/internal/imports/source_env.go delete mode 100644 vendor/golang.org/x/tools/internal/imports/source_modindex.go delete mode 100644 vendor/golang.org/x/tools/internal/modindex/directories.go delete mode 100644 vendor/golang.org/x/tools/internal/modindex/index.go delete mode 100644 vendor/golang.org/x/tools/internal/modindex/lookup.go delete mode 100644 vendor/golang.org/x/tools/internal/modindex/modindex.go delete mode 100644 vendor/golang.org/x/tools/internal/modindex/symbols.go delete mode 100644 vendor/golang.org/x/tools/internal/modindex/types.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/version.go create mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/free.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go delete mode 100644 vendor/golang.org/x/tools/internal/typesinternal/element.go delete mode 100644 vendor/golang.org/x/tools/internal/typesinternal/qualifier.go delete mode 100644 vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go create mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain.go rename vendor/{google.golang.org/protobuf/internal/genid/name.go => golang.org/x/tools/internal/versions/toolchain_go119.go} (50%) create mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go120.go create mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go121.go create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/bitmap.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/equal.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/lazy.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_opaque.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go delete mode 100644 
vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/presence.go delete mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go delete mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/lazy.go delete mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go delete mode 100644 vendor/google.golang.org/protobuf/proto/wrapperopaque.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go diff --git a/go.mod b/go.mod index 60b7bf22..0e97f22c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/dnscrypt/dnscrypt-proxy -go 1.23.4 +go 1.23.5 require ( github.com/BurntSushi/toml v1.4.0 @@ -18,10 +18,10 @@ require ( github.com/k-sone/critbitgo v1.4.0 github.com/kardianos/service v1.2.2 github.com/lifenjoiner/dhcpdns v0.0.6 - github.com/miekg/dns v1.1.62 + github.com/miekg/dns v1.1.63 github.com/opencoff/go-sieve v0.2.1 github.com/powerman/check v1.8.0 - github.com/quic-go/quic-go v0.48.2 + github.com/quic-go/quic-go v0.49.0 golang.org/x/crypto v0.32.0 golang.org/x/net v0.34.0 golang.org/x/sys v0.29.0 @@ -30,24 +30,24 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect github.com/hashicorp/go-syslog v1.0.0 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/onsi/ginkgo/v2 v2.22.2 // indirect + github.com/hashicorp/golang-lru v0.5.0 // indirect + github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/powerman/deepequal v0.1.0 // indirect github.com/quic-go/qpack v0.5.1 // indirect github.com/smartystreets/goconvey v1.8.1 // indirect go.uber.org/mock v0.5.0 // indirect - golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect - golang.org/x/mod v0.22.0 // indirect + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect + golang.org/x/mod v0.18.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/tools v0.29.0 // indirect + golang.org/x/tools v0.22.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.56.3 // indirect - google.golang.org/protobuf v1.36.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect ) diff --git a/go.sum b/go.sum index d5b3486d..e2dcddc8 100644 --- a/go.sum +++ b/go.sum @@ -2,24 +2,28 @@ github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0 github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 h1:3T8ZyTDp5QxTx3NU48JVb2u+75xc040fofcBaN+6jPA= github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185/go.mod h1:cFRxtTwTOJkz2x3rQUNCYKWC93yP1VKjR8NUhqFxZNU= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= @@ -28,11 +32,11 @@ github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwM github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb 
h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM= github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405 h1:6j/0utSiy3KhZSpFJgobk+ME1BIwXeq9jepJaDLW3Yg= github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405/go.mod h1:OO1HpQNlMCMaPdHPuI00fhChZQZ8npbVTTjMvJUxUqQ= github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e h1:tzG4EjKgHIqKVkLIAC4pXTIapuM2BR05uXokEEysAXA= @@ -53,12 +57,12 @@ github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/lifenjoiner/dhcpdns v0.0.6 h1:rn4Y5RRR5sgQ6RjWenwhA7i/uHzHW9hbZpCobA4CAJs= github.com/lifenjoiner/dhcpdns v0.0.6/go.mod h1:BixeaGeafYzDIuDCYIUbSOdi4m+TScpzI9cZGYgzgSk= -github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= -github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= -github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= -github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY= +github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs= +github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= +github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/opencoff/go-sieve v0.2.1 h1:5Pv6rd3zRquNmXcYHFndjVoolTgcv0ua2XTdMQ+gw0M= github.com/opencoff/go-sieve v0.2.1/go.mod h1:CndxLpW4R8fDq04XfBSCOZ+qWwDCcxjfUJbr0GPqWHY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -71,27 +75,30 @@ github.com/powerman/deepequal v0.1.0 h1:sVwtyTsBuYIvdbLR1O2wzRY63YgPqdGZmk/o80l+ github.com/powerman/deepequal v0.1.0/go.mod h1:3k7aG/slufBhUANdN67o/UPg8i5YaiJ6FmibWX0cn04= github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= -github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/quic-go/quic-go v0.49.0 h1:w5iJHXwHxs1QxyBv1EHKuC50GX5to8mJAxvtnttJp94= +github.com/quic-go/quic-go v0.49.0/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s= github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -99,8 +106,8 @@ golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= -golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= @@ -108,9 +115,11 @@ google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-task/slim-sprig/v3/.editorconfig b/vendor/github.com/go-task/slim-sprig/.editorconfig similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/.editorconfig rename to vendor/github.com/go-task/slim-sprig/.editorconfig diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitattributes b/vendor/github.com/go-task/slim-sprig/.gitattributes similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/.gitattributes rename to vendor/github.com/go-task/slim-sprig/.gitattributes diff --git a/vendor/github.com/go-task/slim-sprig/v3/.gitignore b/vendor/github.com/go-task/slim-sprig/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/.gitignore rename to vendor/github.com/go-task/slim-sprig/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md similarity index 95% rename from vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md rename to vendor/github.com/go-task/slim-sprig/CHANGELOG.md index 2ce45dd4..61d8ebff 100644 --- a/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md +++ b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md @@ -1,24 +1,5 @@ # Changelog -## Release 3.2.3 (2022-11-29) - -### Changed - -- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) -- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) -- #353: Updated masterminds/semver which included bug fixes -- #354: Updated golang.org/x/crypto which included bug fixes - -## Release 3.2.2 (2021-02-04) - -This is a re-release of 3.2.1 to satisfy something with the Go module system. - -## Release 3.2.1 (2021-02-04) - -### Changed - -- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) - ## Release 3.2.0 (2020-12-14) ### Added diff --git a/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/LICENSE.txt similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/LICENSE.txt diff --git a/vendor/github.com/go-task/slim-sprig/v3/README.md b/vendor/github.com/go-task/slim-sprig/README.md similarity index 88% rename from vendor/github.com/go-task/slim-sprig/v3/README.md rename to vendor/github.com/go-task/slim-sprig/README.md index b5ab5642..72579471 100644 --- a/vendor/github.com/go-task/slim-sprig/v3/README.md +++ b/vendor/github.com/go-task/slim-sprig/README.md @@ -1,4 +1,4 @@ -# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) +# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with all functions that depend on external (non standard library) or crypto packages diff --git a/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/Taskfile.yml similarity index 89% rename from vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml rename to vendor/github.com/go-task/slim-sprig/Taskfile.yml index 8e6346bb..cdcfd223 100644 --- a/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml +++ b/vendor/github.com/go-task/slim-sprig/Taskfile.yml @@ -1,6 +1,6 @@ # https://taskfile.dev -version: '3' +version: '2' tasks: default: diff --git a/vendor/github.com/go-task/slim-sprig/v3/crypto.go b/vendor/github.com/go-task/slim-sprig/crypto.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/crypto.go rename to vendor/github.com/go-task/slim-sprig/crypto.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/date.go b/vendor/github.com/go-task/slim-sprig/date.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/date.go rename to vendor/github.com/go-task/slim-sprig/date.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/defaults.go b/vendor/github.com/go-task/slim-sprig/defaults.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/defaults.go rename to vendor/github.com/go-task/slim-sprig/defaults.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/dict.go b/vendor/github.com/go-task/slim-sprig/dict.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/dict.go rename to vendor/github.com/go-task/slim-sprig/dict.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/doc.go b/vendor/github.com/go-task/slim-sprig/doc.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/doc.go rename to vendor/github.com/go-task/slim-sprig/doc.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/functions.go b/vendor/github.com/go-task/slim-sprig/functions.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/functions.go rename to vendor/github.com/go-task/slim-sprig/functions.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/list.go 
b/vendor/github.com/go-task/slim-sprig/list.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/list.go rename to vendor/github.com/go-task/slim-sprig/list.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/network.go b/vendor/github.com/go-task/slim-sprig/network.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/network.go rename to vendor/github.com/go-task/slim-sprig/network.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/numeric.go b/vendor/github.com/go-task/slim-sprig/numeric.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/numeric.go rename to vendor/github.com/go-task/slim-sprig/numeric.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/reflect.go b/vendor/github.com/go-task/slim-sprig/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/reflect.go rename to vendor/github.com/go-task/slim-sprig/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/regex.go b/vendor/github.com/go-task/slim-sprig/regex.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/regex.go rename to vendor/github.com/go-task/slim-sprig/regex.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/strings.go b/vendor/github.com/go-task/slim-sprig/strings.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/strings.go rename to vendor/github.com/go-task/slim-sprig/strings.go diff --git a/vendor/github.com/go-task/slim-sprig/v3/url.go b/vendor/github.com/go-task/slim-sprig/url.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/v3/url.go rename to vendor/github.com/go-task/slim-sprig/url.go diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 8ce9d3cf..ab7f03ae 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -17,7 +17,6 @@ package profile import ( "errors" "sort" - "strings" ) func (p *Profile) decoder() []decoder { @@ -122,7 +121,6 @@ func (p *Profile) preEncode() { } p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) - p.docURLX = addString(strings, p.DocURL) p.stringTable = make([]string, len(strings)) for s, i := range strings { @@ -157,7 +155,6 @@ func (p *Profile) encode(b *buffer) { encodeInt64Opt(b, 12, p.Period) encodeInt64s(b, 13, p.commentX) encodeInt64(b, 14, p.defaultSampleTypeX) - encodeInt64Opt(b, 15, p.docURLX) } var profileDecoder = []decoder{ @@ -186,13 +183,12 @@ var profileDecoder = []decoder{ // repeated Location location = 4 func(b *buffer, m message) error { x := new(Location) - x.Line = b.tmpLines[:0] // Use shared space temporarily + x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer pp := m.(*Profile) pp.Location = append(pp.Location, x) err := decodeMessage(b, x) - b.tmpLines = x.Line[:0] - // Copy to shrink size and detach from shared space. - x.Line = append([]Line(nil), x.Line...) + var tmp []Line + x.Line = append(tmp, x.Line...) 
// Shrink to allocated size return err }, // repeated Function function = 5 @@ -239,8 +235,6 @@ var profileDecoder = []decoder{ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, // int64 defaultSampleType = 14 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, - // string doc_link = 15; - func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, } // postDecode takes the unexported fields populated by decode (with @@ -258,14 +252,6 @@ func (p *Profile) postDecode() error { } else { mappings[m.ID] = m } - - // If this a main linux kernel mapping with a relocation symbol suffix - // ("[kernel.kallsyms]_text"), extract said suffix. - // It is fairly hacky to handle at this level, but the alternatives appear even worse. - const prefix = "[kernel.kallsyms]" - if strings.HasPrefix(m.File, prefix) { - m.KernelRelocationSymbol = m.File[len(prefix):] - } } functions := make(map[uint64]*Function, len(p.Function)) @@ -312,52 +298,41 @@ func (p *Profile) postDecode() error { st.Unit, err = getString(p.stringTable, &st.unitX, err) } - // Pre-allocate space for all locations. - numLocations := 0 for _, s := range p.Sample { - numLocations += len(s.locationIDX) - } - locBuffer := make([]*Location, numLocations) - - for _, s := range p.Sample { - if len(s.labelX) > 0 { - labels := make(map[string][]string, len(s.labelX)) - numLabels := make(map[string][]int64, len(s.labelX)) - numUnits := make(map[string][]string, len(s.labelX)) - for _, l := range s.labelX { - var key, value string - key, err = getString(p.stringTable, &l.keyX, err) - if l.strX != 0 { - value, err = getString(p.stringTable, &l.strX, err) - labels[key] = append(labels[key], value) - } else if l.numX != 0 || l.unitX != 0 { - numValues := numLabels[key] - units := numUnits[key] - if l.unitX != 0 { - var unit string - unit, err = getString(p.stringTable, &l.unitX, err) - units = padStringArray(units, len(numValues)) - numUnits[key] = append(units, unit) - } - numLabels[key] = append(numLabels[key], l.numX) + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) } - } - if len(labels) > 0 { - s.Label = labels - } - if len(numLabels) > 0 { - s.NumLabel = numLabels - for key, units := range numUnits { - if len(units) > 0 { - numUnits[key] = padStringArray(units, len(numLabels[key])) - } - } - s.NumUnit = numUnits + numLabels[key] = append(numLabels[key], l.numX) } } - - s.Location = locBuffer[:len(s.locationIDX)] - locBuffer = locBuffer[len(s.locationIDX):] + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + s.Location = make([]*Location, len(s.locationIDX)) for i, lid := range s.locationIDX { if lid < uint64(len(locationIds)) { s.Location[i] = 
locationIds[lid] @@ -388,7 +363,6 @@ func (p *Profile) postDecode() error { p.commentX = nil p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) - p.DocURL, err = getString(p.stringTable, &p.docURLX, err) p.stringTable = nil return err } @@ -535,7 +509,6 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) - encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -544,8 +517,6 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, - // optional int64 column = 3 - func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go index c794b939..ea8e66c6 100644 --- a/vendor/github.com/google/pprof/profile/filter.go +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -22,10 +22,6 @@ import "regexp" // samples where at least one frame matches focus but none match ignore. // Returns true is the corresponding regexp matched at least one sample. func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { - if focus == nil && ignore == nil && hide == nil && show == nil { - fm = true // Missing focus implies a match - return - } focusOrIgnore := make(map[uint64]bool) hidden := make(map[uint64]bool) for _, l := range p.Location { diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 4580bab1..91f45e53 100644 --- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false, false); err != nil { + if err = p.Aggregate(true, true, true, true, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false, false); err != nil { + if err = p.Aggregate(true, true, true, true, false); err != nil { return nil, err } diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go index 8d07fd6c..0c8f3bb5 100644 --- a/vendor/github.com/google/pprof/profile/legacy_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -295,12 +295,11 @@ func get64b(b []byte) (uint64, []byte) { // // The general format for profilez samples is a sequence of words in // binary format. The first words are a header with the following data: -// -// 1st word -- 0 -// 2nd word -- 3 -// 3rd word -- 0 if a c++ application, 1 if a java application. -// 4th word -- Sampling period (in microseconds). -// 5th word -- Padding. +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. 
func parseCPU(b []byte) (*Profile, error) { var parse func([]byte) (uint64, []byte) var n1, n2, n3, n4, n5 uint64 @@ -404,18 +403,15 @@ func cleanupDuplicateLocations(p *Profile) { // // profilez samples are a repeated sequence of stack frames of the // form: -// -// 1st word -- The number of times this stack was encountered. -// 2nd word -- The size of the stack (StackSize). -// 3rd word -- The first address on the stack. -// ... -// StackSize + 2 -- The last address on the stack -// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack // The last stack trace is of the form: -// -// 1st word -- 0 -// 2nd word -- 1 -// 3rd word -- 0 +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 // // Addresses from stack traces may point to the next instruction after // each call. Optionally adjust by -1 to land somewhere on the actual @@ -865,6 +861,7 @@ func parseThread(b []byte) (*Profile, error) { // Recognize each thread and populate profile samples. for !isMemoryMapSentinel(line) { if strings.HasPrefix(line, "---- no stack trace for") { + line = "" break } if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index ba4d7464..9978e733 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -15,7 +15,6 @@ package profile import ( - "encoding/binary" "fmt" "sort" "strconv" @@ -59,7 +58,7 @@ func Merge(srcs []*Profile) (*Profile, error) { for _, src := range srcs { // Clear the profile-specific hash tables - pm.locationsByID = makeLocationIDMap(len(src.Location)) + pm.locationsByID = make(map[uint64]*Location, len(src.Location)) pm.functionsByID = make(map[uint64]*Function, len(src.Function)) pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) @@ -137,7 +136,7 @@ type profileMerger struct { p *Profile // Memoization tables within a profile. - locationsByID locationIDMap + locationsByID map[uint64]*Location functionsByID map[uint64]*Function mappingsByID map[uint64]mapInfo @@ -154,16 +153,6 @@ type mapInfo struct { } func (pm *profileMerger) mapSample(src *Sample) *Sample { - // Check memoization table - k := pm.sampleKey(src) - if ss, ok := pm.samples[k]; ok { - for i, v := range src.Value { - ss.Value[i] += v - } - return ss - } - - // Make new sample. s := &Sample{ Location: make([]*Location, len(src.Location)), Value: make([]int64, len(src.Value)), @@ -188,98 +177,52 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample { s.NumLabel[k] = vv s.NumUnit[k] = uu } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping. Add current values to the + // existing sample. + k := s.key() + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } copy(s.Value, src.Value) pm.samples[k] = s pm.p.Sample = append(pm.p.Sample, s) return s } -func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { - // Accumulate contents into a string. - var buf strings.Builder - buf.Grow(64) // Heuristic to avoid extra allocs - - // encode a number - putNumber := func(v uint64) { - var num [binary.MaxVarintLen64]byte - n := binary.PutUvarint(num[:], v) - buf.Write(num[:n]) +// key generates sampleKey to be used as a key for maps. 
+func (sample *Sample) key() sampleKey { + ids := make([]string, len(sample.Location)) + for i, l := range sample.Location { + ids[i] = strconv.FormatUint(l.ID, 16) } - // encode a string prefixed with its length. - putDelimitedString := func(s string) { - putNumber(uint64(len(s))) - buf.WriteString(s) + labels := make([]string, 0, len(sample.Label)) + for k, v := range sample.Label { + labels = append(labels, fmt.Sprintf("%q%q", k, v)) } + sort.Strings(labels) - for _, l := range sample.Location { - // Get the location in the merged profile, which may have a different ID. - if loc := pm.mapLocation(l); loc != nil { - putNumber(loc.ID) - } + numlabels := make([]string, 0, len(sample.NumLabel)) + for k, v := range sample.NumLabel { + numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) } - putNumber(0) // Delimiter + sort.Strings(numlabels) - for _, l := range sortedKeys1(sample.Label) { - putDelimitedString(l) - values := sample.Label[l] - putNumber(uint64(len(values))) - for _, v := range values { - putDelimitedString(v) - } + return sampleKey{ + strings.Join(ids, "|"), + strings.Join(labels, ""), + strings.Join(numlabels, ""), } - - for _, l := range sortedKeys2(sample.NumLabel) { - putDelimitedString(l) - values := sample.NumLabel[l] - putNumber(uint64(len(values))) - for _, v := range values { - putNumber(uint64(v)) - } - units := sample.NumUnit[l] - putNumber(uint64(len(units))) - for _, v := range units { - putDelimitedString(v) - } - } - - return sampleKey(buf.String()) } -type sampleKey string - -// sortedKeys1 returns the sorted keys found in a string->[]string map. -// -// Note: this is currently non-generic since github pprof runs golint, -// which does not support generics. When that issue is fixed, it can -// be merged with sortedKeys2 and made into a generic function. -func sortedKeys1(m map[string][]string) []string { - if len(m) == 0 { - return nil - } - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// sortedKeys2 returns the sorted keys found in a string->[]int64 map. -// -// Note: this is currently non-generic since github pprof runs golint, -// which does not support generics. When that issue is fixed, it can -// be merged with sortedKeys1 and made into a generic function. -func sortedKeys2(m map[string][]int64) []string { - if len(m) == 0 { - return nil - } - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - return keys +type sampleKey struct { + locations string + labels string + numlabels string } func (pm *profileMerger) mapLocation(src *Location) *Location { @@ -287,7 +230,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { return nil } - if l := pm.locationsByID.get(src.ID); l != nil { + if l, ok := pm.locationsByID[src.ID]; ok { return l } @@ -306,10 +249,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { // account for the remapped mapping ID. 
k := l.key() if ll, ok := pm.locations[k]; ok { - pm.locationsByID.set(src.ID, ll) + pm.locationsByID[src.ID] = ll return ll } - pm.locationsByID.set(src.ID, l) + pm.locationsByID[src.ID] = l pm.locations[k] = l pm.p.Location = append(pm.p.Location, l) return l @@ -326,13 +269,12 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*3) + lines := make([]string, len(l.Line)*2) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) - lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -361,17 +303,16 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { return mi } m := &Mapping{ - ID: uint64(len(pm.p.Mapping) + 1), - Start: src.Start, - Limit: src.Limit, - Offset: src.Offset, - File: src.File, - KernelRelocationSymbol: src.KernelRelocationSymbol, - BuildID: src.BuildID, - HasFunctions: src.HasFunctions, - HasFilenames: src.HasFilenames, - HasLineNumbers: src.HasLineNumbers, - HasInlineFrames: src.HasInlineFrames, + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, } pm.p.Mapping = append(pm.p.Mapping, m) @@ -419,7 +360,6 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, - Column: src.Column, } return ln } @@ -476,7 +416,6 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { var timeNanos, durationNanos, period int64 var comments []string seenComments := map[string]bool{} - var docURL string var defaultSampleType string for _, s := range srcs { if timeNanos == 0 || s.TimeNanos < timeNanos { @@ -495,9 +434,6 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { if defaultSampleType == "" { defaultSampleType = s.DefaultSampleType } - if docURL == "" { - docURL = s.DocURL - } } p := &Profile{ @@ -513,7 +449,6 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { Comments: comments, DefaultSampleType: defaultSampleType, - DocURL: docURL, } copy(p.SampleType, srcs[0].SampleType) return p, nil @@ -544,131 +479,3 @@ func (p *Profile) compatible(pb *Profile) error { func equalValueType(st1, st2 *ValueType) bool { return st1.Type == st2.Type && st1.Unit == st2.Unit } - -// locationIDMap is like a map[uint64]*Location, but provides efficiency for -// ids that are densely numbered, which is often the case. -type locationIDMap struct { - dense []*Location // indexed by id for id < len(dense) - sparse map[uint64]*Location // indexed by id for id >= len(dense) -} - -func makeLocationIDMap(n int) locationIDMap { - return locationIDMap{ - dense: make([]*Location, n), - sparse: map[uint64]*Location{}, - } -} - -func (lm locationIDMap) get(id uint64) *Location { - if id < uint64(len(lm.dense)) { - return lm.dense[int(id)] - } - return lm.sparse[id] -} - -func (lm locationIDMap) set(id uint64, loc *Location) { - if id < uint64(len(lm.dense)) { - lm.dense[id] = loc - return - } - lm.sparse[id] = loc -} - -// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It -// keeps sample types that appear in all profiles only and drops/reorders the -// sample types as necessary. 
-// -// In the case of sample types order is not the same for given profiles the -// order is derived from the first profile. -// -// Profiles are modified in-place. -// -// It returns an error if the sample type's intersection is empty. -func CompatibilizeSampleTypes(ps []*Profile) error { - sTypes := commonSampleTypes(ps) - if len(sTypes) == 0 { - return fmt.Errorf("profiles have empty common sample type list") - } - for _, p := range ps { - if err := compatibilizeSampleTypes(p, sTypes); err != nil { - return err - } - } - return nil -} - -// commonSampleTypes returns sample types that appear in all profiles in the -// order how they ordered in the first profile. -func commonSampleTypes(ps []*Profile) []string { - if len(ps) == 0 { - return nil - } - sTypes := map[string]int{} - for _, p := range ps { - for _, st := range p.SampleType { - sTypes[st.Type]++ - } - } - var res []string - for _, st := range ps[0].SampleType { - if sTypes[st.Type] == len(ps) { - res = append(res, st.Type) - } - } - return res -} - -// compatibilizeSampleTypes drops sample types that are not present in sTypes -// list and reorder them if needed. -// -// It sets DefaultSampleType to sType[0] if it is not in sType list. -// -// It assumes that all sample types from the sTypes list are present in the -// given profile otherwise it returns an error. -func compatibilizeSampleTypes(p *Profile, sTypes []string) error { - if len(sTypes) == 0 { - return fmt.Errorf("sample type list is empty") - } - defaultSampleType := sTypes[0] - reMap, needToModify := make([]int, len(sTypes)), false - for i, st := range sTypes { - if st == p.DefaultSampleType { - defaultSampleType = p.DefaultSampleType - } - idx := searchValueType(p.SampleType, st) - if idx < 0 { - return fmt.Errorf("%q sample type is not found in profile", st) - } - reMap[i] = idx - if idx != i { - needToModify = true - } - } - if !needToModify && len(sTypes) == len(p.SampleType) { - return nil - } - p.DefaultSampleType = defaultSampleType - oldSampleTypes := p.SampleType - p.SampleType = make([]*ValueType, len(sTypes)) - for i, idx := range reMap { - p.SampleType[i] = oldSampleTypes[idx] - } - values := make([]int64, len(sTypes)) - for _, s := range p.Sample { - for i, idx := range reMap { - values[i] = s.Value[idx] - } - s.Value = s.Value[:len(values)] - copy(s.Value, values) - } - return nil -} - -func searchValueType(vts []*ValueType, s string) int { - for i, vt := range vts { - if vt.Type == s { - return i - } - } - return -1 -} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index f47a2439..2590c8dd 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -21,6 +21,7 @@ import ( "compress/gzip" "fmt" "io" + "io/ioutil" "math" "path/filepath" "regexp" @@ -39,7 +40,6 @@ type Profile struct { Location []*Location Function []*Function Comments []string - DocURL string DropFrames string KeepFrames string @@ -54,7 +54,6 @@ type Profile struct { encodeMu sync.Mutex commentX []int64 - docURLX int64 dropFramesX int64 keepFramesX int64 stringTable []string @@ -74,23 +73,9 @@ type ValueType struct { type Sample struct { Location []*Location Value []int64 - // Label is a per-label-key map to values for string labels. - // - // In general, having multiple values for the given label key is strongly - // discouraged - see docs for the sample label field in profile.proto. 
The - // main reason this unlikely state is tracked here is to make the - // decoding->encoding roundtrip not lossy. But we expect that the value - // slices present in this map are always of length 1. - Label map[string][]string - // NumLabel is a per-label-key map to values for numeric labels. See a note - // above on handling multiple values for a label. + Label map[string][]string NumLabel map[string][]int64 - // NumUnit is a per-label-key map to the unit names of corresponding numeric - // label values. The unit info may be missing even if the label is in - // NumLabel, see the docs in profile.proto for details. When the value is - // slice is present and not nil, its length must be equal to the length of - // the corresponding value slice in NumLabel. - NumUnit map[string][]string + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -121,15 +106,6 @@ type Mapping struct { fileX int64 buildIDX int64 - - // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. - // For linux kernel mappings generated by some tools, correct symbolization depends - // on knowing which of the two possible relocation symbols was used for `Start`. - // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). - // - // Note, this public field is not persisted in the proto. For the purposes of - // copying / merging / hashing profiles, it is considered subsumed by `File`. - KernelRelocationSymbol string } // Location corresponds to Profile.Location @@ -147,7 +123,6 @@ type Location struct { type Line struct { Function *Function Line int64 - Column int64 functionIDX uint64 } @@ -169,7 +144,7 @@ type Function struct { // may be a gzip-compressed encoded protobuf or one of many legacy // profile formats which may be unsupported in the future. func Parse(r io.Reader) (*Profile, error) { - data, err := io.ReadAll(r) + data, err := ioutil.ReadAll(r) if err != nil { return nil, err } @@ -184,7 +159,7 @@ func ParseData(data []byte) (*Profile, error) { if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err == nil { - data, err = io.ReadAll(gz) + data, err = ioutil.ReadAll(gz) } if err != nil { return nil, fmt.Errorf("decompressing profile: %v", err) @@ -439,7 +414,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. 
-func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -461,7 +436,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnn } // Aggregate locations - if !inlineFrame || !address || !linenumber || !columnnumber { + if !inlineFrame || !address || !linenumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -469,12 +444,6 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnn if !linenumber { for i := range l.Line { l.Line[i].Line = 0 - l.Line[i].Column = 0 - } - } - if !columnnumber { - for i := range l.Line { - l.Line[i].Column = 0 } } if !address { @@ -557,9 +526,6 @@ func (p *Profile) String() string { for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } - if url := p.DocURL; url != "" { - ss = append(ss, fmt.Sprintf("Doc: %s", url)) - } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) } @@ -639,11 +605,10 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, - l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" @@ -742,35 +707,6 @@ func (s *Sample) HasLabel(key, value string) bool { return false } -// SetNumLabel sets the specified key to the specified value for all samples in the -// profile. "unit" is a slice that describes the units that each corresponding member -// of "values" is measured in (e.g. bytes or seconds). If there is no relevant -// unit for a given value, that member of "unit" should be the empty string. -// "unit" must either have the same length as "value", or be nil. -func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { - for _, sample := range p.Sample { - if sample.NumLabel == nil { - sample.NumLabel = map[string][]int64{key: value} - } else { - sample.NumLabel[key] = value - } - if sample.NumUnit == nil { - sample.NumUnit = map[string][]string{key: unit} - } else { - sample.NumUnit[key] = unit - } - } -} - -// RemoveNumLabel removes all numerical labels associated with the specified key for all -// samples in the profile. -func (p *Profile) RemoveNumLabel(key string) { - for _, sample := range p.Sample { - delete(sample.NumLabel, key) - delete(sample.NumUnit, key) - } -} - // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. func (s *Sample) DiffBaseSample() bool { @@ -849,10 +785,10 @@ func (p *Profile) HasFileLines() bool { // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are -// "[vdso]", "[vsyscall]" and some others, see the code. +// "[vdso]", [vsyscall]" and some others, see the code. 
func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") } // Copy makes a fully independent copy of a profile. diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go index a15696ba..539ad3ab 100644 --- a/vendor/github.com/google/pprof/profile/proto.go +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -39,12 +39,11 @@ import ( ) type buffer struct { - field int // field tag - typ int // proto wire type code for field - u64 uint64 - data []byte - tmp [16]byte - tmpLines []Line // temporary storage used while decoding "repeated Line". + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte } type decoder func(*buffer, message) error @@ -287,6 +286,7 @@ func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding data := b.data + tmp := make([]int64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -294,8 +294,9 @@ func decodeInt64s(b *buffer, x *[]int64) error { if u, data, err = decodeVarint(data); err != nil { return err } - *x = append(*x, int64(u)) + tmp = append(tmp, int64(u)) } + *x = append(*x, tmp...) return nil } var i int64 @@ -318,6 +319,7 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { data := b.data // Packed encoding + tmp := make([]uint64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -325,8 +327,9 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if u, data, err = decodeVarint(data); err != nil { return err } - *x = append(*x, u) + tmp = append(tmp, u) } + *x = append(*x, tmp...) return nil } var u uint64 diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go index b2f9fd54..02d21a81 100644 --- a/vendor/github.com/google/pprof/profile/prune.go +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -62,31 +62,15 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { prune := make(map[uint64]bool) pruneBeneath := make(map[uint64]bool) - // simplifyFunc can be expensive, so cache results. - // Note that the same function name can be encountered many times due - // different lines and addresses in the same function. - pruneCache := map[string]bool{} // Map from function to whether or not to prune - pruneFromHere := func(s string) bool { - if r, ok := pruneCache[s]; ok { - return r - } - funcName := simplifyFunc(s) - if dropRx.MatchString(funcName) { - if keepRx == nil || !keepRx.MatchString(funcName) { - pruneCache[s] = true - return true - } - } - pruneCache[s] = false - return false - } - for _, loc := range p.Location { var i int for i = len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { - if pruneFromHere(fn.Name) { - break + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + break + } } } } diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE index 0e5d580e..be2cc4df 100644 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -1,5 +1,3 @@ -Copyright (c) 2014 HashiCorp, Inc. 
- Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index 9233583c..5673773b 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -25,7 +25,7 @@ type entry struct { // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { - return nil, errors.New("must provide a positive size") + return nil, errors.New("Must provide a positive size") } c := &LRU{ size: size, @@ -73,9 +73,6 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) { func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) - if ent.Value.(*entry) == nil { - return nil, false - } return ent.Value.(*entry).value, true } return @@ -109,7 +106,7 @@ func (c *LRU) Remove(key interface{}) (present bool) { } // RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) { +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) @@ -120,7 +117,7 @@ func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) { } // GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key, value interface{}, ok bool) { +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) @@ -145,19 +142,6 @@ func (c *LRU) Len() int { return c.evictList.Len() } -// Resize changes the cache size. -func (c *LRU) Resize(size int) (evicted int) { - diff := c.Len() - size - if diff < 0 { - diff = 0 - } - for i := 0; i < diff; i++ { - c.removeOldest() - } - c.size = size - return diff -} - // removeOldest removes the oldest item from the cache. func (c *LRU) removeOldest() { ent := c.evictList.Back() diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index cb7f8caf..74c70774 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -1,4 +1,3 @@ -// Package simplelru provides simple LRU implementation based on build-in container/list. package simplelru // LRUCache is the interface for simple LRU cache. @@ -11,7 +10,7 @@ type LRUCache interface { // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) - // Checks if a key exists in cache without updating the recent-ness. + // Check if a key exsists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. @@ -32,9 +31,6 @@ type LRUCache interface { // Returns the number of items in the cache. Len() int - // Clears all cache entries. 
+ // Clear all cache entries Purge() - - // Resizes cache, returning number evicted - Resize(int) int } diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 8d5a2a47..9831c37b 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -85,6 +85,7 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/wintbiit/NineDNS * https://linuxcontainers.org/incus/ * https://ifconfig.es +* https://github.com/zmap/zdns Send pull request if you want to be listed here. diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index 1be87eae..ffdafceb 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -250,14 +250,6 @@ func (d *DS) ToCDS() *CDS { // zero, it is used as-is, otherwise the TTL of the RRset is used as the // OrigTTL. func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { - if k == nil { - return ErrPrivKey - } - // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - h0 := rrset[0].Header() rr.Hdr.Rrtype = TypeRRSIG rr.Hdr.Name = h0.Name @@ -272,6 +264,18 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { rr.Labels-- // wildcard, remove from label count } + return rr.signAsIs(k, rrset) +} + +func (rr *RRSIG) signAsIs(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + sigwire := new(rrsigWireFmt) sigwire.TypeCovered = rr.TypeCovered sigwire.Algorithm = rr.Algorithm @@ -370,9 +374,12 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { if rr.Algorithm != k.Algorithm { return ErrKey } - if !strings.EqualFold(rr.SignerName, k.Hdr.Name) { + + signerName := CanonicalName(rr.SignerName) + if !equal(signerName, k.Hdr.Name) { return ErrKey } + if k.Protocol != 3 { return ErrKey } @@ -384,9 +391,18 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { } // IsRRset checked that we have at least one RR and that the RRs in - // the set have consistent type, class, and name. Also check that type and - // class matches the RRSIG record. - if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered { + // the set have consistent type, class, and name. Also check that type, + // class and name matches the RRSIG record. + // Also checks RFC 4035 5.3.1 the number of labels in the RRset owner + // name MUST be greater than or equal to the value in the RRSIG RR's Labels field. + // RFC 4035 5.3.1 Signer's Name MUST be the name of the zone that [contains the RRset]. + // Since we don't have SOA info, checking suffix may be the best we can do...? 
+ if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || + h0.Rrtype != rr.TypeCovered || + uint8(CountLabel(h0.Name)) < rr.Labels || + !equal(h0.Name, rr.Hdr.Name) || + !strings.HasSuffix(CanonicalName(h0.Name), signerName) { + return ErrRRset } @@ -400,7 +416,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { sigwire.Expiration = rr.Expiration sigwire.Inception = rr.Inception sigwire.KeyTag = rr.KeyTag - sigwire.SignerName = CanonicalName(rr.SignerName) + sigwire.SignerName = signerName // Create the desired binary blob signeddata := make([]byte, DefaultMsgSize) n, err := packSigWire(sigwire, signeddata) diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index c1bbdaae..0447fd82 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -58,7 +58,7 @@ func makeDataOpt(code uint16) EDNS0 { case EDNS0EDE: return new(EDNS0_EDE) case EDNS0ESU: - return &EDNS0_ESU{Code: EDNS0ESU} + return new(EDNS0_ESU) default: e := new(EDNS0_LOCAL) e.Code = code @@ -66,8 +66,7 @@ func makeDataOpt(code uint16) EDNS0 { } } -// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. -// See RFC 6891. +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. See RFC 6891. type OPT struct { Hdr RR_Header Option []EDNS0 `dns:"opt"` @@ -144,8 +143,6 @@ func (*OPT) parse(c *zlexer, origin string) *ParseError { func (rr *OPT) isDuplicate(r2 RR) bool { return false } -// return the old value -> delete SetVersion? - // Version returns the EDNS version used. Only zero is defined. func (rr *OPT) Version() uint8 { return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16) @@ -236,8 +233,8 @@ type EDNS0 interface { // e.Nsid = "AA" // o.Option = append(o.Option, e) type EDNS0_NSID struct { - Code uint16 // Always EDNS0NSID - Nsid string // This string needs to be hex encoded + Code uint16 // always EDNS0NSID + Nsid string // string needs to be hex encoded } func (e *EDNS0_NSID) pack() ([]byte, error) { @@ -275,7 +272,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} // When packing it will apply SourceNetmask. If you need more advanced logic, // patches welcome and good luck. type EDNS0_SUBNET struct { - Code uint16 // Always EDNS0SUBNET + Code uint16 // always EDNS0SUBNET Family uint16 // 1 for IP, 2 for IP6 SourceNetmask uint8 SourceScope uint8 @@ -399,8 +396,8 @@ func (e *EDNS0_SUBNET) copy() EDNS0 { // // There is no guarantee that the Cookie string has a specific length. type EDNS0_COOKIE struct { - Code uint16 // Always EDNS0COOKIE - Cookie string // Hex-encoded cookie data + Code uint16 // always EDNS0COOKIE + Cookie string // hex encoded cookie data } func (e *EDNS0_COOKIE) pack() ([]byte, error) { @@ -430,7 +427,7 @@ func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.C // e.Lease = 120 // in seconds // o.Option = append(o.Option, e) type EDNS0_UL struct { - Code uint16 // Always EDNS0UL + Code uint16 // always EDNS0UL Lease uint32 KeyLease uint32 } @@ -469,7 +466,7 @@ func (e *EDNS0_UL) unpack(b []byte) error { // EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 // Implemented for completeness, as the EDNS0 type code is assigned. type EDNS0_LLQ struct { - Code uint16 // Always EDNS0LLQ + Code uint16 // always EDNS0LLQ Version uint16 Opcode uint16 Error uint16 @@ -515,7 +512,7 @@ func (e *EDNS0_LLQ) copy() EDNS0 { // EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. 
type EDNS0_DAU struct { - Code uint16 // Always EDNS0DAU + Code uint16 // always EDNS0DAU AlgCode []uint8 } @@ -539,7 +536,7 @@ func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} } // EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. type EDNS0_DHU struct { - Code uint16 // Always EDNS0DHU + Code uint16 // always EDNS0DHU AlgCode []uint8 } @@ -563,7 +560,7 @@ func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} } // EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. type EDNS0_N3U struct { - Code uint16 // Always EDNS0N3U + Code uint16 // always EDNS0N3U AlgCode []uint8 } @@ -588,7 +585,7 @@ func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} } // EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314. type EDNS0_EXPIRE struct { - Code uint16 // Always EDNS0EXPIRE + Code uint16 // always EDNS0EXPIRE Expire uint32 Empty bool // Empty is used to signal an empty Expire option in a backwards compatible way, it's not used on the wire. } @@ -668,7 +665,7 @@ func (e *EDNS0_LOCAL) unpack(b []byte) error { // EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep // the TCP connection alive. See RFC 7828. type EDNS0_TCP_KEEPALIVE struct { - Code uint16 // Always EDNSTCPKEEPALIVE + Code uint16 // always EDNSTCPKEEPALIVE // Timeout is an idle timeout value for the TCP connection, specified in // units of 100 milliseconds, encoded in network byte order. If set to 0, @@ -839,13 +836,12 @@ func (e *EDNS0_EDE) unpack(b []byte) error { return nil } -// The EDNS0_ESU option for ENUM Source-URI Extension +// The EDNS0_ESU option for ENUM Source-URI Extension. type EDNS0_ESU struct { - Code uint16 + Code uint16 // always EDNS0ESU Uri string } -// Option implements the EDNS0 interface. func (e *EDNS0_ESU) Option() uint16 { return EDNS0ESU } func (e *EDNS0_ESU) String() string { return e.Uri } func (e *EDNS0_ESU) copy() EDNS0 { return &EDNS0_ESU{e.Code, e.Uri} } diff --git a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/miekg/dns/listen_no_socket_options.go similarity index 61% rename from vendor/github.com/miekg/dns/listen_no_reuseport.go rename to vendor/github.com/miekg/dns/listen_no_socket_options.go index 8cebb2f1..9e4010bd 100644 --- a/vendor/github.com/miekg/dns/listen_no_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_no_socket_options.go @@ -3,9 +3,15 @@ package dns -import "net" +import ( + "fmt" + "net" +) -const supportsReusePort = false +const ( + supportsReusePort = false + supportsReuseAddr = false +) func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { if reuseport || reuseaddr { @@ -15,8 +21,6 @@ func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, e return net.Listen(network, addr) } -const supportsReuseAddr = false - func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) { if reuseport || reuseaddr { // TODO(tmthrgd): return an error? 
@@ -24,3 +28,13 @@ func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, return net.ListenPacket(network, addr) } + +// this is just for test compatibility +func checkReuseport(fd uintptr) (bool, error) { + return false, fmt.Errorf("not supported") +} + +// this is just for test compatibility +func checkReuseaddr(fd uintptr) (bool, error) { + return false, fmt.Errorf("not supported") +} diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/miekg/dns/listen_socket_options.go similarity index 66% rename from vendor/github.com/miekg/dns/listen_reuseport.go rename to vendor/github.com/miekg/dns/listen_socket_options.go index 41326f20..35dfc949 100644 --- a/vendor/github.com/miekg/dns/listen_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_socket_options.go @@ -39,10 +39,40 @@ func reuseaddrControl(network, address string, c syscall.RawConn) error { return opErr } +func reuseaddrandportControl(network, address string, c syscall.RawConn) error { + err := reuseaddrControl(network, address, c) + if err != nil { + return err + } + + return reuseportControl(network, address, c) +} + +// this is just for test compatibility +func checkReuseport(fd uintptr) (bool, error) { + v, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT) + if err != nil { + return false, err + } + + return v == 1, nil +} + +// this is just for test compatibility +func checkReuseaddr(fd uintptr) (bool, error) { + v, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR) + if err != nil { + return false, err + } + + return v == 1, nil +} + func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { var lc net.ListenConfig switch { case reuseaddr && reuseport: + lc.Control = reuseaddrandportControl case reuseport: lc.Control = reuseportControl case reuseaddr: @@ -56,6 +86,7 @@ func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, var lc net.ListenConfig switch { case reuseaddr && reuseport: + lc.Control = reuseaddrandportControl case reuseport: lc.Control = reuseportControl case reuseaddr: diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 81580d1e..b04d370f 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -226,6 +226,7 @@ type Server struct { // If NotifyStartedFunc is set it is called once the server has started listening. NotifyStartedFunc func() // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + // The decorated reader must not mutate the data read from the conn. DecorateReader DecorateReader // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. 
DecorateWriter DecorateWriter diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go index 2c4b1035..057bb578 100644 --- a/vendor/github.com/miekg/dns/sig0.go +++ b/vendor/github.com/miekg/dns/sig0.go @@ -7,7 +7,6 @@ import ( "crypto/rsa" "encoding/binary" "math/big" - "strings" "time" ) @@ -151,7 +150,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error { } // If key has come from the DNS name compression might // have mangled the case of the name - if !strings.EqualFold(signername, k.Header().Name) { + if !equal(signername, k.Header().Name) { return &Error{err: "signer name doesn't match key name"} } sigend := offset diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 00c8629f..e290e3df 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 62} +var Version = v{1, 1, 63} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 4d574911..743555dd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -82,10 +82,6 @@ func New(colorMode ColorMode) Formatter { return fmt.Sprintf("\x1b[38;5;%dm", colorCode) } - if _, noColor := os.LookupEnv("GINKGO_NO_COLOR"); noColor { - colorMode = ColorModeNone - } - f := Formatter{ ColorMode: colorMode, colors: map[string]string{ diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index fd172608..5db5d1a7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -2,8 +2,6 @@ package build import ( "fmt" - "os" - "path" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" @@ -55,18 +53,7 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go if suite.State.Is(internal.TestSuiteStateFailedToCompile) { fmt.Println(suite.CompilationError.Error()) } else { - if len(goFlagsConfig.O) == 0 { - goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test") - } else { - stat, err := os.Stat(goFlagsConfig.O) - if err != nil { - panic(err) - } - if stat.IsDir() { - goFlagsConfig.O += "/" + suite.PackageName + ".test" - } - } - fmt.Printf("Compiled %s\n", goFlagsConfig.O) + fmt.Printf("Compiled %s.test\n", suite.PackageName) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index b2dc59be..73aff0b7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig/v3" + sprig "github.com/go-task/slim-sprig" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index cf3b7cb6..48d23f91 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ 
b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig/v3" + sprig "github.com/go-task/slim-sprig" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -32,9 +32,6 @@ func BuildGenerateCommand() command.Command { {Name: "template-data", KeyPath: "CustomTemplateData", UsageArgument: "template-data-file", Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, - {Name: "tags", KeyPath: "Tags", - UsageArgument: "build-tags", - Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"}, }, &conf, types.GinkgoFlagSections{}, @@ -62,7 +59,6 @@ You can also pass a of the form "file.go" and generate will emit "fil } type specData struct { - BuildTags string Package string Subject string PackageImportPath string @@ -97,7 +93,6 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) { } data := specData{ - BuildTags: getBuildTags(conf.Tags), Package: determinePackageName(packageName, conf.Internal), Subject: formattedName, PackageImportPath: getPackageImportPath(), @@ -174,7 +169,6 @@ func moduleName(modRoot string) string { if err != nil { return "" } - defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go index 4dab07d0..c3470adb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -1,7 +1,6 @@ package generators -var specText = `{{.BuildTags}} -package {{.Package}} +var specText = `package {{.Package}} import ( {{.GinkgoImport}} @@ -15,8 +14,7 @@ var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { }) ` -var agoutiSpecText = `{{.BuildTags}} -package {{.Package}} +var agoutiSpecText = `package {{.Package}} import ( {{.GinkgoImport}} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go index 28c7aa6f..3046a448 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -1,7 +1,6 @@ package generators import ( - "fmt" "go/build" "os" "path/filepath" @@ -15,7 +14,6 @@ type GeneratorsConfig struct { Agouti, NoDot, Internal bool CustomTemplate string CustomTemplateData string - Tags string } func getPackageAndFormattedName() (string, string, string) { @@ -64,13 +62,3 @@ func determinePackageName(name string, internal bool) string { return name + "_test" } - -// getBuildTags returns the resultant string to be added. -// If the input string is not empty, then returns a `//go:build {}` string, -// otherwise returns an empty string. 
-func getBuildTags(tags string) string { - if tags != "" { - return fmt.Sprintf("//go:build %s\n", tags) - } - return "" -} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 48827cc5..86da7340 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -25,18 +25,6 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite return suite } - if len(goFlagsConfig.O) > 0 { - userDefinedPath, err := filepath.Abs(goFlagsConfig.O) - if err != nil { - suite.State = TestSuiteStateFailedToCompile - suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error()) - return suite - } - path = userDefinedPath - } - - goFlagsConfig.O = path - ginkgoInvocationPath, _ := os.Getwd() ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) packagePath := suite.AbsPath() @@ -46,7 +34,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath) + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go deleted file mode 100644 index 3c5079ff..00000000 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2015, Wade Simmons -// All rights reserved. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: - -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. - -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// Package gocovmerge takes the results from multiple `go test -coverprofile` -// runs and merges them into one profile - -// this file was originally taken from the gocovmerge project -// see also: https://go.shabbyrobe.org/gocovmerge -package internal - -import ( - "fmt" - "io" - "sort" - - "golang.org/x/tools/cover" -) - -func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { - i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) - if i < len(profiles) && profiles[i].FileName == p.FileName { - MergeCoverProfiles(profiles[i], p) - } else { - profiles = append(profiles, nil) - copy(profiles[i+1:], profiles[i:]) - profiles[i] = p - } - return profiles -} - -func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { - if len(profiles) == 0 { - return nil - } - if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { - return err - } - for _, p := range profiles { - for _, b := range p.Blocks { - if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { - return err - } - } - } - return nil -} - -func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { - if into.Mode != merge.Mode { - return fmt.Errorf("cannot merge profiles with different modes") - } - // Since the blocks are sorted, we can keep track of where the last block - // was inserted and only look at the blocks after that as targets for merge - startIndex := 0 - for _, b := range merge.Blocks { - var err error - startIndex, err = mergeProfileBlock(into, b, startIndex) - if err != nil { - return err - } - } - return nil -} - -func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { - sortFunc := func(i int) bool { - pi := p.Blocks[i+startIndex] - return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) - } - - i := 0 - if sortFunc(i) != true { - i = sort.Search(len(p.Blocks)-startIndex, sortFunc) - } - - i += startIndex - if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { - if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { - return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) - } - switch p.Mode { - case "set": - p.Blocks[i].Count |= pb.Count - case "count", "atomic": - p.Blocks[i].Count += pb.Count - default: - return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) - } - - } else { - if i > 0 { - pa := p.Blocks[i-1] - if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { - return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) - } - } - if i < len(p.Blocks)-1 { - pa := p.Blocks[i+1] - if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { - return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) - } - } - p.Blocks = append(p.Blocks, cover.ProfileBlock{}) - copy(p.Blocks[i+1:], p.Blocks[i:]) - p.Blocks[i] = pb - } - - return i + 1, nil -} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 8e16d2bb..bd3c6d02 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -1,6 
+1,7 @@ package internal import ( + "bytes" "fmt" "os" "os/exec" @@ -11,7 +12,6 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" - "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,27 +144,38 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -// loads each profile, merges them, deletes them, stores them in destination +//loads each profile, combines them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - var merged []*cover.Profile - for _, file := range profiles { - parsedProfiles, err := cover.ParseProfiles(file) + combined := &bytes.Buffer{} + modeRegex := regexp.MustCompile(`^mode: .*\n`) + for i, profile := range profiles { + contents, err := os.ReadFile(profile) if err != nil { - return err + return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) } - os.Remove(file) - for _, p := range parsedProfiles { - merged = AddCoverProfile(merged, p) + os.Remove(profile) + + // remove the cover mode line from every file + // except the first one + if i > 0 { + contents = modeRegex.ReplaceAll(contents, []byte{}) + } + + _, err = combined.Write(contents) + + // Add a newline to the end of every file if missing. + if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { + _, err = combined.Write([]byte("\n")) + } + + if err != nil { + return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) } } - dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + + err := os.WriteFile(destination, combined.Bytes(), 0666) if err != nil { - return err - } - defer dst.Close() - err = DumpCoverProfiles(merged, dst) - if err != nil { - return err + return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) } return nil } @@ -173,7 +184,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) { cmd := exec.Command("go", "tool", "cover", "-func", profile) output, err := cmd.CombinedOutput() if err != nil { - return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) + return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) } re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) matches := re.FindStringSubmatch(string(output)) @@ -197,7 +208,6 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) } prof, err := profile.Parse(proFile) - _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go index df99875b..64dcb1b7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -7,7 +7,6 @@ import ( "path" "path/filepath" "regexp" - "runtime" "strings" "github.com/onsi/ginkgo/v2/types" @@ -193,7 +192,7 @@ func precompiledTestSuite(path string) (TestSuite, error) { return TestSuite{}, errors.New("this is not a .test binary") } - if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 { + if filepath.Ext(path) == ".test" 
&& info.Mode()&0111 == 0 { return TestSuite{}, errors.New("this is not executable") } @@ -226,7 +225,7 @@ func suitesInDir(dir string, recurse bool) TestSuites { files, _ := os.ReadDir(dir) re := regexp.MustCompile(`^[^._].*_test\.go$`) for _, file := range files { - if !file.IsDir() && re.MatchString(file.Name()) { + if !file.IsDir() && re.Match([]byte(file.Name())) { suite := TestSuite{ Path: relPath(dir), PackageName: packageNameForSuite(dir), @@ -241,7 +240,7 @@ func suitesInDir(dir string, recurse bool) TestSuites { if recurse { re = regexp.MustCompile(`^[._]`) for _, file := range files { - if file.IsDir() && !re.MatchString(file.Name()) { + if file.IsDir() && !re.Match([]byte(file.Name())) { suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) } } @@ -272,7 +271,7 @@ func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) for _, file := range files { - if !file.IsDir() && reTestFile.MatchString(file.Name()) { + if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { contents, _ := os.ReadFile(dir + "/" + file.Name()) if reGinkgo.Match(contents) { return true diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 5d8d00bb..0b9b19fe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -1,11 +1,10 @@ package outline import ( + "github.com/onsi/ginkgo/v2/types" "go/ast" "go/token" "strconv" - - "github.com/onsi/ginkgo/v2/types" ) const ( @@ -245,7 +244,9 @@ func labelFromCallExpr(ce *ast.CallExpr) []string { } if id.Name == "Label" { ls := extractLabels(expr) - labels = append(labels, ls...) + for _, label := range ls { + labels = append(labels, label) + } } } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go index f0a6b5d2..67ec5ab7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -28,7 +28,14 @@ func packageNameForImport(f *ast.File, path string) *string { } name := spec.Name.String() if name == "" { - name = "ginkgo" + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } } if name == "." { name = "" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go index e99d557d..c2327cda 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -1,13 +1,10 @@ package outline import ( - "bytes" - "encoding/csv" "encoding/json" "fmt" "go/ast" "go/token" - "strconv" "strings" "golang.org/x/tools/go/ast/inspector" @@ -87,11 +84,9 @@ func (o *outline) String() string { // StringIndent returns a CSV-formated outline, but every line is indented by // one 'width' of spaces for every level of nesting. 
func (o *outline) StringIndent(width int) string { - var b bytes.Buffer + var b strings.Builder b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") - csvWriter := csv.NewWriter(&b) - currentIndent := 0 pre := func(n *ginkgoNode) { b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) @@ -101,22 +96,8 @@ func (o *outline) StringIndent(width int) string { } else { labels = strings.Join(n.Labels, ", ") } - - row := []string{ - n.Name, - n.Text, - strconv.Itoa(n.Start), - strconv.Itoa(n.End), - strconv.FormatBool(n.Spec), - strconv.FormatBool(n.Focused), - strconv.FormatBool(n.Pending), - labels, - } - csvWriter.Write(row) - - // Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation - csvWriter.Flush() - + //enclosing labels in a double quoted comma separate listed so that when inmported into a CSV app the Labels column has comma separate strings + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) currentIndent += width } post := func(n *ginkgoNode) { @@ -125,6 +106,5 @@ func (o *outline) StringIndent(width int) string { for _, n := range o.Nodes { n.Walk(pre, post) } - return b.String() } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go index a34d9435..f5ddff30 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -78,7 +78,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) { if err != nil { continue } - if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { d.addDepIfNotPresent(pkg.Dir, depth) } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 0e6ae1f2..e9f7ec0c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,7 +4,6 @@ import ( "fmt" "os" "regexp" - "strings" "time" ) @@ -80,11 +79,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } - if isHiddenFile(info) { - continue - } - - if goTestRegExp.MatchString(info.Name()) { + if goTestRegExp.Match([]byte(info.Name())) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { testModifiedTime = info.ModTime() @@ -92,7 +87,7 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } - if p.watchRegExp.MatchString(info.Name()) { + if p.watchRegExp.Match([]byte(info.Name())) { codeHash += p.hashForFileInfo(info) if info.ModTime().After(codeModifiedTime) { codeModifiedTime = info.ModTime() @@ -108,10 +103,6 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } -func isHiddenFile(info os.FileInfo) bool { - return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") -} - func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 48073048..56b7be75 100644 
--- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,31 +182,10 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } -func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { - r.emitBlock("\n") - if r.conf.GithubOutput { - r.emitBlock(r.fi(1, "::group::%s", sectionName)) - } else { - r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) - } - fn() - if r.conf.GithubOutput { - r.emitBlock(r.fi(1, "::endgroup::")) - } else { - r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) - } - -} - func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel - //should we completely omit this spec? - if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { - return - } - header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -283,12 +262,9 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, just emit the header and return + // If we have no content to show, jsut emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) - if r.conf.ForceNewlines { - r.emit("\n") - } return } @@ -307,23 +283,26 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.wrapTextBlock("Captured StdOut/StdErr Output", func() { - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - }) + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) } if showSeparateVisibilityAlwaysReportsSection { - r.wrapTextBlock("Report Entries", func() { - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - }) + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) } if showTimeline { - r.wrapTextBlock("Timeline", func() { - r.emitTimeline(1, report, timeline) - }) + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) + r.emitTimeline(1, report, timeline) + r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) } // Emit Failure Message @@ -426,15 +405,7 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - if r.conf.GithubOutput { - level := "error" - if state.Is(types.SpecStateSkipped) { - level = "notice" - } - r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) - } else { - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ 
%s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) - } + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go index 5d3e8db9..7f96c450 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -4,21 +4,16 @@ import ( "encoding/json" "fmt" "os" - "path" "github.com/onsi/ginkgo/v2/types" ) -// GenerateJSONReport produces a JSON-formatted report at the passed in destination +//GenerateJSONReport produces a JSON-formatted report at the passed in destination func GenerateJSONReport(report types.Report, destination string) error { - if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { - return err - } f, err := os.Create(destination) if err != nil { return err } - defer f.Close() enc := json.NewEncoder(f) enc.SetIndent("", " ") err = enc.Encode([]types.Report{ @@ -27,11 +22,11 @@ func GenerateJSONReport(report types.Report, destination string) error { if err != nil { return err } - return nil + return f.Close() } -// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources -// It skips over reports that fail to decode but reports on them via the returned messages []string +//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +//It skips over reports that fail to decode but reports on them via the returned messages []string func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { messages := []string{} allReports := []types.Report{} @@ -51,19 +46,15 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, allReports = append(allReports, reports...) 
} - if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { - return messages, err - } f, err := os.Create(destination) if err != nil { return messages, err } - defer f.Close() enc := json.NewEncoder(f) enc.SetIndent("", " ") err = enc.Encode(allReports) if err != nil { return messages, err } - return messages, nil + return messages, f.Close() } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 562e0f62..592d7f61 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -14,8 +14,6 @@ import ( "encoding/xml" "fmt" "os" - "path" - "regexp" "strings" "github.com/onsi/ginkgo/v2/config" @@ -105,8 +103,6 @@ type JUnitProperty struct { Value string `xml:"value,attr"` } -var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) - type JUnitTestCase struct { // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" Name string `xml:"name,attr"` @@ -116,8 +112,6 @@ type JUnitTestCase struct { Status string `xml:"status,attr"` // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime Time float64 `xml:"time,attr"` - // Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. - Owner string `xml:"owner,attr,omitempty"` //Skipped is populated with a message if the test was skipped or pending Skipped *JUnitSkipped `xml:"skipped,omitempty"` //Error is populated if the test panicked or was interrupted @@ -177,7 +171,6 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, - {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, @@ -201,12 +194,6 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } - owner := "" - for _, label := range labels { - if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { - owner = matches[1] - } - } name = strings.TrimSpace(name) test := JUnitTestCase{ @@ -214,7 +201,6 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), - Owner: owner, } if !spec.State.Is(config.OmitTimelinesForSpecState) { test.SystemErr = systemErrForUnstructuredReporters(spec) @@ -299,9 +285,6 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit TestSuites: []JUnitTestSuite{suite}, } - if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { - return err - } f, err := os.Create(dst) if err != nil { return err @@ -325,7 +308,6 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) - _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue 
@@ -340,9 +322,6 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) } - if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { - return messages, err - } f, err := os.Create(dst) if err != nil { return messages, err diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index e990ad82..c1863496 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -11,7 +11,6 @@ package reporters import ( "fmt" "os" - "path" "strings" "github.com/onsi/ginkgo/v2/types" @@ -28,9 +27,6 @@ func tcEscape(s string) string { } func GenerateTeamcityReport(report types.Report, dst string) error { - if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { - return err - } f, err := os.Create(dst) if err != nil { return err diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go index 57e87517..9cd57681 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -149,7 +149,7 @@ func PruneStack(fullStackTrace string, skip int) string { re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) for i := 0; i < len(stack)/2; i++ { // We filter out based on the source code file name. - if !re.MatchString(stack[i*2+1]) { + if !re.Match([]byte(stack[i*2+1])) { prunedStack = append(prunedStack, stack[i*2]) prunedStack = append(prunedStack, stack[i*2+1]) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 8c0dfab8..1014c7b4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,10 +25,8 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool - FailOnEmpty bool FailFast bool FlakeAttempts int - MustPassRepeatedly int DryRun bool PollProgressAfter time.Duration PollProgressInterval time.Duration @@ -90,9 +88,6 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool - GithubOutput bool - SilenceSkips bool - ForceNewlines bool JSONReport string JUnitReport string @@ -202,7 +197,6 @@ type GoFlagsConfig struct { A bool ASMFlags string BuildMode string - BuildVCS bool Compiler string GCCGoFlags string GCFlags string @@ -220,7 +214,6 @@ type GoFlagsConfig struct { ToolExec string Work bool X bool - O string } func NewDefaultGoFlagsConfig() GoFlagsConfig { @@ -270,7 +263,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, + Usage: "The seed used to randomize the spec suite."}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -280,8 +273,6 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, - {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", - Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, @@ -328,7 +319,7 @@ var ParallelConfigFlags = GinkgoFlags{ // ReporterConfigFlags provides flags for the Ginkgo test process, and CLI var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", - Usage: "If set, suppress color output in default reporter. You can also set the environment variable GINKGO_NO_COLOR=TRUE"}, + Usage: "If set, suppress color output in default reporter."}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", @@ -339,12 +330,6 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, - {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", - Usage: "If set, default reporter prints easier to manage output in Github Actions."}, - {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", - Usage: "If set, default reporter will not print out skipped tests."}, - {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", - Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -513,7 +498,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{ // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", - Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, + Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. 
If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", @@ -529,8 +514,6 @@ var GoBuildFlags = GinkgoFlags{ Usage: "arguments to pass on each go tool asm invocation."}, {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", Usage: "build mode to use. See 'go help buildmode' for more."}, - {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", - Usage: "adds version control information."}, {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", @@ -565,8 +548,6 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, - {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", - Usage: "output binary path (including name)."}, } // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI @@ -620,7 +601,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { @@ -643,7 +624,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") } - args := []string{"test", "-c", packageToBuild} + args := []string{"test", "-c", "-o", destination, packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, map[string]interface{}{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 6bb72d00..1e0dbfd9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -453,8 +453,8 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { return GinkgoError{ - Heading: "No parameters have been passed to the Table Function", - Message: "The Table Function expected at least 1 parameter", + Heading: fmt.Sprintf("No parameters have been passed to the Table Function"), + Message: fmt.Sprintf("The Table Function expected at least 1 parameter"), CodeLocation: cl, DocLink: "table-specs", } @@ -505,15 +505,6 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac } } -func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error { - return GinkgoError{ 
- Heading: "Contexts cannot be used in subtree tables", - Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.", - CodeLocation: cl, - DocLink: "table-specs", - } -} - /* Parallel Synchronization errors */ func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index de69f302..9186ae87 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,8 +24,7 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string - AlwaysExport bool + ExportAs string } type GinkgoFlags []GinkgoFlag @@ -432,7 +431,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -452,19 +451,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" || flag.AlwaysExport { + if iface.(string) != "" { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 || flag.AlwaysExport { + if iface.(int64) != 0 { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 || flag.AlwaysExport { + if iface.(float64) != 0 { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 || flag.AlwaysExport { + if iface.(int) != 0 { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -472,7 +471,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { + if iface.(time.Duration) != time.Duration(0) { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index 7fdc8aa2..b0d3b651 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,83 +45,6 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } -func labelSetFor(key string, labels []string) map[string]bool { - key = strings.ToLower(strings.TrimSpace(key)) - out := map[string]bool{} - for _, label := range labels { - components := strings.SplitN(label, ":", 2) - if len(components) < 2 { - continue - } - if key == strings.ToLower(strings.TrimSpace(components[0])) { - 
out[strings.ToLower(strings.TrimSpace(components[1]))] = true - } - } - - return out -} - -func isEmptyLabelSetAction(key string) LabelFilter { - return func(labels []string) bool { - return len(labelSetFor(key, labels)) == 0 - } -} - -func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { - return func(labels []string) bool { - set := labelSetFor(key, labels) - for _, value := range expectedValues { - if set[value] { - return true - } - } - return false - } -} - -func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { - return func(labels []string) bool { - set := labelSetFor(key, labels) - for _, value := range expectedValues { - if !set[value] { - return false - } - } - return true - } -} - -func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { - return func(labels []string) bool { - set := labelSetFor(key, labels) - if len(set) != len(expectedValues) { - return false - } - for _, value := range expectedValues { - if !set[value] { - return false - } - } - return true - } -} - -func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { - expectedSet := map[string]bool{} - for _, value := range expectedValues { - expectedSet[value] = true - } - return func(labels []string) bool { - set := labelSetFor(key, labels) - for value := range set { - if !expectedSet[value] { - return false - } - } - return true - } -} - type lfToken uint const ( @@ -135,9 +58,6 @@ const ( lfTokenOr lfTokenRegexp lfTokenLabel - lfTokenSetKey - lfTokenSetOperation - lfTokenSetArgument lfTokenEOF ) @@ -151,8 +71,6 @@ func (l lfToken) Precedence() int { return 2 case lfTokenNot: return 3 - case lfTokenSetOperation: - return 4 } return -1 } @@ -175,12 +93,6 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" - case lfTokenSetKey: - return "set_key" - case lfTokenSetOperation: - return "set_operation" - case lfTokenSetArgument: - return "set_argument" case lfTokenEOF: return "EOF" } @@ -236,35 +148,6 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil - case lfTokenSetOperation: - tokenSetOperation := strings.ToLower(tn.value) - if tokenSetOperation == "isempty" { - return isEmptyLabelSetAction(tn.leftNode.value), nil - } - if tn.rightNode == nil { - return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) - } - - rawValues := strings.Split(tn.rightNode.value, ",") - values := make([]string, len(rawValues)) - for i := range rawValues { - values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) - if strings.ContainsAny(values[i], "&|!,()/") { - return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) - } else if values[i] == "" { - return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") - } - } - switch tokenSetOperation { - case "containsany": - return containsAnyLabelSetAction(tn.leftNode.value, values), nil - case "containsall": - return containsAllLabelSetAction(tn.leftNode.value, values), nil - case "consistsof": - return consistsOfLabelSetAction(tn.leftNode.value, values), nil - case "issubsetof": - return 
isSubsetOfLabelSetAction(tn.leftNode.value, values), nil - } } if tn.rightNode == nil { @@ -320,17 +203,7 @@ func (tn *treeNode) toString(indent int) string { return out } -var validSetOperations = map[string]string{ - "containsany": "containsAny", - "containsall": "containsAll", - "consistsof": "consistsOf", - "issubsetof": "isSubsetOf", - "isempty": "isEmpty", -} - func tokenize(input string) func() (*treeNode, error) { - lastToken := lfTokenInvalid - lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -360,53 +233,6 @@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} - defer func() { - lastToken = node.token - lastValue = node.value - }() - - if lastToken == lfTokenSetKey { - //we should get a valid set operation next - value, n := consumeUntil(" )") - if validSetOperations[strings.ToLower(value)] == "" { - return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) - } - i += n - node.token, node.value = lfTokenSetOperation, value - return node, nil - } - if lastToken == lfTokenSetOperation { - //we should get an argument next, if we aren't isempty - var arg = "" - origI := i - if runes[i] == '{' { - i += 1 - value, n := consumeUntil("}") - if i+n >= len(runes) { - return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") - } - i += n + 1 - arg = value - } else { - value, n := consumeUntil("&|!,()/") - i += n - arg = strings.TrimSpace(value) - } - if strings.ToLower(lastValue) == "isempty" && arg != "" { - return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) - } - if arg == "" && strings.ToLower(lastValue) != "isempty" { - if i < len(runes) && runes[i] == '/' { - return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") - } else { - return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) - } - } - // note that we sent an empty SetArgument token if we are isempty - node.token, node.value = lfTokenSetArgument, arg - return node, nil - } - switch runes[i] { case '&': if !peekIs('&') { @@ -438,38 +264,8 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/:") + value, n := consumeUntil("&|!,()/") i += n - value = strings.TrimSpace(value) - - //are we the beginning of a set operation? 
- if i < len(runes) && runes[i] == ':' { - if peekIs(' ') { - if value == "" { - return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") - } - i += 1 - //we are the beginning of a set operation - node.token, node.value = lfTokenSetKey, value - return node, nil - } - additionalValue, n := consumeUntil("&|!,()/") - additionalValue = strings.TrimSpace(additionalValue) - if additionalValue == ":" { - return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") - } - i += n - value += additionalValue - } - - valueToCheckForSetOperation := strings.ToLower(value) - for setOperation := range validSetOperations { - idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) - if idx > 0 { - return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) - } - } - node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -511,7 +307,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: + case lfTokenLabel, lfTokenRegexp: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.") } @@ -530,18 +326,6 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node - case lfTokenSetOperation: - if current.rightNode == nil { - return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) - } - node.setLeftNode(current.rightNode) - current.setRightNode(node) - current = node - case lfTokenSetArgument: - if current.rightNode != nil { - return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) - } - current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -570,14 +354,5 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } - if out[0] == ':' { - return "", GinkgoErrors.InvalidLabel(label, cl) - } - if strings.Contains(out, ":") { - components := strings.SplitN(out, ":", 2) - if len(components) < 2 || components[1] == "" { - return "", GinkgoErrors.InvalidLabel(label, cl) - } - } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index ddcbec1b..d048a8ad 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -3,21 +3,13 @@ package types import ( "encoding/json" "fmt" - "os" "sort" "strings" "time" ) const GINKGO_FOCUS_EXIT_CODE = 197 - -var GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" - -func init() { - if os.Getenv("GINKGO_TIME_FORMAT") != "" { - GINKGO_TIME_FORMAT = os.Getenv("GINKGO_TIME_FORMAT") - } -} +const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999" // Report captures information about a Ginkgo test run type Report struct { @@ -105,7 +97,9 @@ func (report Report) Add(other Report) Report { report.RunTime = report.EndTime.Sub(report.StartTime) reports := make(SpecReports, 
len(report.SpecReports)+len(other.SpecReports)) - copy(reports, report.SpecReports) + for i := range report.SpecReports { + reports[i] = report.SpecReports[i] + } offset := len(report.SpecReports) for i := range other.SpecReports { reports[i+offset] = other.SpecReports[i] diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 879e1d86..43066341 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.22.2" +const VERSION = "2.9.5" diff --git a/vendor/github.com/quic-go/quic-go/.golangci.yml b/vendor/github.com/quic-go/quic-go/.golangci.yml index 63b40cc3..1174b125 100644 --- a/vendor/github.com/quic-go/quic-go/.golangci.yml +++ b/vendor/github.com/quic-go/quic-go/.golangci.yml @@ -16,9 +16,9 @@ linters: disable-all: true enable: - asciicheck + - copyloopvar - depguard - exhaustive - - exportloopref - goimports - gofmt # redundant, since gofmt *should* be a no-op after gofumpt - gofumpt @@ -44,3 +44,8 @@ issues: linters: - exhaustive - prealloc + - unparam + - path: _test\.go + text: "SA1029:" + linters: + - staticcheck diff --git a/vendor/github.com/quic-go/quic-go/README.md b/vendor/github.com/quic-go/quic-go/README.md index 94823d99..ccc9e213 100644 --- a/vendor/github.com/quic-go/quic-go/README.md +++ b/vendor/github.com/quic-go/quic-go/README.md @@ -9,7 +9,8 @@ quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go. It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) and HTTP Datagrams ([RFC 9297](https://datatracker.ietf.org/doc/html/rfc9297)). -In addition to these base RFCs, it also implements the following RFCs: +In addition to these base RFCs, it also implements the following RFCs: + * Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) * Datagram Packetization Layer Path MTU Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)) * QUIC Version 2 ([RFC 9369](https://datatracker.ietf.org/doc/html/rfc9369)) @@ -33,6 +34,7 @@ Detailed documentation can be found on [quic-go.net](https://quic-go.net/docs/). | [Hysteria](https://github.com/apernet/hysteria) | A powerful, lightning fast and censorship resistant proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/apernet/hysteria?style=flat-square) | | [Mercure](https://github.com/dunglas/mercure) | An open, easy, fast, reliable and battery-efficient solution for real-time communications | ![GitHub Repo stars](https://img.shields.io/github/stars/dunglas/mercure?style=flat-square) | | [OONI Probe](https://github.com/ooni/probe-cli) | Next generation OONI Probe. Library and CLI tool. 
| ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) | +| [reverst](https://github.com/flipt-io/reverst) | Reverse Tunnels in Go over HTTP/3 and QUIC | ![GitHub Repo stars](https://img.shields.io/github/stars/flipt-io/reverst?style=flat-square) | | [RoadRunner](https://github.com/roadrunner-server/roadrunner) | High-performance PHP application server, process manager written in Go and powered with plugins | ![GitHub Repo stars](https://img.shields.io/github/stars/roadrunner-server/roadrunner?style=flat-square) | | [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) | | [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) | diff --git a/vendor/github.com/quic-go/quic-go/client.go b/vendor/github.com/quic-go/quic-go/client.go index 1c5654f6..29a715cc 100644 --- a/vendor/github.com/quic-go/quic-go/client.go +++ b/vendor/github.com/quic-go/quic-go/client.go @@ -7,38 +7,8 @@ import ( "net" "github.com/quic-go/quic-go/internal/protocol" - "github.com/quic-go/quic-go/internal/utils" - "github.com/quic-go/quic-go/logging" ) -type client struct { - sendConn sendConn - - use0RTT bool - - packetHandlers packetHandlerManager - onClose func() - - tlsConf *tls.Config - config *Config - - connIDGenerator ConnectionIDGenerator - srcConnID protocol.ConnectionID - destConnID protocol.ConnectionID - - initialPacketNumber protocol.PacketNumber - hasNegotiatedVersion bool - version protocol.Version - - handshakeChan chan struct{} - - conn quicConn - - tracer *logging.ConnectionTracer - tracingID ConnectionTracingID - logger utils.Logger -} - // make it possible to mock connection ID for initial generation in the tests var generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial @@ -132,120 +102,3 @@ func setupTransport(c net.PacketConn, tlsConf *tls.Config, createdPacketConn boo isSingleUse: true, }, nil } - -func dial( - ctx context.Context, - conn sendConn, - connIDGenerator ConnectionIDGenerator, - packetHandlers packetHandlerManager, - tlsConf *tls.Config, - config *Config, - onClose func(), - use0RTT bool, -) (quicConn, error) { - c, err := newClient(conn, connIDGenerator, config, tlsConf, onClose, use0RTT) - if err != nil { - return nil, err - } - c.packetHandlers = packetHandlers - - c.tracingID = nextConnTracingID() - if c.config.Tracer != nil { - c.tracer = c.config.Tracer(context.WithValue(ctx, ConnectionTracingKey, c.tracingID), protocol.PerspectiveClient, c.destConnID) - } - if c.tracer != nil && c.tracer.StartedConnection != nil { - c.tracer.StartedConnection(c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID) - } - if err := c.dial(ctx); err != nil { - return nil, err - } - return c.conn, nil -} - -func newClient(sendConn sendConn, connIDGenerator ConnectionIDGenerator, config *Config, tlsConf *tls.Config, onClose func(), use0RTT bool) (*client, error) { - srcConnID, err := connIDGenerator.GenerateConnectionID() - if err != nil { - return nil, err - } - destConnID, err := generateConnectionIDForInitial() - if err != nil { - return nil, err - } - c := &client{ - connIDGenerator: connIDGenerator, - srcConnID: srcConnID, - destConnID: destConnID, - sendConn: sendConn, - use0RTT: use0RTT, - onClose: onClose, - tlsConf: tlsConf, - config: config, - version: 
config.Versions[0], - handshakeChan: make(chan struct{}), - logger: utils.DefaultLogger.WithPrefix("client"), - } - return c, nil -} - -func (c *client) dial(ctx context.Context) error { - c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID, c.version) - - c.conn = newClientConnection( - context.WithValue(context.WithoutCancel(ctx), ConnectionTracingKey, c.tracingID), - c.sendConn, - c.packetHandlers, - c.destConnID, - c.srcConnID, - c.connIDGenerator, - c.config, - c.tlsConf, - c.initialPacketNumber, - c.use0RTT, - c.hasNegotiatedVersion, - c.tracer, - c.logger, - c.version, - ) - c.packetHandlers.Add(c.srcConnID, c.conn) - - errorChan := make(chan error, 1) - recreateChan := make(chan errCloseForRecreating) - go func() { - err := c.conn.run() - var recreateErr *errCloseForRecreating - if errors.As(err, &recreateErr) { - recreateChan <- *recreateErr - return - } - if c.onClose != nil { - c.onClose() - } - errorChan <- err // returns as soon as the connection is closed - }() - - // only set when we're using 0-RTT - // Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever. - var earlyConnChan <-chan struct{} - if c.use0RTT { - earlyConnChan = c.conn.earlyConnReady() - } - - select { - case <-ctx.Done(): - c.conn.destroy(nil) - return context.Cause(ctx) - case err := <-errorChan: - return err - case recreateErr := <-recreateChan: - c.initialPacketNumber = recreateErr.nextPacketNumber - c.version = recreateErr.nextVersion - c.hasNegotiatedVersion = true - return c.dial(ctx) - case <-earlyConnChan: - // ready to send 0-RTT data - return nil - case <-c.conn.HandshakeComplete(): - // handshake successfully completed - return nil - } -} diff --git a/vendor/github.com/quic-go/quic-go/codecov.yml b/vendor/github.com/quic-go/quic-go/codecov.yml index 59e4b58f..77e47fbe 100644 --- a/vendor/github.com/quic-go/quic-go/codecov.yml +++ b/vendor/github.com/quic-go/quic-go/codecov.yml @@ -6,6 +6,8 @@ coverage: - internal/handshake/cipher_suite.go - internal/utils/linkedlist/linkedlist.go - internal/testdata + - logging/connection_tracer_multiplexer.go + - logging/tracer_multiplexer.go - testutils/ - fuzzing/ - metrics/ diff --git a/vendor/github.com/quic-go/quic-go/conn_id_generator.go b/vendor/github.com/quic-go/quic-go/conn_id_generator.go index d7be6540..c309c2cd 100644 --- a/vendor/github.com/quic-go/quic-go/conn_id_generator.go +++ b/vendor/github.com/quic-go/quic-go/conn_id_generator.go @@ -15,19 +15,19 @@ type connIDGenerator struct { activeSrcConnIDs map[uint64]protocol.ConnectionID initialClientDestConnID *protocol.ConnectionID // nil for the client - addConnectionID func(protocol.ConnectionID) - getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken - removeConnectionID func(protocol.ConnectionID) - retireConnectionID func(protocol.ConnectionID) - replaceWithClosed func([]protocol.ConnectionID, []byte) - queueControlFrame func(wire.Frame) + addConnectionID func(protocol.ConnectionID) + statelessResetter *statelessResetter + removeConnectionID func(protocol.ConnectionID) + retireConnectionID func(protocol.ConnectionID) + replaceWithClosed func([]protocol.ConnectionID, []byte) + queueControlFrame func(wire.Frame) } func newConnIDGenerator( initialConnectionID protocol.ConnectionID, initialClientDestConnID *protocol.ConnectionID, // nil for the client addConnectionID 
func(protocol.ConnectionID), - getStatelessResetToken func(protocol.ConnectionID) protocol.StatelessResetToken, + statelessResetter *statelessResetter, removeConnectionID func(protocol.ConnectionID), retireConnectionID func(protocol.ConnectionID), replaceWithClosed func([]protocol.ConnectionID, []byte), @@ -35,14 +35,14 @@ func newConnIDGenerator( generator ConnectionIDGenerator, ) *connIDGenerator { m := &connIDGenerator{ - generator: generator, - activeSrcConnIDs: make(map[uint64]protocol.ConnectionID), - addConnectionID: addConnectionID, - getStatelessResetToken: getStatelessResetToken, - removeConnectionID: removeConnectionID, - retireConnectionID: retireConnectionID, - replaceWithClosed: replaceWithClosed, - queueControlFrame: queueControlFrame, + generator: generator, + activeSrcConnIDs: make(map[uint64]protocol.ConnectionID), + addConnectionID: addConnectionID, + statelessResetter: statelessResetter, + removeConnectionID: removeConnectionID, + retireConnectionID: retireConnectionID, + replaceWithClosed: replaceWithClosed, + queueControlFrame: queueControlFrame, } m.activeSrcConnIDs[0] = initialConnectionID m.initialClientDestConnID = initialClientDestConnID @@ -104,7 +104,7 @@ func (m *connIDGenerator) issueNewConnID() error { m.queueControlFrame(&wire.NewConnectionIDFrame{ SequenceNumber: m.highestSeq + 1, ConnectionID: connID, - StatelessResetToken: m.getStatelessResetToken(connID), + StatelessResetToken: m.statelessResetter.GetStatelessResetToken(connID), }) m.highestSeq++ return nil diff --git a/vendor/github.com/quic-go/quic-go/conn_id_manager.go b/vendor/github.com/quic-go/quic-go/conn_id_manager.go index 4aa3f749..4030913d 100644 --- a/vendor/github.com/quic-go/quic-go/conn_id_manager.go +++ b/vendor/github.com/quic-go/quic-go/conn_id_manager.go @@ -35,6 +35,8 @@ type connIDManager struct { addStatelessResetToken func(protocol.StatelessResetToken) removeStatelessResetToken func(protocol.StatelessResetToken) queueControlFrame func(wire.Frame) + + closed bool } func newConnIDManager( @@ -66,6 +68,12 @@ func (h *connIDManager) Add(f *wire.NewConnectionIDFrame) error { } func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error { + if h.activeConnectionID.Len() == 0 { + return &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "received NEW_CONNECTION_ID frame but zero-length connection IDs are in use", + } + } // If the NEW_CONNECTION_ID frame is reordered, such that its sequence number is smaller than the currently active // connection ID or if it was already retired, send the RETIRE_CONNECTION_ID frame immediately. 
if f.SequenceNumber < h.activeSequenceNumber || f.SequenceNumber < h.highestRetired { @@ -142,6 +150,7 @@ func (h *connIDManager) addConnectionID(seq uint64, connID protocol.ConnectionID } func (h *connIDManager) updateConnectionID() { + h.assertNotClosed() h.queueControlFrame(&wire.RetireConnectionIDFrame{ SequenceNumber: h.activeSequenceNumber, }) @@ -160,6 +169,7 @@ func (h *connIDManager) updateConnectionID() { } func (h *connIDManager) Close() { + h.closed = true if h.activeStatelessResetToken != nil { h.removeStatelessResetToken(*h.activeStatelessResetToken) } @@ -176,6 +186,7 @@ func (h *connIDManager) ChangeInitialConnID(newConnID protocol.ConnectionID) { // is called when the server provides a stateless reset token in the transport parameters func (h *connIDManager) SetStatelessResetToken(token protocol.StatelessResetToken) { + h.assertNotClosed() if h.activeSequenceNumber != 0 { panic("expected first connection ID to have sequence number 0") } @@ -203,6 +214,7 @@ func (h *connIDManager) shouldUpdateConnID() bool { } func (h *connIDManager) Get() protocol.ConnectionID { + h.assertNotClosed() if h.shouldUpdateConnID() { h.updateConnectionID() } @@ -212,3 +224,13 @@ func (h *connIDManager) Get() protocol.ConnectionID { func (h *connIDManager) SetHandshakeComplete() { h.handshakeComplete = true } + +// Using the connIDManager after it has been closed can have disastrous effects: +// If the connection ID is rotated, a new entry would be inserted into the packet handler map, +// leading to a memory leak of the connection struct. +// See https://github.com/quic-go/quic-go/pull/4852 for more details. +func (h *connIDManager) assertNotClosed() { + if h.closed { + panic("connection ID manager is closed") + } +} diff --git a/vendor/github.com/quic-go/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go index 4390f5ca..879faec0 100644 --- a/vendor/github.com/quic-go/quic-go/connection.go +++ b/vendor/github.com/quic-go/quic-go/connection.go @@ -85,7 +85,6 @@ func (p *receivedPacket) Clone() *receivedPacket { type connRunner interface { Add(protocol.ConnectionID, packetHandler) bool - GetStatelessResetToken(protocol.ConnectionID) protocol.StatelessResetToken Retire(protocol.ConnectionID) Remove(protocol.ConnectionID) ReplaceWithClosed([]protocol.ConnectionID, []byte) @@ -225,7 +224,7 @@ var newConnection = func( destConnID protocol.ConnectionID, srcConnID protocol.ConnectionID, connIDGenerator ConnectionIDGenerator, - statelessResetToken protocol.StatelessResetToken, + statelessResetter *statelessResetter, conf *Config, tlsConf *tls.Config, tokenGenerator *handshake.TokenGenerator, @@ -263,7 +262,7 @@ var newConnection = func( srcConnID, &clientDestConnID, func(connID protocol.ConnectionID) { runner.Add(connID, s) }, - runner.GetStatelessResetToken, + statelessResetter, runner.Remove, runner.Retire, runner.ReplaceWithClosed, @@ -282,6 +281,7 @@ var newConnection = func( s.logger, ) s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize)))) + statelessResetToken := statelessResetter.GetStatelessResetToken(srcConnID) params := &wire.TransportParameters{ InitialMaxStreamDataBidiLocal: protocol.ByteCount(s.config.InitialStreamReceiveWindow), InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow), @@ -340,6 +340,7 @@ var newClientConnection = func( destConnID protocol.ConnectionID, srcConnID protocol.ConnectionID, connIDGenerator ConnectionIDGenerator, + statelessResetter *statelessResetter, 
conf *Config, tlsConf *tls.Config, initialPacketNumber protocol.PacketNumber, @@ -372,7 +373,7 @@ var newClientConnection = func( srcConnID, nil, func(connID protocol.ConnectionID) { runner.Add(connID, s) }, - runner.GetStatelessResetToken, + statelessResetter, runner.Remove, runner.Retire, runner.ReplaceWithClosed, @@ -477,7 +478,7 @@ func (s *connection) preSetup() { uint64(s.config.MaxIncomingUniStreams), s.perspective, ) - s.framer = newFramer() + s.framer = newFramer(s.connFlowController) s.receivedPackets = make(chan receivedPacket, protocol.MaxConnUnprocessedPackets) s.closeChan = make(chan closeError, 1) s.sendingScheduled = make(chan struct{}, 1) @@ -496,12 +497,28 @@ func (s *connection) run() error { var closeErr closeError defer func() { s.ctxCancel(closeErr.err) }() + defer func() { + // Drain queued packets that will never be processed. + for { + select { + case p, ok := <-s.receivedPackets: + if !ok { + return + } + p.buffer.Decrement() + p.buffer.MaybeRelease() + default: + return + } + } + }() + s.timer = *newTimer() if err := s.cryptoStreamHandler.StartHandshake(s.ctx); err != nil { return err } - if err := s.handleHandshakeEvents(); err != nil { + if err := s.handleHandshakeEvents(time.Now()); err != nil { return err } go func() { @@ -602,7 +619,7 @@ runLoop: if timeout := s.sentPacketHandler.GetLossDetectionTimeout(); !timeout.IsZero() && timeout.Before(now) { // This could cause packets to be retransmitted. // Check it before trying to send packets. - if err := s.sentPacketHandler.OnLossDetectionTimeout(); err != nil { + if err := s.sentPacketHandler.OnLossDetectionTimeout(now); err != nil { s.closeLocal(err) } } @@ -727,7 +744,7 @@ func (s *connection) idleTimeoutStartTime() time.Time { return startTime } -func (s *connection) handleHandshakeComplete() error { +func (s *connection) handleHandshakeComplete(now time.Time) error { defer close(s.handshakeCompleteChan) // Once the handshake completes, we have derived 1-RTT keys. // There's no point in queueing undecryptable packets for later decryption anymore. @@ -748,7 +765,7 @@ func (s *connection) handleHandshakeComplete() error { } // All these only apply to the server side. 
- if err := s.handleHandshakeConfirmed(); err != nil { + if err := s.handleHandshakeConfirmed(now); err != nil { return err } @@ -771,23 +788,22 @@ func (s *connection) handleHandshakeComplete() error { return nil } -func (s *connection) handleHandshakeConfirmed() error { - if err := s.dropEncryptionLevel(protocol.EncryptionHandshake); err != nil { +func (s *connection) handleHandshakeConfirmed(now time.Time) error { + if err := s.dropEncryptionLevel(protocol.EncryptionHandshake, now); err != nil { return err } s.handshakeConfirmed = true - s.sentPacketHandler.SetHandshakeConfirmed() s.cryptoStreamHandler.SetHandshakeConfirmed() if !s.config.DisablePathMTUDiscovery && s.conn.capabilities().DF { - s.mtuDiscoverer.Start() + s.mtuDiscoverer.Start(now) } return nil } func (s *connection) handlePacketImpl(rp receivedPacket) bool { - s.sentPacketHandler.ReceivedBytes(rp.Size()) + s.sentPacketHandler.ReceivedBytes(rp.Size(), rp.rcvTime) if wire.IsVersionNegotiationPacket(rp.data) { s.handleVersionNegotiationPacket(rp) @@ -958,7 +974,7 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) // drop 0-RTT packets, if we are a client if s.perspective == protocol.PerspectiveClient && hdr.Type == protocol.PacketType0RTT { if s.tracer != nil && s.tracer.DroppedPacket != nil { - s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable) + s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket) } return false } @@ -1068,6 +1084,15 @@ func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime ti return false } + newDestConnID := hdr.SrcConnectionID + s.receivedRetry = true + s.sentPacketHandler.ResetForRetry(rcvTime) + s.handshakeDestConnID = newDestConnID + s.retrySrcConnID = &newDestConnID + s.cryptoStreamHandler.ChangeConnectionID(newDestConnID) + s.packer.SetToken(hdr.Token) + s.connIDManager.ChangeInitialConnID(newDestConnID) + if s.logger.Debug() { s.logger.Debugf("<- Received Retry:") (&wire.ExtendedHeader{Header: *hdr}).Log(s.logger) @@ -1076,17 +1101,7 @@ func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime ti if s.tracer != nil && s.tracer.ReceivedRetry != nil { s.tracer.ReceivedRetry(hdr) } - newDestConnID := hdr.SrcConnectionID - s.receivedRetry = true - if err := s.sentPacketHandler.ResetForRetry(rcvTime); err != nil { - s.closeLocal(err) - return false - } - s.handshakeDestConnID = newDestConnID - s.retrySrcConnID = &newDestConnID - s.cryptoStreamHandler.ChangeConnectionID(newDestConnID) - s.packer.SetToken(hdr.Token) - s.connIDManager.ChangeInitialConnID(newDestConnID) + s.scheduleSending() return true } @@ -1195,7 +1210,7 @@ func (s *connection) handleUnpackedLongHeaderPacket( !s.droppedInitialKeys { // On the server side, Initial keys are dropped as soon as the first Handshake packet is received. // See Section 4.9.1 of RFC 9001. 
- if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil { + if err := s.dropEncryptionLevel(protocol.EncryptionInitial, rcvTime); err != nil { return err } } @@ -1210,7 +1225,7 @@ func (s *connection) handleUnpackedLongHeaderPacket( s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, ecn, frames) } } - isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log) + isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log, rcvTime) if err != nil { return err } @@ -1229,7 +1244,7 @@ func (s *connection) handleUnpackedShortHeaderPacket( s.firstAckElicitingPacketAfterIdleSentTime = time.Time{} s.keepAlivePingSent = false - isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log) + isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log, rcvTime) if err != nil { return err } @@ -1241,6 +1256,7 @@ func (s *connection) handleFrames( destConnID protocol.ConnectionID, encLevel protocol.EncryptionLevel, log func([]logging.Frame), + rcvTime time.Time, ) (isAckEliciting bool, _ error) { // Only used for tracing. // If we're not tracing, this slice will always remain empty. @@ -1270,7 +1286,7 @@ func (s *connection) handleFrames( if handleErr != nil { continue } - if err := s.handleFrame(frame, encLevel, destConnID); err != nil { + if err := s.handleFrame(frame, encLevel, destConnID, rcvTime); err != nil { if log == nil { return false, err } @@ -1291,7 +1307,7 @@ func (s *connection) handleFrames( // We receive a Handshake packet that contains the CRYPTO frame that allows us to complete the handshake, // and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame. 
if !handshakeWasComplete && s.handshakeComplete { - if err := s.handleHandshakeComplete(); err != nil { + if err := s.handleHandshakeComplete(rcvTime); err != nil { return false, err } } @@ -1299,20 +1315,25 @@ func (s *connection) handleFrames( return } -func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel, destConnID protocol.ConnectionID) error { +func (s *connection) handleFrame( + f wire.Frame, + encLevel protocol.EncryptionLevel, + destConnID protocol.ConnectionID, + rcvTime time.Time, +) error { var err error wire.LogFrame(s.logger, f, false) switch frame := f.(type) { case *wire.CryptoFrame: - err = s.handleCryptoFrame(frame, encLevel) + err = s.handleCryptoFrame(frame, encLevel, rcvTime) case *wire.StreamFrame: - err = s.handleStreamFrame(frame) + err = s.handleStreamFrame(frame, rcvTime) case *wire.AckFrame: - err = s.handleAckFrame(frame, encLevel) + err = s.handleAckFrame(frame, encLevel, rcvTime) case *wire.ConnectionCloseFrame: s.handleConnectionCloseFrame(frame) case *wire.ResetStreamFrame: - err = s.handleResetStreamFrame(frame) + err = s.handleResetStreamFrame(frame, rcvTime) case *wire.MaxDataFrame: s.handleMaxDataFrame(frame) case *wire.MaxStreamDataFrame: @@ -1321,6 +1342,7 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel s.handleMaxStreamsFrame(frame) case *wire.DataBlockedFrame: case *wire.StreamDataBlockedFrame: + err = s.handleStreamDataBlockedFrame(frame) case *wire.StreamsBlockedFrame: case *wire.StopSendingFrame: err = s.handleStopSendingFrame(frame) @@ -1329,7 +1351,10 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel s.handlePathChallengeFrame(frame) case *wire.PathResponseFrame: // since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs - err = errors.New("unexpected PATH_RESPONSE frame") + err = &qerr.TransportError{ + ErrorCode: qerr.ProtocolViolation, + ErrorMessage: "unexpected PATH_RESPONSE frame", + } case *wire.NewTokenFrame: err = s.handleNewTokenFrame(frame) case *wire.NewConnectionIDFrame: @@ -1337,7 +1362,7 @@ func (s *connection) handleFrame(f wire.Frame, encLevel protocol.EncryptionLevel case *wire.RetireConnectionIDFrame: err = s.handleRetireConnectionIDFrame(frame, destConnID) case *wire.HandshakeDoneFrame: - err = s.handleHandshakeDoneFrame() + err = s.handleHandshakeDoneFrame(rcvTime) case *wire.DatagramFrame: err = s.handleDatagramFrame(frame) default: @@ -1376,7 +1401,7 @@ func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame }) } -func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error { +func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) error { if err := s.cryptoStreamManager.HandleCryptoFrame(frame, encLevel); err != nil { return err } @@ -1389,10 +1414,10 @@ func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protoco return err } } - return s.handleHandshakeEvents() + return s.handleHandshakeEvents(rcvTime) } -func (s *connection) handleHandshakeEvents() error { +func (s *connection) handleHandshakeEvents(now time.Time) error { for { ev := s.cryptoStreamHandler.NextEvent() var err error @@ -1413,7 +1438,7 @@ func (s *connection) handleHandshakeEvents() error { s.undecryptablePacketsToProcess = s.undecryptablePackets s.undecryptablePackets = nil case handshake.EventDiscard0RTTKeys: - err = s.dropEncryptionLevel(protocol.Encryption0RTT) + err = 
s.dropEncryptionLevel(protocol.Encryption0RTT, now) case handshake.EventWriteInitialData: _, err = s.initialStream.Write(ev.Data) case handshake.EventWriteHandshakeData: @@ -1425,17 +1450,15 @@ func (s *connection) handleHandshakeEvents() error { } } -func (s *connection) handleStreamFrame(frame *wire.StreamFrame) error { +func (s *connection) handleStreamFrame(frame *wire.StreamFrame, rcvTime time.Time) error { str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) if err != nil { return err } - if str == nil { - // Stream is closed and already garbage collected - // ignore this StreamFrame + if str == nil { // stream was already closed and garbage collected return nil } - return str.handleStreamFrame(frame) + return str.handleStreamFrame(frame, rcvTime) } func (s *connection) handleMaxDataFrame(frame *wire.MaxDataFrame) { @@ -1455,11 +1478,18 @@ func (s *connection) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) er return nil } +func (s *connection) handleStreamDataBlockedFrame(frame *wire.StreamDataBlockedFrame) error { + // We don't need to do anything in response to a STREAM_DATA_BLOCKED frame, + // but we need to make sure that the stream ID is valid. + _, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) + return err +} + func (s *connection) handleMaxStreamsFrame(frame *wire.MaxStreamsFrame) { s.streamsMap.HandleMaxStreamsFrame(frame) } -func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error { +func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame, rcvTime time.Time) error { str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) if err != nil { return err @@ -1468,7 +1498,7 @@ func (s *connection) handleResetStreamFrame(frame *wire.ResetStreamFrame) error // stream is closed and already garbage collected return nil } - return str.handleResetStreamFrame(frame) + return str.handleResetStreamFrame(frame, rcvTime) } func (s *connection) handleStopSendingFrame(frame *wire.StopSendingFrame) error { @@ -1509,7 +1539,7 @@ func (s *connection) handleRetireConnectionIDFrame(f *wire.RetireConnectionIDFra return s.connIDGenerator.Retire(f.SequenceNumber, destConnID) } -func (s *connection) handleHandshakeDoneFrame() error { +func (s *connection) handleHandshakeDoneFrame(rcvTime time.Time) error { if s.perspective == protocol.PerspectiveServer { return &qerr.TransportError{ ErrorCode: qerr.ProtocolViolation, @@ -1517,12 +1547,12 @@ func (s *connection) handleHandshakeDoneFrame() error { } } if !s.handshakeConfirmed { - return s.handleHandshakeConfirmed() + return s.handleHandshakeConfirmed(rcvTime) } return nil } -func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel) error { +func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) error { acked1RTTPacket, err := s.sentPacketHandler.ReceivedAck(frame, encLevel, s.lastPacketReceivedTime) if err != nil { return err @@ -1534,7 +1564,7 @@ func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.Encr // This is only possible if the ACK was sent in a 1-RTT packet. // This is an optimization over simply waiting for a HANDSHAKE_DONE frame, see section 4.1.2 of RFC 9001. 
if s.perspective == protocol.PerspectiveClient && !s.handshakeConfirmed { - if err := s.handleHandshakeConfirmed(); err != nil { + if err := s.handleHandshakeConfirmed(rcvTime); err != nil { return err } } @@ -1627,6 +1657,8 @@ func (s *connection) handleCloseError(closeErr *closeError) { errors.As(e, &recreateErr), errors.As(e, &applicationErr), errors.As(e, &transportErr): + case closeErr.immediate: + e = closeErr.err default: e = &qerr.TransportError{ ErrorCode: qerr.InternalError, @@ -1635,11 +1667,16 @@ func (s *connection) handleCloseError(closeErr *closeError) { } s.streamsMap.CloseWithError(e) - s.connIDManager.Close() if s.datagramQueue != nil { s.datagramQueue.CloseWithError(e) } + // In rare instances, the connection ID manager might switch to a new connection ID + // when sending the CONNECTION_CLOSE frame. + // The connection ID manager removes the active stateless reset token from the packet + // handler map when it is closed, so we need to make sure that this happens last. + defer s.connIDManager.Close() + if s.tracer != nil && s.tracer.ClosedConnection != nil && !errors.As(e, &recreateErr) { s.tracer.ClosedConnection(e) } @@ -1666,11 +1703,11 @@ func (s *connection) handleCloseError(closeErr *closeError) { s.connIDGenerator.ReplaceWithClosed(connClosePacket) } -func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) error { +func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel, now time.Time) error { if s.tracer != nil && s.tracer.DroppedEncryptionLevel != nil { s.tracer.DroppedEncryptionLevel(encLevel) } - s.sentPacketHandler.DropPackets(encLevel) + s.sentPacketHandler.DropPackets(encLevel, now) s.receivedPacketHandler.DropPackets(encLevel) //nolint:exhaustive // only Initial and 0-RTT need special treatment switch encLevel { @@ -1772,7 +1809,7 @@ func (s *connection) applyTransportParameters() { if params.MaxIdleTimeout > 0 { s.idleTimeout = min(s.idleTimeout, params.MaxIdleTimeout) } - s.keepAliveInterval = min(s.config.KeepAlivePeriod, min(s.idleTimeout/2, protocol.MaxKeepAliveInterval)) + s.keepAliveInterval = min(s.config.KeepAlivePeriod, s.idleTimeout/2) s.streamsMap.UpdateLimits(params) s.frameParser.SetAckDelayExponent(params.AckDelayExponent) s.connFlowController.UpdateSendWindow(params.InitialMaxData) @@ -1822,28 +1859,10 @@ func (s *connection) triggerSending(now time.Time) error { case ackhandler.SendAck: // We can at most send a single ACK only packet. // There will only be a new ACK after receiving new packets. - // SendAck is only returned when we're congestion limited, so we don't need to set the pacinggs timer. + // SendAck is only returned when we're congestion limited, so we don't need to set the pacing timer. 
return s.maybeSendAckOnlyPacket(now) - case ackhandler.SendPTOInitial: - if err := s.sendProbePacket(protocol.EncryptionInitial, now); err != nil { - return err - } - if s.sendQueue.WouldBlock() { - s.scheduleSending() - return nil - } - return s.triggerSending(now) - case ackhandler.SendPTOHandshake: - if err := s.sendProbePacket(protocol.EncryptionHandshake, now); err != nil { - return err - } - if s.sendQueue.WouldBlock() { - s.scheduleSending() - return nil - } - return s.triggerSending(now) - case ackhandler.SendPTOAppData: - if err := s.sendProbePacket(protocol.Encryption1RTT, now); err != nil { + case ackhandler.SendPTOInitial, ackhandler.SendPTOHandshake, ackhandler.SendPTOAppData: + if err := s.sendProbePacket(sendMode, now); err != nil { return err } if s.sendQueue.WouldBlock() { @@ -1862,7 +1881,7 @@ func (s *connection) sendPackets(now time.Time) error { // Performance-wise, this doesn't matter, since we only send a very small (<10) number of // MTU probe packets per connection. if s.handshakeConfirmed && s.mtuDiscoverer != nil && s.mtuDiscoverer.ShouldSendProbe(now) { - ping, size := s.mtuDiscoverer.GetPing() + ping, size := s.mtuDiscoverer.GetPing(now) p, buf, err := s.packer.PackMTUProbePacket(ping, size, s.version) if err != nil { return err @@ -1871,15 +1890,12 @@ func (s *connection) sendPackets(now time.Time) error { s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, buf.Len(), false) s.registerPackedShortHeaderPacket(p, ecn, now) s.sendQueue.Send(buf, 0, ecn) - // This is kind of a hack. We need to trigger sending again somehow. - s.pacingDeadline = deadlineSendImmediately + // There's (likely) more data to send. Loop around again. + s.scheduleSending() return nil } - if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked { - s.framer.QueueControlFrame(&wire.DataBlockedFrame{MaximumData: offset}) - } - if offset := s.connFlowController.GetWindowUpdate(); offset > 0 { + if offset := s.connFlowController.GetWindowUpdate(now); offset > 0 { s.framer.QueueControlFrame(&wire.MaxDataFrame{MaximumData: offset}) } if cf := s.cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize); cf != nil { @@ -1887,7 +1903,7 @@ func (s *connection) sendPackets(now time.Time) error { } if !s.handshakeConfirmed { - packet, err := s.packer.PackCoalescedPacket(false, s.maxPacketSize(), s.version) + packet, err := s.packer.PackCoalescedPacket(false, s.maxPacketSize(), now, s.version) if err != nil || packet == nil { return err } @@ -1999,6 +2015,7 @@ func (s *connection) sendPacketsWithGSO(now time.Time) error { return nil } + ecn = nextECN buf = getLargePacketBuffer() } } @@ -2014,7 +2031,7 @@ func (s *connection) resetPacingDeadline() { func (s *connection) maybeSendAckOnlyPacket(now time.Time) error { if !s.handshakeConfirmed { ecn := s.sentPacketHandler.ECNMode(false) - packet, err := s.packer.PackCoalescedPacket(true, s.maxPacketSize(), s.version) + packet, err := s.packer.PackCoalescedPacket(true, s.maxPacketSize(), now, s.version) if err != nil { return err } @@ -2025,7 +2042,7 @@ func (s *connection) maybeSendAckOnlyPacket(now time.Time) error { } ecn := s.sentPacketHandler.ECNMode(true) - p, buf, err := s.packer.PackAckOnlyPacket(s.maxPacketSize(), s.version) + p, buf, err := s.packer.PackAckOnlyPacket(s.maxPacketSize(), now, s.version) if err != nil { if err == errNothingToPack { return nil @@ -2038,7 +2055,19 @@ func (s *connection) maybeSendAckOnlyPacket(now time.Time) 
error { return nil } -func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time.Time) error { +func (s *connection) sendProbePacket(sendMode ackhandler.SendMode, now time.Time) error { + var encLevel protocol.EncryptionLevel + //nolint:exhaustive // We only need to handle the PTO send modes here. + switch sendMode { + case ackhandler.SendPTOInitial: + encLevel = protocol.EncryptionInitial + case ackhandler.SendPTOHandshake: + encLevel = protocol.EncryptionHandshake + case ackhandler.SendPTOAppData: + encLevel = protocol.Encryption1RTT + default: + return fmt.Errorf("connection BUG: unexpected send mode: %d", sendMode) + } // Queue probe packets until we actually send out a packet, // or until there are no more packets to queue. var packet *coalescedPacket @@ -2047,7 +2076,7 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time break } var err error - packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version) + packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), now, s.version) if err != nil { return err } @@ -2058,7 +2087,7 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time if packet == nil { s.retransmissionQueue.AddPing(encLevel) var err error - packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), s.version) + packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), now, s.version) if err != nil { return err } @@ -2073,7 +2102,7 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel, now time // If there was nothing to pack, the returned size is 0. func (s *connection) appendOneShortHeaderPacket(buf *packetBuffer, maxSize protocol.ByteCount, ecn protocol.ECN, now time.Time) (protocol.ByteCount, error) { startLen := buf.Len() - p, err := s.packer.AppendPacket(buf, maxSize, s.version) + p, err := s.packer.AppendPacket(buf, maxSize, now, s.version) if err != nil { return 0, err } @@ -2111,7 +2140,7 @@ func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, ecn prot !s.droppedInitialKeys { // On the client side, Initial keys are dropped as soon as the first Handshake packet is sent. // See Section 4.9.1 of RFC 9001. - if err := s.dropEncryptionLevel(protocol.EncryptionInitial); err != nil { + if err := s.dropEncryptionLevel(protocol.EncryptionInitial, now); err != nil { return err } } @@ -2251,6 +2280,8 @@ func (s *connection) queueControlFrame(f wire.Frame) { s.scheduleSending() } +func (s *connection) onHasConnectionData() { s.scheduleSending() } + func (s *connection) onHasStreamData(id protocol.StreamID, str sendStreamI) { s.framer.AddActiveStream(id, str) s.scheduleSending() @@ -2300,17 +2331,8 @@ func (s *connection) ReceiveDatagram(ctx context.Context) ([]byte, error) { return s.datagramQueue.Receive(ctx) } -func (s *connection) LocalAddr() net.Addr { - return s.conn.LocalAddr() -} - -func (s *connection) RemoteAddr() net.Addr { - return s.conn.RemoteAddr() -} - -func (s *connection) GetVersion() protocol.Version { - return s.version -} +func (s *connection) LocalAddr() net.Addr { return s.conn.LocalAddr() } +func (s *connection) RemoteAddr() net.Addr { return s.conn.RemoteAddr() } func (s *connection) NextConnection(ctx context.Context) (Connection, error) { // The handshake might fail after the server rejected 0-RTT. 
diff --git a/vendor/github.com/quic-go/quic-go/connection_logging.go b/vendor/github.com/quic-go/quic-go/connection_logging.go index f75b39f6..a314a6cd 100644 --- a/vendor/github.com/quic-go/quic-go/connection_logging.go +++ b/vendor/github.com/quic-go/quic-go/connection_logging.go @@ -125,12 +125,7 @@ func (s *connection) logShortHeaderPacket( ack = toLoggingAckFrame(ackFrame) } s.tracer.SentShortHeaderPacket( - &logging.ShortHeader{ - DestConnectionID: destConnID, - PacketNumber: pn, - PacketNumberLen: pnLen, - KeyPhase: kp, - }, + &logging.ShortHeader{DestConnectionID: destConnID, PacketNumber: pn, PacketNumberLen: pnLen, KeyPhase: kp}, size, ecn, ack, diff --git a/vendor/github.com/quic-go/quic-go/errors.go b/vendor/github.com/quic-go/quic-go/errors.go index 3fe1e0a9..4a69a7f1 100644 --- a/vendor/github.com/quic-go/quic-go/errors.go +++ b/vendor/github.com/quic-go/quic-go/errors.go @@ -50,8 +50,8 @@ type StreamError struct { } func (e *StreamError) Is(target error) bool { - _, ok := target.(*StreamError) - return ok + t, ok := target.(*StreamError) + return ok && e.StreamID == t.StreamID && e.ErrorCode == t.ErrorCode && e.Remote == t.Remote } func (e *StreamError) Error() string { @@ -68,8 +68,8 @@ type DatagramTooLargeError struct { } func (e *DatagramTooLargeError) Is(target error) bool { - _, ok := target.(*DatagramTooLargeError) - return ok + t, ok := target.(*DatagramTooLargeError) + return ok && e.MaxDatagramPayloadSize == t.MaxDatagramPayloadSize } func (e *DatagramTooLargeError) Error() string { return "DATAGRAM frame too large" } diff --git a/vendor/github.com/quic-go/quic-go/framer.go b/vendor/github.com/quic-go/quic-go/framer.go index e162f6b8..fee31631 100644 --- a/vendor/github.com/quic-go/quic-go/framer.go +++ b/vendor/github.com/quic-go/quic-go/framer.go @@ -3,8 +3,10 @@ package quic import ( "slices" "sync" + "time" "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/flowcontrol" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/utils/ringbuffer" "github.com/quic-go/quic-go/internal/wire" @@ -21,7 +23,7 @@ const ( const maxStreamControlFrameSize = 25 type streamControlFrameGetter interface { - getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) + getControlFrame(time.Time) (_ ackhandler.Frame, ok, hasMore bool) } type framer struct { @@ -34,13 +36,15 @@ type framer struct { controlFrameMutex sync.Mutex controlFrames []wire.Frame pathResponses []*wire.PathResponseFrame + connFlowController flowcontrol.ConnectionFlowController queuedTooManyControlFrames bool } -func newFramer() *framer { +func newFramer(connFlowController flowcontrol.ConnectionFlowController) *framer { return &framer{ activeStreams: make(map[protocol.StreamID]sendStreamI), streamsWithControlFrames: make(map[protocol.StreamID]streamControlFrameGetter), + connFlowController: connFlowController, } } @@ -78,10 +82,80 @@ func (f *framer) QueueControlFrame(frame wire.Frame) { f.controlFrames = append(f.controlFrames, frame) } -func (f *framer) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) { +func (f *framer) Append( + frames []ackhandler.Frame, + streamFrames []ackhandler.StreamFrame, + maxLen protocol.ByteCount, + now time.Time, + v protocol.Version, +) ([]ackhandler.Frame, []ackhandler.StreamFrame, protocol.ByteCount) { f.controlFrameMutex.Lock() - defer f.controlFrameMutex.Unlock() + frames, controlFrameLen := f.appendControlFrames(frames, maxLen, 
now, v) + maxLen -= controlFrameLen + var lastFrame ackhandler.StreamFrame + var streamFrameLen protocol.ByteCount + f.mutex.Lock() + // pop STREAM frames, until less than 128 bytes are left in the packet + numActiveStreams := f.streamQueue.Len() + for i := 0; i < numActiveStreams; i++ { + if protocol.MinStreamFrameSize > maxLen { + break + } + sf, blocked := f.getNextStreamFrame(maxLen, v) + if sf.Frame != nil { + streamFrames = append(streamFrames, sf) + maxLen -= sf.Frame.Length(v) + lastFrame = sf + streamFrameLen += sf.Frame.Length(v) + } + // If the stream just became blocked on stream flow control, attempt to pack the + // STREAM_DATA_BLOCKED into the same packet. + if blocked != nil { + l := blocked.Length(v) + // In case it doesn't fit, queue it for the next packet. + if maxLen < l { + f.controlFrames = append(f.controlFrames, blocked) + break + } + frames = append(frames, ackhandler.Frame{Frame: blocked}) + maxLen -= l + controlFrameLen += l + } + } + + // The only way to become blocked on connection-level flow control is by sending STREAM frames. + if isBlocked, offset := f.connFlowController.IsNewlyBlocked(); isBlocked { + blocked := &wire.DataBlockedFrame{MaximumData: offset} + l := blocked.Length(v) + // In case it doesn't fit, queue it for the next packet. + if maxLen >= l { + frames = append(frames, ackhandler.Frame{Frame: blocked}) + controlFrameLen += l + } else { + f.controlFrames = append(f.controlFrames, blocked) + } + } + + f.mutex.Unlock() + f.controlFrameMutex.Unlock() + + if lastFrame.Frame != nil { + // account for the smaller size of the last STREAM frame + streamFrameLen -= lastFrame.Frame.Length(v) + lastFrame.Frame.DataLenPresent = false + streamFrameLen += lastFrame.Frame.Length(v) + } + + return frames, streamFrames, controlFrameLen + streamFrameLen +} + +func (f *framer) appendControlFrames( + frames []ackhandler.Frame, + maxLen protocol.ByteCount, + now time.Time, + v protocol.Version, +) ([]ackhandler.Frame, protocol.ByteCount) { var length protocol.ByteCount // add a PATH_RESPONSE first, but only pack a single PATH_RESPONSE per packet if len(f.pathResponses) > 0 { @@ -101,7 +175,7 @@ func (f *framer) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol. if remainingLen <= maxStreamControlFrameSize { break } - fr, ok, hasMore := str.getControlFrame() + fr, ok, hasMore := str.getControlFrame(now) if !hasMore { delete(f.streamsWithControlFrames, id) } @@ -163,56 +237,33 @@ func (f *framer) RemoveActiveStream(id protocol.StreamID) { delete(f.activeStreams, id) // We don't delete the stream from the streamQueue, // since we'd have to iterate over the ringbuffer. - // Instead, we check if the stream is still in activeStreams in AppendStreamFrames. + // Instead, we check if the stream is still in activeStreams when appending STREAM frames. f.mutex.Unlock() } -func (f *framer) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) { - startLen := len(frames) - var length protocol.ByteCount - f.mutex.Lock() - // pop STREAM frames, until less than 128 bytes are left in the packet - numActiveStreams := f.streamQueue.Len() - for i := 0; i < numActiveStreams; i++ { - if protocol.MinStreamFrameSize+length > maxLen { - break - } - id := f.streamQueue.PopFront() - // This should never return an error. Better check it anyway. - // The stream will only be in the streamQueue, if it enqueued itself there. 
- str, ok := f.activeStreams[id] - // The stream might have been removed after being enqueued. - if !ok { - continue - } - remainingLen := maxLen - length - // For the last STREAM frame, we'll remove the DataLen field later. - // Therefore, we can pretend to have more bytes available when popping - // the STREAM frame (which will always have the DataLen set). - remainingLen += protocol.ByteCount(quicvarint.Len(uint64(remainingLen))) - frame, ok, hasMoreData := str.popStreamFrame(remainingLen, v) - if hasMoreData { // put the stream back in the queue (at the end) - f.streamQueue.PushBack(id) - } else { // no more data to send. Stream is not active - delete(f.activeStreams, id) - } - // The frame can be "nil" - // * if the stream was canceled after it said it had data - // * the remaining size doesn't allow us to add another STREAM frame - if !ok { - continue - } - frames = append(frames, frame) - length += frame.Frame.Length(v) +func (f *framer) getNextStreamFrame(maxLen protocol.ByteCount, v protocol.Version) (ackhandler.StreamFrame, *wire.StreamDataBlockedFrame) { + id := f.streamQueue.PopFront() + // This should never return an error. Better check it anyway. + // The stream will only be in the streamQueue, if it enqueued itself there. + str, ok := f.activeStreams[id] + // The stream might have been removed after being enqueued. + if !ok { + return ackhandler.StreamFrame{}, nil } - f.mutex.Unlock() - if len(frames) > startLen { - l := frames[len(frames)-1].Frame.Length(v) - // account for the smaller size of the last STREAM frame - frames[len(frames)-1].Frame.DataLenPresent = false - length += frames[len(frames)-1].Frame.Length(v) - l + // For the last STREAM frame, we'll remove the DataLen field later. + // Therefore, we can pretend to have more bytes available when popping + // the STREAM frame (which will always have the DataLen set). + maxLen += protocol.ByteCount(quicvarint.Len(uint64(maxLen))) + frame, blocked, hasMoreData := str.popStreamFrame(maxLen, v) + if hasMoreData { // put the stream back in the queue (at the end) + f.streamQueue.PushBack(id) + } else { // no more data to send. Stream is not active + delete(f.activeStreams, id) } - return frames, length + // Note that the frame.Frame can be nil: + // * if the stream was canceled after it said it had data + // * the remaining size doesn't allow us to add another STREAM frame + return frame, blocked } func (f *framer) Handle0RTTRejection() { diff --git a/vendor/github.com/quic-go/quic-go/http3/body.go b/vendor/github.com/quic-go/quic-go/http3/body.go index fa023ce4..698275fe 100644 --- a/vendor/github.com/quic-go/quic-go/http3/body.go +++ b/vendor/github.com/quic-go/quic-go/http3/body.go @@ -4,6 +4,7 @@ import ( "context" "errors" "io" + "sync" "github.com/quic-go/quic-go" ) @@ -95,8 +96,8 @@ type hijackableBody struct { // only set for the http.Response // The channel is closed when the user is done with this response: // either when Read() errors, or when Close() is called. 
- reqDone chan<- struct{} - reqDoneClosed bool + reqDone chan<- struct{} + reqDoneOnce sync.Once } var _ io.ReadCloser = &hijackableBody{} @@ -117,13 +118,11 @@ func (r *hijackableBody) Read(b []byte) (int, error) { } func (r *hijackableBody) requestDone() { - if r.reqDoneClosed || r.reqDone == nil { - return - } if r.reqDone != nil { - close(r.reqDone) + r.reqDoneOnce.Do(func() { + close(r.reqDone) + }) } - r.reqDoneClosed = true } func (r *hijackableBody) Close() error { diff --git a/vendor/github.com/quic-go/quic-go/http3/client.go b/vendor/github.com/quic-go/quic-go/http3/client.go index 83502240..9214bb80 100644 --- a/vendor/github.com/quic-go/quic-go/http3/client.go +++ b/vendor/github.com/quic-go/quic-go/http3/client.go @@ -294,10 +294,13 @@ func (c *ClientConn) sendRequestBody(str Stream, body io.ReadCloser, contentLeng } func (c *ClientConn) doRequest(req *http.Request, str *requestStream) (*http.Response, error) { + trace := httptrace.ContextClientTrace(req.Context()) if err := str.SendRequestHeader(req); err != nil { + traceWroteRequest(trace, err) return nil, err } if req.Body == nil { + traceWroteRequest(trace, nil) str.Close() } else { // send the request body asynchronously @@ -308,7 +311,9 @@ func (c *ClientConn) doRequest(req *http.Request, str *requestStream) (*http.Res if req.ContentLength > 0 { contentLength = req.ContentLength } - if err := c.sendRequestBody(str, req.Body, contentLength); err != nil { + err := c.sendRequestBody(str, req.Body, contentLength) + traceWroteRequest(trace, err) + if err != nil { if c.logger != nil { c.logger.Debug("error writing request", "error", err) } @@ -318,7 +323,6 @@ func (c *ClientConn) doRequest(req *http.Request, str *requestStream) (*http.Res } // copy from net/http: support 1xx responses - trace := httptrace.ContextClientTrace(req.Context()) num1xx := 0 // number of informational 1xx headers received const max1xxResponses = 5 // arbitrary bound on number of informational responses @@ -338,10 +342,9 @@ func (c *ClientConn) doRequest(req *http.Request, str *requestStream) (*http.Res if num1xx > max1xxResponses { return nil, errors.New("http: too many 1xx informational responses") } - if trace != nil && trace.Got1xxResponse != nil { - if err := trace.Got1xxResponse(resCode, textproto.MIMEHeader(res.Header)); err != nil { - return nil, err - } + traceGot1xxResponse(trace, resCode, textproto.MIMEHeader(res.Header)) + if resCode == 100 { + traceGot100Continue(trace) } continue } diff --git a/vendor/github.com/quic-go/quic-go/http3/conn.go b/vendor/github.com/quic-go/quic-go/http3/conn.go index ec62ed3f..bb17a5e5 100644 --- a/vendor/github.com/quic-go/quic-go/http3/conn.go +++ b/vendor/github.com/quic-go/quic-go/http3/conn.go @@ -7,6 +7,7 @@ import ( "log/slog" "net" "net/http" + "net/http/httptrace" "sync" "sync/atomic" "time" @@ -123,7 +124,8 @@ func (c *connection) openRequestStream( rsp.Trailer = hdr return nil }) - return newRequestStream(hstr, requestWriter, reqDone, c.decoder, disableCompression, maxHeaderBytes, rsp), nil + trace := httptrace.ContextClientTrace(ctx) + return newRequestStream(hstr, requestWriter, reqDone, c.decoder, disableCompression, maxHeaderBytes, rsp, trace), nil } func (c *connection) decodeTrailers(r io.Reader, l, maxHeaderBytes uint64) (http.Header, error) { diff --git a/vendor/github.com/quic-go/quic-go/http3/error.go b/vendor/github.com/quic-go/quic-go/http3/error.go index b96ebeec..82fdae6a 100644 --- a/vendor/github.com/quic-go/quic-go/http3/error.go +++ 
b/vendor/github.com/quic-go/quic-go/http3/error.go @@ -33,6 +33,11 @@ func (e *Error) Error() string { return s } +func (e *Error) Is(target error) bool { + t, ok := target.(*Error) + return ok && e.ErrorCode == t.ErrorCode && e.Remote == t.Remote +} + func maybeReplaceError(err error) error { if err == nil { return nil diff --git a/vendor/github.com/quic-go/quic-go/http3/http_stream.go b/vendor/github.com/quic-go/quic-go/http3/http_stream.go index f02e778e..4593d473 100644 --- a/vendor/github.com/quic-go/quic-go/http3/http_stream.go +++ b/vendor/github.com/quic-go/quic-go/http3/http_stream.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "net/http/httptrace" "github.com/quic-go/quic-go" "github.com/quic-go/quic-go/internal/protocol" @@ -147,10 +148,12 @@ type requestStream struct { reqDone chan<- struct{} disableCompression bool response *http.Response + trace *httptrace.ClientTrace sentRequest bool requestedGzip bool isConnect bool + firstByte bool } var _ RequestStream = &requestStream{} @@ -163,6 +166,7 @@ func newRequestStream( disableCompression bool, maxHeaderBytes uint64, rsp *http.Response, + trace *httptrace.ClientTrace, ) *requestStream { return &requestStream{ stream: str, @@ -172,6 +176,7 @@ func newRequestStream( disableCompression: disableCompression, maxHeaderBytes: maxHeaderBytes, response: rsp, + trace: trace, } } @@ -197,8 +202,12 @@ func (s *requestStream) SendRequestHeader(req *http.Request) error { func (s *requestStream) ReadResponse() (*http.Response, error) { fp := &frameParser{ - r: s.Stream, conn: s.conn, + r: &tracingReader{ + Reader: s.Stream, + first: &s.firstByte, + trace: s.trace, + }, } frame, err := fp.ParseNext() if err != nil { @@ -268,3 +277,18 @@ func (s *stream) ReceiveDatagram(ctx context.Context) ([]byte, error) { // TODO: reject if datagrams are not negotiated (yet) return s.datagrams.Receive(ctx) } + +type tracingReader struct { + io.Reader + first *bool + trace *httptrace.ClientTrace +} + +func (r *tracingReader) Read(b []byte) (int, error) { + n, err := r.Reader.Read(b) + if n > 0 && r.first != nil && !*r.first { + traceGotFirstResponseByte(r.trace) + *r.first = true + } + return n, err +} diff --git a/vendor/github.com/quic-go/quic-go/http3/ip_addr.go b/vendor/github.com/quic-go/quic-go/http3/ip_addr.go new file mode 100644 index 00000000..876a1e35 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/ip_addr.go @@ -0,0 +1,48 @@ +package http3 + +import ( + "net" + "strings" +) + +// An addrList represents a list of network endpoint addresses. +// Copy from [net.addrList] and change type from [net.Addr] to [net.IPAddr] +type addrList []net.IPAddr + +// isIPv4 reports whether addr contains an IPv4 address. +func isIPv4(addr net.IPAddr) bool { + return addr.IP.To4() != nil +} + +// isNotIPv4 reports whether addr does not contain an IPv4 address. +func isNotIPv4(addr net.IPAddr) bool { return !isIPv4(addr) } + +// forResolve returns the most appropriate address in address for +// a call to ResolveTCPAddr, ResolveUDPAddr, or ResolveIPAddr. +// IPv4 is preferred, unless addr contains an IPv6 literal. +func (addrs addrList) forResolve(network, addr string) net.IPAddr { + var want6 bool + switch network { + case "ip": + // IPv6 literal (addr does NOT contain a port) + want6 = strings.ContainsRune(addr, ':') + case "tcp", "udp": + // IPv6 literal. 
(addr contains a port, so look for '[') + want6 = strings.ContainsRune(addr, '[') + } + if want6 { + return addrs.first(isNotIPv4) + } + return addrs.first(isIPv4) +} + +// first returns the first address which satisfies strategy, or if +// none do, then the first address of any kind. +func (addrs addrList) first(strategy func(net.IPAddr) bool) net.IPAddr { + for _, addr := range addrs { + if strategy(addr) { + return addr + } + } + return addrs[0] +} diff --git a/vendor/github.com/quic-go/quic-go/http3/mockgen.go b/vendor/github.com/quic-go/quic-go/http3/mockgen.go index 83a3974f..1d790e11 100644 --- a/vendor/github.com/quic-go/quic-go/http3/mockgen.go +++ b/vendor/github.com/quic-go/quic-go/http3/mockgen.go @@ -2,7 +2,7 @@ package http3 -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package http3 -destination mock_singleroundtripper_test.go github.com/quic-go/quic-go/http3 SingleRoundTripper" -type SingleRoundTripper = singleRoundTripper +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -mock_names=TestClientConnInterface=MockClientConn -package http3 -destination mock_clientconn_test.go github.com/quic-go/quic-go/http3 TestClientConnInterface" +type TestClientConnInterface = clientConn //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package http3 -destination mock_quic_early_listener_test.go github.com/quic-go/quic-go/http3 QUICEarlyListener" diff --git a/vendor/github.com/quic-go/quic-go/http3/request_writer.go b/vendor/github.com/quic-go/quic-go/http3/request_writer.go index e83e9cc8..2dbacb15 100644 --- a/vendor/github.com/quic-go/quic-go/http3/request_writer.go +++ b/vendor/github.com/quic-go/quic-go/http3/request_writer.go @@ -7,6 +7,7 @@ import ( "io" "net" "net/http" + "net/http/httptrace" "strconv" "strings" "sync" @@ -42,8 +43,12 @@ func (w *requestWriter) WriteRequestHeader(str quic.Stream, req *http.Request, g if err := w.writeHeaders(buf, req, gzip); err != nil { return err } - _, err := str.Write(buf.Bytes()) - return err + if _, err := str.Write(buf.Bytes()); err != nil { + return err + } + trace := httptrace.ContextClientTrace(req.Context()) + traceWroteHeaders(trace) + return nil } func (w *requestWriter) writeHeaders(wr io.Writer, req *http.Request, gzip bool) error { @@ -198,16 +203,16 @@ func (w *requestWriter) encodeHeaders(req *http.Request, addGzipHeader bool, tra // return errRequestHeaderListSize // } - // trace := httptrace.ContextClientTrace(req.Context()) - // traceHeaders := traceHasWroteHeaderField(trace) + trace := httptrace.ContextClientTrace(req.Context()) + traceHeaders := traceHasWroteHeaderField(trace) // Header list size is ok. Write the headers. 
enumerateHeaders(func(name, value string) { name = strings.ToLower(name) w.encoder.WriteField(qpack.HeaderField{Name: name, Value: value}) - // if traceHeaders { - // traceWroteHeaderField(trace, name, value) - // } + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } }) return nil diff --git a/vendor/github.com/quic-go/quic-go/http3/trace.go b/vendor/github.com/quic-go/quic-go/http3/trace.go new file mode 100644 index 00000000..76de6fb1 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/trace.go @@ -0,0 +1,105 @@ +package http3 + +import ( + "crypto/tls" + "net" + "net/http/httptrace" + "net/textproto" + "time" + + "github.com/quic-go/quic-go" +) + +func traceGetConn(trace *httptrace.ClientTrace, hostPort string) { + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } +} + +// fakeConn is a wrapper for quic.EarlyConnection +// because the quic connection does not implement net.Conn. +type fakeConn struct { + conn quic.EarlyConnection +} + +func (c *fakeConn) Close() error { panic("connection operation prohibited") } +func (c *fakeConn) Read(p []byte) (int, error) { panic("connection operation prohibited") } +func (c *fakeConn) Write(p []byte) (int, error) { panic("connection operation prohibited") } +func (c *fakeConn) SetDeadline(t time.Time) error { panic("connection operation prohibited") } +func (c *fakeConn) SetReadDeadline(t time.Time) error { panic("connection operation prohibited") } +func (c *fakeConn) SetWriteDeadline(t time.Time) error { panic("connection operation prohibited") } +func (c *fakeConn) RemoteAddr() net.Addr { return c.conn.RemoteAddr() } +func (c *fakeConn) LocalAddr() net.Addr { return c.conn.LocalAddr() } + +func traceGotConn(trace *httptrace.ClientTrace, conn quic.EarlyConnection, reused bool) { + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: &fakeConn{conn: conn}, + Reused: reused, + }) + } +} + +func traceGotFirstResponseByte(trace *httptrace.ClientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} + +func traceGot1xxResponse(trace *httptrace.ClientTrace, code int, header textproto.MIMEHeader) { + if trace != nil && trace.Got1xxResponse != nil { + trace.Got1xxResponse(code, header) + } +} + +func traceGot100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceWroteHeaders(trace *httptrace.ClientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceWroteRequest(trace *httptrace.ClientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceConnectStart(trace *httptrace.ClientTrace, network, addr string) { + if trace != nil && trace.ConnectStart != nil { + trace.ConnectStart(network, addr) + } +} + +func traceConnectDone(trace *httptrace.ClientTrace, network, addr string, err error) { + if trace != nil && trace.ConnectDone != nil { + trace.ConnectDone(network, addr, err) + } +} + +func traceTLSHandshakeStart(trace *httptrace.ClientTrace) { + if trace != nil && trace.TLSHandshakeStart != nil { + 
trace.TLSHandshakeStart() + } +} + +func traceTLSHandshakeDone(trace *httptrace.ClientTrace, state tls.ConnectionState, err error) { + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(state, err) + } +} diff --git a/vendor/github.com/quic-go/quic-go/http3/transport.go b/vendor/github.com/quic-go/quic-go/http3/transport.go index 8dcaef4d..b90f1190 100644 --- a/vendor/github.com/quic-go/quic-go/http3/transport.go +++ b/vendor/github.com/quic-go/quic-go/http3/transport.go @@ -9,6 +9,7 @@ import ( "log/slog" "net" "net/http" + "net/http/httptrace" "strings" "sync" "sync/atomic" @@ -36,17 +37,17 @@ type RoundTripOpt struct { OnlyCachedConn bool } -type singleRoundTripper interface { +type clientConn interface { OpenRequestStream(context.Context) (RequestStream, error) RoundTrip(*http.Request) (*http.Response, error) } type roundTripperWithCount struct { - cancel context.CancelFunc - dialing chan struct{} // closed as soon as quic.Dial(Early) returned - dialErr error - conn quic.EarlyConnection - rt singleRoundTripper + cancel context.CancelFunc + dialing chan struct{} // closed as soon as quic.Dial(Early) returned + dialErr error + conn quic.EarlyConnection + clientConn clientConn useCount atomic.Int64 } @@ -106,7 +107,7 @@ type Transport struct { initOnce sync.Once initErr error - newClient func(quic.EarlyConnection) singleRoundTripper + newClientConn func(quic.EarlyConnection) clientConn clients map[string]*roundTripperWithCount transport *quic.Transport @@ -124,8 +125,8 @@ type RoundTripper = Transport var ErrNoCachedConn = errors.New("http3: no cached connection was available") func (t *Transport) init() error { - if t.newClient == nil { - t.newClient = func(conn quic.EarlyConnection) singleRoundTripper { + if t.newClientConn == nil { + t.newClientConn = func(conn quic.EarlyConnection) clientConn { return newClientConn( conn, t.EnableDatagrams, @@ -160,27 +161,37 @@ func (t *Transport) init() error { // RoundTripOpt is like RoundTrip, but takes options. 
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + rsp, err := t.roundTripOpt(req, opt) + if err != nil { + if req.Body != nil { + req.Body.Close() + } + return nil, err + } + return rsp, nil +} + +func (t *Transport) roundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { t.initOnce.Do(func() { t.initErr = t.init() }) if t.initErr != nil { return nil, t.initErr } if req.URL == nil { - closeRequestBody(req) return nil, errors.New("http3: nil Request.URL") } if req.URL.Scheme != "https" { - closeRequestBody(req) return nil, fmt.Errorf("http3: unsupported protocol scheme: %s", req.URL.Scheme) } if req.URL.Host == "" { - closeRequestBody(req) return nil, errors.New("http3: no Host in request URL") } if req.Header == nil { - closeRequestBody(req) return nil, errors.New("http3: nil Request.Header") } + if req.Method != "" && !validMethod(req.Method) { + return nil, fmt.Errorf("http3: invalid method %q", req.Method) + } for k, vv := range req.Header { if !httpguts.ValidHeaderFieldName(k) { return nil, fmt.Errorf("http3: invalid http header field name %q", k) @@ -192,12 +203,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } - if req.Method != "" && !validMethod(req.Method) { - closeRequestBody(req) - return nil, fmt.Errorf("http3: invalid method %q", req.Method) - } - + trace := httptrace.ContextClientTrace(req.Context()) hostname := authorityAddr(hostnameFromURL(req.URL)) + traceGetConn(trace, hostname) cl, isReused, err := t.getClient(req.Context(), hostname, opt.OnlyCachedConn) if err != nil { return nil, err @@ -213,23 +221,36 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.removeClient(hostname) return nil, cl.dialErr } + traceGotConn(trace, cl.conn, isReused) defer cl.useCount.Add(-1) - rsp, err := cl.rt.RoundTrip(req) + rsp, err := cl.clientConn.RoundTrip(req) if err != nil { - // non-nil errors on roundtrip are likely due to a problem with the connection - // so we remove the client from the cache so that subsequent trips reconnect - // context cancelation is excluded as is does not signify a connection error - if !errors.Is(err, context.Canceled) { - t.removeClient(hostname) + // request aborted due to context cancellation + select { + case <-req.Context().Done(): + return nil, err + default: } - if isReused { - if nerr, ok := err.(net.Error); ok && nerr.Timeout() { - return t.RoundTripOpt(req, opt) + // Retry the request on a new connection if: + // 1. it was sent on a reused connection, + // 2. this connection is now closed, + // 3. and the error is a timeout error. + select { + case <-cl.conn.Context().Done(): + t.removeClient(hostname) + if isReused { + var nerr net.Error + if errors.As(err, &nerr) && nerr.Timeout() { + return t.RoundTripOpt(req, opt) + } } + return nil, err + default: + return nil, err } } - return rsp, err + return rsp, nil } // RoundTrip does a round trip. 
@@ -264,7 +285,7 @@ func (t *Transport) getClient(ctx context.Context, hostname string, onlyCached b return } cl.conn = conn - cl.rt = rt + cl.clientConn = rt }() t.clients[hostname] = cl } @@ -285,7 +306,7 @@ func (t *Transport) getClient(ctx context.Context, hostname string, onlyCached b return cl, isReused, nil } -func (t *Transport) dial(ctx context.Context, hostname string) (quic.EarlyConnection, singleRoundTripper, error) { +func (t *Transport) dial(ctx context.Context, hostname string) (quic.EarlyConnection, clientConn, error) { var tlsConf *tls.Config if t.TLSClientConfig == nil { tlsConf = &tls.Config{} @@ -313,19 +334,48 @@ func (t *Transport) dial(ctx context.Context, hostname string) (quic.EarlyConnec t.transport = &quic.Transport{Conn: udpConn} } dial = func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) { - udpAddr, err := net.ResolveUDPAddr("udp", addr) + network := "udp" + udpAddr, err := t.resolveUDPAddr(ctx, network, addr) if err != nil { return nil, err } - return t.transport.DialEarly(ctx, udpAddr, tlsCfg, cfg) + trace := httptrace.ContextClientTrace(ctx) + traceConnectStart(trace, network, udpAddr.String()) + traceTLSHandshakeStart(trace) + conn, err := t.transport.DialEarly(ctx, udpAddr, tlsCfg, cfg) + var state tls.ConnectionState + if conn != nil { + state = conn.ConnectionState().TLS + } + traceTLSHandshakeDone(trace, state, err) + traceConnectDone(trace, network, udpAddr.String(), err) + return conn, err } } - conn, err := dial(ctx, hostname, tlsConf, t.QUICConfig) if err != nil { return nil, nil, err } - return conn, t.newClient(conn), nil + return conn, t.newClientConn(conn), nil +} + +func (t *Transport) resolveUDPAddr(ctx context.Context, network, addr string) (*net.UDPAddr, error) { + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := net.LookupPort(network, portStr) + if err != nil { + return nil, err + } + resolver := net.DefaultResolver + ipAddrs, err := resolver.LookupIPAddr(ctx, host) + if err != nil { + return nil, err + } + addrs := addrList(ipAddrs) + ip := addrs.forResolve(network, addr) + return &net.UDPAddr{IP: ip.IP, Port: port, Zone: ip.Zone}, nil } func (t *Transport) removeClient(hostname string) { @@ -378,12 +428,6 @@ func (t *Transport) Close() error { return nil } -func closeRequestBody(req *http.Request) { - if req.Body != nil { - req.Body.Close() - } -} - func validMethod(method string) bool { /* Method = "OPTIONS" ; Section 9.2 diff --git a/vendor/github.com/quic-go/quic-go/interface.go b/vendor/github.com/quic-go/quic-go/interface.go index 2071b596..7f3c40c2 100644 --- a/vendor/github.com/quic-go/quic-go/interface.go +++ b/vendor/github.com/quic-go/quic-go/interface.go @@ -98,7 +98,6 @@ type ReceiveStream interface { // SetReadDeadline sets the deadline for future Read calls and // any currently-blocked Read call. // A zero value for t means Read will not time out. - SetReadDeadline(t time.Time) error } @@ -357,10 +356,10 @@ type ClientHelloInfo struct { type ConnectionState struct { // TLS contains information about the TLS connection state, incl. the tls.ConnectionState. TLS tls.ConnectionState - // SupportsDatagrams says if support for QUIC datagrams (RFC 9221) was negotiated. - // This requires both nodes to support and enable the datagram extensions (via Config.EnableDatagrams). - // If datagram support was negotiated, datagrams can be sent and received using the - // SendDatagram and ReceiveDatagram methods on the Connection. 
+ // SupportsDatagrams indicates whether the peer advertised support for QUIC datagrams (RFC 9221). + // When true, datagrams can be sent using the Connection's SendDatagram method. + // This is a unilateral declaration by the peer - receiving datagrams is only possible if + // datagram support was enabled locally via Config.EnableDatagrams. SupportsDatagrams bool // Used0RTT says if 0-RTT resumption was used. Used0RTT bool diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go index ba8cbbda..acf95426 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go @@ -14,10 +14,9 @@ type SentPacketHandler interface { // ReceivedAck processes an ACK frame. // It does not store a copy of the frame. ReceivedAck(f *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) (bool /* 1-RTT packet acked */, error) - ReceivedBytes(protocol.ByteCount) - DropPackets(protocol.EncryptionLevel) - ResetForRetry(rcvTime time.Time) error - SetHandshakeConfirmed() + ReceivedBytes(_ protocol.ByteCount, rcvTime time.Time) + DropPackets(_ protocol.EncryptionLevel, rcvTime time.Time) + ResetForRetry(rcvTime time.Time) // The SendMode determines if and what kind of packets can be sent. SendMode(now time.Time) SendMode @@ -34,12 +33,12 @@ type SentPacketHandler interface { PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber GetLossDetectionTimeout() time.Time - OnLossDetectionTimeout() error + OnLossDetectionTimeout(now time.Time) error } type sentPacketTracker interface { GetLowestPacketNotConfirmedAcked() protocol.PacketNumber - ReceivedPacket(protocol.EncryptionLevel) + ReceivedPacket(_ protocol.EncryptionLevel, rcvTime time.Time) } // ReceivedPacketHandler handles ACKs needed to send for incoming packets @@ -49,5 +48,5 @@ type ReceivedPacketHandler interface { DropPackets(protocol.EncryptionLevel) GetAlarmTimeout() time.Time - GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame + GetAckFrame(_ protocol.EncryptionLevel, now time.Time, onlyIfQueued bool) *wire.AckFrame } diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go index 1175c790..eda0826c 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go @@ -38,7 +38,7 @@ func (h *receivedPacketHandler) ReceivedPacket( rcvTime time.Time, ackEliciting bool, ) error { - h.sentPackets.ReceivedPacket(encLevel) + h.sentPackets.ReceivedPacket(encLevel, rcvTime) switch encLevel { case protocol.EncryptionInitial: return h.initialPackets.ReceivedPacket(pn, ecn, rcvTime, ackEliciting) @@ -87,7 +87,7 @@ func (h *receivedPacketHandler) GetAlarmTimeout() time.Time { return h.appDataPackets.GetAlarmTimeout() } -func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame { +func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, now time.Time, onlyIfQueued bool) *wire.AckFrame { //nolint:exhaustive // 0-RTT packets can't contain ACK frames. 
switch encLevel { case protocol.EncryptionInitial: @@ -101,7 +101,7 @@ func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel, o } return nil case protocol.Encryption1RTT: - return h.appDataPackets.GetAckFrame(onlyIfQueued) + return h.appDataPackets.GetAckFrame(now, onlyIfQueued) default: // 0-RTT packets can't contain ACK frames return nil diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go index 08af6f1e..d1d26f4a 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go @@ -196,8 +196,7 @@ func (h *appDataReceivedPacketTracker) shouldQueueACK(pn protocol.PacketNumber, return false } -func (h *appDataReceivedPacketTracker) GetAckFrame(onlyIfQueued bool) *wire.AckFrame { - now := time.Now() +func (h *appDataReceivedPacketTracker) GetAckFrame(now time.Time, onlyIfQueued bool) *wire.AckFrame { if onlyIfQueued && !h.ackQueued { if h.ackAlarm.IsZero() || h.ackAlarm.After(now) { return nil diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go index b84f0dcb..5276fe19 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go @@ -53,6 +53,12 @@ func newPacketNumberSpace(initialPN protocol.PacketNumber, isAppData bool) *pack } } +type alarmTimer struct { + Time time.Time + TimerType logging.TimerType + EncryptionLevel protocol.EncryptionLevel +} + type sentPacketHandler struct { initialPackets *packetNumberSpace handshakePackets *packetNumberSpace @@ -90,7 +96,7 @@ type sentPacketHandler struct { numProbesToSend int // The alarm timeout - alarm time.Time + alarm alarmTimer enableECN bool ecnTracker ecnHandler @@ -155,7 +161,7 @@ func (h *sentPacketHandler) removeFromBytesInFlight(p *packet) { } } -func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { +func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel, now time.Time) { // The server won't await address validation after the handshake is confirmed. // This applies even if we didn't receive an ACK for a Handshake packet. if h.perspective == protocol.PerspectiveClient && encLevel == protocol.EncryptionHandshake { @@ -179,6 +185,9 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { case protocol.EncryptionInitial: h.initialPackets = nil case protocol.EncryptionHandshake: + // Dropping the handshake packet number space means that the handshake is confirmed, + // see section 4.9.2 of RFC 9001. 
+ h.handshakeConfirmed = true h.handshakePackets = nil case protocol.Encryption0RTT: // This function is only called when 0-RTT is rejected, @@ -202,21 +211,21 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel) { h.ptoCount = 0 h.numProbesToSend = 0 h.ptoMode = SendNone - h.setLossDetectionTimer() + h.setLossDetectionTimer(now) } -func (h *sentPacketHandler) ReceivedBytes(n protocol.ByteCount) { +func (h *sentPacketHandler) ReceivedBytes(n protocol.ByteCount, t time.Time) { wasAmplificationLimit := h.isAmplificationLimited() h.bytesReceived += n if wasAmplificationLimit && !h.isAmplificationLimited() { - h.setLossDetectionTimer() + h.setLossDetectionTimer(t) } } -func (h *sentPacketHandler) ReceivedPacket(l protocol.EncryptionLevel) { +func (h *sentPacketHandler) ReceivedPacket(l protocol.EncryptionLevel, t time.Time) { if h.perspective == protocol.PerspectiveServer && l == protocol.EncryptionHandshake && !h.peerAddressValidated { h.peerAddressValidated = true - h.setLossDetectionTimer() + h.setLossDetectionTimer(t) } } @@ -269,7 +278,7 @@ func (h *sentPacketHandler) SentPacket( if !isAckEliciting { pnSpace.history.SentNonAckElicitingPacket(pn) if !h.peerCompletedAddressValidation { - h.setLossDetectionTimer() + h.setLossDetectionTimer(t) } return } @@ -289,7 +298,7 @@ func (h *sentPacketHandler) SentPacket( if h.tracer != nil && h.tracer.UpdatedMetrics != nil { h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight()) } - h.setLossDetectionTimer() + h.setLossDetectionTimer(t) } func (h *sentPacketHandler) getPacketNumberSpace(encLevel protocol.EncryptionLevel) *packetNumberSpace { @@ -322,7 +331,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En h.peerCompletedAddressValidation = true h.logger.Debugf("Peer doesn't await address validation any longer.") // Make sure that the timer is reset, even if this ACK doesn't acknowledge any (ack-eliciting) packets. - h.setLossDetectionTimer() + h.setLossDetectionTimer(rcvTime) } priorInFlight := h.bytesInFlight @@ -338,7 +347,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En if encLevel == protocol.Encryption1RTT { ackDelay = min(ack.DelayTime, h.rttStats.MaxAckDelay()) } - h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay, rcvTime) + h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackDelay) if h.logger.Debug() { h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation()) } @@ -387,7 +396,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En h.tracer.UpdatedMetrics(h.rttStats, h.congestion.GetCongestionWindow(), h.bytesInFlight, h.packetsInFlight()) } - h.setLossDetectionTimer() + h.setLossDetectionTimer(rcvTime) return acked1RTTPacket, nil } @@ -498,14 +507,14 @@ func (h *sentPacketHandler) getScaledPTO(includeMaxAckDelay bool) time.Duration } // same logic as getLossTimeAndSpace, but for lastAckElicitingPacketTime instead of lossTime -func (h *sentPacketHandler) getPTOTimeAndSpace() (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) { +func (h *sentPacketHandler) getPTOTimeAndSpace(now time.Time) (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) { // We only send application data probe packets once the handshake is confirmed, // because before that, we don't have the keys to decrypt ACKs sent in 1-RTT packets. 
if !h.handshakeConfirmed && !h.hasOutstandingCryptoPackets() { if h.peerCompletedAddressValidation { return } - t := time.Now().Add(h.getScaledPTO(false)) + t := now.Add(h.getScaledPTO(false)) if h.initialPackets != nil { return t, protocol.EncryptionInitial, true } @@ -545,61 +554,53 @@ func (h *sentPacketHandler) hasOutstandingCryptoPackets() bool { return false } -func (h *sentPacketHandler) hasOutstandingPackets() bool { - return h.appDataPackets.history.HasOutstandingPackets() || h.hasOutstandingCryptoPackets() +func (h *sentPacketHandler) setLossDetectionTimer(now time.Time) { + oldAlarm := h.alarm // only needed in case tracing is enabled + newAlarm := h.lossDetectionTime(now) + h.alarm = newAlarm + + if newAlarm.Time.IsZero() && !oldAlarm.Time.IsZero() { + h.logger.Debugf("Canceling loss detection timer.") + if h.tracer != nil && h.tracer.LossTimerCanceled != nil { + h.tracer.LossTimerCanceled() + } + } + + if h.tracer != nil && h.tracer.SetLossTimer != nil && newAlarm != oldAlarm { + h.tracer.SetLossTimer(newAlarm.TimerType, newAlarm.EncryptionLevel, newAlarm.Time) + } } -func (h *sentPacketHandler) setLossDetectionTimer() { - oldAlarm := h.alarm // only needed in case tracing is enabled +func (h *sentPacketHandler) lossDetectionTime(now time.Time) alarmTimer { + // cancel the alarm if no packets are outstanding + if h.peerCompletedAddressValidation && + !h.hasOutstandingCryptoPackets() && !h.appDataPackets.history.HasOutstandingPackets() { + return alarmTimer{} + } + + // cancel the alarm if amplification limited + if h.isAmplificationLimited() { + return alarmTimer{} + } + + // early retransmit timer or time loss detection lossTime, encLevel := h.getLossTimeAndSpace() if !lossTime.IsZero() { - // Early retransmit timer or time loss detection. - h.alarm = lossTime - if h.tracer != nil && h.tracer.SetLossTimer != nil && h.alarm != oldAlarm { - h.tracer.SetLossTimer(logging.TimerTypeACK, encLevel, h.alarm) + return alarmTimer{ + Time: lossTime, + TimerType: logging.TimerTypeACK, + EncryptionLevel: encLevel, } - return } - // Cancel the alarm if amplification limited. - if h.isAmplificationLimited() { - h.alarm = time.Time{} - if !oldAlarm.IsZero() { - h.logger.Debugf("Canceling loss detection timer. Amplification limited.") - if h.tracer != nil && h.tracer.LossTimerCanceled != nil { - h.tracer.LossTimerCanceled() - } - } - return - } - - // Cancel the alarm if no packets are outstanding - if !h.hasOutstandingPackets() && h.peerCompletedAddressValidation { - h.alarm = time.Time{} - if !oldAlarm.IsZero() { - h.logger.Debugf("Canceling loss detection timer. No packets in flight.") - if h.tracer != nil && h.tracer.LossTimerCanceled != nil { - h.tracer.LossTimerCanceled() - } - } - return - } - - // PTO alarm - ptoTime, encLevel, ok := h.getPTOTimeAndSpace() + ptoTime, encLevel, ok := h.getPTOTimeAndSpace(now) if !ok { - if !oldAlarm.IsZero() { - h.alarm = time.Time{} - h.logger.Debugf("Canceling loss detection timer. 
No PTO needed..") - if h.tracer != nil && h.tracer.LossTimerCanceled != nil { - h.tracer.LossTimerCanceled() - } - } - return + return alarmTimer{} } - h.alarm = ptoTime - if h.tracer != nil && h.tracer.SetLossTimer != nil && h.alarm != oldAlarm { - h.tracer.SetLossTimer(logging.TimerTypePTO, encLevel, h.alarm) + return alarmTimer{ + Time: ptoTime, + TimerType: logging.TimerTypePTO, + EncryptionLevel: encLevel, } } @@ -623,7 +624,7 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E } var packetLost bool - if p.SendTime.Before(lostSendTime) { + if !p.SendTime.After(lostSendTime) { packetLost = true if !p.skippedPacket { if h.logger.Debug() { @@ -669,8 +670,8 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E }) } -func (h *sentPacketHandler) OnLossDetectionTimeout() error { - defer h.setLossDetectionTimer() +func (h *sentPacketHandler) OnLossDetectionTimeout(now time.Time) error { + defer h.setLossDetectionTimer(now) earliestLossTime, encLevel := h.getLossTimeAndSpace() if !earliestLossTime.IsZero() { if h.logger.Debug() { @@ -680,13 +681,13 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error { h.tracer.LossTimerExpired(logging.TimerTypeACK, encLevel) } // Early retransmit or time loss detection - return h.detectLostPackets(time.Now(), encLevel) + return h.detectLostPackets(now, encLevel) } // PTO - // When all outstanding are acknowledged, the alarm is canceled in - // setLossDetectionTimer. This doesn't reset the timer in the session though. - // When OnAlarm is called, we therefore need to make sure that there are + // When all outstanding are acknowledged, the alarm is canceled in setLossDetectionTimer. + // However, there's no way to reset the timer in the connection. + // When OnLossDetectionTimeout is called, we therefore need to make sure that there are // actually packets outstanding. if h.bytesInFlight == 0 && !h.peerCompletedAddressValidation { h.ptoCount++ @@ -701,7 +702,7 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error { return nil } - _, encLevel, ok := h.getPTOTimeAndSpace() + _, encLevel, ok := h.getPTOTimeAndSpace(now) if !ok { return nil } @@ -739,7 +740,7 @@ func (h *sentPacketHandler) OnLossDetectionTimeout() error { } func (h *sentPacketHandler) GetLossDetectionTimeout() time.Time { - return h.alarm + return h.alarm.Time } func (h *sentPacketHandler) ECNMode(isShortHeaderPacket bool) protocol.ECN { @@ -864,7 +865,7 @@ func (h *sentPacketHandler) queueFramesForRetransmission(p *packet) { p.Frames = nil } -func (h *sentPacketHandler) ResetForRetry(now time.Time) error { +func (h *sentPacketHandler) ResetForRetry(now time.Time) { h.bytesInFlight = 0 var firstPacketSendTime time.Time h.initialPackets.history.Iterate(func(p *packet) (bool, error) { @@ -890,7 +891,7 @@ func (h *sentPacketHandler) ResetForRetry(now time.Time) error { // Otherwise, we don't know which Initial the Retry was sent in response to. if h.ptoCount == 0 { // Don't set the RTT to a value lower than 5ms here. 
- h.rttStats.UpdateRTT(max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0, now) + h.rttStats.UpdateRTT(max(minRTTAfterRetry, now.Sub(firstPacketSendTime)), 0) if h.logger.Debug() { h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation()) } @@ -901,28 +902,14 @@ func (h *sentPacketHandler) ResetForRetry(now time.Time) error { h.initialPackets = newPacketNumberSpace(h.initialPackets.pns.Peek(), false) h.appDataPackets = newPacketNumberSpace(h.appDataPackets.pns.Peek(), true) oldAlarm := h.alarm - h.alarm = time.Time{} + h.alarm = alarmTimer{} if h.tracer != nil { if h.tracer.UpdatedPTOCount != nil { h.tracer.UpdatedPTOCount(0) } - if !oldAlarm.IsZero() && h.tracer.LossTimerCanceled != nil { + if !oldAlarm.Time.IsZero() && h.tracer.LossTimerCanceled != nil { h.tracer.LossTimerCanceled() } } h.ptoCount = 0 - return nil -} - -func (h *sentPacketHandler) SetHandshakeConfirmed() { - if h.initialPackets != nil { - panic("didn't drop initial correctly") - } - if h.handshakePackets != nil { - panic("didn't drop handshake correctly") - } - h.handshakeConfirmed = true - // We don't send PTOs for application data packets before the handshake completes. - // Make sure the timer is armed now, if necessary. - h.setLossDetectionTimer() } diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go index 3d88d577..950e5f72 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go @@ -36,7 +36,7 @@ type baseFlowController struct { // For every offset, it only returns true once. // If it is blocked, the offset is returned. func (c *baseFlowController) IsNewlyBlocked() (bool, protocol.ByteCount) { - if c.sendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt { + if c.SendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt { return false, 0 } c.lastBlockedAt = c.sendWindow @@ -56,7 +56,7 @@ func (c *baseFlowController) UpdateSendWindow(offset protocol.ByteCount) (update return false } -func (c *baseFlowController) sendWindowSize() protocol.ByteCount { +func (c *baseFlowController) SendWindowSize() protocol.ByteCount { // this only happens during connection establishment, when data is sent before we receive the peer's transport parameters if c.bytesSent > c.sendWindow { return 0 @@ -66,11 +66,6 @@ func (c *baseFlowController) sendWindowSize() protocol.ByteCount { // needs to be called with locked mutex func (c *baseFlowController) addBytesRead(n protocol.ByteCount) { - // pretend we sent a WindowUpdate when reading the first byte - // this way auto-tuning of the window size already works for the first WindowUpdate - if c.bytesRead == 0 { - c.startNewAutoTuningEpoch(time.Now()) - } c.bytesRead += n } @@ -82,19 +77,19 @@ func (c *baseFlowController) hasWindowUpdate() bool { // getWindowUpdate updates the receive window, if necessary // it returns the new offset -func (c *baseFlowController) getWindowUpdate() protocol.ByteCount { +func (c *baseFlowController) getWindowUpdate(now time.Time) protocol.ByteCount { if !c.hasWindowUpdate() { return 0 } - c.maybeAdjustWindowSize() + c.maybeAdjustWindowSize(now) c.receiveWindow = c.bytesRead + c.receiveWindowSize return c.receiveWindow } // maybeAdjustWindowSize increases the receiveWindowSize if we're sending updates too often. 
// For details about auto-tuning, see https://docs.google.com/document/d/1SExkMmGiz8VYzV3s9E35JQlJ73vhzCekKkDi85F1qCE/edit?usp=sharing. -func (c *baseFlowController) maybeAdjustWindowSize() { +func (c *baseFlowController) maybeAdjustWindowSize(now time.Time) { bytesReadInEpoch := c.bytesRead - c.epochStartOffset // don't do anything if less than half the window has been consumed if bytesReadInEpoch <= c.receiveWindowSize/2 { @@ -106,7 +101,6 @@ func (c *baseFlowController) maybeAdjustWindowSize() { } fraction := float64(bytesReadInEpoch) / float64(c.receiveWindowSize) - now := time.Now() if now.Sub(c.epochStartTime) < time.Duration(4*fraction*float64(rtt)) { // window is consumed too fast, try to increase the window size newSize := min(2*c.receiveWindowSize, c.maxReceiveWindowSize) diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go index 2efcad74..bbeb7889 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go @@ -24,7 +24,7 @@ func NewConnectionFlowController( allowWindowIncrease func(size protocol.ByteCount) bool, rttStats *utils.RTTStats, logger utils.Logger, -) ConnectionFlowController { +) *connectionFlowController { return &connectionFlowController{ baseFlowController: baseFlowController{ rttStats: rttStats, @@ -37,16 +37,17 @@ func NewConnectionFlowController( } } -func (c *connectionFlowController) SendWindowSize() protocol.ByteCount { - return c.baseFlowController.sendWindowSize() -} - // IncrementHighestReceived adds an increment to the highestReceived value -func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount) error { +func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount, now time.Time) error { c.mutex.Lock() defer c.mutex.Unlock() + // If this is the first frame received on this connection, start flow-control auto-tuning. 
+ if c.highestReceived == 0 { + c.startNewAutoTuningEpoch(now) + } c.highestReceived += increment + if c.checkFlowControlViolation() { return &qerr.TransportError{ ErrorCode: qerr.FlowControlError, @@ -56,40 +57,47 @@ func (c *connectionFlowController) IncrementHighestReceived(increment protocol.B return nil } -func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) { +func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) (hasWindowUpdate bool) { c.mutex.Lock() + defer c.mutex.Unlock() + c.baseFlowController.addBytesRead(n) - c.mutex.Unlock() + return c.baseFlowController.hasWindowUpdate() } -func (c *connectionFlowController) GetWindowUpdate() protocol.ByteCount { +func (c *connectionFlowController) GetWindowUpdate(now time.Time) protocol.ByteCount { c.mutex.Lock() + defer c.mutex.Unlock() + oldWindowSize := c.receiveWindowSize - offset := c.baseFlowController.getWindowUpdate() + offset := c.baseFlowController.getWindowUpdate(now) if c.logger.Debug() && oldWindowSize < c.receiveWindowSize { c.logger.Debugf("Increasing receive flow control window for the connection to %d kB", c.receiveWindowSize/(1<<10)) } - c.mutex.Unlock() return offset } // EnsureMinimumWindowSize sets a minimum window size // it should make sure that the connection-level window is increased when a stream-level window grows -func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount) { +func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount, now time.Time) { c.mutex.Lock() - if inc > c.receiveWindowSize { - c.logger.Debugf("Increasing receive flow control window for the connection to %d kB, in response to stream flow control window increase", c.receiveWindowSize/(1<<10)) - newSize := min(inc, c.maxReceiveWindowSize) - if delta := newSize - c.receiveWindowSize; delta > 0 && c.allowWindowIncrease(delta) { - c.receiveWindowSize = newSize - } - c.startNewAutoTuningEpoch(time.Now()) + defer c.mutex.Unlock() + + if inc <= c.receiveWindowSize { + return } - c.mutex.Unlock() + newSize := min(inc, c.maxReceiveWindowSize) + if delta := newSize - c.receiveWindowSize; delta > 0 && c.allowWindowIncrease(delta) { + c.receiveWindowSize = newSize + if c.logger.Debug() { + c.logger.Debugf("Increasing receive flow control window for the connection to %d, in response to stream flow control window increase", newSize) + } + } + c.startNewAutoTuningEpoch(now) } // Reset rests the flow controller. This happens when 0-RTT is rejected. -// All stream data is invalidated, it's if we had never opened a stream and never sent any data. +// All stream data is invalidated, it's as if we had never opened a stream and never sent any data. // At that point, we only have sent stream data, but we didn't have the keys to open 1-RTT keys yet. 
func (c *connectionFlowController) Reset() error { c.mutex.Lock() @@ -100,5 +108,6 @@ func (c *connectionFlowController) Reset() error { } c.bytesSent = 0 c.lastBlockedAt = 0 + c.sendWindow = 0 return nil } diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go index 57d12a95..23cf30c5 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go @@ -1,6 +1,10 @@ package flowcontrol -import "github.com/quic-go/quic-go/internal/protocol" +import ( + "time" + + "github.com/quic-go/quic-go/internal/protocol" +) type flowController interface { // for sending @@ -8,17 +12,17 @@ type flowController interface { UpdateSendWindow(protocol.ByteCount) (updated bool) AddBytesSent(protocol.ByteCount) // for receiving - GetWindowUpdate() protocol.ByteCount // returns 0 if no update is necessary + GetWindowUpdate(time.Time) protocol.ByteCount // returns 0 if no update is necessary } // A StreamFlowController is a flow controller for a QUIC stream. type StreamFlowController interface { flowController - AddBytesRead(protocol.ByteCount) (shouldQueueWindowUpdate bool) + AddBytesRead(protocol.ByteCount) (hasStreamWindowUpdate, hasConnWindowUpdate bool) // UpdateHighestReceived is called when a new highest offset is received // final has to be to true if this is the final offset of the stream, // as contained in a STREAM frame with FIN bit, and the RESET_STREAM frame - UpdateHighestReceived(offset protocol.ByteCount, final bool) error + UpdateHighestReceived(offset protocol.ByteCount, final bool, now time.Time) error // Abandon is called when reading from the stream is aborted early, // and there won't be any further calls to AddBytesRead. Abandon() @@ -28,7 +32,7 @@ type StreamFlowController interface { // The ConnectionFlowController is the flow controller for the connection. type ConnectionFlowController interface { flowController - AddBytesRead(protocol.ByteCount) + AddBytesRead(protocol.ByteCount) (hasWindowUpdate bool) Reset() error IsNewlyBlocked() (bool, protocol.ByteCount) } @@ -37,7 +41,7 @@ type connectionFlowControllerI interface { ConnectionFlowController // The following two methods are not supposed to be called from outside this packet, but are needed internally // for sending - EnsureMinimumWindowSize(protocol.ByteCount) + EnsureMinimumWindowSize(protocol.ByteCount, time.Time) // for receiving - IncrementHighestReceived(protocol.ByteCount) error + IncrementHighestReceived(protocol.ByteCount, time.Time) error } diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go index 2d58351c..ba005122 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go @@ -2,6 +2,7 @@ package flowcontrol import ( "fmt" + "time" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" @@ -45,7 +46,7 @@ func NewStreamFlowController( } // UpdateHighestReceived updates the highestReceived value, if the offset is higher. 
-func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, final bool) error { +func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, final bool, now time.Time) error { // If the final offset for this stream is already known, check for consistency. if c.receivedFinalOffset { // If we receive another final offset, check that it's the same. @@ -70,9 +71,8 @@ func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, if offset == c.highestReceived { return nil } - // A higher offset was received before. - // This can happen due to reordering. - if offset <= c.highestReceived { + // A higher offset was received before. This can happen due to reordering. + if offset < c.highestReceived { if final { return &qerr.TransportError{ ErrorCode: qerr.FinalSizeError, @@ -82,23 +82,28 @@ func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, return nil } + // If this is the first frame received for this stream, start flow-control auto-tuning. + if c.highestReceived == 0 { + c.startNewAutoTuningEpoch(now) + } increment := offset - c.highestReceived c.highestReceived = offset + if c.checkFlowControlViolation() { return &qerr.TransportError{ ErrorCode: qerr.FlowControlError, ErrorMessage: fmt.Sprintf("received %d bytes on stream %d, allowed %d bytes", offset, c.streamID, c.receiveWindow), } } - return c.connection.IncrementHighestReceived(increment) + return c.connection.IncrementHighestReceived(increment, now) } -func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) (shouldQueueWindowUpdate bool) { +func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) (hasStreamWindowUpdate, hasConnWindowUpdate bool) { c.mutex.Lock() c.baseFlowController.addBytesRead(n) - shouldQueueWindowUpdate = c.shouldQueueWindowUpdate() + hasStreamWindowUpdate = c.shouldQueueWindowUpdate() c.mutex.Unlock() - c.connection.AddBytesRead(n) + hasConnWindowUpdate = c.connection.AddBytesRead(n) return } @@ -118,7 +123,7 @@ func (c *streamFlowController) AddBytesSent(n protocol.ByteCount) { } func (c *streamFlowController) SendWindowSize() protocol.ByteCount { - return min(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize()) + return min(c.baseFlowController.SendWindowSize(), c.connection.SendWindowSize()) } func (c *streamFlowController) IsNewlyBlocked() bool { @@ -130,20 +135,20 @@ func (c *streamFlowController) shouldQueueWindowUpdate() bool { return !c.receivedFinalOffset && c.hasWindowUpdate() } -func (c *streamFlowController) GetWindowUpdate() protocol.ByteCount { +func (c *streamFlowController) GetWindowUpdate(now time.Time) protocol.ByteCount { // If we already received the final offset for this stream, the peer won't need any additional flow control credit. 
if c.receivedFinalOffset { return 0 } - // Don't use defer for unlocking the mutex here, GetWindowUpdate() is called frequently and defer shows up in the profiler c.mutex.Lock() + defer c.mutex.Unlock() + oldWindowSize := c.receiveWindowSize - offset := c.baseFlowController.getWindowUpdate() + offset := c.baseFlowController.getWindowUpdate(now) if c.receiveWindowSize > oldWindowSize { // auto-tuning enlarged the window size - c.logger.Debugf("Increasing receive flow control window for stream %d to %d kB", c.streamID, c.receiveWindowSize/(1<<10)) - c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize) * protocol.ConnectionFlowControlMultiplier)) + c.logger.Debugf("Increasing receive flow control window for stream %d to %d", c.streamID, c.receiveWindowSize) + c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize)*protocol.ConnectionFlowControlMultiplier), now) } - c.mutex.Unlock() return offset } diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go index 30643cdf..27a09e22 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go @@ -10,16 +10,13 @@ import ( "github.com/quic-go/quic-go/internal/protocol" ) +// Instead of using an init function, the AEADs are created lazily. +// For more details see https://github.com/quic-go/quic-go/issues/4894. var ( retryAEADv1 cipher.AEAD // used for QUIC v1 (RFC 9000) retryAEADv2 cipher.AEAD // used for QUIC v2 (RFC 9369) ) -func init() { - retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) - retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92}) -} - func initAEAD(key [16]byte) cipher.AEAD { aes, err := aes.NewCipher(key[:]) if err != nil { @@ -52,8 +49,14 @@ func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, ve var tag [16]byte var sealed []byte if version == protocol.Version2 { + if retryAEADv2 == nil { + retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92}) + } sealed = retryAEADv2.Seal(tag[:0], retryNonceV2[:], nil, retryBuf.Bytes()) } else { + if retryAEADv1 == nil { + retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) + } sealed = retryAEADv1.Seal(tag[:0], retryNonceV1[:], nil, retryBuf.Bytes()) } if len(sealed) != 16 { diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/params.go b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go index 7c4d8d4d..f0aa3ad9 100644 --- a/vendor/github.com/quic-go/quic-go/internal/protocol/params.go +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go @@ -102,10 +102,6 @@ const DefaultIdleTimeout = 30 * time.Second // DefaultHandshakeIdleTimeout is the default idle timeout used before handshake completion. const DefaultHandshakeIdleTimeout = 5 * time.Second -// MaxKeepAliveInterval is the maximum time until we send a packet to keep a connection alive. -// It should be shorter than the time that NATs clear their mapping. -const MaxKeepAliveInterval = 20 * time.Second - // RetiredConnectionIDDeleteTimeout is the time we keep closed connections around in order to retransmit the CONNECTION_CLOSE. 
// after this time all information about the old connection will be deleted const RetiredConnectionIDDeleteTimeout = 5 * time.Second diff --git a/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go index 8f5936df..7fe1c293 100644 --- a/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go +++ b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go @@ -48,21 +48,16 @@ func (e *TransportError) Error() string { return str + ": " + msg } -func (e *TransportError) Is(target error) bool { - return target == net.ErrClosed -} +func (e *TransportError) Unwrap() []error { return []error{net.ErrClosed, e.error} } -func (e *TransportError) Unwrap() error { - return e.error +func (e *TransportError) Is(target error) bool { + t, ok := target.(*TransportError) + return ok && e.ErrorCode == t.ErrorCode && e.FrameType == t.FrameType && e.Remote == t.Remote } // An ApplicationErrorCode is an application-defined error code. type ApplicationErrorCode uint64 -func (e *ApplicationError) Is(target error) bool { - return target == net.ErrClosed -} - // A StreamErrorCode is an error code used to cancel streams. type StreamErrorCode uint64 @@ -81,23 +76,30 @@ func (e *ApplicationError) Error() string { return fmt.Sprintf("Application error %#x (%s): %s", e.ErrorCode, getRole(e.Remote), e.ErrorMessage) } +func (e *ApplicationError) Unwrap() error { return net.ErrClosed } + +func (e *ApplicationError) Is(target error) bool { + t, ok := target.(*ApplicationError) + return ok && e.ErrorCode == t.ErrorCode && e.Remote == t.Remote +} + type IdleTimeoutError struct{} var _ error = &IdleTimeoutError{} -func (e *IdleTimeoutError) Timeout() bool { return true } -func (e *IdleTimeoutError) Temporary() bool { return false } -func (e *IdleTimeoutError) Error() string { return "timeout: no recent network activity" } -func (e *IdleTimeoutError) Is(target error) bool { return target == net.ErrClosed } +func (e *IdleTimeoutError) Timeout() bool { return true } +func (e *IdleTimeoutError) Temporary() bool { return false } +func (e *IdleTimeoutError) Error() string { return "timeout: no recent network activity" } +func (e *IdleTimeoutError) Unwrap() error { return net.ErrClosed } type HandshakeTimeoutError struct{} var _ error = &HandshakeTimeoutError{} -func (e *HandshakeTimeoutError) Timeout() bool { return true } -func (e *HandshakeTimeoutError) Temporary() bool { return false } -func (e *HandshakeTimeoutError) Error() string { return "timeout: handshake did not complete in time" } -func (e *HandshakeTimeoutError) Is(target error) bool { return target == net.ErrClosed } +func (e *HandshakeTimeoutError) Timeout() bool { return true } +func (e *HandshakeTimeoutError) Temporary() bool { return false } +func (e *HandshakeTimeoutError) Error() string { return "timeout: handshake did not complete in time" } +func (e *HandshakeTimeoutError) Unwrap() error { return net.ErrClosed } // A VersionNegotiationError occurs when the client and the server can't agree on a QUIC version. type VersionNegotiationError struct { @@ -109,25 +111,18 @@ func (e *VersionNegotiationError) Error() string { return fmt.Sprintf("no compatible QUIC version found (we support %s, server offered %s)", e.Ours, e.Theirs) } -func (e *VersionNegotiationError) Is(target error) bool { - return target == net.ErrClosed -} +func (e *VersionNegotiationError) Unwrap() error { return net.ErrClosed } // A StatelessResetError occurs when we receive a stateless reset. 
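
The qerr changes above replace the blanket Is(net.ErrClosed) implementations with Unwrap methods (returning net.ErrClosed, and for TransportError also the wrapped error), while Is now compares error codes and the Remote flag. A minimal sketch of how a caller might match these errors, assuming the top-level quic package continues to re-export the qerr types as quic.TransportError, quic.ApplicationError, and so on:

package main

import (
	"errors"
	"fmt"
	"net"

	"github.com/quic-go/quic-go"
)

// classify illustrates the matching behaviour after the qerr changes above.
func classify(err error) {
	// TransportError, ApplicationError and the timeout errors now all
	// unwrap to net.ErrClosed, so the generic "connection is gone" check still works.
	if errors.Is(err, net.ErrClosed) {
		fmt.Println("connection closed")
	}

	// Is now compares the error code and the Remote flag, so a specific
	// application close code can be matched by value.
	if errors.Is(err, &quic.ApplicationError{ErrorCode: 0x42, Remote: true}) {
		fmt.Println("peer closed the connection with code 0x42")
	}

	// errors.As still extracts the concrete type for inspection.
	var appErr *quic.ApplicationError
	if errors.As(err, &appErr) {
		fmt.Printf("application error %#x: %s\n", appErr.ErrorCode, appErr.ErrorMessage)
	}
}

func main() {
	classify(&quic.ApplicationError{Remote: true, ErrorCode: 0x42, ErrorMessage: "bye"})
}

The generic net.ErrClosed check keeps working through Unwrap, while specific close codes can now be matched by value rather than by type assertion alone.
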
-type StatelessResetError struct { - Token protocol.StatelessResetToken -} +type StatelessResetError struct{} var _ net.Error = &StatelessResetError{} func (e *StatelessResetError) Error() string { - return fmt.Sprintf("received a stateless reset with token %x", e.Token) -} - -func (e *StatelessResetError) Is(target error) bool { - return target == net.ErrClosed + return "received a stateless reset" } +func (e *StatelessResetError) Unwrap() error { return net.ErrClosed } func (e *StatelessResetError) Timeout() bool { return false } func (e *StatelessResetError) Temporary() bool { return true } diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go index dcfac67d..92fec2e2 100644 --- a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go +++ b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go @@ -58,7 +58,7 @@ func (r *RTTStats) PTO(includeMaxAckDelay bool) time.Duration { } // UpdateRTT updates the RTT based on a new sample. -func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) { +func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration) { if sendDelta <= 0 { return } diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/frame.go new file mode 100644 index 00000000..10d4eebc --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/frame.go @@ -0,0 +1,21 @@ +package wire + +import ( + "github.com/quic-go/quic-go/internal/protocol" +) + +// A Frame in QUIC +type Frame interface { + Append(b []byte, version protocol.Version) ([]byte, error) + Length(version protocol.Version) protocol.ByteCount +} + +// IsProbingFrame returns true if the frame is a probing frame. +// See section 9.1 of RFC 9000. 
+func IsProbingFrame(f Frame) bool { + switch f.(type) { + case *PathChallengeFrame, *PathResponseFrame, *NewConnectionIDFrame: + return true + } + return false +} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/interface.go b/vendor/github.com/quic-go/quic-go/internal/wire/interface.go deleted file mode 100644 index bc17883b..00000000 --- a/vendor/github.com/quic-go/quic-go/internal/wire/interface.go +++ /dev/null @@ -1,11 +0,0 @@ -package wire - -import ( - "github.com/quic-go/quic-go/internal/protocol" -) - -// A Frame in QUIC -type Frame interface { - Append(b []byte, version protocol.Version) ([]byte, error) - Length(version protocol.Version) protocol.ByteCount -} diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go index f9470ecd..cdc32722 100644 --- a/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go @@ -58,7 +58,10 @@ func parseStreamFrame(b []byte, typ uint64, _ protocol.Version) (*StreamFrame, i var frame *StreamFrame if dataLen < protocol.MinStreamFrameBufferSize { - frame = &StreamFrame{Data: make([]byte, dataLen)} + frame = &StreamFrame{} + if dataLen > 0 { + frame.Data = make([]byte, dataLen) + } } else { frame = GetStreamFrame() // The STREAM frame can't be larger than the StreamFrame we obtained from the buffer, @@ -74,7 +77,7 @@ func parseStreamFrame(b []byte, typ uint64, _ protocol.Version) (*StreamFrame, i frame.Fin = fin frame.DataLenPresent = hasDataLen - if dataLen != 0 { + if dataLen > 0 { copy(frame.Data, b) } if frame.Offset+frame.DataLen() > protocol.MaxByteCount { diff --git a/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go b/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go index 96bf4617..f218e046 100644 --- a/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go +++ b/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go @@ -5,34 +5,36 @@ import ( "time" ) +//go:generate go run generate_multiplexer.go ConnectionTracer connection_tracer.go multiplexer.tmpl connection_tracer_multiplexer.go + // A ConnectionTracer records events. 
type ConnectionTracer struct { StartedConnection func(local, remote net.Addr, srcConnID, destConnID ConnectionID) NegotiatedVersion func(chosen Version, clientVersions, serverVersions []Version) - ClosedConnection func(error) - SentTransportParameters func(*TransportParameters) - ReceivedTransportParameters func(*TransportParameters) + ClosedConnection func(err error) + SentTransportParameters func(parameters *TransportParameters) + ReceivedTransportParameters func(parameters *TransportParameters) RestoredTransportParameters func(parameters *TransportParameters) // for 0-RTT - SentLongHeaderPacket func(*ExtendedHeader, ByteCount, ECN, *AckFrame, []Frame) - SentShortHeaderPacket func(*ShortHeader, ByteCount, ECN, *AckFrame, []Frame) - ReceivedVersionNegotiationPacket func(dest, src ArbitraryLenConnectionID, _ []Version) - ReceivedRetry func(*Header) - ReceivedLongHeaderPacket func(*ExtendedHeader, ByteCount, ECN, []Frame) - ReceivedShortHeaderPacket func(*ShortHeader, ByteCount, ECN, []Frame) - BufferedPacket func(PacketType, ByteCount) - DroppedPacket func(PacketType, PacketNumber, ByteCount, PacketDropReason) + SentLongHeaderPacket func(hdr *ExtendedHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) + SentShortHeaderPacket func(hdr *ShortHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) + ReceivedVersionNegotiationPacket func(dest, src ArbitraryLenConnectionID, versions []Version) + ReceivedRetry func(hdr *Header) + ReceivedLongHeaderPacket func(hdr *ExtendedHeader, size ByteCount, ecn ECN, frames []Frame) + ReceivedShortHeaderPacket func(hdr *ShortHeader, size ByteCount, ecn ECN, frames []Frame) + BufferedPacket func(packetType PacketType, size ByteCount) + DroppedPacket func(packetType PacketType, pn PacketNumber, size ByteCount, reason PacketDropReason) UpdatedMetrics func(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) - AcknowledgedPacket func(EncryptionLevel, PacketNumber) - LostPacket func(EncryptionLevel, PacketNumber, PacketLossReason) + AcknowledgedPacket func(encLevel EncryptionLevel, pn PacketNumber) + LostPacket func(encLevel EncryptionLevel, pn PacketNumber, reason PacketLossReason) UpdatedMTU func(mtu ByteCount, done bool) - UpdatedCongestionState func(CongestionState) + UpdatedCongestionState func(state CongestionState) UpdatedPTOCount func(value uint32) - UpdatedKeyFromTLS func(EncryptionLevel, Perspective) + UpdatedKeyFromTLS func(encLevel EncryptionLevel, p Perspective) UpdatedKey func(keyPhase KeyPhase, remote bool) - DroppedEncryptionLevel func(EncryptionLevel) + DroppedEncryptionLevel func(encLevel EncryptionLevel) DroppedKey func(keyPhase KeyPhase) - SetLossTimer func(TimerType, EncryptionLevel, time.Time) - LossTimerExpired func(TimerType, EncryptionLevel) + SetLossTimer func(timerType TimerType, encLevel EncryptionLevel, time time.Time) + LossTimerExpired func(timerType TimerType, encLevel EncryptionLevel) LossTimerCanceled func() ECNStateUpdated func(state ECNState, trigger ECNStateTrigger) ChoseALPN func(protocol string) @@ -40,232 +42,3 @@ type ConnectionTracer struct { Close func() Debug func(name, msg string) } - -// NewMultiplexedConnectionTracer creates a new connection tracer that multiplexes events to multiple tracers. 
-func NewMultiplexedConnectionTracer(tracers ...*ConnectionTracer) *ConnectionTracer { - if len(tracers) == 0 { - return nil - } - if len(tracers) == 1 { - return tracers[0] - } - return &ConnectionTracer{ - StartedConnection: func(local, remote net.Addr, srcConnID, destConnID ConnectionID) { - for _, t := range tracers { - if t.StartedConnection != nil { - t.StartedConnection(local, remote, srcConnID, destConnID) - } - } - }, - NegotiatedVersion: func(chosen Version, clientVersions, serverVersions []Version) { - for _, t := range tracers { - if t.NegotiatedVersion != nil { - t.NegotiatedVersion(chosen, clientVersions, serverVersions) - } - } - }, - ClosedConnection: func(e error) { - for _, t := range tracers { - if t.ClosedConnection != nil { - t.ClosedConnection(e) - } - } - }, - SentTransportParameters: func(tp *TransportParameters) { - for _, t := range tracers { - if t.SentTransportParameters != nil { - t.SentTransportParameters(tp) - } - } - }, - ReceivedTransportParameters: func(tp *TransportParameters) { - for _, t := range tracers { - if t.ReceivedTransportParameters != nil { - t.ReceivedTransportParameters(tp) - } - } - }, - RestoredTransportParameters: func(tp *TransportParameters) { - for _, t := range tracers { - if t.RestoredTransportParameters != nil { - t.RestoredTransportParameters(tp) - } - } - }, - SentLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { - for _, t := range tracers { - if t.SentLongHeaderPacket != nil { - t.SentLongHeaderPacket(hdr, size, ecn, ack, frames) - } - } - }, - SentShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { - for _, t := range tracers { - if t.SentShortHeaderPacket != nil { - t.SentShortHeaderPacket(hdr, size, ecn, ack, frames) - } - } - }, - ReceivedVersionNegotiationPacket: func(dest, src ArbitraryLenConnectionID, versions []Version) { - for _, t := range tracers { - if t.ReceivedVersionNegotiationPacket != nil { - t.ReceivedVersionNegotiationPacket(dest, src, versions) - } - } - }, - ReceivedRetry: func(hdr *Header) { - for _, t := range tracers { - if t.ReceivedRetry != nil { - t.ReceivedRetry(hdr) - } - } - }, - ReceivedLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, frames []Frame) { - for _, t := range tracers { - if t.ReceivedLongHeaderPacket != nil { - t.ReceivedLongHeaderPacket(hdr, size, ecn, frames) - } - } - }, - ReceivedShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, frames []Frame) { - for _, t := range tracers { - if t.ReceivedShortHeaderPacket != nil { - t.ReceivedShortHeaderPacket(hdr, size, ecn, frames) - } - } - }, - BufferedPacket: func(typ PacketType, size ByteCount) { - for _, t := range tracers { - if t.BufferedPacket != nil { - t.BufferedPacket(typ, size) - } - } - }, - DroppedPacket: func(typ PacketType, pn PacketNumber, size ByteCount, reason PacketDropReason) { - for _, t := range tracers { - if t.DroppedPacket != nil { - t.DroppedPacket(typ, pn, size, reason) - } - } - }, - UpdatedMetrics: func(rttStats *RTTStats, cwnd, bytesInFlight ByteCount, packetsInFlight int) { - for _, t := range tracers { - if t.UpdatedMetrics != nil { - t.UpdatedMetrics(rttStats, cwnd, bytesInFlight, packetsInFlight) - } - } - }, - AcknowledgedPacket: func(encLevel EncryptionLevel, pn PacketNumber) { - for _, t := range tracers { - if t.AcknowledgedPacket != nil { - t.AcknowledgedPacket(encLevel, pn) - } - } - }, - LostPacket: func(encLevel EncryptionLevel, pn PacketNumber, reason 
PacketLossReason) { - for _, t := range tracers { - if t.LostPacket != nil { - t.LostPacket(encLevel, pn, reason) - } - } - }, - UpdatedMTU: func(mtu ByteCount, done bool) { - for _, t := range tracers { - if t.UpdatedMTU != nil { - t.UpdatedMTU(mtu, done) - } - } - }, - UpdatedCongestionState: func(state CongestionState) { - for _, t := range tracers { - if t.UpdatedCongestionState != nil { - t.UpdatedCongestionState(state) - } - } - }, - UpdatedPTOCount: func(value uint32) { - for _, t := range tracers { - if t.UpdatedPTOCount != nil { - t.UpdatedPTOCount(value) - } - } - }, - UpdatedKeyFromTLS: func(encLevel EncryptionLevel, perspective Perspective) { - for _, t := range tracers { - if t.UpdatedKeyFromTLS != nil { - t.UpdatedKeyFromTLS(encLevel, perspective) - } - } - }, - UpdatedKey: func(generation KeyPhase, remote bool) { - for _, t := range tracers { - if t.UpdatedKey != nil { - t.UpdatedKey(generation, remote) - } - } - }, - DroppedEncryptionLevel: func(encLevel EncryptionLevel) { - for _, t := range tracers { - if t.DroppedEncryptionLevel != nil { - t.DroppedEncryptionLevel(encLevel) - } - } - }, - DroppedKey: func(generation KeyPhase) { - for _, t := range tracers { - if t.DroppedKey != nil { - t.DroppedKey(generation) - } - } - }, - SetLossTimer: func(typ TimerType, encLevel EncryptionLevel, exp time.Time) { - for _, t := range tracers { - if t.SetLossTimer != nil { - t.SetLossTimer(typ, encLevel, exp) - } - } - }, - LossTimerExpired: func(typ TimerType, encLevel EncryptionLevel) { - for _, t := range tracers { - if t.LossTimerExpired != nil { - t.LossTimerExpired(typ, encLevel) - } - } - }, - LossTimerCanceled: func() { - for _, t := range tracers { - if t.LossTimerCanceled != nil { - t.LossTimerCanceled() - } - } - }, - ECNStateUpdated: func(state ECNState, trigger ECNStateTrigger) { - for _, t := range tracers { - if t.ECNStateUpdated != nil { - t.ECNStateUpdated(state, trigger) - } - } - }, - ChoseALPN: func(protocol string) { - for _, t := range tracers { - if t.ChoseALPN != nil { - t.ChoseALPN(protocol) - } - } - }, - Close: func() { - for _, t := range tracers { - if t.Close != nil { - t.Close() - } - } - }, - Debug: func(name, msg string) { - for _, t := range tracers { - if t.Debug != nil { - t.Debug(name, msg) - } - } - }, - } -} diff --git a/vendor/github.com/quic-go/quic-go/logging/connection_tracer_multiplexer.go b/vendor/github.com/quic-go/quic-go/logging/connection_tracer_multiplexer.go new file mode 100644 index 00000000..3a87058c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/connection_tracer_multiplexer.go @@ -0,0 +1,236 @@ +// Code generated by generate_multiplexer.go; DO NOT EDIT. 
+ +package logging + +import ( + "net" + "time" +) + +func NewMultiplexedConnectionTracer(tracers ...*ConnectionTracer) *ConnectionTracer { + if len(tracers) == 0 { + return nil + } + if len(tracers) == 1 { + return tracers[0] + } + return &ConnectionTracer{ + StartedConnection: func(local net.Addr, remote net.Addr, srcConnID ConnectionID, destConnID ConnectionID) { + for _, t := range tracers { + if t.StartedConnection != nil { + t.StartedConnection(local, remote, srcConnID, destConnID) + } + } + }, + NegotiatedVersion: func(chosen Version, clientVersions []Version, serverVersions []Version) { + for _, t := range tracers { + if t.NegotiatedVersion != nil { + t.NegotiatedVersion(chosen, clientVersions, serverVersions) + } + } + }, + ClosedConnection: func(err error) { + for _, t := range tracers { + if t.ClosedConnection != nil { + t.ClosedConnection(err) + } + } + }, + SentTransportParameters: func(parameters *TransportParameters) { + for _, t := range tracers { + if t.SentTransportParameters != nil { + t.SentTransportParameters(parameters) + } + } + }, + ReceivedTransportParameters: func(parameters *TransportParameters) { + for _, t := range tracers { + if t.ReceivedTransportParameters != nil { + t.ReceivedTransportParameters(parameters) + } + } + }, + RestoredTransportParameters: func(parameters *TransportParameters) { + for _, t := range tracers { + if t.RestoredTransportParameters != nil { + t.RestoredTransportParameters(parameters) + } + } + }, + SentLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { + for _, t := range tracers { + if t.SentLongHeaderPacket != nil { + t.SentLongHeaderPacket(hdr, size, ecn, ack, frames) + } + } + }, + SentShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, ack *AckFrame, frames []Frame) { + for _, t := range tracers { + if t.SentShortHeaderPacket != nil { + t.SentShortHeaderPacket(hdr, size, ecn, ack, frames) + } + } + }, + ReceivedVersionNegotiationPacket: func(dest ArbitraryLenConnectionID, src ArbitraryLenConnectionID, versions []Version) { + for _, t := range tracers { + if t.ReceivedVersionNegotiationPacket != nil { + t.ReceivedVersionNegotiationPacket(dest, src, versions) + } + } + }, + ReceivedRetry: func(hdr *Header) { + for _, t := range tracers { + if t.ReceivedRetry != nil { + t.ReceivedRetry(hdr) + } + } + }, + ReceivedLongHeaderPacket: func(hdr *ExtendedHeader, size ByteCount, ecn ECN, frames []Frame) { + for _, t := range tracers { + if t.ReceivedLongHeaderPacket != nil { + t.ReceivedLongHeaderPacket(hdr, size, ecn, frames) + } + } + }, + ReceivedShortHeaderPacket: func(hdr *ShortHeader, size ByteCount, ecn ECN, frames []Frame) { + for _, t := range tracers { + if t.ReceivedShortHeaderPacket != nil { + t.ReceivedShortHeaderPacket(hdr, size, ecn, frames) + } + } + }, + BufferedPacket: func(packetType PacketType, size ByteCount) { + for _, t := range tracers { + if t.BufferedPacket != nil { + t.BufferedPacket(packetType, size) + } + } + }, + DroppedPacket: func(packetType PacketType, pn PacketNumber, size ByteCount, reason PacketDropReason) { + for _, t := range tracers { + if t.DroppedPacket != nil { + t.DroppedPacket(packetType, pn, size, reason) + } + } + }, + UpdatedMetrics: func(rttStats *RTTStats, cwnd ByteCount, bytesInFlight ByteCount, packetsInFlight int) { + for _, t := range tracers { + if t.UpdatedMetrics != nil { + t.UpdatedMetrics(rttStats, cwnd, bytesInFlight, packetsInFlight) + } + } + }, + AcknowledgedPacket: func(encLevel EncryptionLevel, pn PacketNumber) { 
+ for _, t := range tracers { + if t.AcknowledgedPacket != nil { + t.AcknowledgedPacket(encLevel, pn) + } + } + }, + LostPacket: func(encLevel EncryptionLevel, pn PacketNumber, reason PacketLossReason) { + for _, t := range tracers { + if t.LostPacket != nil { + t.LostPacket(encLevel, pn, reason) + } + } + }, + UpdatedMTU: func(mtu ByteCount, done bool) { + for _, t := range tracers { + if t.UpdatedMTU != nil { + t.UpdatedMTU(mtu, done) + } + } + }, + UpdatedCongestionState: func(state CongestionState) { + for _, t := range tracers { + if t.UpdatedCongestionState != nil { + t.UpdatedCongestionState(state) + } + } + }, + UpdatedPTOCount: func(value uint32) { + for _, t := range tracers { + if t.UpdatedPTOCount != nil { + t.UpdatedPTOCount(value) + } + } + }, + UpdatedKeyFromTLS: func(encLevel EncryptionLevel, p Perspective) { + for _, t := range tracers { + if t.UpdatedKeyFromTLS != nil { + t.UpdatedKeyFromTLS(encLevel, p) + } + } + }, + UpdatedKey: func(keyPhase KeyPhase, remote bool) { + for _, t := range tracers { + if t.UpdatedKey != nil { + t.UpdatedKey(keyPhase, remote) + } + } + }, + DroppedEncryptionLevel: func(encLevel EncryptionLevel) { + for _, t := range tracers { + if t.DroppedEncryptionLevel != nil { + t.DroppedEncryptionLevel(encLevel) + } + } + }, + DroppedKey: func(keyPhase KeyPhase) { + for _, t := range tracers { + if t.DroppedKey != nil { + t.DroppedKey(keyPhase) + } + } + }, + SetLossTimer: func(timerType TimerType, encLevel EncryptionLevel, time time.Time) { + for _, t := range tracers { + if t.SetLossTimer != nil { + t.SetLossTimer(timerType, encLevel, time) + } + } + }, + LossTimerExpired: func(timerType TimerType, encLevel EncryptionLevel) { + for _, t := range tracers { + if t.LossTimerExpired != nil { + t.LossTimerExpired(timerType, encLevel) + } + } + }, + LossTimerCanceled: func() { + for _, t := range tracers { + if t.LossTimerCanceled != nil { + t.LossTimerCanceled() + } + } + }, + ECNStateUpdated: func(state ECNState, trigger ECNStateTrigger) { + for _, t := range tracers { + if t.ECNStateUpdated != nil { + t.ECNStateUpdated(state, trigger) + } + } + }, + ChoseALPN: func(protocol string) { + for _, t := range tracers { + if t.ChoseALPN != nil { + t.ChoseALPN(protocol) + } + } + }, + Close: func() { + for _, t := range tracers { + if t.Close != nil { + t.Close() + } + } + }, + Debug: func(name string, msg string) { + for _, t := range tracers { + if t.Debug != nil { + t.Debug(name, msg) + } + } + }, + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/generate_multiplexer.go b/vendor/github.com/quic-go/quic-go/logging/generate_multiplexer.go new file mode 100644 index 00000000..c152b846 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/generate_multiplexer.go @@ -0,0 +1,161 @@ +//go:build generate + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "strings" + "text/template" + + "golang.org/x/tools/imports" +) + +func main() { + if len(os.Args) != 5 { + log.Fatalf("Usage: %s ", os.Args[0]) + } + + structName := os.Args[1] + inputFile := os.Args[2] + templateFile := os.Args[3] + outputFile := os.Args[4] + + fset := token.NewFileSet() + + // Parse the input file containing the struct type + file, err := parser.ParseFile(fset, inputFile, nil, parser.AllErrors) + if err != nil { + log.Fatalf("Failed to parse file: %v", err) + } + + var fields []*ast.Field + + // Find the specified struct type in the AST + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if 
!ok || genDecl.Tok != token.TYPE { + continue + } + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok || typeSpec.Name.Name != structName { + continue + } + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + log.Fatalf("%s is not a struct", structName) + } + fields = structType.Fields.List + break + } + } + + if fields == nil { + log.Fatalf("Could not find %s type", structName) + } + + // Prepare data for the template + type FieldData struct { + Name string + Params string + Args string + HasParams bool + ReturnTypes string + HasReturn bool + } + + var fieldDataList []FieldData + + for _, field := range fields { + funcType, ok := field.Type.(*ast.FuncType) + if !ok { + continue + } + for _, name := range field.Names { + fieldData := FieldData{Name: name.Name} + + // extract parameters + var params []string + var args []string + if funcType.Params != nil { + for i, param := range funcType.Params.List { + // We intentionally reject unnamed (and, further down, "_") function parameters. + // We could auto-generate parameter names, + // but having meaningful variable names will be more helpful for the user. + if len(param.Names) == 0 { + log.Fatalf("encountered unnamed parameter at position %d in function %s", i, fieldData.Name) + } + var buf bytes.Buffer + printer.Fprint(&buf, fset, param.Type) + paramType := buf.String() + for _, paramName := range param.Names { + if paramName.Name == "_" { + log.Fatalf("encountered underscore parameter at position %d in function %s", i, fieldData.Name) + } + params = append(params, fmt.Sprintf("%s %s", paramName.Name, paramType)) + args = append(args, paramName.Name) + } + } + } + fieldData.Params = strings.Join(params, ", ") + fieldData.Args = strings.Join(args, ", ") + fieldData.HasParams = len(params) > 0 + + // extract return types + if funcType.Results != nil && len(funcType.Results.List) > 0 { + fieldData.HasReturn = true + var returns []string + for _, result := range funcType.Results.List { + var buf bytes.Buffer + printer.Fprint(&buf, fset, result.Type) + returns = append(returns, buf.String()) + } + if len(returns) == 1 { + fieldData.ReturnTypes = fmt.Sprintf(" %s", returns[0]) + } else { + fieldData.ReturnTypes = fmt.Sprintf(" (%s)", strings.Join(returns, ", ")) + } + } + + fieldDataList = append(fieldDataList, fieldData) + } + } + + // Read the template from file + templateContent, err := os.ReadFile(templateFile) + if err != nil { + log.Fatalf("Failed to read template file: %v", err) + } + + // Generate the code using the template + tmpl, err := template.New("multiplexer").Funcs(template.FuncMap{"join": strings.Join}).Parse(string(templateContent)) + if err != nil { + log.Fatalf("Failed to parse template: %v", err) + } + + var generatedCode bytes.Buffer + generatedCode.WriteString("// Code generated by generate_multiplexer.go; DO NOT EDIT.\n\n") + if err = tmpl.Execute(&generatedCode, map[string]interface{}{ + "Fields": fieldDataList, + "StructName": structName, + }); err != nil { + log.Fatalf("Failed to execute template: %v", err) + } + + // Format the generated code and add imports + formattedCode, err := imports.Process(outputFile, generatedCode.Bytes(), nil) + if err != nil { + log.Fatalf("Failed to process imports: %v", err) + } + + if err := os.WriteFile(outputFile, formattedCode, 0o644); err != nil { + log.Fatalf("Failed to write output file: %v", err) + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/multiplexer.tmpl b/vendor/github.com/quic-go/quic-go/logging/multiplexer.tmpl 
new file mode 100644 index 00000000..9ba52e0f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/multiplexer.tmpl @@ -0,0 +1,21 @@ +package logging + +func NewMultiplexed{{ .StructName }} (tracers ...*{{ .StructName }}) *{{ .StructName }} { + if len(tracers) == 0 { + return nil + } + if len(tracers) == 1 { + return tracers[0] + } + return &{{ .StructName }}{ + {{- range .Fields }} + {{ .Name }}: func({{ .Params }}){{ .ReturnTypes }} { + for _, t := range tracers { + if t.{{ .Name }} != nil { + t.{{ .Name }}({{ .Args }}) + } + } + }, + {{- end }} + } +} diff --git a/vendor/github.com/quic-go/quic-go/logging/tracer.go b/vendor/github.com/quic-go/quic-go/logging/tracer.go index 625a809e..4fe01462 100644 --- a/vendor/github.com/quic-go/quic-go/logging/tracer.go +++ b/vendor/github.com/quic-go/quic-go/logging/tracer.go @@ -2,58 +2,13 @@ package logging import "net" +//go:generate go run generate_multiplexer.go Tracer tracer.go multiplexer.tmpl tracer_multiplexer.go + // A Tracer traces events. type Tracer struct { - SentPacket func(net.Addr, *Header, ByteCount, []Frame) - SentVersionNegotiationPacket func(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []Version) - DroppedPacket func(net.Addr, PacketType, ByteCount, PacketDropReason) + SentPacket func(dest net.Addr, hdr *Header, size ByteCount, frames []Frame) + SentVersionNegotiationPacket func(dest net.Addr, destConnID, srcConnID ArbitraryLenConnectionID, versions []Version) + DroppedPacket func(addr net.Addr, packetType PacketType, size ByteCount, reason PacketDropReason) Debug func(name, msg string) Close func() } - -// NewMultiplexedTracer creates a new tracer that multiplexes events to multiple tracers. -func NewMultiplexedTracer(tracers ...*Tracer) *Tracer { - if len(tracers) == 0 { - return nil - } - if len(tracers) == 1 { - return tracers[0] - } - return &Tracer{ - SentPacket: func(remote net.Addr, hdr *Header, size ByteCount, frames []Frame) { - for _, t := range tracers { - if t.SentPacket != nil { - t.SentPacket(remote, hdr, size, frames) - } - } - }, - SentVersionNegotiationPacket: func(remote net.Addr, dest, src ArbitraryLenConnectionID, versions []Version) { - for _, t := range tracers { - if t.SentVersionNegotiationPacket != nil { - t.SentVersionNegotiationPacket(remote, dest, src, versions) - } - } - }, - DroppedPacket: func(remote net.Addr, typ PacketType, size ByteCount, reason PacketDropReason) { - for _, t := range tracers { - if t.DroppedPacket != nil { - t.DroppedPacket(remote, typ, size, reason) - } - } - }, - Debug: func(name, msg string) { - for _, t := range tracers { - if t.Debug != nil { - t.Debug(name, msg) - } - } - }, - Close: func() { - for _, t := range tracers { - if t.Close != nil { - t.Close() - } - } - }, - } -} diff --git a/vendor/github.com/quic-go/quic-go/logging/tracer_multiplexer.go b/vendor/github.com/quic-go/quic-go/logging/tracer_multiplexer.go new file mode 100644 index 00000000..f0878cfe --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/tracer_multiplexer.go @@ -0,0 +1,51 @@ +// Code generated by generate_multiplexer.go; DO NOT EDIT. 
+ +package logging + +import "net" + +func NewMultiplexedTracer(tracers ...*Tracer) *Tracer { + if len(tracers) == 0 { + return nil + } + if len(tracers) == 1 { + return tracers[0] + } + return &Tracer{ + SentPacket: func(dest net.Addr, hdr *Header, size ByteCount, frames []Frame) { + for _, t := range tracers { + if t.SentPacket != nil { + t.SentPacket(dest, hdr, size, frames) + } + } + }, + SentVersionNegotiationPacket: func(dest net.Addr, destConnID ArbitraryLenConnectionID, srcConnID ArbitraryLenConnectionID, versions []Version) { + for _, t := range tracers { + if t.SentVersionNegotiationPacket != nil { + t.SentVersionNegotiationPacket(dest, destConnID, srcConnID, versions) + } + } + }, + DroppedPacket: func(addr net.Addr, packetType PacketType, size ByteCount, reason PacketDropReason) { + for _, t := range tracers { + if t.DroppedPacket != nil { + t.DroppedPacket(addr, packetType, size, reason) + } + } + }, + Debug: func(name string, msg string) { + for _, t := range tracers { + if t.Debug != nil { + t.Debug(name, msg) + } + } + }, + Close: func() { + for _, t := range tracers { + if t.Close != nil { + t.Close() + } + } + }, + } +} diff --git a/vendor/github.com/quic-go/quic-go/mockgen.go b/vendor/github.com/quic-go/quic-go/mockgen.go index 65ec465a..1a8b28db 100644 --- a/vendor/github.com/quic-go/quic-go/mockgen.go +++ b/vendor/github.com/quic-go/quic-go/mockgen.go @@ -61,9 +61,4 @@ type PacketHandler = packetHandler //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_packet_handler_manager_test.go github.com/quic-go/quic-go PacketHandlerManager" type PacketHandlerManager = packetHandlerManager -// Need to use source mode for the batchConn, since reflect mode follows type aliases. -// See https://github.com/golang/mock/issues/244 for details. -// -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -source sys_conn_oob.go -destination mock_batch_conn_test.go -mock_names batchConn=MockBatchConn" - //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -self_package github.com/quic-go/quic-go -destination mock_packetconn_test.go net PacketConn" diff --git a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go index 3f3a640a..ee636a6d 100644 --- a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go +++ b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go @@ -13,16 +13,16 @@ import ( type mtuDiscoverer interface { // Start starts the MTU discovery process. // It's unnecessary to call ShouldSendProbe before that. - Start() + Start(now time.Time) ShouldSendProbe(now time.Time) bool CurrentSize() protocol.ByteCount - GetPing() (ping ackhandler.Frame, datagramSize protocol.ByteCount) + GetPing(now time.Time) (ping ackhandler.Frame, datagramSize protocol.ByteCount) } const ( // At some point, we have to stop searching for a higher MTU. // We're happy to send a packet that's 10 bytes smaller than the actual MTU. - maxMTUDiff = 20 + maxMTUDiff protocol.ByteCount = 20 // send a probe packet every mtuProbeDelay RTTs mtuProbeDelay = 5 // Once maxLostMTUProbes MTU probe packets larger than a certain size are lost, @@ -94,7 +94,6 @@ type mtuFinder struct { inFlight protocol.ByteCount // the size of the probe packet currently in flight. 
InvalidByteCount if none is in flight min protocol.ByteCount - limit protocol.ByteCount // on initialization, we treat the maximum size as the first "lost" packet lost [maxLostMTUProbes]protocol.ByteCount @@ -114,7 +113,6 @@ func newMTUDiscoverer( f := &mtuFinder{ inFlight: protocol.InvalidByteCount, min: start, - limit: max, rttStats: rttStats, mtuIncreased: mtuIncreased, tracer: tracer, @@ -142,8 +140,8 @@ func (f *mtuFinder) max() protocol.ByteCount { return f.lost[len(f.lost)-1] } -func (f *mtuFinder) Start() { - f.lastProbeTime = time.Now() // makes sure the first probe packet is not sent immediately +func (f *mtuFinder) Start(now time.Time) { + f.lastProbeTime = now // makes sure the first probe packet is not sent immediately } func (f *mtuFinder) ShouldSendProbe(now time.Time) bool { @@ -156,14 +154,14 @@ func (f *mtuFinder) ShouldSendProbe(now time.Time) bool { return !now.Before(f.lastProbeTime.Add(mtuProbeDelay * f.rttStats.SmoothedRTT())) } -func (f *mtuFinder) GetPing() (ackhandler.Frame, protocol.ByteCount) { +func (f *mtuFinder) GetPing(now time.Time) (ackhandler.Frame, protocol.ByteCount) { var size protocol.ByteCount if f.lastProbeWasLost { size = (f.min + f.lost[0]) / 2 } else { size = (f.min + f.max()) / 2 } - f.lastProbeTime = time.Now() + f.lastProbeTime = now f.inFlight = size return ackhandler.Frame{ Frame: &wire.PingFrame{}, diff --git a/vendor/github.com/quic-go/quic-go/multiplexer.go b/vendor/github.com/quic-go/quic-go/multiplexer.go deleted file mode 100644 index 85f7f403..00000000 --- a/vendor/github.com/quic-go/quic-go/multiplexer.go +++ /dev/null @@ -1,75 +0,0 @@ -package quic - -import ( - "fmt" - "net" - "sync" - - "github.com/quic-go/quic-go/internal/utils" -) - -var ( - connMuxerOnce sync.Once - connMuxer multiplexer -) - -type indexableConn interface{ LocalAddr() net.Addr } - -type multiplexer interface { - AddConn(conn indexableConn) - RemoveConn(indexableConn) error -} - -// The connMultiplexer listens on multiple net.PacketConns and dispatches -// incoming packets to the connection handler. -type connMultiplexer struct { - mutex sync.Mutex - - conns map[string] /* LocalAddr().String() */ indexableConn - logger utils.Logger -} - -var _ multiplexer = &connMultiplexer{} - -func getMultiplexer() multiplexer { - connMuxerOnce.Do(func() { - connMuxer = &connMultiplexer{ - conns: make(map[string]indexableConn), - logger: utils.DefaultLogger.WithPrefix("muxer"), - } - }) - return connMuxer -} - -func (m *connMultiplexer) index(addr net.Addr) string { - return addr.Network() + " " + addr.String() -} - -func (m *connMultiplexer) AddConn(c indexableConn) { - m.mutex.Lock() - defer m.mutex.Unlock() - - connIndex := m.index(c.LocalAddr()) - p, ok := m.conns[connIndex] - if ok { - // Panics if we're already listening on this connection. - // This is a safeguard because we're introducing a breaking API change, see - // https://github.com/quic-go/quic-go/issues/3727 for details. - // We'll remove this at a later time, when most users of the library have made the switch. 
- panic("connection already exists") // TODO: write a nice message - } - m.conns[connIndex] = p -} - -func (m *connMultiplexer) RemoveConn(c indexableConn) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - connIndex := m.index(c.LocalAddr()) - if _, ok := m.conns[connIndex]; !ok { - return fmt.Errorf("cannote remove connection, connection is unknown") - } - - delete(m.conns, connIndex) - return nil -} diff --git a/vendor/github.com/quic-go/quic-go/packet_handler_map.go b/vendor/github.com/quic-go/quic-go/packet_handler_map.go index 7840202c..84841984 100644 --- a/vendor/github.com/quic-go/quic-go/packet_handler_map.go +++ b/vendor/github.com/quic-go/quic-go/packet_handler_map.go @@ -1,10 +1,6 @@ package quic import ( - "crypto/hmac" - "crypto/rand" - "crypto/sha256" - "hash" "io" "net" "sync" @@ -56,15 +52,12 @@ type packetHandlerMap struct { deleteRetiredConnsAfter time.Duration - statelessResetMutex sync.Mutex - statelessResetHasher hash.Hash - logger utils.Logger } var _ packetHandlerManager = &packetHandlerMap{} -func newPacketHandlerMap(key *StatelessResetKey, enqueueClosePacket func(closePacket), logger utils.Logger) *packetHandlerMap { +func newPacketHandlerMap(enqueueClosePacket func(closePacket), logger utils.Logger) *packetHandlerMap { h := &packetHandlerMap{ closeChan: make(chan struct{}), handlers: make(map[protocol.ConnectionID]packetHandler), @@ -73,9 +66,6 @@ func newPacketHandlerMap(key *StatelessResetKey, enqueueClosePacket func(closePa enqueueClosePacket: enqueueClosePacket, logger: logger, } - if key != nil { - h.statelessResetHasher = hmac.New(sha256.New, key[:]) - } if h.logger.Debug() { go h.logUsage() } @@ -236,20 +226,3 @@ func (h *packetHandlerMap) Close(e error) { h.mutex.Unlock() wg.Wait() } - -func (h *packetHandlerMap) GetStatelessResetToken(connID protocol.ConnectionID) protocol.StatelessResetToken { - var token protocol.StatelessResetToken - if h.statelessResetHasher == nil { - // Return a random stateless reset token. - // This token will be sent in the server's transport parameters. - // By using a random token, an off-path attacker won't be able to disrupt the connection. 
- rand.Read(token[:]) - return token - } - h.statelessResetMutex.Lock() - h.statelessResetHasher.Write(connID.Bytes()) - copy(token[:], h.statelessResetHasher.Sum(nil)) - h.statelessResetHasher.Reset() - h.statelessResetMutex.Unlock() - return token -} diff --git a/vendor/github.com/quic-go/quic-go/packet_packer.go b/vendor/github.com/quic-go/quic-go/packet_packer.go index 8b8a03d4..7724b503 100644 --- a/vendor/github.com/quic-go/quic-go/packet_packer.go +++ b/vendor/github.com/quic-go/quic-go/packet_packer.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "errors" "fmt" + "time" "golang.org/x/exp/rand" @@ -18,10 +19,10 @@ import ( var errNothingToPack = errors.New("nothing to pack") type packer interface { - PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) - PackAckOnlyPacket(maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) - AppendPacket(buf *packetBuffer, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) - MaybePackProbePacket(protocol.EncryptionLevel, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) + PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (*coalescedPacket, error) + PackAckOnlyPacket(maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) + AppendPacket(buf *packetBuffer, maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, error) + MaybePackProbePacket(protocol.EncryptionLevel, protocol.ByteCount, time.Time, protocol.Version) (*coalescedPacket, error) PackConnectionClose(*qerr.TransportError, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) PackApplicationClose(*qerr.ApplicationError, protocol.ByteCount, protocol.Version) (*coalescedPacket, error) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) @@ -106,12 +107,11 @@ type sealingManager interface { type frameSource interface { HasData() bool - AppendStreamFrames([]ackhandler.StreamFrame, protocol.ByteCount, protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) - AppendControlFrames([]ackhandler.Frame, protocol.ByteCount, protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) + Append([]ackhandler.Frame, []ackhandler.StreamFrame, protocol.ByteCount, time.Time, protocol.Version) ([]ackhandler.Frame, []ackhandler.StreamFrame, protocol.ByteCount) } type ackFrameSource interface { - GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame + GetAckFrame(_ protocol.EncryptionLevel, now time.Time, onlyIfQueued bool) *wire.AckFrame } type packetPacker struct { @@ -328,7 +328,7 @@ func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, currentSize, // PackCoalescedPacket packs a new packet. // It packs an Initial / Handshake if there is data to send in these packet number spaces. // It should only be called before the handshake is confirmed. 
-func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) { +func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxSize protocol.ByteCount, now time.Time, v protocol.Version) (*coalescedPacket, error) { var ( initialHdr, handshakeHdr, zeroRTTHdr *wire.ExtendedHeader initialPayload, handshakePayload, zeroRTTPayload, oneRTTPayload payload @@ -342,7 +342,14 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. } var size protocol.ByteCount if initialSealer != nil { - initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true, v) + initialHdr, initialPayload = p.maybeGetCryptoPacket( + maxSize-protocol.ByteCount(initialSealer.Overhead()), + protocol.EncryptionInitial, + now, + onlyAck, + true, + v, + ) if initialPayload.length > 0 { size += p.longHeaderPacketLength(initialHdr, initialPayload, v) + protocol.ByteCount(initialSealer.Overhead()) } @@ -350,14 +357,21 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. // Add a Handshake packet. var handshakeSealer sealer - if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + if (onlyAck && size == 0) || (!onlyAck && size < maxSize-protocol.MinCoalescedPacketSize) { var err error handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer() if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { return nil, err } if handshakeSealer != nil { - handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0, v) + handshakeHdr, handshakePayload = p.maybeGetCryptoPacket( + maxSize-size-protocol.ByteCount(handshakeSealer.Overhead()), + protocol.EncryptionHandshake, + now, + onlyAck, + size == 0, + v, + ) if handshakePayload.length > 0 { s := p.longHeaderPacketLength(handshakeHdr, handshakePayload, v) + protocol.ByteCount(handshakeSealer.Overhead()) size += s @@ -370,7 +384,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. var oneRTTSealer handshake.ShortHeaderSealer var connID protocol.ConnectionID var kp protocol.KeyPhaseBit - if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + if (onlyAck && size == 0) || (!onlyAck && size < maxSize-protocol.MinCoalescedPacketSize) { var err error oneRTTSealer, err = p.cryptoSetup.Get1RTTSealer() if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { @@ -381,7 +395,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. connID = p.getDestConnID() oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) hdrLen := wire.ShortHeaderLen(connID, oneRTTPacketNumberLen) - oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxPacketSize-size, onlyAck, size == 0, v) + oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxSize-size, onlyAck, size == 0, now, v) if oneRTTPayload.length > 0 { size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, oneRTTPayload) + protocol.ByteCount(oneRTTSealer.Overhead()) } @@ -392,7 +406,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. 
return nil, err } if zeroRTTSealer != nil { - zeroRTTHdr, zeroRTTPayload = p.maybeGetAppDataPacketFor0RTT(zeroRTTSealer, maxPacketSize-size, v) + zeroRTTHdr, zeroRTTPayload = p.maybeGetAppDataPacketFor0RTT(zeroRTTSealer, maxSize-size, now, v) if zeroRTTPayload.length > 0 { size += p.longHeaderPacketLength(zeroRTTHdr, zeroRTTPayload, v) + protocol.ByteCount(zeroRTTSealer.Overhead()) } @@ -410,7 +424,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. longHdrPackets: make([]*longHeaderPacket, 0, 3), } if initialPayload.length > 0 { - padding := p.initialPaddingLen(initialPayload.frames, size, maxPacketSize) + padding := p.initialPaddingLen(initialPayload.frames, size, maxSize) cont, err := p.appendLongHeaderPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, v) if err != nil { return nil, err @@ -431,7 +445,7 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. } packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket) } else if oneRTTPayload.length > 0 { - shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, kp, oneRTTPayload, 0, maxPacketSize, oneRTTSealer, false, v) + shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, kp, oneRTTPayload, 0, maxSize, oneRTTSealer, false, v) if err != nil { return nil, err } @@ -442,19 +456,25 @@ func (p *packetPacker) PackCoalescedPacket(onlyAck bool, maxPacketSize protocol. // PackAckOnlyPacket packs a packet containing only an ACK in the application data packet number space. // It should be called after the handshake is confirmed. -func (p *packetPacker) PackAckOnlyPacket(maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) { +func (p *packetPacker) PackAckOnlyPacket(maxSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) { buf := getPacketBuffer() - packet, err := p.appendPacket(buf, true, maxPacketSize, v) + packet, err := p.appendPacket(buf, true, maxSize, now, v) return packet, buf, err } // AppendPacket packs a packet in the application data packet number space. // It should be called after the handshake is confirmed. 
-func (p *packetPacker) AppendPacket(buf *packetBuffer, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) { - return p.appendPacket(buf, false, maxPacketSize, v) +func (p *packetPacker) AppendPacket(buf *packetBuffer, maxSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, error) { + return p.appendPacket(buf, false, maxSize, now, v) } -func (p *packetPacker) appendPacket(buf *packetBuffer, onlyAck bool, maxPacketSize protocol.ByteCount, v protocol.Version) (shortHeaderPacket, error) { +func (p *packetPacker) appendPacket( + buf *packetBuffer, + onlyAck bool, + maxPacketSize protocol.ByteCount, + now time.Time, + v protocol.Version, +) (shortHeaderPacket, error) { sealer, err := p.cryptoSetup.Get1RTTSealer() if err != nil { return shortHeaderPacket{}, err @@ -462,7 +482,7 @@ func (p *packetPacker) appendPacket(buf *packetBuffer, onlyAck bool, maxPacketSi pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) connID := p.getDestConnID() hdrLen := wire.ShortHeaderLen(connID, pnLen) - pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, maxPacketSize, onlyAck, true, v) + pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, maxPacketSize, onlyAck, true, now, v) if pl.length == 0 { return shortHeaderPacket{}, errNothingToPack } @@ -471,9 +491,15 @@ func (p *packetPacker) appendPacket(buf *packetBuffer, onlyAck bool, maxPacketSi return p.appendShortHeaderPacket(buf, connID, pn, pnLen, kp, pl, 0, maxPacketSize, sealer, false, v) } -func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool, v protocol.Version) (*wire.ExtendedHeader, payload) { +func (p *packetPacker) maybeGetCryptoPacket( + maxPacketSize protocol.ByteCount, + encLevel protocol.EncryptionLevel, + now time.Time, + onlyAck, ackAllowed bool, + v protocol.Version, +) (*wire.ExtendedHeader, payload) { if onlyAck { - if ack := p.acks.GetAckFrame(encLevel, true); ack != nil { + if ack := p.acks.GetAckFrame(encLevel, now, true); ack != nil { return p.getLongHeader(encLevel, v), payload{ ack: ack, length: ack.Length(v), @@ -500,7 +526,7 @@ func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, en hasData := s.HasData() var ack *wire.AckFrame if ackAllowed { - ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData) + ack = p.acks.GetAckFrame(encLevel, now, !hasRetransmission && !hasData) } if !hasData && !hasRetransmission && ack == nil { // nothing to send @@ -518,7 +544,7 @@ func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, en if hasRetransmission { for { var f ackhandler.Frame - //nolint:exhaustive // 0-RTT packets can't contain any retransmission.s + //nolint:exhaustive // 0-RTT packets can't contain any retransmissions switch encLevel { case protocol.EncryptionInitial: f.Frame = p.retransmissionQueue.GetInitialFrame(maxPacketSize, v) @@ -543,23 +569,39 @@ func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, en return hdr, pl } -func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount, v protocol.Version) (*wire.ExtendedHeader, payload) { +func (p *packetPacker) maybeGetAppDataPacketFor0RTT( + sealer sealer, + maxPacketSize protocol.ByteCount, + now time.Time, + v protocol.Version, +) (*wire.ExtendedHeader, payload) { if p.perspective != protocol.PerspectiveClient { return nil, payload{} } hdr := p.getLongHeader(protocol.Encryption0RTT, v) maxPayloadSize := 
maxPacketSize - hdr.GetLength(v) - protocol.ByteCount(sealer.Overhead()) - return hdr, p.maybeGetAppDataPacket(maxPayloadSize, false, false, v) + return hdr, p.maybeGetAppDataPacket(maxPayloadSize, false, false, now, v) } -func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, hdrLen protocol.ByteCount, maxPacketSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.Version) payload { +func (p *packetPacker) maybeGetShortHeaderPacket( + sealer handshake.ShortHeaderSealer, + hdrLen, maxPacketSize protocol.ByteCount, + onlyAck, ackAllowed bool, + now time.Time, + v protocol.Version, +) payload { maxPayloadSize := maxPacketSize - hdrLen - protocol.ByteCount(sealer.Overhead()) - return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, v) + return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, now, v) } -func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.Version) payload { - pl := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed, v) +func (p *packetPacker) maybeGetAppDataPacket( + maxPayloadSize protocol.ByteCount, + onlyAck, ackAllowed bool, + now time.Time, + v protocol.Version, +) payload { + pl := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed, now, v) // check if we have anything to send if len(pl.frames) == 0 && len(pl.streamFrames) == 0 { @@ -581,9 +623,14 @@ func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, return pl } -func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.Version) payload { +func (p *packetPacker) composeNextPacket( + maxFrameSize protocol.ByteCount, + onlyAck, ackAllowed bool, + now time.Time, + v protocol.Version, +) payload { if onlyAck { - if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, true); ack != nil { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, now, true); ack != nil { return payload{ack: ack, length: ack.Length(v)} } return payload{} @@ -595,7 +642,7 @@ func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAc var hasAck bool var pl payload if ackAllowed { - if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData); ack != nil { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, now, !hasRetransmission && !hasData); ack != nil { pl.ack = ack pl.length += ack.Length(v) hasAck = true @@ -641,7 +688,7 @@ func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAc if hasData { var lengthAdded protocol.ByteCount startLen := len(pl.frames) - pl.frames, lengthAdded = p.framer.AppendControlFrames(pl.frames, maxFrameSize-pl.length, v) + pl.frames, pl.streamFrames, lengthAdded = p.framer.Append(pl.frames, pl.streamFrames, maxFrameSize-pl.length, now, v) pl.length += lengthAdded // add handlers for the control frames that were added for i := startLen; i < len(pl.frames); i++ { @@ -656,14 +703,16 @@ func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAc pl.frames[i].Handler = p.retransmissionQueue.AppDataAckHandler() } } - - pl.streamFrames, lengthAdded = p.framer.AppendStreamFrames(pl.streamFrames, maxFrameSize-pl.length, v) - pl.length += lengthAdded } return pl } -func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, maxPacketSize protocol.ByteCount, v protocol.Version) (*coalescedPacket, error) { +func (p *packetPacker) MaybePackProbePacket( + encLevel protocol.EncryptionLevel, + maxPacketSize 
protocol.ByteCount, + now time.Time, + v protocol.Version, +) (*coalescedPacket, error) { if encLevel == protocol.Encryption1RTT { s, err := p.cryptoSetup.Get1RTTSealer() if err != nil { @@ -673,7 +722,7 @@ func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, m connID := p.getDestConnID() pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) hdrLen := wire.ShortHeaderLen(connID, pnLen) - pl := p.maybeGetAppDataPacket(maxPacketSize-protocol.ByteCount(s.Overhead())-hdrLen, false, true, v) + pl := p.maybeGetAppDataPacket(maxPacketSize-protocol.ByteCount(s.Overhead())-hdrLen, false, true, now, v) if pl.length == 0 { return nil, nil } @@ -687,8 +736,6 @@ func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, m return packet, nil } - var hdr *wire.ExtendedHeader - var pl payload var sealer handshake.LongHeaderSealer //nolint:exhaustive // Probe packets are never sent for 0-RTT. switch encLevel { @@ -698,18 +745,16 @@ func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, m if err != nil { return nil, err } - hdr, pl = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true, v) case protocol.EncryptionHandshake: var err error sealer, err = p.cryptoSetup.GetHandshakeSealer() if err != nil { return nil, err } - hdr, pl = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true, v) default: panic("unknown encryption level") } - + hdr, pl := p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(sealer.Overhead()), encLevel, now, false, true, v) if pl.length == 0 { return nil, nil } diff --git a/vendor/github.com/quic-go/quic-go/receive_stream.go b/vendor/github.com/quic-go/quic-go/receive_stream.go index b8535ef5..192b92f7 100644 --- a/vendor/github.com/quic-go/quic-go/receive_stream.go +++ b/vendor/github.com/quic-go/quic-go/receive_stream.go @@ -17,8 +17,8 @@ import ( type receiveStreamI interface { ReceiveStream - handleStreamFrame(*wire.StreamFrame) error - handleResetStreamFrame(*wire.ResetStreamFrame) error + handleStreamFrame(*wire.StreamFrame, time.Time) error + handleResetStreamFrame(*wire.ResetStreamFrame, time.Time) error closeForShutdown(error) } @@ -91,16 +91,19 @@ func (s *receiveStream) Read(p []byte) (int, error) { defer func() { <-s.readOnce }() s.mutex.Lock() - queuedNewControlFrame, n, err := s.readImpl(p) + queuedStreamWindowUpdate, queuedConnWindowUpdate, n, err := s.readImpl(p) completed := s.isNewlyCompleted() s.mutex.Unlock() if completed { s.sender.onStreamCompleted(s.streamID) } - if queuedNewControlFrame { + if queuedStreamWindowUpdate { s.sender.onHasStreamControlFrame(s.streamID, s) } + if queuedConnWindowUpdate { + s.sender.onHasConnectionData() + } return n, err } @@ -125,20 +128,19 @@ func (s *receiveStream) isNewlyCompleted() bool { return false } -func (s *receiveStream) readImpl(p []byte) (bool, int, error) { +func (s *receiveStream) readImpl(p []byte) (hasStreamWindowUpdate bool, hasConnWindowUpdate bool, _ int, _ error) { if s.currentFrameIsLast && s.currentFrame == nil { s.errorRead = true - return false, 0, io.EOF + return false, false, 0, io.EOF } if s.cancelledRemotely || s.cancelledLocally { s.errorRead = true - return false, 0, s.cancelErr + return false, false, 0, s.cancelErr } if s.closeForShutdownErr != nil { - return false, 0, s.closeForShutdownErr + return false, false, 0, s.closeForShutdownErr } - var queuedNewControlFrame bool var bytesRead int 
var deadlineTimer *utils.Timer for bytesRead < len(p) { @@ -146,23 +148,23 @@ func (s *receiveStream) readImpl(p []byte) (bool, int, error) { s.dequeueNextFrame() } if s.currentFrame == nil && bytesRead > 0 { - return queuedNewControlFrame, bytesRead, s.closeForShutdownErr + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, s.closeForShutdownErr } for { // Stop waiting on errors if s.closeForShutdownErr != nil { - return queuedNewControlFrame, bytesRead, s.closeForShutdownErr + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, s.closeForShutdownErr } if s.cancelledRemotely || s.cancelledLocally { s.errorRead = true - return queuedNewControlFrame, 0, s.cancelErr + return hasStreamWindowUpdate, hasConnWindowUpdate, 0, s.cancelErr } deadline := s.deadline if !deadline.IsZero() { if !time.Now().Before(deadline) { - return queuedNewControlFrame, bytesRead, errDeadline + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, errDeadline } if deadlineTimer == nil { deadlineTimer = utils.NewTimer() @@ -192,10 +194,10 @@ func (s *receiveStream) readImpl(p []byte) (bool, int, error) { } if bytesRead > len(p) { - return queuedNewControlFrame, bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p)) + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p)) } if s.readPosInFrame > len(s.currentFrame) { - return queuedNewControlFrame, bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame)) + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame)) } m := copy(p[bytesRead:], s.currentFrame[s.readPosInFrame:]) @@ -205,9 +207,13 @@ func (s *receiveStream) readImpl(p []byte) (bool, int, error) { // when a RESET_STREAM was received, the flow controller was already // informed about the final byteOffset for this stream if !s.cancelledRemotely { - if queueMaxStreamData := s.flowController.AddBytesRead(protocol.ByteCount(m)); queueMaxStreamData { + hasStream, hasConn := s.flowController.AddBytesRead(protocol.ByteCount(m)) + if hasStream { s.queuedMaxStreamData = true - queuedNewControlFrame = true + hasStreamWindowUpdate = true + } + if hasConn { + hasConnWindowUpdate = true } } @@ -217,10 +223,10 @@ func (s *receiveStream) readImpl(p []byte) (bool, int, error) { s.currentFrameDone() } s.errorRead = true - return queuedNewControlFrame, bytesRead, io.EOF + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, io.EOF } } - return queuedNewControlFrame, bytesRead, nil + return hasStreamWindowUpdate, hasConnWindowUpdate, bytesRead, nil } func (s *receiveStream) dequeueNextFrame() { @@ -266,9 +272,9 @@ func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) (queuedNe return true } -func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error { +func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame, now time.Time) error { s.mutex.Lock() - err := s.handleStreamFrameImpl(frame) + err := s.handleStreamFrameImpl(frame, now) completed := s.isNewlyCompleted() s.mutex.Unlock() @@ -279,9 +285,9 @@ func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error { return err } -func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) error { +func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame, now 
time.Time) error { maxOffset := frame.Offset + frame.DataLen() - if err := s.flowController.UpdateHighestReceived(maxOffset, frame.Fin); err != nil { + if err := s.flowController.UpdateHighestReceived(maxOffset, frame.Fin, now); err != nil { return err } if frame.Fin { @@ -297,9 +303,9 @@ func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) error { return nil } -func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) error { +func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame, now time.Time) error { s.mutex.Lock() - err := s.handleResetStreamFrameImpl(frame) + err := s.handleResetStreamFrameImpl(frame, now) completed := s.isNewlyCompleted() s.mutex.Unlock() @@ -309,11 +315,11 @@ func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) err return err } -func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) error { +func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame, now time.Time) error { if s.closeForShutdownErr != nil { return nil } - if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true); err != nil { + if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true, now); err != nil { return err } s.finalOffset = frame.FinalSize @@ -333,7 +339,7 @@ func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) return nil } -func (s *receiveStream) getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) { +func (s *receiveStream) getControlFrame(now time.Time) (_ ackhandler.Frame, ok, hasMore bool) { s.mutex.Lock() defer s.mutex.Unlock() @@ -349,7 +355,10 @@ func (s *receiveStream) getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) s.queuedMaxStreamData = false return ackhandler.Frame{ - Frame: &wire.MaxStreamDataFrame{StreamID: s.streamID, MaximumStreamData: s.flowController.GetWindowUpdate()}, + Frame: &wire.MaxStreamDataFrame{ + StreamID: s.streamID, + MaximumStreamData: s.flowController.GetWindowUpdate(now), + }, }, true, false } diff --git a/vendor/github.com/quic-go/quic-go/send_stream.go b/vendor/github.com/quic-go/quic-go/send_stream.go index 699c40ef..a588cc8a 100644 --- a/vendor/github.com/quic-go/quic-go/send_stream.go +++ b/vendor/github.com/quic-go/quic-go/send_stream.go @@ -18,7 +18,7 @@ type sendStreamI interface { SendStream handleStopSendingFrame(*wire.StopSendingFrame) hasData() bool - popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (frame ackhandler.StreamFrame, ok, hasMore bool) + popStreamFrame(protocol.ByteCount, protocol.Version) (_ ackhandler.StreamFrame, _ *wire.StreamDataBlockedFrame, hasMore bool) closeForShutdown(error) updateSendWindow(protocol.ByteCount) } @@ -37,11 +37,10 @@ type sendStream struct { writeOffset protocol.ByteCount - cancelWriteErr *StreamError - closeForShutdownErr error - - queuedResetStreamFrame bool - queuedBlockedFrame bool + // finalError is the error that is returned by Write. + // It can either be a cancellation error or the shutdown error. + finalError error + queuedResetStreamFrame *wire.ResetStreamFrame finishedWriting bool // set once Close() is called finSent bool // set when a STREAM_FRAME with FIN bit has been sent @@ -49,6 +48,8 @@ type sendStream struct { // This can happen because the application called CancelWrite, // or because Write returned the error (for remote cancellations). 
cancellationFlagged bool + cancelled bool // both local and remote cancellations + closedForShutdown bool // set by closeForShutdown completed bool // set when this stream has been reported to the streamSender as completed dataForWriting []byte // during a Write() call, this slice is the part of p that still needs to be sent out @@ -106,16 +107,15 @@ func (s *sendStream) write(p []byte) (bool /* is newly completed */, int, error) s.mutex.Lock() defer s.mutex.Unlock() + if s.finalError != nil { + if s.cancelled { + s.cancellationFlagged = true + } + return s.isNewlyCompleted(), 0, s.finalError + } if s.finishedWriting { return false, 0, fmt.Errorf("write on closed stream %d", s.streamID) } - if s.cancelWriteErr != nil { - s.cancellationFlagged = true - return s.isNewlyCompleted(), 0, s.cancelWriteErr - } - if s.closeForShutdownErr != nil { - return false, 0, s.closeForShutdownErr - } if !s.deadline.IsZero() && !time.Now().Before(s.deadline) { return false, 0, errDeadline } @@ -169,7 +169,7 @@ func (s *sendStream) write(p []byte) (bool /* is newly completed */, int, error) } deadlineTimer.Reset(deadline) } - if s.dataForWriting == nil || s.cancelWriteErr != nil || s.closeForShutdownErr != nil { + if s.dataForWriting == nil || s.finalError != nil { break } } @@ -198,11 +198,11 @@ func (s *sendStream) write(p []byte) (bool /* is newly completed */, int, error) if bytesWritten == len(p) { return false, bytesWritten, nil } - if s.closeForShutdownErr != nil { - return false, bytesWritten, s.closeForShutdownErr - } else if s.cancelWriteErr != nil { - s.cancellationFlagged = true - return s.isNewlyCompleted(), bytesWritten, s.cancelWriteErr + if s.finalError != nil { + if s.cancelled { + s.cancellationFlagged = true + } + return s.isNewlyCompleted(), bytesWritten, s.finalError } return false, bytesWritten, nil } @@ -217,40 +217,37 @@ func (s *sendStream) canBufferStreamFrame() bool { // popStreamFrame returns the next STREAM frame that is supposed to be sent on this stream // maxBytes is the maximum length this frame (including frame header) will have. 
-func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (af ackhandler.StreamFrame, ok, hasMore bool) { +func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (_ ackhandler.StreamFrame, _ *wire.StreamDataBlockedFrame, hasMore bool) { s.mutex.Lock() - f, hasMoreData, queuedControlFrame := s.popNewOrRetransmittedStreamFrame(maxBytes, v) + f, blocked, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes, v) if f != nil { s.numOutstandingFrames++ } s.mutex.Unlock() - if queuedControlFrame { - s.sender.onHasStreamControlFrame(s.streamID, s) - } if f == nil { - return ackhandler.StreamFrame{}, false, hasMoreData + return ackhandler.StreamFrame{}, blocked, hasMoreData } return ackhandler.StreamFrame{ Frame: f, Handler: (*sendStreamAckHandler)(s), - }, true, hasMoreData + }, blocked, hasMoreData } -func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (_ *wire.StreamFrame, hasMoreData, queuedControlFrame bool) { - if s.cancelWriteErr != nil || s.closeForShutdownErr != nil { - return nil, false, false +func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (_ *wire.StreamFrame, _ *wire.StreamDataBlockedFrame, hasMoreData bool) { + if s.finalError != nil { + return nil, nil, false } if len(s.retransmissionQueue) > 0 { f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes, v) if f != nil || hasMoreRetransmissions { if f == nil { - return nil, true, false + return nil, nil, true } // We always claim that we have more data to send. // This might be incorrect, in which case there'll be a spurious call to popStreamFrame in the future. - return f, true, false + return f, nil, true } } @@ -262,38 +259,45 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun Offset: s.writeOffset, DataLenPresent: true, Fin: true, - }, false, false + }, nil, false } - return nil, false, false + return nil, nil, false } sendWindow := s.flowController.SendWindowSize() if sendWindow == 0 { - if s.flowController.IsNewlyBlocked() { - s.queuedBlockedFrame = true - return nil, false, true - } - return nil, true, false + return nil, nil, true } f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow, v) - if dataLen := f.DataLen(); dataLen > 0 { + if f == nil { + return nil, nil, hasMoreData + } + if f.DataLen() > 0 { s.writeOffset += f.DataLen() s.flowController.AddBytesSent(f.DataLen()) } + var blocked *wire.StreamDataBlockedFrame + // If the entire send window is used, the stream might have become blocked on stream-level flow control. + // This is not guaranteed though, because the stream might also have been blocked on connection-level flow control. 
+ if f.DataLen() == sendWindow && s.flowController.IsNewlyBlocked() { + blocked = &wire.StreamDataBlockedFrame{StreamID: s.streamID, MaximumStreamData: s.writeOffset} + } f.Fin = s.finishedWriting && s.dataForWriting == nil && s.nextFrame == nil && !s.finSent if f.Fin { s.finSent = true } - return f, hasMoreData, false + return f, blocked, hasMoreData } func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount, v protocol.Version) (*wire.StreamFrame, bool) { if s.nextFrame != nil { + maxDataLen := min(sendWindow, s.nextFrame.MaxDataLen(maxBytes, v)) + if maxDataLen == 0 { + return nil, true + } nextFrame := s.nextFrame s.nextFrame = nil - - maxDataLen := min(sendWindow, nextFrame.MaxDataLen(maxBytes, v)) if nextFrame.DataLen() > maxDataLen { s.nextFrame = wire.GetStreamFrame() s.nextFrame.StreamID = s.streamID @@ -371,7 +375,7 @@ func (s *sendStream) isNewlyCompleted() bool { return false } // We need to keep the stream around until all frames have been sent and acknowledged. - if s.numOutstandingFrames > 0 || len(s.retransmissionQueue) > 0 || s.queuedResetStreamFrame { + if s.numOutstandingFrames > 0 || len(s.retransmissionQueue) > 0 || s.queuedResetStreamFrame != nil { return false } // The stream is completed if we sent the FIN. @@ -384,7 +388,7 @@ func (s *sendStream) isNewlyCompleted() bool { // 2. we received a STOP_SENDING, and // * the application consumed the error via Write, or // * the application called Close - if s.cancelWriteErr != nil && (s.cancellationFlagged || s.finishedWriting) { + if s.cancelled && (s.cancellationFlagged || s.finishedWriting) { s.completed = true return true } @@ -393,13 +397,13 @@ func (s *sendStream) isNewlyCompleted() bool { func (s *sendStream) Close() error { s.mutex.Lock() - if s.closeForShutdownErr != nil { + if s.closedForShutdown || s.finishedWriting { s.mutex.Unlock() return nil } s.finishedWriting = true - cancelWriteErr := s.cancelWriteErr - if cancelWriteErr != nil { + cancelled := s.cancelled + if cancelled { s.cancellationFlagged = true } completed := s.isNewlyCompleted() @@ -408,7 +412,7 @@ func (s *sendStream) Close() error { if completed { s.sender.onStreamCompleted(s.streamID) } - if cancelWriteErr != nil { + if cancelled { return fmt.Errorf("close called for canceled stream %d", s.streamID) } s.sender.onHasStreamData(s.streamID, s) // need to send the FIN, must be called without holding the mutex @@ -418,18 +422,21 @@ func (s *sendStream) Close() error { } func (s *sendStream) CancelWrite(errorCode StreamErrorCode) { - s.cancelWriteImpl(errorCode, false) + s.cancelWrite(errorCode, false) } -func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, remote bool) { +// cancelWrite cancels the stream +// It is possible to cancel a stream after it has been closed, both locally and remotely. +// This is useful to prevent the retransmission of outstanding stream data. +func (s *sendStream) cancelWrite(errorCode qerr.StreamErrorCode, remote bool) { s.mutex.Lock() - if s.closeForShutdownErr != nil { + if s.closedForShutdown { s.mutex.Unlock() return } if !remote { s.cancellationFlagged = true - if s.cancelWriteErr != nil { + if s.cancelled { completed := s.isNewlyCompleted() s.mutex.Unlock() // The user has called CancelWrite. 
If the previous cancellation was @@ -441,15 +448,20 @@ func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, remote bool return } } - if s.cancelWriteErr != nil { + if s.cancelled { s.mutex.Unlock() return } - s.cancelWriteErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote} - s.ctxCancel(s.cancelWriteErr) + s.cancelled = true + s.finalError = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote} + s.ctxCancel(s.finalError) s.numOutstandingFrames = 0 s.retransmissionQueue = nil - s.queuedResetStreamFrame = true + s.queuedResetStreamFrame = &wire.ResetStreamFrame{ + StreamID: s.streamID, + FinalSize: s.writeOffset, + ErrorCode: errorCode, + } s.mutex.Unlock() s.signalWrite() @@ -470,33 +482,23 @@ func (s *sendStream) updateSendWindow(limit protocol.ByteCount) { } func (s *sendStream) handleStopSendingFrame(frame *wire.StopSendingFrame) { - s.cancelWriteImpl(frame.ErrorCode, true) + s.cancelWrite(frame.ErrorCode, true) } -func (s *sendStream) getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) { +func (s *sendStream) getControlFrame(time.Time) (_ ackhandler.Frame, ok, hasMore bool) { s.mutex.Lock() defer s.mutex.Unlock() - if !s.queuedBlockedFrame && !s.queuedResetStreamFrame { + if s.queuedResetStreamFrame == nil { return ackhandler.Frame{}, false, false } - if s.queuedBlockedFrame { - s.queuedBlockedFrame = false - return ackhandler.Frame{ - Frame: &wire.StreamDataBlockedFrame{StreamID: s.streamID, MaximumStreamData: s.writeOffset}, - }, true, s.queuedResetStreamFrame - } - // RESET_STREAM frame - s.queuedResetStreamFrame = false s.numOutstandingFrames++ - return ackhandler.Frame{ - Frame: &wire.ResetStreamFrame{ - StreamID: s.streamID, - FinalSize: s.writeOffset, - ErrorCode: s.cancelWriteErr.ErrorCode, - }, + f := ackhandler.Frame{ + Frame: s.queuedResetStreamFrame, Handler: (*sendStreamResetStreamHandler)(s), - }, true, false + } + s.queuedResetStreamFrame = nil + return f, true, false } func (s *sendStream) Context() context.Context { @@ -516,7 +518,10 @@ func (s *sendStream) SetWriteDeadline(t time.Time) error { // The peer will NOT be informed about this: the stream is closed without sending a FIN or RST. 
func (s *sendStream) closeForShutdown(err error) { s.mutex.Lock() - s.closeForShutdownErr = err + s.closedForShutdown = true + if s.finalError == nil && !s.finishedWriting { + s.finalError = err + } s.mutex.Unlock() s.signalWrite() } @@ -537,7 +542,7 @@ func (s *sendStreamAckHandler) OnAcked(f wire.Frame) { sf := f.(*wire.StreamFrame) sf.PutBack() s.mutex.Lock() - if s.cancelWriteErr != nil { + if s.cancelled { s.mutex.Unlock() return } @@ -556,7 +561,7 @@ func (s *sendStreamAckHandler) OnAcked(f wire.Frame) { func (s *sendStreamAckHandler) OnLost(f wire.Frame) { sf := f.(*wire.StreamFrame) s.mutex.Lock() - if s.cancelWriteErr != nil { + if s.cancelled { s.mutex.Unlock() return } @@ -589,9 +594,10 @@ func (s *sendStreamResetStreamHandler) OnAcked(wire.Frame) { } } -func (s *sendStreamResetStreamHandler) OnLost(wire.Frame) { +func (s *sendStreamResetStreamHandler) OnLost(f wire.Frame) { s.mutex.Lock() - s.queuedResetStreamFrame = true + s.queuedResetStreamFrame = f.(*wire.ResetStreamFrame) + s.numOutstandingFrames-- s.mutex.Unlock() s.sender.onHasStreamControlFrame(s.streamID, (*sendStream)(s)) } diff --git a/vendor/github.com/quic-go/quic-go/server.go b/vendor/github.com/quic-go/quic-go/server.go index 0cf45aca..2bb821ab 100644 --- a/vendor/github.com/quic-go/quic-go/server.go +++ b/vendor/github.com/quic-go/quic-go/server.go @@ -72,9 +72,10 @@ type baseServer struct { tokenGenerator *handshake.TokenGenerator maxTokenAge time.Duration - connIDGenerator ConnectionIDGenerator - connHandler packetHandlerManager - onClose func() + connIDGenerator ConnectionIDGenerator + statelessResetter *statelessResetter + connHandler packetHandlerManager + onClose func() receivedPackets chan receivedPacket @@ -95,7 +96,7 @@ type baseServer struct { protocol.ConnectionID, /* destination connection ID */ protocol.ConnectionID, /* source connection ID */ ConnectionIDGenerator, - protocol.StatelessResetToken, + *statelessResetter, *Config, *tls.Config, *handshake.TokenGenerator, @@ -105,15 +106,24 @@ type baseServer struct { protocol.Version, ) quicConn - closeMx sync.Mutex - errorChan chan struct{} // is closed when the server is closed - closeErr error - running chan struct{} // closed as soon as run() returns + closeMx sync.Mutex + // errorChan is closed when Close is called. This has two effects: + // 1. it cancels handshakes that are still in flight (using CONNECTION_REFUSED) errors + // 2. it stops handling of packets passed to this server + errorChan chan struct{} + // acceptChan is closed when Close returns. + // This only happens once all handshake in flight have either completed and canceled. + // Calls to Accept will first drain the queue of connections that have completed the handshake, + // and then return ErrServerClosed. 
+ stopAccepting chan struct{} + closeErr error + running chan struct{} // closed as soon as run() returns versionNegotiationQueue chan receivedPacket invalidTokenQueue chan rejectedPacket connectionRefusedQueue chan rejectedPacket retryQueue chan rejectedPacket + handshakingCount sync.WaitGroup verifySourceAddress func(net.Addr) bool @@ -239,6 +249,7 @@ func newServer( conn rawConn, connHandler packetHandlerManager, connIDGenerator ConnectionIDGenerator, + statelessResetter *statelessResetter, connContext func(context.Context) context.Context, tlsConf *tls.Config, config *Config, @@ -259,9 +270,11 @@ func newServer( maxTokenAge: maxTokenAge, verifySourceAddress: verifySourceAddress, connIDGenerator: connIDGenerator, + statelessResetter: statelessResetter, connHandler: connHandler, connQueue: make(chan quicConn, protocol.MaxAcceptQueueSize), errorChan: make(chan struct{}), + stopAccepting: make(chan struct{}), running: make(chan struct{}), receivedPackets: make(chan receivedPacket, protocol.MaxServerUnprocessedPackets), versionNegotiationQueue: make(chan receivedPacket, 4), @@ -332,7 +345,13 @@ func (s *baseServer) accept(ctx context.Context) (quicConn, error) { return nil, ctx.Err() case conn := <-s.connQueue: return conn, nil - case <-s.errorChan: + case <-s.stopAccepting: + // first drain the queue + select { + case conn := <-s.connQueue: + return conn, nil + default: + } return nil, s.closeErr } } @@ -356,6 +375,9 @@ func (s *baseServer) close(e error, notifyOnClose bool) { if notifyOnClose { s.onClose() } + // wait until all handshakes in flight have terminated + s.handshakingCount.Wait() + close(s.stopAccepting) } // Addr returns the server's network address @@ -366,6 +388,8 @@ func (s *baseServer) Addr() net.Addr { func (s *baseServer) handlePacket(p receivedPacket) { select { case s.receivedPackets <- p: + case <-s.errorChan: + return default: s.logger.Debugf("Dropping packet from %s (%d bytes). 
Server receive queue full.", p.remoteAddr, p.Size()) if s.tracer != nil && s.tracer.DroppedPacket != nil { @@ -686,7 +710,7 @@ func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error hdr.SrcConnectionID, connID, s.connIDGenerator, - s.connHandler.GetStatelessResetToken(connID), + s.statelessResetter, config, s.tlsConf, s.tokenGenerator, @@ -713,43 +737,42 @@ func (s *baseServer) handleInitialImpl(p receivedPacket, hdr *wire.Header) error delete(s.zeroRTTQueues, hdr.DestConnectionID) } - go conn.run() + s.handshakingCount.Add(1) go func() { - if completed := s.handleNewConn(conn); !completed { - return - } - - select { - case s.connQueue <- conn: - default: - conn.closeWithTransportError(ConnectionRefused) - } + defer s.handshakingCount.Done() + s.handleNewConn(conn) }() + go conn.run() return nil } -func (s *baseServer) handleNewConn(conn quicConn) bool { +func (s *baseServer) handleNewConn(conn quicConn) { if s.acceptEarlyConns { // wait until the early connection is ready, the handshake fails, or the server is closed select { case <-s.errorChan: conn.closeWithTransportError(ConnectionRefused) - return false + return case <-conn.Context().Done(): - return false + return case <-conn.earlyConnReady(): - return true + } + } else { + // wait until the handshake completes, fails, or the server is closed + select { + case <-s.errorChan: + conn.closeWithTransportError(ConnectionRefused) + return + case <-conn.Context().Done(): + return + case <-conn.HandshakeComplete(): } } - // wait until the handshake completes, fails, or the server is closed + select { - case <-s.errorChan: + case s.connQueue <- conn: + default: conn.closeWithTransportError(ConnectionRefused) - return false - case <-conn.Context().Done(): - return false - case <-conn.HandshakeComplete(): - return true } } diff --git a/vendor/github.com/quic-go/quic-go/stateless_reset.go b/vendor/github.com/quic-go/quic-go/stateless_reset.go new file mode 100644 index 00000000..cd0059a5 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/stateless_reset.go @@ -0,0 +1,42 @@ +package quic + +import ( + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "hash" + "sync" + + "github.com/quic-go/quic-go/internal/protocol" +) + +type statelessResetter struct { + mx sync.Mutex + h hash.Hash +} + +// newStatelessRetter creates a new stateless reset generator. +// It is valid to use a nil key. In that case, a random key will be used. +// This makes is impossible for on-path attackers to shut down established connections. +func newStatelessResetter(key *StatelessResetKey) *statelessResetter { + var h hash.Hash + if key != nil { + h = hmac.New(sha256.New, key[:]) + } else { + b := make([]byte, 32) + _, _ = rand.Read(b) + h = hmac.New(sha256.New, b) + } + return &statelessResetter{h: h} +} + +func (r *statelessResetter) GetStatelessResetToken(connID protocol.ConnectionID) protocol.StatelessResetToken { + r.mx.Lock() + defer r.mx.Unlock() + + var token protocol.StatelessResetToken + r.h.Write(connID.Bytes()) + copy(token[:], r.h.Sum(nil)) + r.h.Reset() + return token +} diff --git a/vendor/github.com/quic-go/quic-go/stream.go b/vendor/github.com/quic-go/quic-go/stream.go index 1ed26323..9cd2695d 100644 --- a/vendor/github.com/quic-go/quic-go/stream.go +++ b/vendor/github.com/quic-go/quic-go/stream.go @@ -24,6 +24,7 @@ var errDeadline net.Error = &deadlineError{} // The streamSender is notified by the stream about various events. 
type streamSender interface { + onHasConnectionData() onHasStreamData(protocol.StreamID, sendStreamI) onHasStreamControlFrame(protocol.StreamID, streamControlFrameGetter) // must be called without holding the mutex that is acquired by closeForShutdown @@ -52,12 +53,12 @@ type streamI interface { Stream closeForShutdown(error) // for receiving - handleStreamFrame(*wire.StreamFrame) error - handleResetStreamFrame(*wire.ResetStreamFrame) error + handleStreamFrame(*wire.StreamFrame, time.Time) error + handleResetStreamFrame(*wire.ResetStreamFrame, time.Time) error // for sending hasData() bool handleStopSendingFrame(*wire.StopSendingFrame) - popStreamFrame(maxBytes protocol.ByteCount, v protocol.Version) (ackhandler.StreamFrame, bool, bool) + popStreamFrame(protocol.ByteCount, protocol.Version) (_ ackhandler.StreamFrame, _ *wire.StreamDataBlockedFrame, hasMore bool) updateSendWindow(protocol.ByteCount) } @@ -131,12 +132,12 @@ func (s *stream) Close() error { return s.sendStream.Close() } -func (s *stream) getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) { - f, ok, _ := s.sendStream.getControlFrame() +func (s *stream) getControlFrame(now time.Time) (_ ackhandler.Frame, ok, hasMore bool) { + f, ok, _ := s.sendStream.getControlFrame(now) if ok { return f, true, true } - return s.receiveStream.getControlFrame() + return s.receiveStream.getControlFrame(now) } func (s *stream) SetDeadline(t time.Time) error { diff --git a/vendor/github.com/quic-go/quic-go/sys_conn.go b/vendor/github.com/quic-go/quic-go/sys_conn.go index 71cc4607..811131d9 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn.go @@ -58,8 +58,8 @@ func wrapConn(pc net.PacketConn) (rawConn, error) { return nil, err } + // only set DF on UDP sockets if _, ok := pc.LocalAddr().(*net.UDPAddr); ok { - // Only set DF on sockets that we expect to be able to handle that configuration. var err error supportsDF, err = setDF(rawConn) if err != nil { diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go index b51cd8f1..8ed273ee 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_darwin.go @@ -4,47 +4,67 @@ package quic import ( "errors" + "fmt" "strconv" "strings" "syscall" "golang.org/x/sys/unix" +) - "github.com/quic-go/quic-go/internal/utils" +// for macOS versions, see https://en.wikipedia.org/wiki/Darwin_(operating_system)#Darwin_20_onwards +const ( + macOSVersion11 = 20 + macOSVersion15 = 24 ) func setDF(rawConn syscall.RawConn) (bool, error) { - // Setting DF bit is only supported from macOS11 + // Setting DF bit is only supported from macOS 11. 
// https://github.com/chromium/chromium/blob/117.0.5881.2/net/socket/udp_socket_posix.cc#L555 - if supportsDF, err := isAtLeastMacOS11(); !supportsDF || err != nil { + version, err := getMacOSVersion() + if err != nil || version < macOSVersion11 { return false, err } - // Enabling IP_DONTFRAG will force the kernel to return "sendto: message too long" - // and the datagram will not be fragmented - var errDFIPv4, errDFIPv6 error + var controlErr error + var disableDF bool if err := rawConn.Control(func(fd uintptr) { - errDFIPv4 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_DONTFRAG, 1) - errDFIPv6 = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_DONTFRAG, 1) + addr, err := unix.Getsockname(int(fd)) + if err != nil { + controlErr = fmt.Errorf("getsockname: %w", err) + return + } + + // Dual-stack sockets are effectively IPv6 sockets (with IPV6_ONLY set to 0). + // On macOS, the DF bit on dual-stack sockets is controlled by the IPV6_DONTFRAG option. + // See https://datatracker.ietf.org/doc/draft-seemann-tsvwg-udp-fragmentation/ for details. + switch addr.(type) { + case *unix.SockaddrInet4: + controlErr = unix.SetsockoptInt(int(fd), unix.IPPROTO_IP, unix.IP_DONTFRAG, 1) + case *unix.SockaddrInet6: + controlErr = unix.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_DONTFRAG, 1) + + // Setting the DF bit on dual-stack sockets works since macOS Sequoia. + // Disable DF on dual-stack sockets before Sequoia. + if version < macOSVersion15 { + // check if this is a dual-stack socket by reading the IPV6_V6ONLY flag + v6only, err := unix.GetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_V6ONLY) + if err != nil { + controlErr = fmt.Errorf("getting IPV6_V6ONLY: %w", err) + return + } + disableDF = v6only == 0 + } + default: + controlErr = fmt.Errorf("unknown address type: %T", addr) + } }); err != nil { return false, err } - switch { - case errDFIPv4 == nil && errDFIPv6 == nil: - utils.DefaultLogger.Debugf("Setting DF for IPv4 and IPv6.") - case errDFIPv4 == nil && errDFIPv6 != nil: - utils.DefaultLogger.Debugf("Setting DF for IPv4.") - case errDFIPv4 != nil && errDFIPv6 == nil: - utils.DefaultLogger.Debugf("Setting DF for IPv6.") - // On macOS, the syscall for setting DF bit for IPv4 fails on dual-stack listeners. - // Treat the connection as not having DF enabled, even though the DF bit will be set - // when used for IPv6. - // See https://github.com/quic-go/quic-go/issues/3793 for details. 
- return false, nil - case errDFIPv4 != nil && errDFIPv6 != nil: - return false, errors.New("setting DF failed for both IPv4 and IPv6") + if controlErr != nil { + return false, controlErr } - return true, nil + return !disableDF, nil } func isSendMsgSizeErr(err error) bool { @@ -53,22 +73,20 @@ func isSendMsgSizeErr(err error) bool { func isRecvMsgSizeErr(error) bool { return false } -func isAtLeastMacOS11() (bool, error) { +func getMacOSVersion() (int, error) { uname := &unix.Utsname{} - err := unix.Uname(uname) - if err != nil { - return false, err + if err := unix.Uname(uname); err != nil { + return 0, err } release := string(uname.Release[:]) - if idx := strings.Index(release, "."); idx != -1 { - version, err := strconv.Atoi(release[:idx]) - if err != nil { - return false, err - } - // Darwin version 20 is macOS version 11 - // https://en.wikipedia.org/wiki/Darwin_(operating_system)#Darwin_20_onwards - return version >= 20, nil + idx := strings.Index(release, ".") + if idx == -1 { + return 0, nil } - return false, nil + version, err := strconv.Atoi(release[:idx]) + if err != nil { + return 0, err + } + return version, nil } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go index e27635ec..4c140f00 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go @@ -12,21 +12,19 @@ import ( ) const ( - // IP_DONTFRAGMENT controls the Don't Fragment (DF) bit. - // - // It's the same code point for both IPv4 and IPv6 on Windows. - // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IP_DONTFRAG.html - // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IPV6_DONTFRAG.html - // + // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IP_DONTFRAGMENT.html //nolint:stylecheck IP_DONTFRAGMENT = 14 + // https://microsoft.github.io/windows-docs-rs/doc/windows/Win32/Networking/WinSock/constant.IPV6_DONTFRAG.html + //nolint:stylecheck + IPV6_DONTFRAG = 14 ) func setDF(rawConn syscall.RawConn) (bool, error) { var errDFIPv4, errDFIPv6 error if err := rawConn.Control(func(fd uintptr) { errDFIPv4 = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IP, IP_DONTFRAGMENT, 1) - errDFIPv6 = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IPV6, IP_DONTFRAGMENT, 1) + errDFIPv6 = windows.SetsockoptInt(windows.Handle(fd), windows.IPPROTO_IPV6, IPV6_DONTFRAG, 1) }); err != nil { return false, err } diff --git a/vendor/github.com/quic-go/quic-go/sys_conn_oob.go b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go index a6795ca2..75979682 100644 --- a/vendor/github.com/quic-go/quic-go/sys_conn_oob.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go @@ -83,7 +83,7 @@ func newConn(c OOBCapablePacketConn, supportsDF bool) (*oobConn, error) { if err != nil { return nil, err } - needsPacketInfo := false + var needsPacketInfo bool if udpAddr, ok := c.LocalAddr().(*net.UDPAddr); ok && udpAddr.IP.IsUnspecified() { needsPacketInfo = true } diff --git a/vendor/github.com/quic-go/quic-go/transport.go b/vendor/github.com/quic-go/quic-go/transport.go index 059f30f5..41dbc7ab 100644 --- a/vendor/github.com/quic-go/quic-go/transport.go +++ b/vendor/github.com/quic-go/quic-go/transport.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "crypto/tls" "errors" + "fmt" "net" "sync" "sync/atomic" @@ -16,6 +17,27 @@ import ( 
"github.com/quic-go/quic-go/logging" ) +// ErrTransportClosed is returned by the Transport's Listen or Dial method after it was closed. +var ErrTransportClosed = &errTransportClosed{} + +type errTransportClosed struct { + err error +} + +func (e *errTransportClosed) Unwrap() []error { return []error{net.ErrClosed, e.err} } + +func (e *errTransportClosed) Error() string { + if e.err == nil { + return "quic: transport closed" + } + return fmt.Sprintf("quic: transport closed: %s", e.err) +} + +func (e *errTransportClosed) Is(target error) bool { + _, ok := target.(*errTransportClosed) + return ok +} + var errListenerAlreadySet = errors.New("listener already set") // The Transport is the central point to manage incoming and outgoing QUIC connections. @@ -115,7 +137,8 @@ type Transport struct { connIDLen int // Set in init. // If no ConnectionIDGenerator is set, this is set to a default. - connIDGenerator ConnectionIDGenerator + connIDGenerator ConnectionIDGenerator + statelessResetter *statelessResetter server *baseServer @@ -125,7 +148,7 @@ type Transport struct { statelessResetQueue chan receivedPacket listening chan struct{} // is closed when listen returns - closed bool + closeErr error createdConn bool isSingleUse bool // was created for a single server or client, i.e. by calling quic.Listen or quic.Dial @@ -168,6 +191,9 @@ func (t *Transport) createServer(tlsConf *tls.Config, conf *Config, allow0RTT bo t.mutex.Lock() defer t.mutex.Unlock() + if t.closeErr != nil { + return nil, t.closeErr + } if t.server != nil { return nil, errListenerAlreadySet } @@ -175,17 +201,22 @@ func (t *Transport) createServer(tlsConf *tls.Config, conf *Config, allow0RTT bo if err := t.init(false); err != nil { return nil, err } + maxTokenAge := t.MaxTokenAge + if maxTokenAge == 0 { + maxTokenAge = 24 * time.Hour + } s := newServer( t.conn, t.handlerMap, t.connIDGenerator, + t.statelessResetter, t.ConnContext, tlsConf, conf, t.Tracer, t.closeServer, *t.TokenGeneratorKey, - t.MaxTokenAge, + maxTokenAge, t.VerifySourceAddress, t.DisableVersionNegotiationPackets, allow0RTT, @@ -205,20 +236,137 @@ func (t *Transport) DialEarly(ctx context.Context, addr net.Addr, tlsConf *tls.C } func (t *Transport) dial(ctx context.Context, addr net.Addr, host string, tlsConf *tls.Config, conf *Config, use0RTT bool) (EarlyConnection, error) { + if err := t.init(t.isSingleUse); err != nil { + return nil, err + } if err := validateConfig(conf); err != nil { return nil, err } conf = populateConfig(conf) - if err := t.init(t.isSingleUse); err != nil { - return nil, err - } - var onClose func() - if t.isSingleUse { - onClose = func() { t.Close() } - } tlsConf = tlsConf.Clone() setTLSConfigServerName(tlsConf, addr, host) - return dial(ctx, newSendConn(t.conn, addr, packetInfo{}, utils.DefaultLogger), t.connIDGenerator, t.handlerMap, tlsConf, conf, onClose, use0RTT) + return t.doDial(ctx, + newSendConn(t.conn, addr, packetInfo{}, utils.DefaultLogger), + tlsConf, + conf, + 0, + false, + use0RTT, + conf.Versions[0], + ) +} + +func (t *Transport) doDial( + ctx context.Context, + sendConn sendConn, + tlsConf *tls.Config, + config *Config, + initialPacketNumber protocol.PacketNumber, + hasNegotiatedVersion bool, + use0RTT bool, + version protocol.Version, +) (quicConn, error) { + srcConnID, err := t.connIDGenerator.GenerateConnectionID() + if err != nil { + return nil, err + } + destConnID, err := generateConnectionIDForInitial() + if err != nil { + return nil, err + } + + tracingID := nextConnTracingID() + ctx = context.WithValue(ctx, 
ConnectionTracingKey, tracingID) + + t.mutex.Lock() + if t.closeErr != nil { + t.mutex.Unlock() + return nil, t.closeErr + } + + var tracer *logging.ConnectionTracer + if config.Tracer != nil { + tracer = config.Tracer(ctx, protocol.PerspectiveClient, destConnID) + } + if tracer != nil && tracer.StartedConnection != nil { + tracer.StartedConnection(sendConn.LocalAddr(), sendConn.RemoteAddr(), srcConnID, destConnID) + } + + logger := utils.DefaultLogger.WithPrefix("client") + logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", tlsConf.ServerName, sendConn.LocalAddr(), sendConn.RemoteAddr(), srcConnID, destConnID, version) + + conn := newClientConnection( + context.WithoutCancel(ctx), + sendConn, + t.handlerMap, + destConnID, + srcConnID, + t.connIDGenerator, + t.statelessResetter, + config, + tlsConf, + initialPacketNumber, + use0RTT, + hasNegotiatedVersion, + tracer, + logger, + version, + ) + t.handlerMap.Add(srcConnID, conn) + t.mutex.Unlock() + + // The error channel needs to be buffered, as the run loop will continue running + // after doDial returns (if the handshake is successful). + errChan := make(chan error, 1) + recreateChan := make(chan errCloseForRecreating) + go func() { + err := conn.run() + var recreateErr *errCloseForRecreating + if errors.As(err, &recreateErr) { + recreateChan <- *recreateErr + return + } + if t.isSingleUse { + t.Close() + } + errChan <- err + }() + + // Only set when we're using 0-RTT. + // Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever. + var earlyConnChan <-chan struct{} + if use0RTT { + earlyConnChan = conn.earlyConnReady() + } + + select { + case <-ctx.Done(): + conn.destroy(nil) + // wait until the Go routine that called Connection.run() returns + select { + case <-errChan: + case <-recreateChan: + } + return nil, context.Cause(ctx) + case params := <-recreateChan: + return t.doDial(ctx, + sendConn, + tlsConf, + config, + params.nextPacketNumber, + true, + use0RTT, + params.nextVersion, + ) + case err := <-errChan: + return nil, err + case <-earlyConnChan: + // ready to send 0-RTT data + return conn, nil + case <-conn.HandshakeComplete(): + // handshake successfully completed + return conn, nil + } } func (t *Transport) init(allowZeroLengthConnIDs bool) error { @@ -237,7 +385,9 @@ func (t *Transport) init(allowZeroLengthConnIDs bool) error { t.logger = utils.DefaultLogger // TODO: make this configurable t.conn = conn - t.handlerMap = newPacketHandlerMap(t.StatelessResetKey, t.enqueueClosePacket, t.logger) + if t.handlerMap == nil { // allows mocking the handlerMap in tests + t.handlerMap = newPacketHandlerMap(t.enqueueClosePacket, t.logger) + } t.listening = make(chan struct{}) t.closeQueue = make(chan closePacket, 4) @@ -262,8 +412,8 @@ func (t *Transport) init(allowZeroLengthConnIDs bool) error { t.connIDLen = connIDLen t.connIDGenerator = &protocol.DefaultConnectionIDGenerator{ConnLen: t.connIDLen} } + t.statelessResetter = newStatelessResetter(t.StatelessResetKey) - getMultiplexer().AddConn(t.Conn) go t.listen(conn) go t.runSendQueue() }) @@ -300,11 +450,14 @@ func (t *Transport) runSendQueue() { } } -// Close closes the underlying connection. +// Close stops listening for UDP datagrams on the Transport.Conn. // If any listener was started, it will be closed as well. // It is invalid to start new listeners or connections after that. 
func (t *Transport) Close() error { - t.close(errors.New("closing")) + // avoid race condition if the transport is currently being initialized + t.init(false) + + t.close(nil) if t.createdConn { if err := t.Conn.Close(); err != nil { return err @@ -323,7 +476,7 @@ func (t *Transport) closeServer() { t.mutex.Lock() t.server = nil if t.isSingleUse { - t.closed = true + t.closeErr = ErrServerClosed } t.mutex.Unlock() if t.createdConn { @@ -339,10 +492,12 @@ func (t *Transport) closeServer() { func (t *Transport) close(e error) { t.mutex.Lock() defer t.mutex.Unlock() - if t.closed { + + if t.closeErr != nil { return } + e = &errTransportClosed{err: e} if t.handlerMap != nil { t.handlerMap.Close(e) } @@ -352,7 +507,7 @@ func (t *Transport) close(e error) { if t.Tracer != nil && t.Tracer.Close != nil { t.Tracer.Close() } - t.closed = true + t.closeErr = e } // only print warnings about the UDP receive buffer size once @@ -360,7 +515,6 @@ var setBufferWarningOnce sync.Once func (t *Transport) listen(conn rawConn) { defer close(t.listening) - defer getMultiplexer().RemoveConn(t.Conn) for { p, err := conn.ReadPacket() @@ -370,7 +524,7 @@ func (t *Transport) listen(conn rawConn) { // See https://github.com/quic-go/quic-go/issues/1737 for details. if nerr, ok := err.(net.Error); ok && nerr.Temporary() { t.mutex.Lock() - closed := t.closed + closed := t.closeErr != nil t.mutex.Unlock() if closed { return @@ -424,7 +578,12 @@ func (t *Transport) handlePacket(p receivedPacket) { return } if !wire.IsLongHeaderPacket(p.data[0]) { - t.maybeSendStatelessReset(p) + if statelessResetQueued := t.maybeSendStatelessReset(p); !statelessResetQueued { + if t.Tracer != nil && t.Tracer.DroppedPacket != nil { + t.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnknownConnectionID) + } + p.buffer.Release() + } return } @@ -432,29 +591,32 @@ func (t *Transport) handlePacket(p receivedPacket) { defer t.mutex.Unlock() if t.server == nil { // no server set t.logger.Debugf("received a packet with an unexpected connection ID %s", connID) + if t.Tracer != nil && t.Tracer.DroppedPacket != nil { + t.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnknownConnectionID) + } + p.buffer.MaybeRelease() return } t.server.handlePacket(p) } -func (t *Transport) maybeSendStatelessReset(p receivedPacket) { +func (t *Transport) maybeSendStatelessReset(p receivedPacket) (statelessResetQueued bool) { if t.StatelessResetKey == nil { - p.buffer.Release() - return + return false } // Don't send a stateless reset in response to very small packets. // This includes packets that could be stateless resets. if len(p.data) <= protocol.MinStatelessResetSize { - p.buffer.Release() - return + return false } select { case t.statelessResetQueue <- p: + return true default: // it's fine to not send a stateless reset when we're busy - p.buffer.Release() + return false } } @@ -466,7 +628,7 @@ func (t *Transport) sendStatelessReset(p receivedPacket) { t.logger.Errorf("error parsing connection ID on packet from %s: %s", p.remoteAddr, err) return } - token := t.handlerMap.GetStatelessResetToken(connID) + token := t.statelessResetter.GetStatelessResetToken(connID) t.logger.Debugf("Sending stateless reset to %s (connection ID: %s). 
Token: %#x", p.remoteAddr, connID, token) data := make([]byte, protocol.MinStatelessResetSize-16, protocol.MinStatelessResetSize) rand.Read(data) @@ -489,7 +651,7 @@ func (t *Transport) maybeHandleStatelessReset(data []byte) bool { token := *(*protocol.StatelessResetToken)(data[len(data)-16:]) if conn, ok := t.handlerMap.GetByResetToken(token); ok { t.logger.Debugf("Received a stateless reset with token %#x. Closing connection.", token) - go conn.destroy(&StatelessResetError{Token: token}) + go conn.destroy(&StatelessResetError{}) return true } return false diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 2a7cf70d..6a66aea5 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright 2009 The Go Authors. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google LLC nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 2a7cf70d..6a66aea5 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright 2009 The Go Authors. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google LLC nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index de1b9821..22056825 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -226,9 +226,8 @@ func (x *FileSyntax) Cleanup() { continue } if ww == 1 && len(stmt.RParen.Comments.Before) == 0 { - // Collapse block into single line but keep the Line reference used by the - // parsed File structure. - *stmt.Line[0] = Line{ + // Collapse block into single line. 
+ line := &Line{ Comments: Comments{ Before: commentsAdd(stmt.Before, stmt.Line[0].Before), Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix), @@ -236,7 +235,7 @@ func (x *FileSyntax) Cleanup() { }, Token: stringsAdd(stmt.Token, stmt.Line[0].Token), } - x.Stmt[w] = stmt.Line[0] + x.Stmt[w] = line w++ continue } diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index 3e4a1d0a..66dcaf98 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -43,7 +43,6 @@ type File struct { Exclude []*Exclude Replace []*Replace Retract []*Retract - Tool []*Tool Syntax *FileSyntax } @@ -94,12 +93,6 @@ type Retract struct { Syntax *Line } -// A Tool is a single tool statement. -type Tool struct { - Path string - Syntax *Line -} - // A VersionInterval represents a range of versions with upper and lower bounds. // Intervals are closed: both bounds are included. When Low is equal to High, // the interval may refer to a single version ('v1.2.3') or an interval @@ -304,7 +297,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse }) } continue - case "module", "godebug", "require", "exclude", "replace", "retract", "tool": + case "module", "godebug", "require", "exclude", "replace", "retract": for _, l := range x.Line { f.add(&errs, x, l, x.Token[0], l.Token, fix, strict) } @@ -516,21 +509,6 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a Syntax: line, } f.Retract = append(f.Retract, retract) - - case "tool": - if len(args) != 1 { - errorf("tool directive expects exactly one argument") - return - } - s, err := parseString(&args[0]) - if err != nil { - errorf("invalid quoted string: %v", err) - return - } - f.Tool = append(f.Tool, &Tool{ - Path: s, - Syntax: line, - }) } } @@ -1589,36 +1567,6 @@ func (f *File) DropRetract(vi VersionInterval) error { return nil } -// AddTool adds a new tool directive with the given path. -// It does nothing if the tool line already exists. -func (f *File) AddTool(path string) error { - for _, t := range f.Tool { - if t.Path == path { - return nil - } - } - - f.Tool = append(f.Tool, &Tool{ - Path: path, - Syntax: f.Syntax.addLine(nil, "tool", path), - }) - - f.SortBlocks() - return nil -} - -// RemoveTool removes a tool directive with the given path. -// It does nothing if no such tool directive exists. -func (f *File) DropTool(path string) error { - for _, t := range f.Tool { - if t.Path == path { - t.Syntax.markRemoved() - *t = Tool{} - } - } - return nil -} - func (f *File) SortBlocks() { f.removeDups() // otherwise sorting is unsafe @@ -1645,9 +1593,9 @@ func (f *File) SortBlocks() { } } -// removeDups removes duplicate exclude, replace and tool directives. +// removeDups removes duplicate exclude and replace directives. // -// Earlier exclude and tool directives take priority. +// Earlier exclude directives take priority. // // Later replace directives take priority. // @@ -1657,10 +1605,10 @@ func (f *File) SortBlocks() { // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. func (f *File) removeDups() { - removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool) + removeDups(f.Syntax, &f.Exclude, &f.Replace) } -func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) { +func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) { kill := make(map[*Line]bool) // Remove duplicate excludes. 
@@ -1701,24 +1649,6 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, to } *replace = repl - if tool != nil { - haveTool := make(map[string]bool) - for _, t := range *tool { - if haveTool[t.Path] { - kill[t.Syntax] = true - continue - } - haveTool[t.Path] = true - } - var newTool []*Tool - for _, t := range *tool { - if !kill[t.Syntax] { - newTool = append(newTool, t) - } - } - *tool = newTool - } - // Duplicate require and retract directives are not removed. // Drop killed statements from the syntax tree. diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go index 5387d0c2..8f54897c 100644 --- a/vendor/golang.org/x/mod/modfile/work.go +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -331,5 +331,5 @@ func (f *WorkFile) SortBlocks() { // retract directives are not de-duplicated since comments are // meaningful, and versions may be retracted multiple times. func (f *WorkFile) removeDups() { - removeDups(f.Syntax, nil, &f.Replace, nil) + removeDups(f.Syntax, nil, &f.Replace) } diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 2a364b22..cac1a899 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -506,6 +506,7 @@ var badWindowsNames = []string{ "PRN", "AUX", "NUL", + "COM0", "COM1", "COM2", "COM3", @@ -515,6 +516,7 @@ var badWindowsNames = []string{ "COM7", "COM8", "COM9", + "LPT0", "LPT1", "LPT2", "LPT3", diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 2a7cf70d..6a66aea5 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright 2009 The Go Authors. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google LLC nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go deleted file mode 100644 index 47a9a541..00000000 --- a/vendor/golang.org/x/tools/cover/profile.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cover provides support for parsing coverage profiles -// generated by "go test -coverprofile=cover.out". -package cover // import "golang.org/x/tools/cover" - -import ( - "bufio" - "errors" - "fmt" - "io" - "math" - "os" - "sort" - "strconv" - "strings" -) - -// Profile represents the profiling data for a specific file. -type Profile struct { - FileName string - Mode string - Blocks []ProfileBlock -} - -// ProfileBlock represents a single block of profiling data. 
-type ProfileBlock struct { - StartLine, StartCol int - EndLine, EndCol int - NumStmt, Count int -} - -type byFileName []*Profile - -func (p byFileName) Len() int { return len(p) } -func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } -func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// ParseProfiles parses profile data in the specified file and returns a -// Profile for each source file described therein. -func ParseProfiles(fileName string) ([]*Profile, error) { - pf, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer pf.Close() - return ParseProfilesFromReader(pf) -} - -// ParseProfilesFromReader parses profile data from the Reader and -// returns a Profile for each source file described therein. -func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { - // First line is "mode: foo", where foo is "set", "count", or "atomic". - // Rest of file is in the format - // encoding/base64/base64.go:34.44,37.40 3 1 - // where the fields are: name.go:line.column,line.column numberOfStatements count - files := make(map[string]*Profile) - s := bufio.NewScanner(rd) - mode := "" - for s.Scan() { - line := s.Text() - if mode == "" { - const p = "mode: " - if !strings.HasPrefix(line, p) || line == p { - return nil, fmt.Errorf("bad mode line: %v", line) - } - mode = line[len(p):] - continue - } - fn, b, err := parseLine(line) - if err != nil { - return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) - } - p := files[fn] - if p == nil { - p = &Profile{ - FileName: fn, - Mode: mode, - } - files[fn] = p - } - p.Blocks = append(p.Blocks, b) - } - if err := s.Err(); err != nil { - return nil, err - } - for _, p := range files { - sort.Sort(blocksByStart(p.Blocks)) - // Merge samples from the same location. - j := 1 - for i := 1; i < len(p.Blocks); i++ { - b := p.Blocks[i] - last := p.Blocks[j-1] - if b.StartLine == last.StartLine && - b.StartCol == last.StartCol && - b.EndLine == last.EndLine && - b.EndCol == last.EndCol { - if b.NumStmt != last.NumStmt { - return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) - } - if mode == "set" { - p.Blocks[j-1].Count |= b.Count - } else { - p.Blocks[j-1].Count += b.Count - } - continue - } - p.Blocks[j] = b - j++ - } - p.Blocks = p.Blocks[:j] - } - // Generate a sorted slice. - profiles := make([]*Profile, 0, len(files)) - for _, profile := range files { - profiles = append(profiles, profile) - } - sort.Sort(byFileName(profiles)) - return profiles, nil -} - -// parseLine parses a line from a coverage file. 
-// It is equivalent to the regex -// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$ -// -// However, it is much faster: https://golang.org/cl/179377 -func parseLine(l string) (fileName string, block ProfileBlock, err error) { - end := len(l) - - b := ProfileBlock{} - b.Count, end, err = seekBack(l, ' ', end, "Count") - if err != nil { - return "", b, err - } - b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt") - if err != nil { - return "", b, err - } - b.EndCol, end, err = seekBack(l, '.', end, "EndCol") - if err != nil { - return "", b, err - } - b.EndLine, end, err = seekBack(l, ',', end, "EndLine") - if err != nil { - return "", b, err - } - b.StartCol, end, err = seekBack(l, '.', end, "StartCol") - if err != nil { - return "", b, err - } - b.StartLine, end, err = seekBack(l, ':', end, "StartLine") - if err != nil { - return "", b, err - } - fn := l[0:end] - if fn == "" { - return "", b, errors.New("a FileName cannot be blank") - } - return fn, b, nil -} - -// seekBack searches backwards from end to find sep in l, then returns the -// value between sep and end as an integer. -// If seekBack fails, the returned error will reference what. -func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) { - // Since we're seeking backwards and we know only ASCII is legal for these values, - // we can ignore the possibility of non-ASCII characters. - for start := end - 1; start >= 0; start-- { - if l[start] == sep { - i, err := strconv.Atoi(l[start+1 : end]) - if err != nil { - return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err) - } - if i < 0 { - return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i) - } - return i, start, nil - } - } - return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what) -} - -type blocksByStart []ProfileBlock - -func (b blocksByStart) Len() int { return len(b) } -func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b blocksByStart) Less(i, j int) bool { - bi, bj := b[i], b[j] - return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol -} - -// Boundary represents the position in a source file of the beginning or end of a -// block as reported by the coverage profile. In HTML mode, it will correspond to -// the opening or closing of a tag and will be used to colorize the source -type Boundary struct { - Offset int // Location as a byte offset in the source file. - Start bool // Is this the start of a block? - Count int // Event count from the cover profile. - Norm float64 // Count normalized to [0..1]. - Index int // Order in input file. -} - -// Boundaries returns a Profile as a set of Boundary objects within the provided src. -func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { - // Find maximum count. - max := 0 - for _, b := range p.Blocks { - if b.Count > max { - max = b.Count - } - } - // Divisor for normalization. - divisor := math.Log(float64(max)) - - // boundary returns a Boundary, populating the Norm field with a normalized Count. - index := 0 - boundary := func(offset int, start bool, count int) Boundary { - b := Boundary{Offset: offset, Start: start, Count: count, Index: index} - index++ - if !start || count == 0 { - return b - } - if max <= 1 { - b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. - } else if count > 0 { - b.Norm = math.Log(float64(count)) / divisor - } - return b - } - - line, col := 1, 2 // TODO: Why is this 2? 
- for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { - b := p.Blocks[bi] - if b.StartLine == line && b.StartCol == col { - boundaries = append(boundaries, boundary(si, true, b.Count)) - } - if b.EndLine == line && b.EndCol == col || line > b.EndLine { - boundaries = append(boundaries, boundary(si, false, 0)) - bi++ - continue // Don't advance through src; maybe the next block starts here. - } - if src[si] == '\n' { - line++ - col = 0 - } - col++ - si++ - } - sort.Sort(boundariesByPos(boundaries)) - return -} - -type boundariesByPos []Boundary - -func (b boundariesByPos) Len() int { return len(b) } -func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b boundariesByPos) Less(i, j int) bool { - if b[i].Offset == b[j].Offset { - // Boundaries at the same offset should be ordered according to - // their original position. - return b[i].Index < b[j].Index - } - return b[i].Offset < b[j].Offset -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 6e34df46..2c4c4e23 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -106,21 +106,8 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // Does augmented child strictly contain [start, end)? if augPos <= start && end <= augEnd { - if is[tokenNode](child) { - return true - } - - // childrenOf elides the FuncType node beneath FuncDecl. - // Add it back here for TypeParams, Params, Results, - // all FieldLists). But we don't add it back for the "func" token - // even though it is is the tree at FuncDecl.Type.Func. - if decl, ok := node.(*ast.FuncDecl); ok { - if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { - path = append(path, decl.Type) - } - } - - return visit(child) + _, isToken := child.(tokenNode) + return isToken || visit(child) } // Does [start, end) overlap multiple children? @@ -326,8 +313,6 @@ func childrenOf(n ast.Node) []ast.Node { // // As a workaround, we inline the case for FuncType // here and order things correctly. - // We also need to insert the elided FuncType just - // before the 'visit' recursion. // children = nil // discard ast.Walk(FuncDecl) info subtrees children = append(children, tok(n.Type.Func, len("func"))) @@ -647,8 +632,3 @@ func NodeDescription(n ast.Node) string { } panic(fmt.Sprintf("unexpected node type: %T", n)) } - -func is[T any](x any) bool { - _, ok := x.(T) - return ok -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index a6b5ed0a..18d1adb0 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -344,12 +344,7 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r } // UsesImport reports whether a given import is used. -// The provided File must have been parsed with syntactic object resolution -// (not using go/parser.SkipObjectResolution). 
func UsesImport(f *ast.File, path string) (used bool) { - if f.Scope == nil { - panic("file f was not parsed with syntactic object resolution") - } spec := importSpec(f, path) if spec == nil { return diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index ca71e3e1..919d5305 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -7,5 +7,12 @@ package astutil import "go/ast" // Unparen returns e with any enclosing parentheses stripped. -// Deprecated: use [ast.Unparen]. -func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index cfda8934..1fc1de0b 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -36,7 +36,6 @@ package inspector import ( "go/ast" - _ "unsafe" ) // An Inspector provides methods for inspecting @@ -45,9 +44,6 @@ type Inspector struct { events []event } -//go:linkname events -func events(in *Inspector) []event { return in.events } - // New returns an Inspector for the specified syntax trees. func New(files []*ast.File) *Inspector { return &Inspector{traverse(files)} @@ -56,10 +52,9 @@ func New(files []*ast.File) *Inspector { // An event represents a push or a pop // of an ast.Node during a traversal. type event struct { - node ast.Node - typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events - index int32 // index of corresponding push or pop event - parent int32 // index of parent's push node (defined for push nodes only) + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int // index of corresponding push or pop event } // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). @@ -78,17 +73,8 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // check, Preorder is almost twice as fast as Nodes. The two // features seem to contribute similar slowdowns (~1.4x each). - // This function is equivalent to the PreorderSeq call below, - // but to avoid the additional dynamic call (which adds 13-35% - // to the benchmarks), we expand it out. - // - // in.PreorderSeq(types...)(func(n ast.Node) bool { - // f(n) - // return true - // }) - mask := maskOf(types) - for i := int32(0); i < int32(len(in.events)); { + for i := 0; i < len(in.events); { ev := in.events[i] if ev.index > i { // push @@ -118,7 +104,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // matches an element of the types slice. 
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { mask := maskOf(types) - for i := int32(0); i < int32(len(in.events)); { + for i := 0; i < len(in.events); { ev := in.events[i] if ev.index > i { // push @@ -152,7 +138,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node - for i := int32(0); i < int32(len(in.events)); { + for i := 0; i < len(in.events); { ev := in.events[i] if ev.index > i { // push @@ -185,9 +171,7 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s // traverse builds the table of events representing a traversal. func traverse(files []*ast.File) []event { // Preallocate approximate number of events - // based on source file extent of the declarations. - // (We use End-Pos not FileStart-FileEnd to neglect - // the effect of long doc comments.) + // based on source file extent. // This makes traverse faster by 4x (!). var extent int for _, f := range files { @@ -201,24 +185,18 @@ func traverse(files []*ast.File) []event { events := make([]event, 0, capacity) var stack []event - stack = append(stack, event{index: -1}) // include an extra event so file nodes have a parent + stack = append(stack, event{}) // include an extra event so file nodes have a parent for _, f := range files { ast.Inspect(f, func(n ast.Node) bool { if n != nil { // push ev := event{ - node: n, - typ: 0, // temporarily used to accumulate type bits of subtree - index: int32(len(events)), // push event temporarily holds own index - parent: stack[len(stack)-1].index, + node: n, + typ: 0, // temporarily used to accumulate type bits of subtree + index: len(events), // push event temporarily holds own index } stack = append(stack, ev) events = append(events, ev) - - // 2B nodes ought to be enough for anyone! - if int32(len(events)) < 0 { - panic("event index exceeded int32") - } } else { // pop top := len(stack) - 1 @@ -227,9 +205,9 @@ func traverse(files []*ast.File) []event { push := ev.index parent := top - 1 - events[push].typ = typ // set type of push - stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. - events[push].index = int32(len(events)) // make push refer to pop + events[push].typ = typ // set type of push + stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. + events[push].index = len(events) // make push refer to pop stack = stack[:top] events = append(events, ev) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go deleted file mode 100644 index c576dc70..00000000 --- a/vendor/golang.org/x/tools/go/ast/inspector/iter.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.23 - -package inspector - -import ( - "go/ast" - "iter" -) - -// PreorderSeq returns an iterator that visits all the -// nodes of the files supplied to New in depth-first order. -// It visits each node n before n's children. -// The complete traversal sequence is determined by ast.Inspect. -// -// The types argument, if non-empty, enables type-based -// filtering of events: only nodes whose type matches an -// element of the types slice are included in the sequence. 
-func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { - - // This implementation is identical to Preorder, - // except that it supports breaking out of the loop. - - return func(yield func(ast.Node) bool) { - mask := maskOf(types) - for i := int32(0); i < int32(len(in.events)); { - ev := in.events[i] - if ev.index > i { - // push - if ev.typ&mask != 0 { - if !yield(ev.node) { - break - } - } - pop := ev.index - if in.events[pop].typ&mask == 0 { - // Subtrees do not contain types: skip them and pop. - i = pop + 1 - continue - } - } - i++ - } - } -} - -// All[N] returns an iterator over all the nodes of type N. -// N must be a pointer-to-struct type that implements ast.Node. -// -// Example: -// -// for call := range All[*ast.CallExpr](in) { ... } -func All[N interface { - *S - ast.Node -}, S any](in *Inspector) iter.Seq[N] { - - // To avoid additional dynamic call overheads, - // we duplicate rather than call the logic of PreorderSeq. - - mask := typeOf((N)(nil)) - return func(yield func(N) bool) { - for i := int32(0); i < int32(len(in.events)); { - ev := in.events[i] - if ev.index > i { - // push - if ev.typ&mask != 0 { - if !yield(ev.node.(N)) { - break - } - } - pop := ev.index - if in.events[pop].typ&mask == 0 { - // Subtrees do not contain types: skip them and pop. - i = pop + 1 - continue - } - } - i++ - } - } -} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 40b1bfd7..2a872f89 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - _ "unsafe" ) const ( @@ -217,7 +215,6 @@ func typeOf(n ast.Node) uint64 { return 0 } -//go:linkname maskOf func maskOf(nodes []ast.Node) uint64 { if nodes == nil { return math.MaxUint64 // match all node types diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 65fe2628..137cc8df 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -2,64 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package gcexportdata provides functions for reading and writing -// export data, which is a serialized description of the API of a Go -// package including the names, kinds, types, and locations of all -// exported declarations. +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. // -// The standard Go compiler (cmd/compile) writes an export data file -// for each package it compiles, which it later reads when compiling -// packages that import the earlier one. The compiler must thus -// contain logic to both write and read export data. -// (See the "Export" section in the cmd/compile/README file.) -// -// The [Read] function in this package can read files produced by the -// compiler, producing [go/types] data structures. As a matter of -// policy, Read supports export data files produced by only the last -// two Go releases plus tip; see https://go.dev/issue/68898. 
The -// export data files produced by the compiler contain additional -// details related to generics, inlining, and other optimizations that -// cannot be decoded by the [Read] function. -// -// In files written by the compiler, the export data is not at the -// start of the file. Before calling Read, use [NewReader] to locate -// the desired portion of the file. -// -// The [Write] function in this package encodes the exported API of a -// Go package ([types.Package]) as a file. Such files can be later -// decoded by Read, but cannot be consumed by the compiler. -// -// # Future changes -// -// Although Read supports the formats written by both Write and the -// compiler, the two are quite different, and there is an open -// proposal (https://go.dev/issue/69491) to separate these APIs. -// -// Under that proposal, this package would ultimately provide only the -// Read operation for compiler export data, which must be defined in -// this module (golang.org/x/tools), not in the standard library, to -// avoid version skew for developer tools that need to read compiler -// export data both before and after a Go release, such as from Go -// 1.23 to Go 1.24. Because this package lives in the tools module, -// clients can update their version of the module some time before the -// Go 1.24 release and rebuild and redeploy their tools, which will -// then be able to consume both Go 1.23 and Go 1.24 export data files, -// so they will work before and after the Go update. (See discussion -// at https://go.dev/issue/15651.) -// -// The operations to import and export [go/types] data structures -// would be defined in the go/types package as Import and Export. -// [Write] would (eventually) delegate to Export, -// and [Read], when it detects a file produced by Export, -// would delegate to Import. -// -// # Deprecations -// -// The [NewImporter] and [Find] functions are deprecated and should -// not be used in new code. The [WriteBundle] and [ReadBundle] -// functions are experimental, and there is an open proposal to -// deprecate them (https://go.dev/issue/69573). -package gcexportdata +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" import ( "bufio" @@ -106,18 +64,24 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) - size, err := gcimporter.FindExportData(buf) + _, size, err := gcimporter.FindExportData(buf) if err != nil { return nil, err } - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil + if size >= 0 { + // We were given an archive and found the __.PKGDEF in it. 
+ // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil + } else { + // We were given an object file. As such, we don't know how large + // the export data is and must return the entire file. + return buf, nil + } } // readAll works the same way as io.ReadAll, but avoids allocations and copies @@ -136,11 +100,6 @@ func readAll(r io.Reader) ([]byte, error) { // Read reads export data from in, decodes it, and returns type // information for the package. // -// Read is capable of reading export data produced by [Write] at the -// same source code version, or by the last two Go releases (plus tip) -// of the standard Go compiler. Reading files from older compilers may -// produce an error. -// // The package path (effectively its linker symbol prefix) is // specified by path, since unlike the package name, this information // may not be recorded in the export data. @@ -169,26 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': - // binary, produced by cmd/compile till go1.10 + case 'v', 'c', 'd': // binary, till go1.10 return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': - // indexed, produced by cmd/compile till go1.19, - // and also by [Write]. - // - // If proposal #69491 is accepted, go/types - // serialization will be implemented by - // types.Export, to which Write would eventually - // delegate (explicitly dropping any pretence at - // inter-version Write-Read compatibility). - // This [Read] function would delegate to types.Import - // when it detects that the file was produced by Export. + case 'i': // indexed, till go1.19 _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': - // unified, produced by cmd/compile since go1.20 + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 00000000..c6e7c0d4 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "context" + "fmt" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +// TODO(adonovan): move back into go/packages. +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? 
+ inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return "", "", enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr + } else { + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. + return "", "", rawErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return compiler, goarch, nil +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index f1931d10..3531ac8f 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in [LoadFiles] mode, collecting minimal information. +causes Load to run in LoadFiles mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to Load, so that it can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. # The driver protocol -Load may be used to load Go packages even in Go projects that use +[Load] may be used to load Go packages even in Go projects that use alternative build systems, by installing an appropriate "driver" program for the build system and specifying its location in the GOPACKAGESDRIVER environment variable. @@ -97,15 +97,6 @@ JSON-encoded [DriverRequest] message providing additional information is written to the driver's standard input. The driver must write a JSON-encoded [DriverResponse] message to its standard output. (This message differs from the JSON schema produced by 'go list'.) - -The value of the PWD environment variable seen by the driver process -is the preferred name of its working directory. (The working directory -may have other aliases due to symbolic links; see the comment on the -Dir field of [exec.Cmd] for related information.) -When the driver process emits in its response the name of a file -that is a descendant of this directory, it must use an absolute path -that has the value of PWD as a prefix, to ensure that the returned -filenames satisfy the original query. */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 91bd62e8..c2b4b711 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -13,7 +13,6 @@ import ( "fmt" "os" "os/exec" - "slices" "strings" ) @@ -80,10 +79,10 @@ type DriverResponse struct { // driver is the type for functions that query the build system for the // packages named by the patterns. 
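// GetSizesForArgsGolist above runs `go list -f "{{context.GOARCH}} {{context.Compiler}}" -- unsafe`
// and splits the two fields out of stdout. The same parsing step in isolation, with the
// command output hard-coded as an assumption (a typical linux/amd64 host prints "amd64 gc"):

package main

import (
	"fmt"
	"go/types"
	"strings"
)

func main() {
	stdout := "amd64 gc" // assumed output of the `go list` template query
	fields := strings.Fields(stdout)
	if len(fields) < 2 {
		panic(fmt.Sprintf("could not parse GOARCH and compiler from %q", stdout))
	}
	goarch, compiler := fields[0], fields[1]

	// These two values are what go/types needs to compute architecture-dependent sizes.
	sizes := types.SizesFor(compiler, goarch)
	fmt.Println(sizes.Sizeof(types.Typ[types.Int])) // 8 on a 64-bit target
}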
-type driver func(cfg *Config, patterns []string) (*DriverResponse, error) +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found. +// the build system package structure, or "" if not found." // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. func findExternalDriver(cfg *Config) driver { @@ -104,7 +103,7 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, patterns []string) (*DriverResponse, error) { + return func(cfg *Config, words ...string) (*DriverResponse, error) { req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, @@ -118,7 +117,7 @@ func findExternalDriver(cfg *Config) driver { buf := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, patterns...) + cmd := exec.CommandContext(cfg.Context, tool, words...) cmd.Dir = cfg.Dir // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the @@ -132,7 +131,7 @@ func findExternalDriver(cfg *Config) driver { // command. // // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) - cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) + cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr @@ -151,3 +150,7 @@ func findExternalDriver(cfg *Config) driver { return &response, nil } } + +// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. +// TODO(adonovan): use go1.21 slices.Clip. +func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 0458b4f9..d9be410a 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -21,6 +21,7 @@ import ( "sync" "unicode" + "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" ) @@ -80,12 +81,6 @@ type golistState struct { cfg *Config ctx context.Context - runner *gocommand.Runner - - // overlay is the JSON file that encodes the Config.Overlay - // mapping, used by 'go list -overlay=...'. - overlay string - envOnce sync.Once goEnvError error goEnv map[string]string @@ -133,10 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -// -// overlay is the JSON file that encodes the cfg.Overlay -// mapping, used by 'go list -overlay=...' -func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -151,15 +143,13 @@ func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, pattern cfg: cfg, ctx: ctx, vendorDirs: map[string]bool{}, - overlay: overlay, - runner: runner, } // Fill in response.Sizes asynchronously if necessary. 
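// findExternalDriver above execs the GOPACKAGESDRIVER binary with the patterns as
// arguments, writes a JSON DriverRequest to its stdin, and reads a JSON DriverResponse
// from its stdout. A minimal, hypothetical driver program that simply defers to the
// `go list` fallback by reporting NotHandled (a real driver would resolve the patterns
// and fill in Roots and Packages instead):

package main

import (
	"encoding/json"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatal(err)
	}
	// os.Args[1:] carries the patterns; this sketch ignores them and defers to go list.
	_ = os.Args[1:]

	resp := packages.DriverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
		log.Fatal(err)
	}
}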
- if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { errCh := make(chan error) go func() { - compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -322,7 +312,6 @@ type jsonPackage struct { ImportPath string Dir string Name string - Target string Export string GoFiles []string CompiledGoFiles []string @@ -506,15 +495,13 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse pkg := &Package{ Name: p.Name, ID: p.ImportPath, - Dir: p.Dir, - Target: p.Target, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), EmbedFiles: absJoin(p.Dir, p.EmbedFiles), EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), - ForTest: p.ForTest, + forTest: p.ForTest, depsErrors: p.DepsErrors, Module: p.Module, } @@ -695,7 +682,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { // getGoVersion returns the effective minor version of the go command. func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner) + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) }) return state.goVersion, state.goVersionError } @@ -765,7 +752,7 @@ func jsonFlag(cfg *Config, goVersion int) string { } } addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", "SwigFiles", "SwigCXXFiles", "SysoFiles") @@ -773,7 +760,7 @@ func jsonFlag(cfg *Config, goVersion int) string { addFields("TestGoFiles", "XTestGoFiles") } } - if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { + if cfg.Mode&NeedTypes != 0 { // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, // even when -compiled isn't passed in. // TODO(#52435): Should we make the test ask for -compiled, or automatically @@ -798,7 +785,7 @@ func jsonFlag(cfg *Config, goVersion int) string { // Request Dir in the unlikely case Export is not absolute. 
addFields("Dir", "Export") } - if cfg.Mode&NeedForTest != 0 { + if cfg.Mode&needInternalForTest != 0 { addFields("ForTest") } if cfg.Mode&needInternalDepsErrors != 0 { @@ -813,9 +800,6 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } - if cfg.Mode&NeedTarget != 0 { - addFields("Target") - } return "-json=" + strings.Join(fields, ",") } @@ -857,7 +841,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, - Overlay: state.overlay, + Overlay: cfg.goListOverlayFile, } } @@ -868,8 +852,11 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, inv := state.cfgInvocation() inv.Verb = verb inv.Args = args - - stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { @@ -893,12 +880,6 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, friendlyErr } - // Return an error if 'go list' failed due to missing tools in - // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { - return nil, friendlyErr - } - // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. // @@ -1043,44 +1024,3 @@ func cmdDebugStr(cmd *exec.Cmd) string { } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) } - -// getSizesForArgs queries 'go list' for the appropriate -// Compiler and GOARCH arguments to pass to [types.SizesFor]. -func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { - inv.Verb = "list" - inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} - stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) - var goarch, compiler string - if rawErr != nil { - rawErrMsg := rawErr.Error() - if strings.Contains(rawErrMsg, "cannot find main module") || - strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. - // All bets are off. Get GOARCH and guess compiler is gc. - // TODO(matloob): Is this a problem in practice? - inv.Verb = "env" - inv.Args = []string{"GOARCH"} - envout, enverr := gocmdRunner.Run(ctx, inv) - if enverr != nil { - return "", "", enverr - } - goarch = strings.TrimSpace(envout.String()) - compiler = "gc" - } else if friendlyErr != nil { - return "", "", friendlyErr - } else { - // This should be unreachable, but be defensive - // in case RunRaw's error results are inconsistent. 
- return "", "", rawErr - } - } else { - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", - stdout.String(), stderr.String()) - } - goarch = fields[0] - compiler = fields[1] - } - return compiler, goarch, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 69eec9f4..5c080d21 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,48 +9,49 @@ import ( "strings" ) -var modes = [...]struct { - mode LoadMode - name string -}{ - {NeedName, "NeedName"}, - {NeedFiles, "NeedFiles"}, - {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, - {NeedImports, "NeedImports"}, - {NeedDeps, "NeedDeps"}, - {NeedExportFile, "NeedExportFile"}, - {NeedTypes, "NeedTypes"}, - {NeedSyntax, "NeedSyntax"}, - {NeedTypesInfo, "NeedTypesInfo"}, - {NeedTypesSizes, "NeedTypesSizes"}, - {NeedForTest, "NeedForTest"}, - {NeedModule, "NeedModule"}, - {NeedEmbedFiles, "NeedEmbedFiles"}, - {NeedEmbedPatterns, "NeedEmbedPatterns"}, - {NeedTarget, "NeedTarget"}, +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, } -func (mode LoadMode) String() string { - if mode == 0 { +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { return "LoadMode(0)" } var out []string - // named bits - for _, item := range modes { - if (mode & item.mode) != 0 { - mode ^= item.mode - out = append(out, item.name) + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x } } - // unnamed residue - if mode != 0 { - if out == nil { - return fmt.Sprintf("LoadMode(%#x)", int(mode)) - } - out = append(out, fmt.Sprintf("%#x", int(mode))) + if m != 0 { + out = append(out, "Unknown") } - if len(out) == 1 { - return out[0] - } - return "(" + strings.Join(out, "|") + ")" + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0147d908..34306ddd 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,13 +16,13 @@ import ( "go/scanner" "go/token" "go/types" + "io" "log" "os" "path/filepath" "runtime" "strings" "sync" - "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -31,6 +31,7 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -43,33 +44,20 @@ import ( // ID and Errors (if present) will always be filled. // [Load] may return more information than requested. // -// The Mode flag is a union of several bits named NeedName, -// NeedFiles, and so on, each of which determines whether -// a given field of Package (Name, Files, etc) should be -// populated. 
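// With the String method restored above, a combined mode prints its named bits.
// A small usage sketch:

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	mode := packages.NeedName | packages.NeedFiles | packages.NeedTypes
	// With the String implementation above this prints something like:
	//   LoadMode(NeedName|NeedFiles|NeedTypes)
	fmt.Println(mode)
}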
-// -// For convenience, we provide named constants for the most -// common combinations of Need flags: -// -// [LoadFiles] lists of files in each package -// [LoadImports] ... plus imports -// [LoadTypes] ... plus type information -// [LoadSyntax] ... plus type-annotated syntax -// [LoadAllSyntax] ... for all dependencies -// // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://github.com/golang/go/issues/48226 +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles + // NeedFiles adds GoFiles and OtherFiles. NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -88,10 +76,10 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax and Fset. + // NeedSyntax adds Syntax. NeedSyntax - // NeedTypesInfo adds TypesInfo and Fset. + // NeedTypesInfo adds TypesInfo. NeedTypesInfo // NeedTypesSizes adds TypesSizes. @@ -100,10 +88,9 @@ const ( // needInternalDepsErrors adds the internal deps errors field for use by gopls. needInternalDepsErrors - // NeedForTest adds ForTest. - // + // needInternalForTest adds the internal forTest field. // Tests must also be set on the context for this field to be populated. - NeedForTest + needInternalForTest // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. @@ -117,27 +104,27 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns - - // NeedTarget adds Target. - NeedTarget - - // Be sure to update loadmode_string.go when adding new items! ) const ( - // LoadFiles loads the name and file names for the initial packages. + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles - // LoadImports loads the name, file names, and import mapping for the initial packages. + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. LoadImports = LoadFiles | NeedImports - // LoadTypes loads exported type information for the initial packages. + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - // LoadSyntax loads typed syntax for the initial packages. + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. 
LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. @@ -147,7 +134,13 @@ const ( // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. // -// Calls to [Load] do not modify this struct. +// Calls to Load do not modify this struct. +// +// TODO(adonovan): #67702: this is currently false: in fact, +// calls to [Load] do not modify the public fields of this struct, but +// may modify hidden fields, so concurrent calls to [Load] must not +// use the same Config. But perhaps we should reestablish the +// documented invariant. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -178,10 +171,19 @@ type Config struct { // Env []string + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string + // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -228,24 +230,21 @@ type Config struct { // drivers may vary in their level of support for overlays. Overlay map[string][]byte - // -- Hidden configuration fields only for use in x/tools -- - - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string + // goListOverlayFile is the JSON file that encodes the Overlay + // mapping, used by 'go list -overlay=...' + goListOverlayFile string } // Load loads and returns the Go packages named by the given patterns. // -// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. +// Config specifies loading options; +// nil behaves the same as an empty Config. // // The [Config.Mode] field is a set of bits that determine what kinds // of information should be computed and returned. Modes that require // more information tend to be slower. See [LoadMode] for details // and important caveats. Its zero value is equivalent to -// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. +// NeedName | NeedFiles | NeedCompiledGoFiles. // // Each call to Load returns a new set of [Package] instances. // The Packages and their Imports form a directed acyclic graph. @@ -262,7 +261,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The [PrintErrors] function is +// proceeding with further analysis. The PrintErrors function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -325,24 +324,21 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // not handled: fall through + // (fall through) } // go list fallback - + // // Write overlays once, as there are many calls // to 'go list' (one per chunk plus others too). 
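// The Config/Load documentation above corresponds to the usual call pattern: build a
// Config with an explicit Mode, call Load with the patterns, and report per-package
// problems with PrintErrors. A minimal sketch using "./..." as an example pattern:

package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		fmt.Fprintln(os.Stderr, err) // failure of the loading process itself
		os.Exit(1)
	}
	if packages.PrintErrors(pkgs) > 0 { // per-package parse/type/metadata errors
		os.Exit(1)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.Name)
	}
}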
- overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) if err != nil { return nil, false, err } defer cleanupOverlay() + cfg.goListOverlayFile = overlay - var runner gocommand.Runner // (shared across many 'go list' calls) - driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { - return goListDriver(cfg, &runner, overlayFile, patterns) - } - response, err := callDriverOnChunks(driver, cfg, chunks) + response, err := callDriverOnChunks(goListDriver, cfg, chunks) if err != nil { return nil, false, err } @@ -380,14 +376,16 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg, nil) + return driver(cfg) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { + i := i + chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk) + responses[i], err = driver(cfg, chunk...) if responses[i] != nil && responses[i].NotHandled { err = errNotHandled } @@ -437,12 +435,6 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string - // Dir is the directory associated with the package, if it exists. - // - // For packages listed by the go command, this is the directory containing - // the package files. - Dir string - // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -482,10 +474,6 @@ type Package struct { // information for the package as provided by the build system. ExportFile string - // Target is the absolute install path of the .a file, for libraries, - // and of the executable file, for binaries. - Target string - // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. Imports map[string]*Package @@ -534,8 +522,8 @@ type Package struct { // -- internal -- - // ForTest is the package under test, if any. - ForTest string + // forTest is the package under test, if any. + forTest string // depsErrors is the DepsErrors field from the go list response, if any. depsErrors []*packagesinternal.PackageError @@ -564,6 +552,9 @@ type ModuleError struct { } func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } @@ -575,6 +566,7 @@ func init() { } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. 
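// callDriverOnChunks above re-declares i and chunk inside the loop so that each errgroup
// goroutine captures its own copies (required under pre-Go 1.22 loop-variable semantics).
// The same pattern in isolation, with a placeholder work function:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func runChunks(chunks [][]string, run func([]string) (int, error)) ([]int, error) {
	results := make([]int, len(chunks))
	var g errgroup.Group
	for i, chunk := range chunks {
		i, chunk := i, chunk // per-iteration copies, as in the loop above
		g.Go(func() error {
			var err error
			results[i], err = run(chunk)
			return err
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return results, nil
}

func main() {
	counts, _ := runChunks([][]string{{"a", "b"}, {"c"}}, func(c []string) (int, error) {
		return len(c), nil
	})
	fmt.Println(counts) // [2 1]
}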
@@ -690,19 +682,18 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - preds []*loaderPackage // packages that import this one - unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage // keyed by Package.ID + pkgs map[string]*loaderPackage Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -748,6 +739,9 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } if ld.Context == nil { ld.Context = context.Background() } @@ -761,7 +755,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -770,7 +764,6 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { - // We implicitly promise to keep doing ast.Object resolution. :( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } @@ -802,7 +795,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or itas a non- root and the user requested dependencies... needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -827,10 +820,9 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - // Materialize the import graph if it is needed (NeedImports), - // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}). - var leaves []*loaderPackage // packages with no unfinished successors - if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + if ld.Mode&NeedImports != 0 { + // Materialize the import graph. 
+ const ( white = 0 // new grey = 1 // in progress @@ -849,76 +841,63 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code. var stack []*loaderPackage - var visit func(from, lpkg *loaderPackage) bool - visit = func(from, lpkg *loaderPackage) bool { - if lpkg.color == grey { + var visit func(lpkg *loaderPackage) bool + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: panic("internal error: grey node") } - if lpkg.color == white { - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) - } - lpkg.importErrors[importPath] = importErr - continue - } - - if visit(lpkg, imp) { - lpkg.needsrc = true - } - lpkg.Imports[importPath] = imp.Package + lpkg.importErrors[importPath] = importErr + continue } - // -- postorder -- - - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true - } + if visit(imp) { + lpkg.needsrc = true } - - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - - // Add packages with no imports directly to the queue of leaves. - if len(lpkg.Imports) == 0 { - leaves = append(leaves, lpkg) - } - - stack = stack[:len(stack)-1] // pop - lpkg.color = black + lpkg.Imports[importPath] = imp.Package } - // Add edge from predecessor. - if from != nil { - from.unfinishedSuccs.Add(+1) // incref - lpkg.preds = append(lpkg.preds, from) + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } } + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + return lpkg.needsrc } // For each initial package, create its import DAG. 
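// The visit function above is a depth-first search with white/grey/black marks: grey means
// "currently on the DFS stack", so reaching a grey node again means an import cycle.
// A stripped-down sketch of the same marking scheme over a plain string graph:

package main

import "fmt"

const (
	white = 0 // not visited yet
	grey  = 1 // on the DFS stack (in progress)
	black = 2 // fully processed
)

// findCycle reports one cycle in graph, if any, as the path that closes it.
func findCycle(graph map[string][]string) []string {
	color := make(map[string]int)
	var stack []string
	var visit func(n string) []string
	visit = func(n string) []string {
		switch color[n] {
		case black:
			return nil
		case grey:
			return append(append([]string(nil), stack...), n) // cycle found
		}
		color[n] = grey
		stack = append(stack, n)
		for _, m := range graph[n] {
			if cyc := visit(m); cyc != nil {
				return cyc
			}
		}
		stack = stack[:len(stack)-1]
		color[n] = black
		return nil
	}
	for n := range graph {
		if cyc := visit(n); cyc != nil {
			return cyc
		}
	}
	return nil
}

func main() {
	g := map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"a"}}
	fmt.Println(findCycle(g)) // e.g. [a b c a]
}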
for _, lpkg := range initial { - visit(nil, lpkg) + visit(lpkg) } } else { @@ -931,45 +910,16 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). - if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { - - // We avoid using g.SetLimit to limit concurrency as - // it makes g.Go stop accepting work, which prevents - // workers from enqeuing, and thus finishing, and thus - // allowing the group to make progress: deadlock. - // - // Instead we use the ioLimit and cpuLimit semaphores. - g, _ := errgroup.WithContext(ld.Context) - - // enqueues adds a package to the type-checking queue. - // It must have no unfinished successors. - var enqueue func(*loaderPackage) - enqueue = func(lpkg *loaderPackage) { - g.Go(func() error { - // Parse and type-check. - ld.loadPackage(lpkg) - - // Notify each waiting predecessor, - // and enqueue it when it becomes a leaf. - for _, pred := range lpkg.preds { - if pred.unfinishedSuccs.Add(-1) == 0 { // decref - enqueue(pred) - } - } - - return nil - }) - } - - // Load leaves first, adding new packages - // to the queue as they become leaves. - for _, leaf := range leaves { - enqueue(leaf) - } - - if err := g.Wait(); err != nil { - return nil, err // cancelled + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) } + wg.Wait() } // If the context is done, return its error and @@ -1011,14 +961,12 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } - if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { - ld.pkgs[i].Fset = nil - } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -1033,10 +981,31 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadPackage loads/parses/typechecks the specified package. +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. +// Precondition: ld.Mode & NeedTypes. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -1072,10 +1041,6 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } - - // TODO(adonovan): this condition looks wrong: - // I think it should be lpkg.needtypes && !lpg.needsrc, - // so that NeedSyntax without NeedTypes can be satisfied by export data. 
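// loadRecursive above combines sync.Once (each package is loaded exactly once) with a
// WaitGroup (direct dependencies finish first, in parallel). The same shape on a generic
// node type, with a print standing in for the parse/type-check work:

package main

import (
	"fmt"
	"sync"
)

type node struct {
	name string
	deps []*node
	once sync.Once
}

// loadRecursive processes deps in parallel, then the node itself, exactly once.
func loadRecursive(n *node) {
	n.once.Do(func() {
		var wg sync.WaitGroup
		for _, dep := range n.deps {
			wg.Add(1)
			go func(dep *node) {
				defer wg.Done()
				loadRecursive(dep)
			}(dep)
		}
		wg.Wait()
		fmt.Println("loaded", n.name) // stand-in for the real loading work
	})
}

func main() {
	leaf := &node{name: "leaf"}
	mid := &node{name: "mid", deps: []*node{leaf}}
	root := &node{name: "root", deps: []*node{mid, leaf}}
	loadRecursive(root) // leaf is loaded once, before mid, before root
}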
if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1181,7 +1146,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { + if ld.Config.Mode&NeedTypes == 0 { return } @@ -1192,20 +1157,16 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - // Populate TypesInfo only if needed, as it - // causes the type checker to work much harder. - if ld.Config.Mode&NeedTypesInfo != 0 { - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - FileVersions: make(map[*ast.File]string), - } + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), } + versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1258,10 +1219,6 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } - // Type-checking is CPU intensive. - cpuLimit <- unit{} // acquire a token - defer func() { <-cpuLimit }() // release a token - typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1326,11 +1283,8 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls or CPU threads per process. -var ( - ioLimit = make(chan unit, 20) - cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) -) +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1347,28 +1301,20 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { - // TODO(adonovan): Inefficient for large overlays. - // Do an exact name-based map lookup - // (for nonexistent files) followed by a - // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents - break } } var err error if src == nil { - ioLimit <- unit{} // acquire a token + ioLimit <- true // wait src, err = os.ReadFile(filename) - <-ioLimit // release a token + <-ioLimit // signal } if err != nil { v.err = err } else { - // Parsing is CPU intensive. - cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) - <-cpuLimit // release a token } close(v.ready) @@ -1383,21 +1329,18 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. 
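// The ioLimit channel above is a counting semaphore: a send acquires one of 20 slots and
// a receive releases it, capping the number of concurrent file reads. The pattern in isolation:

package main

import (
	"fmt"
	"os"
	"sync"
)

var ioLimit = make(chan bool, 20) // at most 20 concurrent reads

func readLimited(filename string) ([]byte, error) {
	ioLimit <- true              // acquire a slot (blocks when 20 reads are in flight)
	defer func() { <-ioLimit }() // release it
	return os.ReadFile(filename)
}

func main() {
	var wg sync.WaitGroup
	for _, f := range []string{"go.mod", "go.sum"} {
		wg.Add(1)
		go func(f string) {
			defer wg.Done()
			if data, err := readLimited(f); err == nil {
				fmt.Println(f, len(data), "bytes")
			}
		}(f)
	}
	wg.Wait()
}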
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var ( - n = len(filenames) - parsed = make([]*ast.File, n) - errors = make([]error, n) - ) - var g errgroup.Group - for i, filename := range filenames { - // This creates goroutines unnecessarily in the - // cache-hit case, but that case is uncommon. - g.Go(func() error { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + wg.Add(1) + go func(i int, filename string) { parsed[i], errors[i] = ld.parseFile(filename) - return nil - }) + wg.Done() + }(i, file) } - g.Wait() + wg.Wait() // Eliminate nils, preserving order. var o int @@ -1556,10 +1499,6 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } - if loadMode&NeedTypes != 0 { - // Types require the GoVersion from Module. - loadMode |= NeedModule - } return loadMode } @@ -1568,4 +1507,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -type unit struct{} +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index df14ffd9..a1dcc40b 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,20 +49,11 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int - errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } - - // Print pkg.Module.Error once if present. - mod := pkg.Module - if mod != nil && mod.Error != nil && !errModules[mod] { - errModules[mod] = true - fmt.Fprintln(os.Stderr, mod.Error.Err) - n++ - } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 16ed3c17..a2386c34 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,8 +63,8 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTrCa]; -// two of these ({,Recv}TypeParams) require an integer operand, +// - The TT operators are encoded as [EKPRUTC]; +// one of these (TypeParam) requires an integer operand, // which is encoded as a string of decimal digits. // - The TO operators are encoded as [AFMO]; // three of these (At,Field,Method) require an integer operand, @@ -98,21 +98,19 @@ const ( opType = '.' 
// .Type() (Object) // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) - opConstraint = 'C' // .Constraint() (TypeParam) - opRhs = 'a' // .Rhs() (Alias) + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opConstraint = 'C' // .Constraint() (TypeParam) // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) ) // For is equivalent to new(Encoder).For(obj). @@ -228,7 +226,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -280,26 +278,21 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - if alias, ok := T.(*types.Alias); ok { - if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { - return Path(r), nil - } - if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { - return Path(r), nil - } - } else if tname.IsAlias() { - // legacy alias - if r := find(obj, T, path); r != nil { + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { return Path(r), nil } - - } else if named, ok := T.(*types.Named); ok { + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } // defined (named) type - if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { - return Path(r), nil - } - if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { return Path(r), nil } } @@ -312,7 +305,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType)); r != nil { + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { return Path(r), nil } } @@ -320,7 +313,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. 
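// The encoder above produces Path values that remain meaningful across export/import;
// Object is its inverse. A round-trip sketch that type-checks a tiny source string, encodes
// the path of a package-level object, and resolves it back (the source text and names are
// placeholders):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	const src = `package p; type T struct{ F int }`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}

	obj := pkg.Scope().Lookup("T") // a package-level object, so it has a path
	path, err := objectpath.For(obj)
	if err != nil {
		log.Fatal(err)
	}
	back, err := objectpath.Object(pkg, path) // decode within the same package
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path, back == obj) // e.g. "T true"
}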
- if T, ok := types.Unalias(o.Type()).(*types.Named); ok { + if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -332,7 +325,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType)); r != nil { + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { return Path(r), nil } } @@ -447,64 +440,43 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -// -// The seenMethods map is used internally to short circuit cycles through -// interface methods, such as occur in the following example: -// -// type I interface { f() interface{I} } -// -// See golang/go#68046 for details. -func find(obj types.Object, T types.Type, path []byte) []byte { - return (&finder{obj: obj}).find(T, path) -} - -// finder closes over search state for a call to find. -type finder struct { - obj types.Object // the sought object - seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters - seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces -} - -func (f *finder) find(T types.Type, path []byte) []byte { +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { - case *types.Alias: - return f.find(types.Unalias(T), path) + case *aliases.Alias: + return find(obj, aliases.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
return nil case *types.Pointer: - return f.find(T.Elem(), append(path, opElem)) + return find(obj, T.Elem(), append(path, opElem), seen) case *types.Slice: - return f.find(T.Elem(), append(path, opElem)) + return find(obj, T.Elem(), append(path, opElem), seen) case *types.Array: - return f.find(T.Elem(), append(path, opElem)) + return find(obj, T.Elem(), append(path, opElem), seen) case *types.Chan: - return f.find(T.Elem(), append(path, opElem)) + return find(obj, T.Elem(), append(path, opElem), seen) case *types.Map: - if r := f.find(T.Key(), append(path, opKey)); r != nil { + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { return r } - return f.find(T.Elem(), append(path, opElem)) + return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { return r } - if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { return r } - if r := f.find(T.Params(), append(path, opParams)); r != nil { - return r - } - return f.find(T.Results(), append(path, opResults)) + return find(obj, T.Results(), append(path, opResults), seen) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == f.obj { + if fld == obj { return path2 // found field var } - if r := f.find(fld.Type(), append(path2, opType)); r != nil { + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { return r } } @@ -513,10 +485,10 @@ func (f *finder) find(T types.Type, path []byte) []byte { for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == f.obj { + if v == obj { return path2 // found param/result var } - if r := f.find(v.Type(), append(path2, opType)); r != nil { + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { return r } } @@ -524,35 +496,28 @@ func (f *finder) find(T types.Type, path []byte) []byte { case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) - if f.seenMethods[m] { - return nil - } path2 := appendOpArg(path, opMethod, i) - if m == f.obj { + if m == obj { return path2 // found interface method } - if f.seenMethods == nil { - f.seenMethods = make(map[*types.Func]bool) - } - f.seenMethods[m] = true - if r := f.find(m.Type(), append(path2, opType)); r != nil { + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if f.seenTParamNames[name] { - return nil - } - if name == f.obj { + if name == obj { return append(path, opObj) } - if f.seenTParamNames == nil { - f.seenTParamNames = make(map[*types.TypeName]bool) + if seen[name] { + return nil } - f.seenTParamNames[name] = true - if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { return r } return nil @@ -560,15 +525,11 @@ func (f *finder) find(T types.Type, path []byte) []byte { panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { - return (&finder{obj: obj}).findTypeParam(list, path, op) -} - -func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { +func findTypeParam(obj types.Object, list 
*types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) - path2 := appendOpArg(path, op, i) - if r := f.find(tparam, path2); r != nil { + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { return r } } @@ -619,10 +580,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { code := suffix[0] suffix = suffix[1:] - // Codes [AFMTr] have an integer operand. + // Codes [AFM] have an integer operand. var index int switch code { - case opAt, opField, opMethod, opTypeParam, opRecvTypeParam: + case opAt, opField, opMethod, opTypeParam: rest := strings.TrimLeft(suffix, "0123456789") numerals := suffix[:len(suffix)-len(rest)] suffix = rest @@ -655,7 +616,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = types.Unalias(t) + t = aliases.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -692,16 +653,6 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() - case opRhs: - if alias, ok := t.(*types.Alias); ok { - t = aliases.Rhs(alias) - } else if false && aliases.Enabled() { - // The Enabled check is too expensive, so for now we - // simply assume that aliases are not enabled. - // TODO(adonovan): replace with "if true {" when go1.24 is assured. - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) - } - case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { @@ -713,17 +664,6 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = tparams.At(index) - case opRecvTypeParam: - sig, ok := t.(*types.Signature) // Signature - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - rtparams := sig.RecvTypeParams() - if n := rtparams.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - t = rtparams.At(index) - case opConstraint: tparam, ok := t.(*types.TypeParam) if !ok { @@ -785,10 +725,6 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } } - if obj == nil { - panic(p) // path does not end in an object-valued operator - } - if obj.Pkg() != pkg { return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go deleted file mode 100644 index 75438035..00000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import ( - "go/ast" - "go/types" - - "golang.org/x/tools/internal/typeparams" -) - -// Callee returns the named target of a function call, if any: -// a function, method, builtin, or variable. -// -// Functions and methods may potentially have type parameters. -func Callee(info *types.Info, call *ast.CallExpr) types.Object { - fun := ast.Unparen(call.Fun) - - // Look through type instantiation if necessary. - isInstance := false - switch fun.(type) { - case *ast.IndexExpr, *ast.IndexListExpr: - // When extracting the callee from an *IndexExpr, we need to check that - // it is a *types.Func and not a *types.Var. - // Example: Don't match a slice m within the expression `m[0]()`. 
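// Callee and StaticCallee, shown above as they are dropped from the vendored tree, resolve
// the object named by a call expression. A sketch of typical use inside an ast.Inspect walk,
// assuming a types.Info whose Uses, Selections, and Types maps were filled by the type checker:

package example

import (
	"fmt"
	"go/ast"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

// inspectCalls prints the static callee of every call in the file, when one is known.
func inspectCalls(file *ast.File, info *types.Info) {
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if fn := typeutil.StaticCallee(info, call); fn != nil {
				fmt.Println("static call to", fn.FullName())
			}
		}
		return true
	})
}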
- isInstance = true - fun, _, _, _ = typeparams.UnpackIndexExpr(fun) - } - - var obj types.Object - switch fun := fun.(type) { - case *ast.Ident: - obj = info.Uses[fun] // type, var, builtin, or declared func - case *ast.SelectorExpr: - if sel, ok := info.Selections[fun]; ok { - obj = sel.Obj() // method or field - } else { - obj = info.Uses[fun.Sel] // qualified identifier? - } - } - if _, ok := obj.(*types.TypeName); ok { - return nil // T(x) is a conversion, not a call - } - // A Func is required to match instantiations. - if _, ok := obj.(*types.Func); isInstance && !ok { - return nil // Was not a Func. - } - return obj -} - -// StaticCallee returns the target (function or method) of a static function -// call, if any. It returns nil for calls to builtins. -// -// Note: for calls of instantiated functions and methods, StaticCallee returns -// the corresponding generic function or method on the generic type. -func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { - if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { - return f - } - return nil -} - -func interfaceMethod(f *types.Func) bool { - recv := f.Type().(*types.Signature).Recv() - return recv != nil && types.IsInterface(recv.Type()) -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go deleted file mode 100644 index b81ce0c3..00000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/imports.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import "go/types" - -// Dependencies returns all dependencies of the specified packages. -// -// Dependent packages appear in topological order: if package P imports -// package Q, Q appears earlier than P in the result. -// The algorithm follows import statements in the order they -// appear in the source code, so the result is a total order. -func Dependencies(pkgs ...*types.Package) []*types.Package { - var result []*types.Package - seen := make(map[*types.Package]bool) - var visit func(pkgs []*types.Package) - visit = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !seen[p] { - seen[p] = true - visit(p.Imports()) - result = append(result, p) - } - } - } - visit(pkgs) - return result -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go deleted file mode 100644 index 93b3090c..00000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ /dev/null @@ -1,467 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeutil defines various utilities for types, such as [Map], -// a hash table that maps [types.Type] to any value. -package typeutil - -import ( - "bytes" - "fmt" - "go/types" - "hash/maphash" - "unsafe" - - "golang.org/x/tools/internal/typeparams" -) - -// Map is a hash-table-based mapping from types (types.Type) to -// arbitrary values. The concrete types that implement -// the Type interface are pointers. Since they are not canonicalized, -// == cannot be used to check for equivalence, and thus we cannot -// simply use a Go map. -// -// Just as with map[K]V, a nil *Map is a valid empty map. -// -// Read-only map operations ([Map.At], [Map.Len], and so on) may -// safely be called concurrently. 
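// The Map type above keys entries by type identity (types.Identical) rather than by pointer
// equality, which a plain Go map cannot do for types.Type keys. A small usage sketch:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is an empty map

	intSlice := types.NewSlice(types.Typ[types.Int])
	sameSlice := types.NewSlice(types.Typ[types.Int]) // distinct pointer, identical type

	m.Set(intSlice, "a slice of int")
	fmt.Println(m.At(sameSlice)) // "a slice of int": lookup is by type identity
	fmt.Println(m.Len())         // 1
}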
-// -// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 -// and 69559, if the latter proposals for a generic hash-map type and -// a types.Hash function are accepted. -type Map struct { - table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused - length int // number of map entries -} - -// entry is an entry (key/value association) in a hash bucket. -type entry struct { - key types.Type - value any -} - -// SetHasher has no effect. -// -// It is a relic of an optimization that is no longer profitable. Do -// not use [Hasher], [MakeHasher], or [SetHasher] in new code. -func (m *Map) SetHasher(Hasher) {} - -// Delete removes the entry with the given key, if any. -// It returns true if the entry was found. -func (m *Map) Delete(key types.Type) bool { - if m != nil && m.table != nil { - hash := hash(key) - bucket := m.table[hash] - for i, e := range bucket { - if e.key != nil && types.Identical(key, e.key) { - // We can't compact the bucket as it - // would disturb iterators. - bucket[i] = entry{} - m.length-- - return true - } - } - } - return false -} - -// At returns the map entry for the given key. -// The result is nil if the entry is not present. -func (m *Map) At(key types.Type) any { - if m != nil && m.table != nil { - for _, e := range m.table[hash(key)] { - if e.key != nil && types.Identical(key, e.key) { - return e.value - } - } - } - return nil -} - -// Set sets the map entry for key to val, -// and returns the previous entry, if any. -func (m *Map) Set(key types.Type, value any) (prev any) { - if m.table != nil { - hash := hash(key) - bucket := m.table[hash] - var hole *entry - for i, e := range bucket { - if e.key == nil { - hole = &bucket[i] - } else if types.Identical(key, e.key) { - prev = e.value - bucket[i].value = value - return - } - } - - if hole != nil { - *hole = entry{key, value} // overwrite deleted entry - } else { - m.table[hash] = append(bucket, entry{key, value}) - } - } else { - hash := hash(key) - m.table = map[uint32][]entry{hash: {entry{key, value}}} - } - - m.length++ - return -} - -// Len returns the number of map entries. -func (m *Map) Len() int { - if m != nil { - return m.length - } - return 0 -} - -// Iterate calls function f on each entry in the map in unspecified order. -// -// If f should mutate the map, Iterate provides the same guarantees as -// Go maps: if f deletes a map entry that Iterate has not yet reached, -// f will not be invoked for it, but if f inserts a map entry that -// Iterate has not yet reached, whether or not f will be invoked for -// it is unspecified. -func (m *Map) Iterate(f func(key types.Type, value any)) { - if m != nil { - for _, bucket := range m.table { - for _, e := range bucket { - if e.key != nil { - f(e.key, e.value) - } - } - } - } -} - -// Keys returns a new slice containing the set of map keys. -// The order is unspecified. -func (m *Map) Keys() []types.Type { - keys := make([]types.Type, 0, m.Len()) - m.Iterate(func(key types.Type, _ any) { - keys = append(keys, key) - }) - return keys -} - -func (m *Map) toString(values bool) string { - if m == nil { - return "{}" - } - var buf bytes.Buffer - fmt.Fprint(&buf, "{") - sep := "" - m.Iterate(func(key types.Type, value any) { - fmt.Fprint(&buf, sep) - sep = ", " - fmt.Fprint(&buf, key) - if values { - fmt.Fprintf(&buf, ": %q", value) - } - }) - fmt.Fprint(&buf, "}") - return buf.String() -} - -// String returns a string representation of the map's entries. -// Values are printed using fmt.Sprintf("%v", v). -// Order is unspecified. 
-func (m *Map) String() string { - return m.toString(true) -} - -// KeysString returns a string representation of the map's key set. -// Order is unspecified. -func (m *Map) KeysString() string { - return m.toString(false) -} - -// -- Hasher -- - -// hash returns the hash of type t. -// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. -func hash(t types.Type) uint32 { - return theHasher.Hash(t) -} - -// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. -// Hashers are stateless, and all are equivalent. -type Hasher struct{} - -var theHasher Hasher - -// MakeHasher returns Hasher{}. -// Hashers are stateless; all are equivalent. -func MakeHasher() Hasher { return theHasher } - -// Hash computes a hash value for the given type t such that -// Identical(t, t') => Hash(t) == Hash(t'). -func (h Hasher) Hash(t types.Type) uint32 { - return hasher{inGenericSig: false}.hash(t) -} - -// hasher holds the state of a single Hash traversal: whether we are -// inside the signature of a generic function; this is used to -// optimize [hasher.hashTypeParam]. -type hasher struct{ inGenericSig bool } - -// hashString computes the Fowler–Noll–Vo hash of s. -func hashString(s string) uint32 { - var h uint32 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// hash computes the hash of t. -func (h hasher) hash(t types.Type) uint32 { - // See Identical for rationale. - switch t := t.(type) { - case *types.Basic: - return uint32(t.Kind()) - - case *types.Alias: - return h.hash(types.Unalias(t)) - - case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) - - case *types.Slice: - return 9049 + 2*h.hash(t.Elem()) - - case *types.Struct: - var hash uint32 = 9059 - for i, n := 0, t.NumFields(); i < n; i++ { - f := t.Field(i) - if f.Anonymous() { - hash += 8861 - } - hash += hashString(t.Tag(i)) - hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.hash(f.Type()) - } - return hash - - case *types.Pointer: - return 9067 + 2*h.hash(t.Elem()) - - case *types.Signature: - var hash uint32 = 9091 - if t.Variadic() { - hash *= 8863 - } - - tparams := t.TypeParams() - for i := range tparams.Len() { - h.inGenericSig = true - tparam := tparams.At(i) - hash += 7 * h.hash(tparam.Constraint()) - } - - return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) - - case *types.Union: - return h.hashUnion(t) - - case *types.Interface: - // Interfaces are identical if they have the same set of methods, with - // identical names and types, and they have the same set of type - // restrictions. See go/types.identical for more details. - var hash uint32 = 9103 - - // Hash methods. - for i, n := 0, t.NumMethods(); i < n; i++ { - // Method order is not significant. - // Ignore m.Pkg(). - m := t.Method(i) - // Use shallow hash on method signature to - // avoid anonymous interface cycles. - hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) - } - - // Hash type restrictions. - terms, err := typeparams.InterfaceTermSet(t) - // if err != nil t has invalid type restrictions. 
- if err == nil { - hash += h.hashTermSet(terms) - } - - return hash - - case *types.Map: - return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) - - case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) - - case *types.Named: - hash := h.hashTypeName(t.Obj()) - targs := t.TypeArgs() - for i := 0; i < targs.Len(); i++ { - targ := targs.At(i) - hash += 2 * h.hash(targ) - } - return hash - - case *types.TypeParam: - return h.hashTypeParam(t) - - case *types.Tuple: - return h.hashTuple(t) - } - - panic(fmt.Sprintf("%T: %v", t, t)) -} - -func (h hasher) hashTuple(tuple *types.Tuple) uint32 { - // See go/types.identicalTypes for rationale. - n := tuple.Len() - hash := 9137 + 2*uint32(n) - for i := range n { - hash += 3 * h.hash(tuple.At(i).Type()) - } - return hash -} - -func (h hasher) hashUnion(t *types.Union) uint32 { - // Hash type restrictions. - terms, err := typeparams.UnionTermSet(t) - // if err != nil t has invalid type restrictions. Fall back on a non-zero - // hash. - if err != nil { - return 9151 - } - return h.hashTermSet(terms) -} - -func (h hasher) hashTermSet(terms []*types.Term) uint32 { - hash := 9157 + 2*uint32(len(terms)) - for _, term := range terms { - // term order is not significant. - termHash := h.hash(term.Type()) - if term.Tilde() { - termHash *= 9161 - } - hash += 3 * termHash - } - return hash -} - -// hashTypeParam returns the hash of a type parameter. -func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { - // Within the signature of a generic function, TypeParams are - // identical if they have the same index and constraint, so we - // hash them based on index. - // - // When we are outside a generic function, free TypeParams are - // identical iff they are the same object, so we can use a - // more discriminating hash consistent with object identity. - // This optimization saves [Map] about 4% when hashing all the - // types.Info.Types in the forward closure of net/http. - if !h.inGenericSig { - // Optimization: outside a generic function signature, - // use a more discrimating hash consistent with object identity. - return h.hashTypeName(t.Obj()) - } - return 9173 + 3*uint32(t.Index()) -} - -var theSeed = maphash.MakeSeed() - -// hashTypeName hashes the pointer of tname. -func (hasher) hashTypeName(tname *types.TypeName) uint32 { - // Since types.Identical uses == to compare TypeNames, - // the Hash function uses maphash.Comparable. - // TODO(adonovan): or will, when it becomes available in go1.24. - // In the meantime we use the pointer's numeric value. - // - // hash := maphash.Comparable(theSeed, tname) - // - // (Another approach would be to hash the name and package - // path, and whether or not it is a package-level typename. It - // is rare for a package to define multiple local types with - // the same name.) - hash := uintptr(unsafe.Pointer(tname)) - return uint32(hash ^ (hash >> 32)) -} - -// shallowHash computes a hash of t without looking at any of its -// element Types, to avoid potential anonymous cycles in the types of -// interface methods. -// -// When an unnamed non-empty interface type appears anywhere among the -// arguments or results of an interface method, there is a potential -// for endless recursion. Consider: -// -// type X interface { m() []*interface { X } } -// -// The problem is that the Methods of the interface in m's result type -// include m itself; there is no mention of the named type X that -// might help us break the cycle. -// (See comment in go/types.identical, case *Interface, for more.) 
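The typeutil code removed above is easiest to follow from the caller's side. A minimal sketch, assuming the golang.org/x/tools/go/types/typeutil package as vendored here (this snippet is illustrative only and not part of the patch): two structurally identical types built separately are distinct pointers, so == and a plain Go map cannot relate them, while Hasher and Map key on type identity.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	t1 := types.NewSlice(types.Typ[types.Int]) // []int
	t2 := types.NewSlice(types.Typ[types.Int]) // []int again, but a different pointer

	h := typeutil.MakeHasher()
	fmt.Println(t1 == t2)                 // false: types are not canonicalized
	fmt.Println(types.Identical(t1, t2))  // true: structurally identical
	fmt.Println(h.Hash(t1) == h.Hash(t2)) // true: Identical(t, t') => Hash(t) == Hash(t')

	var m typeutil.Map
	m.Set(t1, "slice of int")
	fmt.Println(m.At(t2)) // "slice of int": lookup uses type identity, not pointer equality
}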
-func (h hasher) shallowHash(t types.Type) uint32 { - // t is the type of an interface method (Signature), - // its params or results (Tuples), or their immediate - // elements (mostly Slice, Pointer, Basic, Named), - // so there's no need to optimize anything else. - switch t := t.(type) { - case *types.Alias: - return h.shallowHash(types.Unalias(t)) - - case *types.Signature: - var hash uint32 = 604171 - if t.Variadic() { - hash *= 971767 - } - // The Signature/Tuple recursion is always finite - // and invariably shallow. - return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) - - case *types.Tuple: - n := t.Len() - hash := 9137 + 2*uint32(n) - for i := range n { - hash += 53471161 * h.shallowHash(t.At(i).Type()) - } - return hash - - case *types.Basic: - return 45212177 * uint32(t.Kind()) - - case *types.Array: - return 1524181 + 2*uint32(t.Len()) - - case *types.Slice: - return 2690201 - - case *types.Struct: - return 3326489 - - case *types.Pointer: - return 4393139 - - case *types.Union: - return 562448657 - - case *types.Interface: - return 2124679 // no recursion here - - case *types.Map: - return 9109 - - case *types.Chan: - return 9127 - - case *types.Named: - return h.hashTypeName(t.Obj()) - - case *types.TypeParam: - return h.hashTypeParam(t) - } - panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go deleted file mode 100644 index f7666028..00000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements a cache of method sets. - -package typeutil - -import ( - "go/types" - "sync" -) - -// A MethodSetCache records the method set of each type T for which -// MethodSet(T) is called so that repeat queries are fast. -// The zero value is a ready-to-use cache instance. -type MethodSetCache struct { - mu sync.Mutex - named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N - others map[types.Type]*types.MethodSet // all other types -} - -// MethodSet returns the method set of type T. It is thread-safe. -// -// If cache is nil, this function is equivalent to types.NewMethodSet(T). -// Utility functions can thus expose an optional *MethodSetCache -// parameter to clients that care about performance. -func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { - if cache == nil { - return types.NewMethodSet(T) - } - cache.mu.Lock() - defer cache.mu.Unlock() - - switch T := types.Unalias(T).(type) { - case *types.Named: - return cache.lookupNamed(T).value - - case *types.Pointer: - if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { - return cache.lookupNamed(N).pointer - } - } - - // all other types - // (The map uses pointer equivalence, not type identity.) 
- mset := cache.others[T] - if mset == nil { - mset = types.NewMethodSet(T) - if cache.others == nil { - cache.others = make(map[types.Type]*types.MethodSet) - } - cache.others[T] = mset - } - return mset -} - -func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { - if cache.named == nil { - cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) - } - // Avoid recomputing mset(*T) for each distinct Pointer - // instance whose underlying type is a named type. - msets, ok := cache.named[named] - if !ok { - msets.value = types.NewMethodSet(named) - msets.pointer = types.NewMethodSet(types.NewPointer(named)) - cache.named[named] = msets - } - return msets -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go deleted file mode 100644 index 9dda6a25..00000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/ui.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -// This file defines utilities for user interfaces that display types. - -import ( - "go/types" -) - -// IntuitiveMethodSet returns the intuitive method set of a type T, -// which is the set of methods you can call on an addressable value of -// that type. -// -// The result always contains MethodSet(T), and is exactly MethodSet(T) -// for interface types and for pointer-to-concrete types. -// For all other concrete types T, the result additionally -// contains each method belonging to *T if there is no identically -// named method on T itself. -// -// This corresponds to user intuition about method sets; -// this function is intended only for user interfaces. -// -// The order of the result is as for types.MethodSet(T). -func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { - isPointerToConcrete := func(T types.Type) bool { - ptr, ok := types.Unalias(T).(*types.Pointer) - return ok && !types.IsInterface(ptr.Elem()) - } - - var result []*types.Selection - mset := msets.MethodSet(T) - if types.IsInterface(T) || isPointerToConcrete(T) { - for i, n := 0, mset.Len(); i < n; i++ { - result = append(result, mset.At(i)) - } - } else { - // T is some other concrete type. - // Report methods of T and *T, preferring those of T. - pmset := msets.MethodSet(types.NewPointer(T)) - for i, n := 0, pmset.Len(); i < n; i++ { - meth := pmset.At(i) - if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { - meth = m - } - result = append(result, meth) - } - - } - return result -} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index b9425f5a..c24c2eee 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,17 +22,11 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -// -// Precondition: enabled || len(tparams)==0. -// If materialized aliases are disabled, there must not be any type parameters. 
-func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { +func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - SetTypeParams(types.NewAlias(tname, rhs), tparams) + newAlias(tname, rhs) return tname } - if len(tparams) > 0 { - panic("cannot create an alias with type parameters when gotypesalias is not enabled") - } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go new file mode 100644 index 00000000..c027b9f3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -0,0 +1,31 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package aliases + +import ( + "go/types" +) + +// Alias is a placeholder for a go/types.Alias for <=1.21. +// It will never be created by go/types. +type Alias struct{} + +func (*Alias) String() string { panic("unreachable") } +func (*Alias) Underlying() types.Type { panic("unreachable") } +func (*Alias) Obj() *types.TypeName { panic("unreachable") } +func Rhs(alias *Alias) types.Type { panic("unreachable") } + +// Unalias returns the type t for go <=1.21. +func Unalias(t types.Type) types.Type { return t } + +func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } + +// Enabled reports whether [NewAlias] should create [types.Alias] types. +// +// Before go1.22, this function always returns false. +func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index 7716a333..b3299548 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.22 +// +build go1.22 + package aliases import ( @@ -11,51 +14,31 @@ import ( "go/types" ) +// Alias is an alias of types.Alias. +type Alias = types.Alias + // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *types.Alias) types.Type { +func Rhs(alias *Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return types.Unalias(alias) + return Unalias(alias) } -// TypeParams returns the type parameter list of the alias. -func TypeParams(alias *types.Alias) *types.TypeParamList { - if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { - return alias.TypeParams() // go1.23+ - } - return nil -} +// Unalias is a wrapper of types.Unalias. +func Unalias(t types.Type) types.Type { return types.Unalias(t) } -// SetTypeParams sets the type parameters of the alias type. 
-func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { - if alias, ok := any(alias).(interface { - SetTypeParams(tparams []*types.TypeParam) - }); ok { - alias.SetTypeParams(tparams) // go1.23+ - } else if len(tparams) > 0 { - panic("cannot set type parameters of an Alias type in go1.22") - } -} - -// TypeArgs returns the type arguments used to instantiate the Alias type. -func TypeArgs(alias *types.Alias) *types.TypeList { - if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { - return alias.TypeArgs() // go1.23+ - } - return nil // empty (go1.22) -} - -// Origin returns the generic Alias type of which alias is an instance. -// If alias is not an instance of a generic alias, Origin returns alias. -func Origin(alias *types.Alias) *types.Alias { - if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { - return alias.Origin() // go1.23+ - } - return alias // not an instance of a generic alias (go1.22) +// newAlias is an internal alias around types.NewAlias. +// Direct usage is discouraged as the moment. +// Try to use NewAlias instead. +func newAlias(tname *types.TypeName, rhs types.Type) *Alias { + a := types.NewAlias(tname, rhs) + // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. + Unalias(a) + return a } // Enabled reports whether [NewAlias] should create [types.Alias] types. @@ -73,7 +56,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d79a605e..d98b0db2 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -87,3 +87,64 @@ func chanDir(d int) types.ChanDir { return 0 } } + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + 
anyType{}, + } + predecl = append(predecl, additionalPredeclared()...) + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index 5662a311..f6437feb 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,183 +2,49 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. -// This file also additionally implements FindExportData for gcexportdata.NewReader. +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. package gcimporter import ( "bufio" - "bytes" - "errors" "fmt" - "go/build" "io" - "os" - "os/exec" - "path/filepath" + "strconv" "strings" - "sync" ) +func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + length, err := strconv.Atoi(s) + size = int64(length) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + // FindExportData positions the reader r at the beginning of the -// export data section of an underlying cmd/compile created archive +// export data section of an underlying GC-created object/archive // file by reading from it. The reader must be positioned at the -// start of the file before calling this function. -// This returns the length of the export data in bytes. -// -// This function is needed by [gcexportdata.Read], which must -// accept inputs produced by the last two releases of cmd/compile, -// plus tip. -func FindExportData(r *bufio.Reader) (size int64, err error) { - arsize, err := FindPackageDefinition(r) - if err != nil { - return - } - size = int64(arsize) - - objapi, headers, err := ReadObjectHeaders(r) - if err != nil { - return - } - size -= int64(len(objapi)) - for _, h := range headers { - size -= int64(len(h)) - } - - // Check for the binary export data section header "$$B\n". - // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading - line, err := r.ReadSlice('\n') - if err != nil { - return - } - hdr := string(line) - if hdr != "$$B\n" { - err = fmt.Errorf("unknown export data header: %q", hdr) - return - } - size -= int64(len(hdr)) - - // For files with a binary export data header "$$B\n", - // these are always terminated by an end-of-section marker "\n$$\n". - // So the last bytes must always be this constant. - // - // The end-of-section marker is not a part of the export data itself. - // Do not include these in size. - // - // It would be nice to have sanity check that the final bytes after - // the export data are indeed the end-of-section marker. The split - // of gcexportdata.NewReader and gcexportdata.Read make checking this - // ugly so gcimporter gives up enforcing this. The compiler and go/types - // importer do enforce this, which seems good enough. 
- const endofsection = "\n$$\n" - size -= int64(len(endofsection)) - - if size < 0 { - err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) - return - } - - return -} - -// ReadUnified reads the contents of the unified export data from a reader r -// that contains the contents of a GC-created archive file. -// -// On success, the reader will be positioned after the end-of-section marker "\n$$\n". -// -// Supported GC-created archive files have 4 layers of nesting: -// - An archive file containing a package definition file. -// - The package definition file contains headers followed by a data section. -// Headers are lines (≤ 4kb) that do not start with "$$". -// - The data section starts with "$$B\n" followed by export data followed -// by an end of section marker "\n$$\n". (The section start "$$\n" is no -// longer supported.) -// - The export data starts with a format byte ('u') followed by the in -// the given format. (See ReadExportDataHeader for older formats.) -// -// Putting this together, the bytes in a GC-created archive files are expected -// to look like the following. -// See cmd/internal/archive for more details on ar file headers. -// -// | \n | ar file signature -// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size. -// | go object <...>\n | objabi header -// | \n | other headers such as build id -// | $$B\n | binary format marker -// | u\n | unified export -// | $$\n | end-of-section marker -// | [optional padding] | padding byte (0x0A) if size is odd -// | [ar file header] | other ar files -// | [ar file data] | -func ReadUnified(r *bufio.Reader) (data []byte, err error) { - // We historically guaranteed headers at the default buffer size (4096) work. - // This ensures we can use ReadSlice throughout. - const minBufferSize = 4096 - r = bufio.NewReaderSize(r, minBufferSize) - - size, err := FindPackageDefinition(r) - if err != nil { - return - } - n := size - - objapi, headers, err := ReadObjectHeaders(r) - if err != nil { - return - } - n -= len(objapi) - for _, h := range headers { - n -= len(h) - } - - hdrlen, err := ReadExportDataHeader(r) - if err != nil { - return - } - n -= hdrlen - - // size also includes the end of section marker. Remove that many bytes from the end. - const marker = "\n$$\n" - n -= len(marker) - - if n < 0 { - err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n) - return - } - - // Read n bytes from buf. - data = make([]byte, n) - _, err = io.ReadFull(r, data) - if err != nil { - return - } - - // Check for marker at the end. - var suffix [len(marker)]byte - _, err = io.ReadFull(r, suffix[:]) - if err != nil { - return - } - if s := string(suffix[:]); s != marker { - err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker) - return - } - - return -} - -// FindPackageDefinition positions the reader r at the beginning of a package -// definition file ("__.PKGDEF") within a GC-created archive by reading -// from it, and returns the size of the package definition file in the archive. -// -// The reader must be positioned at the start of the archive file before calling -// this function, and "__.PKGDEF" is assumed to be the first file in the archive. -// -// See cmd/internal/archive for details on the archive format. -func FindPackageDefinition(r *bufio.Reader) (size int, err error) { - // Uses ReadSlice to limit risk of malformed inputs. 
- +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// The size result is the length of the export data in bytes, or -1 if not known. +func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -186,236 +52,48 @@ func FindPackageDefinition(r *bufio.Reader) (size int, err error) { return } - // Is the first line an archive file signature? - if string(line) != "!\n" { - err = fmt.Errorf("not the start of an archive file (%q)", line) + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") return } - // package export block should be first - size = readArchiveHeader(r, "__.PKGDEF") - if size <= 0 { - err = fmt.Errorf("not a package file") - return + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + hdr = string(line) + if size < 0 { + size = -1 } return } - -// ReadObjectHeaders reads object headers from the reader. Object headers are -// lines that do not start with an end-of-section marker "$$". The first header -// is the objabi header. On success, the reader will be positioned at the beginning -// of the end-of-section marker. -// -// It returns an error if any header does not fit in r.Size() bytes. -func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) { - // line is a temporary buffer for headers. - // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs. - var line []byte - - // objapi header should be the first line - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - objapi = string(line) - - // objapi header begins with "go object ". - if !strings.HasPrefix(objapi, "go object ") { - err = fmt.Errorf("not a go object file: %s", objapi) - return - } - - // process remaining object header lines - for { - // check for an end of section marker "$$" - line, err = r.Peek(2) - if err != nil { - return - } - if string(line) == "$$" { - return // stop - } - - // read next header - line, err = r.ReadSlice('\n') - if err != nil { - return - } - headers = append(headers, string(line)) - } -} - -// ReadExportDataHeader reads the export data header and format from r. -// It returns the number of bytes read, or an error if the format is no longer -// supported or it failed to read. -// -// The only currently supported format is binary export data in the -// unified export format. -func ReadExportDataHeader(r *bufio.Reader) (n int, err error) { - // Read export data header. 
- line, err := r.ReadSlice('\n') - if err != nil { - return - } - - hdr := string(line) - switch hdr { - case "$$\n": - err = fmt.Errorf("old textual export format no longer supported (recompile package)") - return - - case "$$B\n": - var format byte - format, err = r.ReadByte() - if err != nil { - return - } - // The unified export format starts with a 'u'. - switch format { - case 'u': - default: - // Older no longer supported export formats include: - // indexed export format which started with an 'i'; and - // the older binary export format which started with a 'c', - // 'd', or 'v' (from "version"). - err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format) - return - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - return - } - - n = len(hdr) + 1 // + 1 is for 'u' - return -} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// -// FindPkg is only used in tests within x/tools. -func FindPkg(path, srcDir string) (filename, id string, err error) { - // TODO(taking): Move internal/exportdata.FindPkg into its own file, - // and then this copy into a _test package. - if path == "" { - return "", "", errors.New("path is empty") - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. - if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - var bp *build.Package - bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - if bp.Goroot && bp.Dir != "" { - filename, err = lookupGorootExport(bp.Dir) - if err == nil { - _, err = os.Stat(filename) - } - if err == nil { - return filename, bp.ImportPath, nil - } - } - goto notfound - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - } - id = bp.ImportPath - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - f, statErr := os.Stat(filename) - if statErr == nil && !f.IsDir() { - return filename, id, nil - } - if err == nil { - err = statErr - } - } - -notfound: - if err == nil { - return "", path, fmt.Errorf("can't find import: %q", path) - } - return "", path, fmt.Errorf("can't find import: %q: %w", path, err) -} - -var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension - -var exportMap sync.Map // package dir → func() (string, error) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -// -// lookupGorootExport is only used in tests within x/tools. 
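The lookup described above amounts to asking the go command where a package's export data lives (normally in the build cache). A minimal sketch under that assumption, separate from the vendored sources; the package argument is a placeholder and error handling is reduced to a print.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	pkg := "fmt" // hypothetical package to resolve; any importable path works
	out, err := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkg).Output()
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	// Prints the path of the .a file holding the export data, e.g. under GOCACHE.
	fmt.Println("export data:", strings.TrimSpace(string(out)))
}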
-func lookupGorootExport(pkgDir string) (string, error) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - err error - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { - listOnce.Do(func() { - cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) - var output []byte - output, err = cmd.Output() - if err != nil { - if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { - err = errors.New(string(ee.Stderr)) - } - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) - return - } - - exportPath = exports[0] - }) - - return exportPath, err - }) - } - - return f.(func() (string, error))() -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 3dbd21d1..39df9112 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,11 +23,17 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" + "bytes" "fmt" + "go/build" "go/token" "go/types" "io" "os" + "os/exec" + "path/filepath" + "strings" + "sync" ) const ( @@ -39,14 +45,125 @@ const ( trace = false ) +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. -// -// Import is only used in tests. -func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser - var id string + var filename, id string if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -65,13 +182,12 @@ func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDi } rc = f } else { - var filename string - filename, id, err = FindPkg(path, srcDir) + filename, id = FindPkg(path, srcDir) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } - return nil, err + return nil, fmt.Errorf("can't find import: %q", id) } // no need to re-import if the package was imported completely before @@ -94,15 +210,57 @@ func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDi } defer rc.Close() + var hdr string + var size int64 buf := bufio.NewReader(rc) - data, err := ReadUnified(buf) - if err != nil { - err = fmt.Errorf("import %q: %v", path, err) + if hdr, size, err = FindExportData(buf); err != nil { return } - // unified: emitted by cmd/compile since go1.20. - _, pkg, err = UImportData(fset, packages, data, id) + switch hdr { + case "$$B\n": + var data []byte + data, err = io.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. 
+ if len(data) > 0 { + switch data[0] { + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'u': // unified, from go1.20 + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } return } + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 7dfc31a3..deeb67f3 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,227 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed package export. -// -// The indexed export data format is an evolution of the previous -// binary export data format. Its chief contribution is introducing an -// index table, which allows efficient random access of individual -// declarations and inline function bodies. In turn, this allows -// avoiding unnecessary work for compilation units that import large -// packages. -// -// -// The top-level data format is structured as: -// -// Header struct { -// Tag byte // 'i' -// Version uvarint -// StringSize uvarint -// DataSize uvarint -// } -// -// Strings [StringSize]byte -// Data [DataSize]byte -// -// MainIndex []struct{ -// PkgPath stringOff -// PkgName stringOff -// PkgHeight uvarint -// -// Decls []struct{ -// Name stringOff -// Offset declOff -// } -// } -// -// Fingerprint [8]byte -// -// uvarint means a uint64 written out using uvarint encoding. -// -// []T means a uvarint followed by that many T objects. In other -// words: -// -// Len uvarint -// Elems [Len]T -// -// stringOff means a uvarint that indicates an offset within the -// Strings section. At that offset is another uvarint, followed by -// that many bytes, which form the string value. -// -// declOff means a uvarint that indicates an offset within the Data -// section where the associated declaration can be found. 
-// -// -// There are five kinds of declarations, distinguished by their first -// byte: -// -// type Var struct { -// Tag byte // 'V' -// Pos Pos -// Type typeOff -// } -// -// type Func struct { -// Tag byte // 'F' or 'G' -// Pos Pos -// TypeParams []typeOff // only present if Tag == 'G' -// Signature Signature -// } -// -// type Const struct { -// Tag byte // 'C' -// Pos Pos -// Value Value -// } -// -// type Type struct { -// Tag byte // 'T' or 'U' -// Pos Pos -// TypeParams []typeOff // only present if Tag == 'U' -// Underlying typeOff -// -// Methods []struct{ // omitted if Underlying is an interface type -// Pos Pos -// Name stringOff -// Recv Param -// Signature Signature -// } -// } -// -// type Alias struct { -// Tag byte // 'A' or 'B' -// Pos Pos -// TypeParams []typeOff // only present if Tag == 'B' -// Type typeOff -// } -// -// // "Automatic" declaration of each typeparam -// type TypeParam struct { -// Tag byte // 'P' -// Pos Pos -// Implicit bool -// Constraint typeOff -// } -// -// typeOff means a uvarint that either indicates a predeclared type, -// or an offset into the Data section. If the uvarint is less than -// predeclReserved, then it indicates the index into the predeclared -// types list (see predeclared in bexport.go for order). Otherwise, -// subtracting predeclReserved yields the offset of a type descriptor. -// -// Value means a type, kind, and type-specific value. See -// (*exportWriter).value for details. -// -// -// There are twelve kinds of type descriptors, distinguished by an itag: -// -// type DefinedType struct { -// Tag itag // definedType -// Name stringOff -// PkgPath stringOff -// } -// -// type PointerType struct { -// Tag itag // pointerType -// Elem typeOff -// } -// -// type SliceType struct { -// Tag itag // sliceType -// Elem typeOff -// } -// -// type ArrayType struct { -// Tag itag // arrayType -// Len uint64 -// Elem typeOff -// } -// -// type ChanType struct { -// Tag itag // chanType -// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv -// Elem typeOff -// } -// -// type MapType struct { -// Tag itag // mapType -// Key typeOff -// Elem typeOff -// } -// -// type FuncType struct { -// Tag itag // signatureType -// PkgPath stringOff -// Signature Signature -// } -// -// type StructType struct { -// Tag itag // structType -// PkgPath stringOff -// Fields []struct { -// Pos Pos -// Name stringOff -// Type typeOff -// Embedded bool -// Note stringOff -// } -// } -// -// type InterfaceType struct { -// Tag itag // interfaceType -// PkgPath stringOff -// Embeddeds []struct { -// Pos Pos -// Type typeOff -// } -// Methods []struct { -// Pos Pos -// Name stringOff -// Signature Signature -// } -// } -// -// // Reference to a type param declaration -// type TypeParamType struct { -// Tag itag // typeParamType -// Name stringOff -// PkgPath stringOff -// } -// -// // Instantiation of a generic type (like List[T2] or List[int]) -// type InstanceType struct { -// Tag itag // instanceType -// Pos pos -// TypeArgs []typeOff -// BaseType typeOff -// } -// -// type UnionType struct { -// Tag itag // interfaceType -// Terms []struct { -// tilde bool -// Type typeOff -// } -// } -// -// -// -// type Signature struct { -// Params []Param -// Results []Param -// Variadic bool // omitted if Results is empty -// } -// -// type Param struct { -// Pos Pos -// Name stringOff -// Type typOff -// } -// -// -// Pos encodes a file:line:column triple, incorporating a simple delta -// encoding scheme within a data object. See exportWriter.pos for -// details. 
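For reference, the "uvarint" used throughout the format description above is the standard variable-length unsigned integer encoding; encoding/binary implements the same scheme. A minimal round-trip sketch, separate from the vendored sources (the value 300 is an arbitrary example):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300) // small values use few bytes; 300 encodes in two
	fmt.Printf("encoded: % x\n", buf[:n])

	v, read := binary.Uvarint(buf[:n]) // decode back to 300, reporting bytes consumed
	fmt.Println(v, read)
}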
+// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. package gcimporter @@ -242,30 +24,11 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. // -// For types, we use "shallow" export data. Historically, the Go -// compiler always produced a summary of the types for a given package -// that included types from other packages that it indirectly -// referenced: "deep" export data. This had the advantage that the -// compiler (and analogous tools such as gopls) need only load one -// file per direct import. However, it meant that the files tended to -// get larger based on the level of the package in the import -// graph. For example, higher-level packages in the kubernetes module -// have over 1MB of "deep" export data, even when they have almost no -// content of their own, merely because they mention a major type that -// references many others. In pathological cases the export data was -// 300x larger than the source for a package due to this quadratic -// growth. -// -// "Shallow" export data means that the serialized types describe only -// a single package. If those types mention types from other packages, -// the type checker may need to request additional packages beyond -// just the direct imports. Type information for the entire transitive -// closure of imports is provided (lazily) by the DAG. -// // No promises are made about the encoding other than that it can be decoded by // the same version of IIExportShallow. If you plan to save export data in the // file system, be sure to include a cryptographic digest of the executable in @@ -288,8 +51,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) } // IImportShallow decodes "shallow" types.Package data encoded by -// [IExportShallow] in the same executable. This function cannot import data -// from cmd/compile or gcexportdata.Write. +// IExportShallow in the same executable. This function cannot import data from +// cmd/compile or gcexportdata.Write. // // The importer calls getPackages to obtain package symbols for all // packages mentioned in the export data, including the one being @@ -460,7 +223,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) // Sort the set of needed offsets. Duplicates are harmless. 
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := file.Lines() // byte offset of each line start + lines := tokeninternal.GetLines(file) // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -744,13 +507,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -760,22 +523,9 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled - - var tparams *types.TypeParamList - if materialized { - tparams = aliases.TypeParams(alias) - } - if tparams.Len() == 0 { - w.tag(aliasTag) - } else { - w.tag(genericAliasTag) - } + w.tag(aliasTag) w.pos(obj.Pos()) - if tparams.Len() > 0 { - w.tparamList(obj.Name(), tparams, obj.Pkg()) - } - if materialized { + if alias, ok := t.(*aliases.Alias); ok { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -994,14 +744,8 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *types.Alias: - if targs := aliases.TypeArgs(t); targs.Len() > 0 { - w.startType(instanceType) - w.pos(t.Obj().Pos()) - w.typeList(targs, pkg) - w.typ(aliases.Origin(t), pkg) - return - } + case *aliases.Alias: + // TODO(adonovan): support parameterized aliases, following *types.Named. w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -1110,7 +854,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := types.Unalias(ft).(*types.Named); named != nil { + if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 69b1d697..136aa036 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,7 +3,9 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See iexport.go for the export data format. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. package gcimporter @@ -51,7 +53,6 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 - iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -539,7 +540,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := types.Unalias(rhs).(*types.Interface) + iface, _ := aliases.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -556,28 +557,19 @@ type importReader struct { prevColumn int64 } -// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. 
-// -// If TypeNames are not marked black (in the sense of go/types cycle -// detection), they may be mutated when dot-imported. Fix this by punching a -// hole through the type, when compiling with Go 1.23. (The bug has been fixed -// for 1.24, but the fix was not worth back-porting). -var markBlack = func(name *types.TypeName) {} - func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() switch tag { - case aliasTag, genericAliasTag: - var tparams []*types.TypeParam - if tag == genericAliasTag { - tparams = r.tparamList() - } + case aliasTag: typ := r.typ() - obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) - markBlack(obj) // workaround for golang/go#69912 - r.declare(obj) + // TODO(adonovan): support generic aliases: + // if tag == genericAliasTag { + // tparams := r.tparamList() + // alias.SetTypeParams(tparams) + // } + r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) case constTag: typ, val := r.value() @@ -597,9 +589,6 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) - - markBlack(obj) // workaround for golang/go#69912 - // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) @@ -626,7 +615,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -656,7 +645,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := types.Unalias(constraint).(*types.Interface) + iface, _ := aliases.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -863,7 +852,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := types.Unalias(t).(*types.Interface) + _, ok := aliases.Unalias(t).(*types.Interface) return ok } @@ -873,7 +862,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %v)", k, base) + r.p.trace("importing type %d (base: %s)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -970,7 +959,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := types.NewInterfaceType(methods, embeddeds) + typ := newInterface(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1062,7 +1051,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. 
- xs[i] = types.Unalias(r.typ()).(*types.TypeParam) + xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -1109,9 +1098,3 @@ func (r *importReader) byte() byte { } return x } - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go deleted file mode 100644 index 7586bfac..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 && !go1.24 - -package gcimporter - -import ( - "go/token" - "go/types" - "unsafe" -) - -// TODO(rfindley): delete this workaround once go1.24 is assured. - -func init() { - // Update markBlack so that it correctly sets the color - // of imported TypeNames. - // - // See the doc comment for markBlack for details. - - type color uint32 - const ( - white color = iota - black - grey - ) - type object struct { - _ *types.Scope - _ token.Pos - _ *types.Package - _ string - _ types.Type - _ uint32 - color_ color - _ token.Pos - } - type typeName struct { - object - } - - // If the size of types.TypeName changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) - var _ [-delta * delta]int - - markBlack = func(obj *types.TypeName) { - type uP = unsafe.Pointer - var ptr *typeName - *(*uP)(uP(&ptr)) = uP(obj) - ptr.color_ = black - } -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go new file mode 100644 index 00000000..8b163e3d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.11 +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go new file mode 100644 index 00000000..49984f40 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.11 +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go deleted file mode 100644 index 907c8557..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import ( - "go/types" - "sync" -) - -// predecl is a cache for the predeclared types in types.Universe. -// -// Cache a distinct result based on the runtime value of any. -// The pointer value of the any type varies based on GODEBUG settings. -var predeclMu sync.Mutex -var predecl map[types.Type][]types.Type - -func predeclared() []types.Type { - anyt := types.Universe.Lookup("any").Type() - - predeclMu.Lock() - defer predeclMu.Unlock() - - if pre, ok := predecl[anyt]; ok { - return pre - } - - if predecl == nil { - predecl = make(map[types.Type][]types.Type) - } - - decls := []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - anyt, - } - - predecl[anyt] = decls - return decls -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go deleted file mode 100644 index 4af810dc..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import ( - "bufio" - "io" - "strconv" - "strings" -) - -// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. 
-func readArchiveHeader(b *bufio.Reader, name string) int { - // architecture-independent object file output - const HeaderSize = 60 - - var buf [HeaderSize]byte - if _, err := io.ReadFull(b, buf[:]); err != nil { - return -1 - } - aname := strings.Trim(string(buf[0:16]), " ") - if !strings.HasPrefix(aname, name) { - return -1 - } - asize := strings.Trim(string(buf[48:58]), " ") - i, _ := strconv.Atoi(asize) - return i -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go new file mode 100644 index 00000000..0cd3b91b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -0,0 +1,34 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGenerics + +// additionalPredeclared returns additional predeclared types in go.1.18. +func additionalPredeclared() []types.Type { + return []types.Type{ + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + types.Universe.Lookup("any").Type(), + } +} + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go new file mode 100644 index 00000000..38b624ca --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !goexperiment.unified +// +build !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go new file mode 100644 index 00000000..b5118d0b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build goexperiment.unified +// +build goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 6cdab448..2c077068 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,6 +11,7 @@ import ( "go/token" "go/types" "sort" + "strings" "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" @@ -51,7 +52,8 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index + idx pkgbits.Index + needed bool } // See cmd/compile/internal/noder.typeInfo. 
@@ -70,6 +72,7 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -107,17 +110,13 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - if r.Version().Has(pkgbits.HasInit) { - r.Bool() - } + r.Bool() // has init for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - if r.Version().Has(pkgbits.DerivedFuncInstance) { - assert(!r.Bool()) - } + assert(!r.Bool()) r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -166,7 +165,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // derived is a slice of types derived from tparams, which may be + // devived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -264,12 +263,7 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { - // cmd/compile emits path="main" for main packages because - // that's the linker symbol prefix it used; but we need - // the package's path as it would be reported by go list, - // hence "main" below. - // See test at go/packages.TestMainPackagePathInModeTypes. - case "", "main": + case "": path = r.p.PkgPath() case "builtin": return nil // universe @@ -477,9 +471,7 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - if r.Version().Has(pkgbits.DerivedFuncInstance) { - assert(!r.Bool()) - } + assert(!r.Bool()) pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -533,12 +525,8 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() - var tparams []*types.TypeParam - if r.Version().Has(pkgbits.AliasTypeParamNames) { - tparams = r.typeParamNames() - } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) case pkgbits.ObjConst: pos := r.pos() @@ -565,7 +553,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
- if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) @@ -644,10 +632,7 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} - if r.Version().Has(pkgbits.DerivedInfoNeeded) { - assert(!r.Bool()) - } + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} } pr.retireReader(r) @@ -741,17 +726,3 @@ func pkgScope(pkg *types.Package) *types.Scope { } return types.Universe } - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index e333efc8..af0ee6c6 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -16,6 +16,7 @@ import ( "os" "os/exec" "path/filepath" + "reflect" "regexp" "runtime" "strconv" @@ -199,14 +200,12 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io return } -// logf logs if i.Logf is non-nil. -func (i *Invocation) logf(format string, args ...any) { - if i.Logf != nil { - i.Logf(format, args...) - } -} - func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + log := i.Logf + if log == nil { + log = func(string, ...interface{}) {} + } + goArgs := []string{i.Verb} appendModFile := func() { @@ -249,13 +248,16 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Stdout = stdout cmd.Stderr = stderr - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime. - cmd.WaitDelay = 30 * time.Second + // cmd.WaitDelay was added only in go1.20 (see #50436). + if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + waitDelay.Set(reflect.ValueOf(30 * time.Second)) + } // The cwd gets resolved to the real path. 
On Darwin, where // /tmp is a symlink, this breaks anything that expects the @@ -275,12 +277,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Dir = i.WorkingDir } - debugStr := cmdDebugStr(cmd) - i.logf("starting %v", debugStr) - start := time.Now() - defer func() { - i.logf("%s for %v", time.Since(start), debugStr) - }() + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) } @@ -517,7 +514,7 @@ func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), for k, v := range overlay { // Use a unique basename for each file (001-foo.go), // to avoid creating nested directories. - base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k)) + base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k)) filename := filepath.Join(dir, base) err := os.WriteFile(filename, v, 0666) if err != nil { diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 5ae57697..4569313a 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -90,6 +90,18 @@ type ImportFix struct { Relevance float64 // see pkg } +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. +} + +// A packageInfo represents what's known about a package. +type packageInfo struct { + name string // real package name, if known. + exports map[string]bool // known exports. +} + // parseOtherFiles parses all the Go files in srcDir except filename, including // test files if filename looks like a test. // @@ -118,7 +130,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename continue } - f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution) + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) if err != nil { continue } @@ -149,8 +161,8 @@ func addGlobals(f *ast.File, globals map[string]bool) { // collectReferences builds a map of selector expressions, from // left hand side (X) to a set of right hand sides (Sel). -func collectReferences(f *ast.File) References { - refs := References{} +func collectReferences(f *ast.File) references { + refs := references{} var visitor visitFn visitor = func(node ast.Node) ast.Visitor { @@ -220,7 +232,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { allFound := true for right := range syms { - if !pkgInfo.Exports[right] { + if !pkgInfo.exports[right] { allFound = false break } @@ -233,6 +245,11 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { return nil } +// references is set of references found in a Go file. The first map key is the +// left hand side of a selector expression, the second key is the right hand +// side, and the value should always be true. +type references map[string]map[string]bool + // A pass contains all the inputs and state necessary to fix a file's imports. // It can be modified in some ways during use; see comments below. type pass struct { @@ -240,29 +257,27 @@ type pass struct { fset *token.FileSet // fset used to parse f and its siblings. f *ast.File // the file being fixed. srcDir string // the directory containing f. 
- logf func(string, ...any) - source Source // the environment to use for go commands, etc. - loadRealPackageNames bool // if true, load package names from disk rather than guessing them. - otherFiles []*ast.File // sibling files. - goroot string + env *ProcessEnv // the environment to use for go commands, etc. + loadRealPackageNames bool // if true, load package names from disk rather than guessing them. + otherFiles []*ast.File // sibling files. // Intermediate state, generated by load. existingImports map[string][]*ImportInfo - allRefs References - missingRefs References + allRefs references + missingRefs references // Inputs to fix. These can be augmented between successive fix calls. lastTry bool // indicates that this is the last call and fix should clean up as best it can. candidates []*ImportInfo // candidate imports in priority order. - knownPackages map[string]*PackageInfo // information about all known packages. + knownPackages map[string]*packageInfo // information about all known packages. } // loadPackageNames saves the package names for everything referenced by imports. -func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error { - if p.logf != nil { - p.logf("loading package names for %v packages", len(imports)) +func (p *pass) loadPackageNames(imports []*ImportInfo) error { + if p.env.Logf != nil { + p.env.Logf("loading package names for %v packages", len(imports)) defer func() { - p.logf("done loading package names for %v packages", len(imports)) + p.env.Logf("done loading package names for %v packages", len(imports)) }() } var unknown []string @@ -273,17 +288,20 @@ func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) erro unknown = append(unknown, imp.ImportPath) } - names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown) + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) if err != nil { return err } - // TODO(rfindley): revisit this. Why do we need to store known packages with - // no exports? The inconsistent data is confusing. for path, name := range names { - p.knownPackages[path] = &PackageInfo{ - Name: name, - Exports: map[string]bool{}, + p.knownPackages[path] = &packageInfo{ + name: name, + exports: map[string]bool{}, } } return nil @@ -311,8 +329,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { return imp.Name } known := p.knownPackages[imp.ImportPath] - if known != nil && known.Name != "" { - return withoutVersion(known.Name) + if known != nil && known.name != "" { + return withoutVersion(known.name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -320,9 +338,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { // load reads in everything necessary to run a pass, and reports whether the // file already has all the imports it needs. It fills in p.missingRefs with the // file's missing symbols, if any, or removes unused imports if not. -func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) { - p.knownPackages = map[string]*PackageInfo{} - p.missingRefs = References{} +func (p *pass) load() ([]*ImportFix, bool) { + p.knownPackages = map[string]*packageInfo{} + p.missingRefs = references{} p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. @@ -345,10 +363,10 @@ func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) { // f's imports by the identifier they introduce. 
imports := collectImports(p.f) if p.loadRealPackageNames { - err := p.loadPackageNames(ctx, append(imports, p.candidates...)) + err := p.loadPackageNames(append(imports, p.candidates...)) if err != nil { - if p.logf != nil { - p.logf("loading package names: %v", err) + if p.env.Logf != nil { + p.env.Logf("loading package names: %v", err) } return nil, false } @@ -518,10 +536,9 @@ func (p *pass) assumeSiblingImportsValid() { // We have the stdlib in memory; no need to guess. rights = symbolNameSet(m) } - // TODO(rfindley): we should set package name here, for consistency. - p.addCandidate(imp, &PackageInfo{ + p.addCandidate(imp, &packageInfo{ // no name; we already know it. - Exports: rights, + exports: rights, }) } } @@ -530,14 +547,14 @@ func (p *pass) assumeSiblingImportsValid() { // addCandidate adds a candidate import to p, and merges in the information // in pkg. -func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) { +func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { p.candidates = append(p.candidates, imp) if existing, ok := p.knownPackages[imp.ImportPath]; ok { - if existing.Name == "" { - existing.Name = pkg.Name + if existing.name == "" { + existing.name = pkg.name } - for export := range pkg.Exports { - existing.Exports[export] = true + for export := range pkg.exports { + existing.exports[export] = true } } else { p.knownPackages[imp.ImportPath] = pkg @@ -546,14 +563,7 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) { // fixImports adds and removes imports from f so that all its references are // satisfied and there are no unused imports. -// -// This is declared as a variable rather than a function so goimports can -// easily be extended by adding a file with an init function. -// -// DO NOT REMOVE: used internally at Google. -var fixImports = fixImportsDefault - -func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { +func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { fixes, err := getFixes(context.Background(), fset, f, filename, env) if err != nil { return err @@ -565,42 +575,21 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P // getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { - source, err := NewProcessEnvSource(env, filename, f.Name.Name) - if err != nil { - return nil, err - } - goEnv, err := env.goEnv() - if err != nil { - return nil, err - } - return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source) -} - -func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) { - // This logic is defensively duplicated from getFixes. abs, err := filepath.Abs(filename) if err != nil { return nil, err } srcDir := filepath.Dir(abs) - - if logf != nil { - logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir) + if env.Logf != nil { + env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) } // First pass: looking only at f, and using the naive algorithm to // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. 
- p := &pass{ - fset: fset, - f: f, - srcDir: srcDir, - logf: logf, - goroot: goroot, - source: source, - } - if fixes, done := p.load(ctx); done { + p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} + if fixes, done := p.load(); done { return fixes, nil } @@ -612,7 +601,7 @@ func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, f // Second pass: add information from other files in the same package, // like their package vars and imports. p.otherFiles = otherFiles - if fixes, done := p.load(ctx); done { + if fixes, done := p.load(); done { return fixes, nil } @@ -625,17 +614,10 @@ func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, f // Third pass: get real package names where we had previously used // the naive algorithm. - p = &pass{ - fset: fset, - f: f, - srcDir: srcDir, - logf: logf, - goroot: goroot, - source: p.source, // safe to reuse, as it's just a wrapper around env - } + p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} p.loadRealPackageNames = true p.otherFiles = otherFiles - if fixes, done := p.load(ctx); done { + if fixes, done := p.load(); done { return fixes, nil } @@ -849,7 +831,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return true }, dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, References{searchPkg: nil}, pkg) + return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) }, packageNameLoaded: func(pkg *pkg) bool { return pkg.packageName == searchPkg @@ -1032,26 +1014,16 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { // already know the view type. if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { e.resolver = newGopathResolver(e) - e.logf("created gopath resolver") } else if r, err := newModuleResolver(e, e.ModCache); err != nil { e.resolverErr = err - e.logf("failed to create module resolver: %v", err) } else { e.resolver = Resolver(r) - e.logf("created module resolver") } } return e.resolver, e.resolverErr } -// logf logs if e.Logf is non-nil. -func (e *ProcessEnv) logf(format string, args ...any) { - if e.Logf != nil { - e.Logf(format, args...) - } -} - // buildContext returns the build.Context to use for matching files. // // TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform @@ -1100,7 +1072,11 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) return e.GocmdRunner.Run(ctx, inv) } -func addStdlibCandidates(pass *pass, refs References) error { +func addStdlibCandidates(pass *pass, refs references) error { + goenv, err := pass.env.goEnv() + if err != nil { + return err + } localbase := func(nm string) string { ans := path.Base(nm) if ans[0] == 'v' { @@ -1115,13 +1091,13 @@ func addStdlibCandidates(pass *pass, refs References) error { } add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { return } exports := symbolNameSet(stdlib.PackageSymbols[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, - &PackageInfo{Name: localbase(pkg), Exports: exports}) + &packageInfo{name: localbase(pkg), exports: exports}) } for left := range refs { if left == "rand" { @@ -1151,8 +1127,8 @@ type Resolver interface { // scan works with callback to search for packages. See scanCallback for details. 
scan(ctx context.Context, callback *scanCallback) error - // loadExports returns the package name and set of exported symbols in the - // package at dir. loadExports may be called concurrently. + // loadExports returns the set of exported symbols in the package at dir. + // loadExports may be called concurrently. loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) // scoreImportPath returns the relevance for an import path. @@ -1185,22 +1161,101 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) } -func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { ctx, done := event.Start(ctx, "imports.addExternalCandidates") defer done() - results, err := pass.source.ResolveReferences(ctx, filename, refs) + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false + } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, + } + resolver, err := pass.env.GetResolver() if err != nil { return err } + if err = resolver.scan(ctx, callback); err != nil { + return err + } - for _, result := range results { - if result == nil { - continue - } + // Search for imports matching potential package references. + type result struct { + imp *ImportInfo + pkg *packageInfo + } + results := make(chan result, len(refs)) + + ctx, cancel := context.WithCancel(ctx) + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + var ( + firstErr error + firstErrOnce sync.Once + ) + for pkgName, symbols := range refs { + wg.Add(1) + go func(pkgName string, symbols map[string]bool) { + defer wg.Done() + + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) + + if err != nil { + firstErrOnce.Do(func() { + firstErr = err + cancel() + }) + return + } + + if found == nil { + return // No matching package. + } + + imp := &ImportInfo{ + ImportPath: found.importPathShort, + } + + pkg := &packageInfo{ + name: pkgName, + exports: symbols, + } + results <- result{imp, pkg} + }(pkgName, symbols) + } + go func() { + wg.Wait() + close(results) + }() + + for result := range results { // Don't offer completions that would shadow predeclared // names, such as github.com/coreos/etcd/error. - if types.Universe.Lookup(result.Package.Name) != nil { // predeclared + if types.Universe.Lookup(result.pkg.name) != nil { // predeclared // Ideally we would skip this candidate only // if the predeclared name is actually // referenced by the file, but that's a lot @@ -1209,9 +1264,9 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs References, fil // user before long. 
continue } - pass.addCandidate(result.Import, result.Package) + pass.addCandidate(result.imp, result.pkg) } - return nil + return firstErr } // notIdentifier reports whether ch is an invalid identifier character. @@ -1553,10 +1608,11 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } fullFile := filepath.Join(dir, fi.Name()) - // Legacy ast.Object resolution is needed here. f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { - env.logf("error parsing %v: %v", fullFile, err) + if env.Logf != nil { + env.Logf("error parsing %v: %v", fullFile, err) + } continue } if f.Name.Name == "documentation" { @@ -1592,7 +1648,9 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } sortSymbols(exports) - env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) + if env.Logf != nil { + env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) + } return pkgName, exports, nil } @@ -1602,39 +1660,25 @@ func sortSymbols(syms []stdlib.Symbol) { }) } -// A symbolSearcher searches for a package with a set of symbols, among a set -// of candidates. See [symbolSearcher.search]. -// -// The search occurs within the scope of a single file, with context captured -// in srcDir and xtest. -type symbolSearcher struct { - logf func(string, ...any) - srcDir string // directory containing the file - xtest bool // if set, the file containing is an x_test file - loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) -} - -// search searches the provided candidates for a package containing all -// exported symbols. -// -// If successful, returns the resulting package. -func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { +// findImport searches for a package with the given symbols. +// If no package is found, findImport returns ("", false, nil) +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so // there's no "penalty" for vendoring. sort.Sort(byDistanceOrImportPathShortLength(candidates)) - if s.logf != nil { + if pass.env.Logf != nil { for i, c := range candidates { - s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) + pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } + resolver, err := pass.env.GetResolver() + if err != nil { + return nil, err + } - // Arrange rescv so that we can we can await results in order of relevance - // and exit as soon as we find the first match. - // - // Search with bounded concurrency, returning as soon as the first result - // among rescv is non-nil. + // Collect exports for packages with matching names. rescv := make([]chan *pkg, len(candidates)) for i := range candidates { rescv[i] = make(chan *pkg, 1) @@ -1642,7 +1686,6 @@ func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, p const maxConcurrentPackageImport = 4 loadExportsSem := make(chan struct{}, maxConcurrentPackageImport) - // Ensure that all work is completed at exit. 
ctx, cancel := context.WithCancel(ctx) var wg sync.WaitGroup defer func() { @@ -1650,7 +1693,6 @@ func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, p wg.Wait() }() - // Start the search. wg.Add(1) go func() { defer wg.Done() @@ -1661,67 +1703,55 @@ func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, p return } - i := i - c := c wg.Add(1) - go func() { + go func(c pkgDistance, resc chan<- *pkg) { defer func() { <-loadExportsSem wg.Done() }() - if s.logf != nil { - s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) + + if pass.env.Logf != nil { + pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } - pkg, err := s.searchOne(ctx, c, symbols) + // If we're an x_test, load the package under test's test variant. + includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir + _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) if err != nil { - if s.logf != nil && ctx.Err() == nil { - s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) + if pass.env.Logf != nil { + pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) } - pkg = nil + resc <- nil + return } - rescv[i] <- pkg // may be nil - }() + + exportsMap := make(map[string]bool, len(exports)) + for _, sym := range exports { + exportsMap[sym.Name] = true + } + + // If it doesn't have the right + // symbols, send nil to mean no match. + for symbol := range symbols { + if !exportsMap[symbol] { + resc <- nil + return + } + } + resc <- c.pkg + }(c, rescv[i]) } }() - // Await the first (best) result. for _, resc := range rescv { - select { - case r := <-resc: - if r != nil { - return r, nil - } - case <-ctx.Done(): - return nil, ctx.Err() + pkg := <-resc + if pkg == nil { + continue } + return pkg, nil } return nil, nil } -func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) { - if ctx.Err() != nil { - return nil, ctx.Err() - } - // If we're considering the package under test from an x_test, load the - // test variant. - includeTest := s.xtest && c.pkg.dir == s.srcDir - _, exports, err := s.loadExports(ctx, c.pkg, includeTest) - if err != nil { - return nil, err - } - - exportsMap := make(map[string]bool, len(exports)) - for _, sym := range exports { - exportsMap[sym.Name] = true - } - for symbol := range symbols { - if !exportsMap[symbol] { - return nil, nil // no match - } - } - return c.pkg, nil -} - // pkgIsCandidate reports whether pkg is a candidate for satisfying the // finding which package pkgIdent in the file named by filename is trying // to refer to. @@ -1734,31 +1764,65 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m // filename is the file being formatted. // pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { // Check "internal" and "vendor" visibility: if !canUse(filename, pkg.dir) { return false } // Speed optimization to minimize disk I/O: + // the last two components on disk must contain the + // package name somewhere. // - // Use the matchesPath heuristic to filter to package paths that could - // reasonably match a dangling reference. 
- // - // This permits mismatch naming like directory "go-foo" being package "foo", - // or "pkg.v3" being "pkg", or directory - // "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but - // doesn't permit a directory "foo" to be package "bar", which is strongly - // discouraged anyway. There's no reason goimports needs to be slow just to - // accommodate that. + // This permits mismatch naming like directory + // "go-foo" being package "foo", or "pkg.v3" being "pkg", + // or directory "google.golang.org/api/cloudbilling/v1" + // being package "cloudbilling", but doesn't + // permit a directory "foo" to be package + // "bar", which is strongly discouraged + // anyway. There's no reason goimports needs + // to be slow just to accommodate that. for pkgIdent := range refs { - if matchesPath(pkgIdent, pkg.importPathShort) { + lastTwo := lastTwoComponents(pkg.importPathShort) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } + } + return false +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { return true } } return false } +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + // canUse reports whether the package in dir is usable from filename, // respecting the Go "internal" and "vendor" visibility rules. func canUse(filename, dir string) bool { @@ -1799,84 +1863,19 @@ func canUse(filename, dir string) bool { return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal") } -// matchesPath reports whether ident may match a potential package name -// referred to by path, using heuristics to filter out unidiomatic package -// names. -// -// Specifically, it checks whether either of the last two '/'- or '\'-delimited -// path segments matches the identifier. The segment-matching heuristic must -// allow for various conventions around segment naming, including go-foo, -// foo-go, and foo.v3. To handle all of these, matching considers both (1) the -// entire segment, ignoring '-' and '.', as well as (2) the last subsegment -// separated by '-' or '.'. So the segment foo-go matches all of the following -// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII -// identifiers). -// -// See the docstring for [pkgIsCandidate] for an explanation of how this -// heuristic filters potential candidate packages. -func matchesPath(ident, path string) bool { - // Ignore case, for ASCII. - lowerIfASCII := func(b byte) byte { - if 'A' <= b && b <= 'Z' { - return b + ('a' - 'A') - } - return b - } - - // match reports whether path[start:end] matches ident, ignoring [.-]. - match := func(start, end int) bool { - ii := len(ident) - 1 // current byte in ident - pi := end - 1 // current byte in path - for ; pi >= start && ii >= 0; pi-- { - pb := path[pi] - if pb == '-' || pb == '.' 
{ - continue - } - pb = lowerIfASCII(pb) - ib := lowerIfASCII(ident[ii]) - if pb != ib { - return false - } - ii-- - } - return ii < 0 && pi < start // all bytes matched - } - - // segmentEnd and subsegmentEnd hold the end points of the current segment - // and subsegment intervals. - segmentEnd := len(path) - subsegmentEnd := len(path) - - // Count slashes; we only care about the last two segments. +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { nslash := 0 - - for i := len(path) - 1; i >= 0; i-- { - switch b := path[i]; b { - // TODO(rfindley): we handle backlashes here only because the previous - // heuristic handled backslashes. This is perhaps overly defensive, but is - // the result of many lessons regarding Chesterton's fence and the - // goimports codebase. - // - // However, this function is only ever called with something called an - // 'importPath'. Is it possible that this is a real import path, and - // therefore we need only consider forward slashes? - case '/', '\\': - if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) { - return true - } + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { nslash++ if nslash == 2 { - return false // did not match above + return v[i:] } - segmentEnd, subsegmentEnd = i, i // reset - case '-', '.': - if match(i+1, subsegmentEnd) { - return true - } - subsegmentEnd = i } } - return match(0, segmentEnd) || match(0, subsegmentEnd) + return v } type visitFn func(node ast.Node) ast.Visitor diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 2215a128..f8346552 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -47,14 +47,7 @@ type Options struct { // Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { fileSet := token.NewFileSet() - var parserMode parser.Mode - if opt.Comments { - parserMode |= parser.ParseComments - } - if opt.AllErrors { - parserMode |= parser.AllErrors - } - file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment) + file, adjust, err := parse(fileSet, filename, src, opt) if err != nil { return nil, err } @@ -73,19 +66,17 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. -func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) { +func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { ctx, done := event.Start(ctx, "imports.FixImports") defer done() fileSet := token.NewFileSet() - // TODO(rfindley): these default values for ParseComments and AllErrors were - // extracted from gopls, but are they even needed? - file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true) + file, _, err := parse(fileSet, filename, src, opt) if err != nil { return nil, err } - return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source) + return getFixes(ctx, fileSet, file, filename, opt.Env) } // ApplyFixes applies all of the fixes to the file and formats it. 
extraMode @@ -95,7 +86,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. fileSet := token.NewFileSet() - parserMode := parser.SkipObjectResolution + parserMode := parser.Mode(0) if opt.Comments { parserMode |= parser.ParseComments } @@ -123,7 +114,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // formatted file, and returns the postpocessed result. func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { mergeImports(file) - sortImports(opt.LocalPrefix, fset.File(file.FileStart), file) + sortImports(opt.LocalPrefix, fset.File(file.Pos()), file) var spacesBefore []string // import paths we need spaces before for _, impSection := range astutil.Imports(fset, file) { // Within each block of contiguous imports, see if any @@ -173,9 +164,13 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori // parse parses src, which was read from filename, // as a Go source file or statement list. -func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) { - if parserMode&parser.SkipObjectResolution != 0 { - panic("legacy ast.Object resolution is required") +func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors } // Try as whole source file. @@ -186,7 +181,7 @@ func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.M // If the error is that the source file didn't begin with a // package line and we accept fragmented input, fall through to // try as a source fragment. Stop and return on any other error. - if !fragment || !strings.Contains(err.Error(), "expected 'package'") { + if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") { return nil, nil, err } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 8555e3f8..82fe644a 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -245,10 +245,7 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe // 2. Use this to separate module cache scanning from other scanning. func gomodcacheForEnv(goenv map[string]string) string { if gmc := goenv["GOMODCACHE"]; gmc != "" { - // golang/go#67156: ensure that the module cache is clean, since it is - // assumed as a prefix to directories scanned by gopathwalk, which are - // themselves clean. - return filepath.Clean(gmc) + return gmc } gopaths := filepath.SplitList(goenv["GOPATH"]) if len(gopaths) == 0 { @@ -268,7 +265,9 @@ func (r *ModuleResolver) initAllMods() error { return err } if mod.Dir == "" { - r.env.logf("module %v has not been downloaded and will be ignored", mod.Path) + if r.env.Logf != nil { + r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) + } // Can't do anything with a module that's not downloaded. 
continue } @@ -743,8 +742,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { subdir := "" - if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) { - subdir = dir[len(prefix):] + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] } importPath := filepath.ToSlash(subdir) if strings.HasPrefix(importPath, "vendor/") { @@ -767,7 +766,9 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) if err != nil { - r.env.logf("decoding module cache path %q: %v", subdir, err) + if r.env.Logf != nil { + r.env.Logf("decoding module cache path %q: %v", subdir, err) + } return directoryPackageInfo{ status: directoryScanned, err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go deleted file mode 100644 index cbe4f3c5..00000000 --- a/vendor/golang.org/x/tools/internal/imports/source.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package imports - -import "context" - -// These types document the APIs below. -// -// TODO(rfindley): consider making these defined types rather than aliases. -type ( - ImportPath = string - PackageName = string - Symbol = string - - // References is set of References found in a Go file. The first map key is the - // left hand side of a selector expression, the second key is the right hand - // side, and the value should always be true. - References = map[PackageName]map[Symbol]bool -) - -// A Result satisfies a missing import. -// -// The Import field describes the missing import spec, and the Package field -// summarizes the package exports. -type Result struct { - Import *ImportInfo - Package *PackageInfo -} - -// An ImportInfo represents a single import statement. -type ImportInfo struct { - ImportPath string // import path, e.g. "crypto/rand". - Name string // import name, e.g. "crand", or "" if none. -} - -// A PackageInfo represents what's known about a package. -type PackageInfo struct { - Name string // package name in the package declaration, if known - Exports map[string]bool // set of names of known package level sortSymbols -} - -// A Source provides imports to satisfy unresolved references in the file being -// fixed. -type Source interface { - // LoadPackageNames queries PackageName information for the requested import - // paths, when operating from the provided srcDir. - // - // TODO(rfindley): try to refactor to remove this operation. - LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) - - // ResolveReferences asks the Source for the best package name to satisfy - // each of the missing references, in the context of fixing the given - // filename. - // - // Returns a map from package name to a [Result] for that package name that - // provides the required symbols. Keys may be omitted in the map if no - // candidates satisfy all missing references for that package name. It is up - // to each data source to select the best result for each entry in the - // missing map. 
- ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) -} diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go deleted file mode 100644 index d14abaa3..00000000 --- a/vendor/golang.org/x/tools/internal/imports/source_env.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package imports - -import ( - "context" - "path/filepath" - "strings" - "sync" - - "golang.org/x/sync/errgroup" - "golang.org/x/tools/internal/gopathwalk" -) - -// ProcessEnvSource implements the [Source] interface using the legacy -// [ProcessEnv] abstraction. -type ProcessEnvSource struct { - env *ProcessEnv - srcDir string - filename string - pkgName string -} - -// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given -// env, to be used for fixing imports in the file with name filename in package -// named pkgName. -func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) { - abs, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - srcDir := filepath.Dir(abs) - return &ProcessEnvSource{ - env: env, - srcDir: srcDir, - filename: filename, - pkgName: pkgName, - }, nil -} - -func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) { - r, err := s.env.GetResolver() - if err != nil { - return nil, err - } - return r.loadPackageNames(unknown, srcDir) -} - -func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) { - var mu sync.Mutex - found := make(map[string][]pkgDistance) - callback := &scanCallback{ - rootFound: func(gopathwalk.Root) bool { - return true // We want everything. - }, - dirFound: func(pkg *pkg) bool { - return pkgIsCandidate(filename, refs, pkg) - }, - packageNameLoaded: func(pkg *pkg) bool { - if _, want := refs[pkg.packageName]; !want { - return false - } - if pkg.dir == s.srcDir && s.pkgName == pkg.packageName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - return false - } - if !canUse(filename, pkg.dir) { - return false - } - mu.Lock() - defer mu.Unlock() - found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)}) - return false // We'll do our own loading after we sort. - }, - } - resolver, err := s.env.GetResolver() - if err != nil { - return nil, err - } - if err := resolver.scan(ctx, callback); err != nil { - return nil, err - } - - g, ctx := errgroup.WithContext(ctx) - - searcher := symbolSearcher{ - logf: s.env.logf, - srcDir: s.srcDir, - xtest: strings.HasSuffix(s.pkgName, "_test"), - loadExports: resolver.loadExports, - } - - var resultMu sync.Mutex - results := make(map[string]*Result, len(refs)) - for pkgName, symbols := range refs { - g.Go(func() error { - found, err := searcher.search(ctx, found[pkgName], pkgName, symbols) - if err != nil { - return err - } - if found == nil { - return nil // No matching package. 
- } - - imp := &ImportInfo{ - ImportPath: found.importPathShort, - } - pkg := &PackageInfo{ - Name: pkgName, - Exports: symbols, - } - resultMu.Lock() - results[pkgName] = &Result{Import: imp, Package: pkg} - resultMu.Unlock() - return nil - }) - } - if err := g.Wait(); err != nil { - return nil, err - } - var ans []*Result - for _, x := range results { - ans = append(ans, x) - } - return ans, nil -} diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go deleted file mode 100644 index 05229f06..00000000 --- a/vendor/golang.org/x/tools/internal/imports/source_modindex.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package imports - -import ( - "context" - "sync" - "time" - - "golang.org/x/tools/internal/modindex" -) - -// This code is here rather than in the modindex package -// to avoid import loops - -// implements Source using modindex, so only for module cache. -// -// this is perhaps over-engineered. A new Index is read at first use. -// And then Update is called after every 15 minutes, and a new Index -// is read if the index changed. It is not clear the Mutex is needed. -type IndexSource struct { - modcachedir string - mutex sync.Mutex - ix *modindex.Index - expires time.Time -} - -// create a new Source. Called from NewView in cache/session.go. -func NewIndexSource(cachedir string) *IndexSource { - return &IndexSource{modcachedir: cachedir} -} - -func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) { - /// This is used by goimports to resolve the package names of imports of the - // current package, which is irrelevant for the module cache. - return nil, nil -} - -func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { - if err := s.maybeReadIndex(); err != nil { - return nil, err - } - var cs []modindex.Candidate - for pkg, nms := range missing { - for nm := range nms { - x := s.ix.Lookup(pkg, nm, false) - cs = append(cs, x...) - } - } - found := make(map[string]*Result) - for _, c := range cs { - var x *Result - if x = found[c.ImportPath]; x == nil { - x = &Result{ - Import: &ImportInfo{ - ImportPath: c.ImportPath, - Name: "", - }, - Package: &PackageInfo{ - Name: c.PkgName, - Exports: make(map[string]bool), - }, - } - found[c.ImportPath] = x - } - x.Package.Exports[c.Name] = true - } - var ans []*Result - for _, x := range found { - ans = append(ans, x) - } - return ans, nil -} - -func (s *IndexSource) maybeReadIndex() error { - s.mutex.Lock() - defer s.mutex.Unlock() - - var readIndex bool - if time.Now().After(s.expires) { - ok, err := modindex.Update(s.modcachedir) - if err != nil { - return err - } - if ok { - readIndex = true - } - } - - if readIndex || s.ix == nil { - ix, err := modindex.ReadIndex(s.modcachedir) - if err != nil { - return err - } - s.ix = ix - // for now refresh every 15 minutes - s.expires = time.Now().Add(time.Minute * 15) - } - - return nil -} diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go deleted file mode 100644 index 1e1a02f2..00000000 --- a/vendor/golang.org/x/tools/internal/modindex/directories.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package modindex - -import ( - "fmt" - "log" - "os" - "path/filepath" - "regexp" - "slices" - "strings" - "sync" - "time" - - "golang.org/x/mod/semver" - "golang.org/x/tools/internal/gopathwalk" -) - -type directory struct { - path Relpath - importPath string - version string // semantic version - syms []symbol -} - -// filterDirs groups the directories by import path, -// sorting the ones with the same import path by semantic version, -// most recent first. -func byImportPath(dirs []Relpath) (map[string][]*directory, error) { - ans := make(map[string][]*directory) // key is import path - for _, d := range dirs { - ip, sv, err := DirToImportPathVersion(d) - if err != nil { - return nil, err - } - ans[ip] = append(ans[ip], &directory{ - path: d, - importPath: ip, - version: sv, - }) - } - for k, v := range ans { - semanticSort(v) - ans[k] = v - } - return ans, nil -} - -// sort the directories by semantic version, latest first -func semanticSort(v []*directory) { - slices.SortFunc(v, func(l, r *directory) int { - if n := semver.Compare(l.version, r.version); n != 0 { - return -n // latest first - } - return strings.Compare(string(l.path), string(r.path)) - }) -} - -// modCacheRegexp splits a relpathpath into module, module version, and package. -var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) - -// DirToImportPathVersion computes import path and semantic version -func DirToImportPathVersion(dir Relpath) (string, string, error) { - m := modCacheRegexp.FindStringSubmatch(string(dir)) - // m[1] is the module path - // m[2] is the version major.minor.patch(-
 1 && flds[1][1] == 'D',
-			}
-			if px.Type == Func {
-				n, err := strconv.Atoi(flds[2])
-				if err != nil {
-					continue // should never happen
-				}
-				px.Results = int16(n)
-				if len(flds) >= 4 {
-					sig := strings.Split(flds[3], " ")
-					for i := 0; i < len(sig); i++ {
-						// $ cannot otherwise occur. removing the spaces
-						// almost works, but for chan struct{}, e.g.
-						sig[i] = strings.Replace(sig[i], "$", " ", -1)
-					}
-					px.Sig = toFields(sig)
-				}
-			}
-			ans = append(ans, px)
-		}
-	}
-	return ans
-}
-
-func toFields(sig []string) []Field {
-	ans := make([]Field, len(sig)/2)
-	for i := 0; i < len(ans); i++ {
-		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
-	}
-	return ans
-}
-
-// benchmarks show this is measurably better than strings.Split
-// split into first 4 fields separated by a single space
-func fastSplit(x string) []string {
-	ans := make([]string, 0, 4)
-	nxt := 0
-	start := 0
-	for i := 0; i < len(x); i++ {
-		if x[i] != ' ' {
-			continue
-		}
-		ans = append(ans, x[start:i])
-		nxt++
-		start = i + 1
-		if nxt >= 3 {
-			break
-		}
-	}
-	ans = append(ans, x[start:])
-	return ans
-}
-
-func asLexType(c byte) LexType {
-	switch c {
-	case 'C':
-		return Const
-	case 'V':
-		return Var
-	case 'T':
-		return Type
-	case 'F':
-		return Func
-	}
-	return -1
-}
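
The deleted lookup code above decodes one index record per symbol: fastSplit cuts the line into at most four space-separated fields, asLexType maps the kind byte (C/V/T/F), and Func entries additionally carry a result count plus a '$'-escaped signature. Below is a minimal, self-contained sketch of that decoding under the "name kind results sig" layout shown above; every identifier in it is a hypothetical stand-in, not the vendored API.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Field is a hypothetical (arg name, arg type) pair, mirroring toFields above.
type Field struct{ Arg, Type string }

// decodeFuncLine decodes a "name kind results sig" record for a function symbol.
func decodeFuncLine(line string) (name string, results int, sig []Field, err error) {
	flds := strings.SplitN(line, " ", 4) // stand-in for fastSplit
	if len(flds) < 3 || !strings.HasPrefix(flds[1], "F") {
		return "", 0, nil, fmt.Errorf("not a func entry: %q", line)
	}
	name = flds[0]
	if results, err = strconv.Atoi(flds[2]); err != nil {
		return "", 0, nil, err
	}
	if len(flds) == 4 {
		parts := strings.Split(flds[3], " ")
		for i := 0; i+1 < len(parts); i += 2 {
			// '$' stands in for a space inside a type, e.g. "chan$struct{}".
			sig = append(sig, Field{parts[i], strings.ReplaceAll(parts[i+1], "$", " ")})
		}
	}
	return name, results, sig, nil
}

func main() {
	name, n, sig, err := decodeFuncLine("ParseFile F 2 fset *token.FileSet src chan$struct{}")
	fmt.Println(name, n, sig, err)
}
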
diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
deleted file mode 100644
index 355a53e7..00000000
--- a/vendor/golang.org/x/tools/internal/modindex/modindex.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package modindex contains code for building and searching an index to
-// the Go module cache. The directory containing the index, returned by
-// IndexDir(), contains a file index-name-<ver> that contains the name
-// of the current index. We believe writing that short file is atomic.
-// ReadIndex reads that file to get the file name of the index.
-// WriteIndex writes an index with a unique name and then
-// writes that name into a new version of index-name-<ver>.
-// (<ver> stands for the CurrentVersion of the index format.)
-package modindex
-
-import (
-	"path/filepath"
-	"slices"
-	"strings"
-	"time"
-
-	"golang.org/x/mod/semver"
-)
-
-// Create always creates a new index for the go module cache that is in cachedir.
-func Create(cachedir string) error {
-	_, err := indexModCache(cachedir, true)
-	return err
-}
-
-// Update the index for the go module cache that is in cachedir.
-// If there is no existing index it will build one.
-// If there are changed directories since the last index, it will
-// write a new one and return true. Otherwise it returns false.
-func Update(cachedir string) (bool, error) {
-	return indexModCache(cachedir, false)
-}
-
-// indexModCache writes an index current as of when it is called.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and the updates to the cache. It returns true if it wrote an index,
-// false otherwise.
-func indexModCache(cachedir string, clear bool) (bool, error) {
-	cachedir, err := filepath.Abs(cachedir)
-	if err != nil {
-		return false, err
-	}
-	cd := Abspath(cachedir)
-	future := time.Now().Add(24 * time.Hour) // safely in the future
-	ok, err := modindexTimed(future, cd, clear)
-	if err != nil {
-		return false, err
-	}
-	return ok, nil
-}
-
-// modindexTimed writes an index current as of onlyBefore.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and all the updates to the cache before onlyBefore.
-// It returns true if it wrote a new index, false if it wrote nothing.
-func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
-	var curIndex *Index
-	if !clear {
-		var err error
-		curIndex, err = ReadIndex(string(cachedir))
-		if clear && err != nil {
-			return false, err
-		}
-		// TODO(pjw): check that most of those directories still exist
-	}
-	cfg := &work{
-		onlyBefore: onlyBefore,
-		oldIndex:   curIndex,
-		cacheDir:   cachedir,
-	}
-	if curIndex != nil {
-		cfg.onlyAfter = curIndex.Changed
-	}
-	if err := cfg.buildIndex(); err != nil {
-		return false, err
-	}
-	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
-		// no changes from existing curIndex, don't write a new index
-		return false, nil
-	}
-	if err := cfg.writeIndex(); err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-type work struct {
-	onlyBefore time.Time // do not use directories later than this
-	onlyAfter  time.Time // only interested in directories after this
-	// directories from before onlyAfter come from oldIndex
-	oldIndex *Index
-	newIndex *Index
-	cacheDir Abspath
-}
-
-func (w *work) buildIndex() error {
-	// The effective date of the new index should be at least
-	// slightly earlier than when the directories are scanned
-	// so set it now.
-	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
-	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
-	if len(dirs) == 0 {
-		return nil
-	}
-	newdirs, err := byImportPath(dirs)
-	if err != nil {
-		return err
-	}
-	// for each import path it might occur only in newdirs,
-	// only in w.oldIndex, or in both.
-	// If it occurs in both, use the semantically later one
-	if w.oldIndex != nil {
-		for _, e := range w.oldIndex.Entries {
-			found, ok := newdirs[e.ImportPath]
-			if !ok {
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				continue // use this one, there is no new one
-			}
-			if semver.Compare(found[0].version, e.Version) > 0 {
-				// use the new one
-			} else {
-				// use the old one, forget the new one
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				delete(newdirs, e.ImportPath)
-			}
-		}
-	}
-	// get symbol information for all the new directories
-	getSymbols(w.cacheDir, newdirs)
-	// assemble the new index entries
-	for k, v := range newdirs {
-		d := v[0]
-		pkg, names := processSyms(d.syms)
-		if pkg == "" {
-			continue // PJW: does this ever happen?
-		}
-		entry := Entry{
-			PkgName:    pkg,
-			Dir:        d.path,
-			ImportPath: k,
-			Version:    d.version,
-			Names:      names,
-		}
-		w.newIndex.Entries = append(w.newIndex.Entries, entry)
-	}
-	// sort the entries in the new index
-	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
-		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
-			return n
-		}
-		return strings.Compare(l.ImportPath, r.ImportPath)
-	})
-	return nil
-}
-
-func (w *work) writeIndex() error {
-	return writeIndex(w.cacheDir, w.newIndex)
-}
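
buildIndex above merges the previous index with freshly scanned module-cache directories per import path: when both sides have an entry, the one with the higher semantic version is kept. A tiny illustration of that comparison using golang.org/x/mod/semver, the package the deleted code relies on; the two version strings are made-up inputs.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	oldVersion := "v1.2.0"  // version recorded in the existing index entry
	newVersion := "v1.10.3" // newest version found under the module cache

	// Same rule as buildIndex: keep the old entry unless the new one is strictly newer.
	if semver.Compare(newVersion, oldVersion) > 0 {
		fmt.Println("re-index with", newVersion)
	} else {
		fmt.Println("keep existing entry at", oldVersion)
	}
}
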
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
deleted file mode 100644
index 33bf2641..00000000
--- a/vendor/golang.org/x/tools/internal/modindex/symbols.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modindex
-
-import (
-	"fmt"
-	"go/ast"
-	"go/parser"
-	"go/token"
-	"go/types"
-	"os"
-	"path/filepath"
-	"slices"
-	"strings"
-
-	"golang.org/x/sync/errgroup"
-)
-
-// The name of a symbol contains information about the symbol:
-//  T for types, TD if the type is deprecated
-//  C for consts, CD if the const is deprecated
-//  V for vars, VD if the var is deprecated
-// and for funcs: F <number of results> (<arg name> <arg type>)*
-// any spaces in <arg type> are replaced by $s so that the fields
-// of the name are space separated. F is replaced by FD if the func
-// is deprecated.
-type symbol struct {
-	pkg  string // name of the symbol's package
-	name string // declared name
-	kind string // T, C, V, or F
-	sig  string // signature information, for F
-}
-
-// find the symbols for the best directories
-func getSymbols(cd Abspath, dirs map[string][]*directory) {
-	var g errgroup.Group
-	g.SetLimit(-1) // maybe throttle this some day
-	for _, vv := range dirs {
-		// throttling some day?
-		d := vv[0]
-		g.Go(func() error {
-			thedir := filepath.Join(string(cd), string(d.path))
-			mode := parser.SkipObjectResolution | parser.ParseComments
-
-			fi, err := os.ReadDir(thedir)
-			if err != nil {
-				return nil // log this someday?
-			}
-			for _, fx := range fi {
-				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
-					continue
-				}
-				fname := filepath.Join(thedir, fx.Name())
-				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
-				if err != nil {
-					continue // ignore errors, someday log them?
-				}
-				d.syms = append(d.syms, getFileExports(tr)...)
-			}
-			return nil
-		})
-	}
-	g.Wait()
-}
-
-func getFileExports(f *ast.File) []symbol {
-	pkg := f.Name.Name
-	if pkg == "main" {
-		return nil
-	}
-	var ans []symbol
-	// should we look for //go:build ignore?
-	for _, decl := range f.Decls {
-		switch decl := decl.(type) {
-		case *ast.FuncDecl:
-			if decl.Recv != nil {
-				// ignore methods, as we are completing package selections
-				continue
-			}
-			name := decl.Name.Name
-			dtype := decl.Type
-			// not looking at dtype.TypeParams. That is, treating
-			// generic functions just like non-generic ones.
-			sig := dtype.Params
-			kind := "F"
-			if isDeprecated(decl.Doc) {
-				kind += "D"
-			}
-			result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
-			for _, x := range sig.List {
-				// This code creates a string representing the type.
-				// TODO(pjw): it may be fragile:
-				// 1. x.Type could be nil, perhaps in ill-formed code
-				// 2. ExprString might someday change incompatibly to
-				//    include struct tags, which can be arbitrary strings
-				if x.Type == nil {
-					// Can this happen without a parse error? (Files with parse
-					// errors are ignored in getSymbols)
-					continue // maybe report this someday
-				}
-				tp := types.ExprString(x.Type)
-				if len(tp) == 0 {
-					// Can this happen?
-					continue // maybe report this someday
-				}
-				// This is only safe if ExprString never returns anything with a $
-				// The only place a $ can occur seems to be in a struct tag, which
-				// can be an arbitrary string literal, and ExprString does not presently
-				// print struct tags. So for this to happen the type of a formal parameter
-				// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
-				// would have to show the struct tag. Even testing for this case seems
-				// a waste of effort, but let's not ignore such pathologies
-				if strings.Contains(tp, "$") {
-					continue
-				}
-				tp = strings.Replace(tp, " ", "$", -1)
-				if len(x.Names) == 0 {
-					result = append(result, "_")
-					result = append(result, tp)
-				} else {
-					for _, y := range x.Names {
-						result = append(result, y.Name)
-						result = append(result, tp)
-					}
-				}
-			}
-			sigs := strings.Join(result, " ")
-			if s := newsym(pkg, name, kind, sigs); s != nil {
-				ans = append(ans, *s)
-			}
-		case *ast.GenDecl:
-			depr := isDeprecated(decl.Doc)
-			switch decl.Tok {
-			case token.CONST, token.VAR:
-				tp := "V"
-				if decl.Tok == token.CONST {
-					tp = "C"
-				}
-				if depr {
-					tp += "D"
-				}
-				for _, sp := range decl.Specs {
-					for _, x := range sp.(*ast.ValueSpec).Names {
-						if s := newsym(pkg, x.Name, tp, ""); s != nil {
-							ans = append(ans, *s)
-						}
-					}
-				}
-			case token.TYPE:
-				tp := "T"
-				if depr {
-					tp += "D"
-				}
-				for _, sp := range decl.Specs {
-					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
-						ans = append(ans, *s)
-					}
-				}
-			}
-		}
-	}
-	return ans
-}
-
-func newsym(pkg, name, kind, sig string) *symbol {
-	if len(name) == 0 || !ast.IsExported(name) {
-		return nil
-	}
-	sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
-	return &sym
-}
-
-func isDeprecated(doc *ast.CommentGroup) bool {
-	if doc == nil {
-		return false
-	}
-	// go.dev/wiki/Deprecated Paragraph starting 'Deprecated:'
-	// This code fails for /* Deprecated: */, but it's the code from
-	// gopls/internal/analysis/deprecated
-	lines := strings.Split(doc.Text(), "\n\n")
-	for _, line := range lines {
-		if strings.HasPrefix(line, "Deprecated:") {
-			return true
-		}
-	}
-	return false
-}
-
-// return the package name and the value for the symbols.
-// if there are multiple packages, choose one arbitrarily
-// the returned slice is sorted lexicographically
-func processSyms(syms []symbol) (string, []string) {
-	if len(syms) == 0 {
-		return "", nil
-	}
-	slices.SortFunc(syms, func(l, r symbol) int {
-		return strings.Compare(l.name, r.name)
-	})
-	pkg := syms[0].pkg
-	var names []string
-	for _, s := range syms {
-		var nx string
-		if s.pkg == pkg {
-			if s.sig != "" {
-				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
-			} else {
-				nx = fmt.Sprintf("%s %s", s.name, s.kind)
-			}
-			names = append(names, nx)
-		} else {
-			continue // PJW: do we want to keep track of these?
-		}
-	}
-	return pkg, names
-}
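
processSyms above emits one "name kind" or "name kind sig" record per exported symbol, with spaces inside parameter types rewritten to '$' so each record stays space-separated. A short, self-contained sketch of that encoding direction follows; encodeFunc is an illustrative helper, not part of the vendored package.

package main

import (
	"fmt"
	"strings"
)

// encodeFunc builds a "name kind results arg type ..." record as described above.
func encodeFunc(name string, deprecated bool, results int, params [][2]string) string {
	kind := "F"
	if deprecated {
		kind += "D"
	}
	parts := []string{fmt.Sprintf("%d", results)}
	for _, p := range params {
		parts = append(parts, p[0], strings.ReplaceAll(p[1], " ", "$"))
	}
	return fmt.Sprintf("%s %s %s", name, kind, strings.Join(parts, " "))
}

func main() {
	fmt.Println(encodeFunc("Watch", false, 1, [][2]string{{"ch", "chan struct{}"}}))
	// Watch F 1 ch chan$struct{}
}
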
diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
deleted file mode 100644
index ece44886..00000000
--- a/vendor/golang.org/x/tools/internal/modindex/types.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modindex
-
-import (
-	"strings"
-)
-
-// some special types to avoid confusion
-
-// distinguish various types of directory names. It's easy to get confused.
-type Abspath string // absolute paths
-type Relpath string // paths with GOMODCACHE prefix removed
-
-func toRelpath(cachedir Abspath, s string) Relpath {
-	if strings.HasPrefix(s, string(cachedir)) {
-		if s == string(cachedir) {
-			return Relpath("")
-		}
-		return Relpath(s[len(cachedir)+1:])
-	}
-	return Relpath(s)
-}
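
The Relpath convention above stores module-cache paths with the GOMODCACHE prefix and its separator stripped, leaving unrelated paths untouched. A self-contained sketch of the same trimming, with made-up paths; toRel is a stand-in for toRelpath.

package main

import (
	"fmt"
	"strings"
)

// toRel strips the cache-dir prefix plus one path separator, like toRelpath above.
func toRel(cachedir, s string) string {
	if s == cachedir {
		return ""
	}
	if strings.HasPrefix(s, cachedir+"/") {
		return s[len(cachedir)+1:]
	}
	return s
}

func main() {
	cache := "/home/u/go/pkg/mod"
	fmt.Println(toRel(cache, cache+"/golang.org/x/mod@v0.22.0")) // golang.org/x/mod@v0.22.0
	fmt.Println(toRel(cache, "/tmp/elsewhere"))                  // /tmp/elsewhere
}
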
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 66e69b43..44719de1 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,6 +5,7 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal
 
+var GetForTest = func(p interface{}) string { return "" }
 var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
 
 type PackageError struct {
@@ -15,6 +16,7 @@ type PackageError struct {
 
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
+var ForTest int    // must be set as a LoadMode to call GetForTest
 
 var SetModFlag = func(config interface{}, value string) {}
 var SetModFile = func(config interface{}, value string) {}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
index f6cb37c5..2acd8585 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -21,7 +21,10 @@ import (
 // export data.
 type PkgDecoder struct {
 	// version is the file format version.
-	version Version
+	version uint32
+
+	// aliases determines whether types.Aliases should be created
+	aliases bool
 
 	// sync indicates whether the file uses sync markers.
 	sync bool
@@ -68,9 +71,12 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
 // NewPkgDecoder returns a PkgDecoder initialized to read the Unified
 // IR export data from input. pkgPath is the package path for the
 // compilation unit that produced the export data.
+//
+// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
 func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 	pr := PkgDecoder{
 		pkgPath: pkgPath,
+		//aliases: aliases.Enabled(),
 	}
 
 	// TODO(mdempsky): Implement direct indexing of input string to
@@ -78,15 +84,14 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 
 	r := strings.NewReader(input)
 
-	var ver uint32
-	assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
-	pr.version = Version(ver)
+	assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
 
-	if pr.version >= numVersions {
-		panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
-	}
-
-	if pr.version.Has(Flags) {
+	switch pr.version {
+	default:
+		panic(fmt.Errorf("unsupported version: %v", pr.version))
+	case 0:
+		// no flags
+	case 1:
 		var flags uint32
 		assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
 		pr.sync = flags&flagSyncMarkers != 0
@@ -101,9 +106,7 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 	assert(err == nil)
 
 	pr.elemData = input[pos:]
-
-	const fingerprintSize = 8
-	assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
+	assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
 
 	return pr
 }
@@ -137,7 +140,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
 		absIdx += int(pr.elemEndsEnds[k-1])
 	}
 	if absIdx >= int(pr.elemEndsEnds[k]) {
-		panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+		errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
 	}
 	return absIdx
 }
@@ -194,7 +197,9 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
 		Idx:    idx,
 	}
 
-	r.Data.Reset(pr.DataIdx(k, idx))
+	// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
 	r.Sync(SyncRelocs)
 	r.Relocs = make([]RelocEnt, r.Len())
 	for i := range r.Relocs {
@@ -243,7 +248,7 @@ type Decoder struct {
 
 func (r *Decoder) checkErr(err error) {
 	if err != nil {
-		panicf("unexpected decoding error: %w", err)
+		errorf("unexpected decoding error: %w", err)
 	}
 }
 
@@ -514,6 +519,3 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
 
 	return path, name, tag
 }
-
-// Version reports the version of the bitstream.
-func (w *Decoder) Version() Version { return w.common.version }
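
The decoder change above goes back to reading the export-data header as a raw little-endian uint32 version word, followed (for version 1 only) by a uint32 flags word whose sync-markers bit is tested. A self-contained sketch of that header read; the flag bit value here is an assumption for illustration only.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const flagSyncMarkers = 1 << 0 // assumed bit position, for illustration

func main() {
	// Build a fake v1 header: a version word followed by a flags word.
	var buf bytes.Buffer
	_ = binary.Write(&buf, binary.LittleEndian, uint32(1))
	_ = binary.Write(&buf, binary.LittleEndian, uint32(flagSyncMarkers))

	r := bytes.NewReader(buf.Bytes())
	var version uint32
	_ = binary.Read(r, binary.LittleEndian, &version)

	sync := false
	if version >= 1 { // v0 has no flags word
		var flags uint32
		_ = binary.Read(r, binary.LittleEndian, &flags)
		sync = flags&flagSyncMarkers != 0
	}
	fmt.Println("version:", version, "sync markers:", sync)
}
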
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
index c17a1239..6482617a 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -12,15 +12,18 @@ import (
 	"io"
 	"math/big"
 	"runtime"
-	"strings"
 )
 
+// currentVersion is the current version number.
+//
+//   - v0: initial prototype
+//
+//   - v1: adds the flags uint32 word
+const currentVersion uint32 = 1
+
 // A PkgEncoder provides methods for encoding a package's Unified IR
 // export data.
 type PkgEncoder struct {
-	// version of the bitstream.
-	version Version
-
 	// elems holds the bitstream for previously encoded elements.
 	elems [numRelocs][]string
 
@@ -44,9 +47,8 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
 // export data files, but can help diagnosing desync errors in
 // higher-level Unified IR reader/writer code. If syncFrames is
 // negative, then sync markers are omitted entirely.
-func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
+func NewPkgEncoder(syncFrames int) PkgEncoder {
 	return PkgEncoder{
-		version:    version,
 		stringsIdx: make(map[string]Index),
 		syncFrames: syncFrames,
 	}
@@ -62,15 +64,13 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
 		assert(binary.Write(out, binary.LittleEndian, x) == nil)
 	}
 
-	writeUint32(uint32(pw.version))
+	writeUint32(currentVersion)
 
-	if pw.version.Has(Flags) {
-		var flags uint32
-		if pw.SyncMarkers() {
-			flags |= flagSyncMarkers
-		}
-		writeUint32(flags)
+	var flags uint32
+	if pw.SyncMarkers() {
+		flags |= flagSyncMarkers
 	}
+	writeUint32(flags)
 
 	// Write elemEndsEnds.
 	var sum uint32
@@ -159,7 +159,7 @@ type Encoder struct {
 
 // Flush finalizes the element's bitstream and returns its Index.
 func (w *Encoder) Flush() Index {
-	var sb strings.Builder
+	var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
 
 	// Backup the data so we write the relocations at the front.
 	var tmp bytes.Buffer
@@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
 
 func (w *Encoder) checkErr(err error) {
 	if err != nil {
-		panicf("unexpected encoding error: %v", err)
+		errorf("unexpected encoding error: %v", err)
 	}
 }
 
@@ -320,14 +320,8 @@ func (w *Encoder) Code(c Code) {
 // section (if not already present), and then writing a relocation
 // into the element bitstream.
 func (w *Encoder) String(s string) {
-	w.StringRef(w.p.StringIdx(s))
-}
-
-// StringRef writes a reference to the given index, which must be a
-// previously encoded string value.
-func (w *Encoder) StringRef(idx Index) {
 	w.Sync(SyncString)
-	w.Reloc(RelocString, idx)
+	w.Reloc(RelocString, w.p.StringIdx(s))
 }
 
 // Strings encodes and writes a variable-length slice of strings into
@@ -354,7 +348,7 @@ func (w *Encoder) Value(val constant.Value) {
 func (w *Encoder) scalar(val constant.Value) {
 	switch v := constant.Val(val).(type) {
 	default:
-		panicf("unhandled %v (%v)", val, val.Kind())
+		errorf("unhandled %v (%v)", val, val.Kind())
 	case bool:
 		w.Code(ValBool)
 		w.Bool(v)
@@ -387,6 +381,3 @@ func (w *Encoder) bigFloat(v *big.Float) {
 	b := v.Append(nil, 'p', -1)
 	w.String(string(b)) // TODO: More efficient encoding.
 }
-
-// Version reports the version of the bitstream.
-func (w *Encoder) Version() Version { return w.p.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
new file mode 100644
index 00000000..5294f6a6
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.7
+// +build !go1.7
+
+// TODO(mdempsky): Remove after #44505 is resolved
+
+package pkgbits
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+	for _, pc := range pcs {
+		fn := runtime.FuncForPC(pc)
+		file, line := fn.FileLine(pc)
+
+		visit(file, line, fn.Name(), pc-fn.Entry())
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
new file mode 100644
index 00000000..2324ae7a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+// +build go1.7
+
+package pkgbits
+
+import "runtime"
+
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+	if len(pcs) == 0 {
+		return
+	}
+
+	frames := runtime.CallersFrames(pcs)
+	for {
+		frame, more := frames.Next()
+		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+		if !more {
+			return
+		}
+	}
+}
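
The frames_go17.go file added above walks call frames with runtime.CallersFrames. A short, self-contained usage sketch of the same pattern, pairing it with runtime.Callers to collect the program counters first:

package main

import (
	"fmt"
	"runtime"
)

func visit(file string, line int, name string, offset uintptr) {
	fmt.Printf("%s:%d %s (+0x%x)\n", file, line, name, offset)
}

func dumpStack() {
	pcs := make([]uintptr, 16)
	n := runtime.Callers(1, pcs) // skip runtime.Callers itself
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
		if !more {
			return
		}
	}
}

func main() { dumpStack() }
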
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
index 50534a29..ad26d3b2 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/support.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -12,6 +12,6 @@ func assert(b bool) {
 	}
 }
 
-func panicf(format string, args ...any) {
+func errorf(format string, args ...interface{}) {
 	panic(fmt.Errorf(format, args...))
 }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
index 1520b73a..5bd51ef7 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -6,7 +6,6 @@ package pkgbits
 
 import (
 	"fmt"
-	"runtime"
 	"strings"
 )
 
@@ -24,24 +23,6 @@ func fmtFrames(pcs ...uintptr) []string {
 
 type frameVisitor func(file string, line int, name string, offset uintptr)
 
-// walkFrames calls visit for each call frame represented by pcs.
-//
-// pcs should be a slice of PCs, as returned by runtime.Callers.
-func walkFrames(pcs []uintptr, visit frameVisitor) {
-	if len(pcs) == 0 {
-		return
-	}
-
-	frames := runtime.CallersFrames(pcs)
-	for {
-		frame, more := frames.Next()
-		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
-		if !more {
-			return
-		}
-	}
-}
-
 // SyncMarker is an enum type that represents markers that may be
 // written to export data to ensure the reader and writer stay
 // synchronized.
@@ -129,8 +110,4 @@ const (
 	SyncStmtsEnd
 	SyncLabel
 	SyncOptLabel
-
-	SyncMultiExpr
-	SyncRType
-	SyncConvRTTI
 )
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
index 582ad56d..4a5b0ca5 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -74,14 +74,11 @@ func _() {
 	_ = x[SyncStmtsEnd-64]
 	_ = x[SyncLabel-65]
 	_ = x[SyncOptLabel-66]
-	_ = x[SyncMultiExpr-67]
-	_ = x[SyncRType-68]
-	_ = x[SyncConvRTTI-69]
 }
 
-const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
 
-var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
 
 func (i SyncMarker) String() string {
 	i -= 1
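
The syncmarker_string.go hunk above edits a generated stringer table: all marker names are packed into one string constant and an index array records where each name starts and ends. A toy version of that lookup with a shortened table:

package main

import "fmt"

const names = "EOFBoolInt64Uint64"

var index = [...]uint16{0, 3, 7, 12, 18}

// nameOf slices the packed string using the cumulative offsets, like the generated String method.
func nameOf(i int) string { return names[index[i]:index[i+1]] }

func main() {
	fmt.Println(nameOf(0), nameOf(1), nameOf(2), nameOf(3)) // EOF Bool Int64 Uint64
}
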
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go
deleted file mode 100644
index 53af9df2..00000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/version.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pkgbits
-
-// Version indicates a version of a unified IR bitstream.
-// Each Version indicates the addition, removal, or change of
-// new data in the bitstream.
-//
-// These are serialized to disk and the interpretation remains fixed.
-type Version uint32
-
-const (
-	// V0: initial prototype.
-	//
-	// All data that is not assigned a Field is in version V0
-	// and has not been deprecated.
-	V0 Version = iota
-
-	// V1: adds the Flags uint32 word
-	V1
-
-	// V2: removes unused legacy fields and supports type parameters for aliases.
-	// - remove the legacy "has init" bool from the public root
-	// - remove obj's "derived func instance" bool
-	// - add a TypeParamNames field to ObjAlias
-	// - remove derived info "needed" bool
-	V2
-
-	numVersions = iota
-)
-
-// Field denotes a unit of data in the serialized unified IR bitstream.
-// It is conceptually like a field in a structure.
-//
-// We only really need Fields when the data may or may not be present
-// in a stream based on the Version of the bitstream.
-//
-// Unlike much of pkgbits, Fields are not serialized and
-// can change values as needed.
-type Field int
-
-const (
-	// Flags in a uint32 in the header of a bitstream
-	// that is used to indicate whether optional features are enabled.
-	Flags Field = iota
-
-	// Deprecated: HasInit was a bool indicating whether a package
-	// has any init functions.
-	HasInit
-
-	// Deprecated: DerivedFuncInstance was a bool indicating
-	// whether an object was a function instance.
-	DerivedFuncInstance
-
-	// ObjAlias has a list of TypeParamNames.
-	AliasTypeParamNames
-
-	// Deprecated: DerivedInfoNeeded was a bool indicating
-	// whether a type was a derived type.
-	DerivedInfoNeeded
-
-	numFields = iota
-)
-
-// introduced is the version a field was added.
-var introduced = [numFields]Version{
-	Flags:               V1,
-	AliasTypeParamNames: V2,
-}
-
-// removed is the version a field was removed in or 0 for fields
-// that have not yet been deprecated.
-// (So removed[f]-1 is the last version it is included in.)
-var removed = [numFields]Version{
-	HasInit:             V2,
-	DerivedFuncInstance: V2,
-	DerivedInfoNeeded:   V2,
-}
-
-// Has reports whether field f is present in a bitstream at version v.
-func (v Version) Has(f Field) bool {
-	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
-}
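
The deleted version.go encoded field availability as two tables: the version a field was introduced in and the version it was removed in, with zero meaning "not yet removed". A self-contained sketch that mirrors the Has rule above, using maps instead of the fixed arrays:

package main

import "fmt"

type Version uint32

const (
	V0 Version = iota
	V1
	V2
)

var introduced = map[string]Version{"Flags": V1, "AliasTypeParamNames": V2}
var removed = map[string]Version{"HasInit": V2, "DerivedFuncInstance": V2}

// has reports whether a field is present at version v: introduced at or before v,
// and either never removed or removed only in a later version.
func has(field string, v Version) bool {
	r, gone := removed[field]
	return introduced[field] <= v && (!gone || v < r)
}

func main() {
	fmt.Println(has("Flags", V0))   // false: the flags word only appears in V1
	fmt.Println(has("Flags", V2))   // true
	fmt.Println(has("HasInit", V1)) // true: removed starting with V2
	fmt.Println(has("HasInit", V2)) // false
}
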
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index 9f0b871f..fd689207 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -23,7 +23,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"ErrWriteAfterClose", Var, 0},
 		{"ErrWriteTooLong", Var, 0},
 		{"FileInfoHeader", Func, 1},
-		{"FileInfoNames", Type, 23},
 		{"Format", Type, 10},
 		{"FormatGNU", Const, 10},
 		{"FormatPAX", Const, 10},
@@ -268,8 +267,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"ErrTooLarge", Var, 0},
 		{"Fields", Func, 0},
 		{"FieldsFunc", Func, 0},
-		{"FieldsFuncSeq", Func, 24},
-		{"FieldsSeq", Func, 24},
 		{"HasPrefix", Func, 0},
 		{"HasSuffix", Func, 0},
 		{"Index", Func, 0},
@@ -282,7 +279,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"LastIndexAny", Func, 0},
 		{"LastIndexByte", Func, 5},
 		{"LastIndexFunc", Func, 0},
-		{"Lines", Func, 24},
 		{"Map", Func, 0},
 		{"MinRead", Const, 0},
 		{"NewBuffer", Func, 0},
@@ -296,9 +292,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Split", Func, 0},
 		{"SplitAfter", Func, 0},
 		{"SplitAfterN", Func, 0},
-		{"SplitAfterSeq", Func, 24},
 		{"SplitN", Func, 0},
-		{"SplitSeq", Func, 24},
 		{"Title", Func, 0},
 		{"ToLower", Func, 0},
 		{"ToLowerSpecial", Func, 0},
@@ -540,7 +534,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"NewCTR", Func, 0},
 		{"NewGCM", Func, 2},
 		{"NewGCMWithNonceSize", Func, 5},
-		{"NewGCMWithRandomNonce", Func, 24},
 		{"NewGCMWithTagSize", Func, 11},
 		{"NewOFB", Func, 0},
 		{"Stream", Type, 0},
@@ -679,14 +672,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Unmarshal", Func, 0},
 		{"UnmarshalCompressed", Func, 15},
 	},
-	"crypto/fips140": {
-		{"Enabled", Func, 24},
-	},
-	"crypto/hkdf": {
-		{"Expand", Func, 24},
-		{"Extract", Func, 24},
-		{"Key", Func, 24},
-	},
 	"crypto/hmac": {
 		{"Equal", Func, 1},
 		{"New", Func, 0},
@@ -697,43 +682,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"Size", Const, 0},
 		{"Sum", Func, 2},
 	},
-	"crypto/mlkem": {
-		{"(*DecapsulationKey1024).Bytes", Method, 24},
-		{"(*DecapsulationKey1024).Decapsulate", Method, 24},
-		{"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
-		{"(*DecapsulationKey768).Bytes", Method, 24},
-		{"(*DecapsulationKey768).Decapsulate", Method, 24},
-		{"(*DecapsulationKey768).EncapsulationKey", Method, 24},
-		{"(*EncapsulationKey1024).Bytes", Method, 24},
-		{"(*EncapsulationKey1024).Encapsulate", Method, 24},
-		{"(*EncapsulationKey768).Bytes", Method, 24},
-		{"(*EncapsulationKey768).Encapsulate", Method, 24},
-		{"CiphertextSize1024", Const, 24},
-		{"CiphertextSize768", Const, 24},
-		{"DecapsulationKey1024", Type, 24},
-		{"DecapsulationKey768", Type, 24},
-		{"EncapsulationKey1024", Type, 24},
-		{"EncapsulationKey768", Type, 24},
-		{"EncapsulationKeySize1024", Const, 24},
-		{"EncapsulationKeySize768", Const, 24},
-		{"GenerateKey1024", Func, 24},
-		{"GenerateKey768", Func, 24},
-		{"NewDecapsulationKey1024", Func, 24},
-		{"NewDecapsulationKey768", Func, 24},
-		{"NewEncapsulationKey1024", Func, 24},
-		{"NewEncapsulationKey768", Func, 24},
-		{"SeedSize", Const, 24},
-		{"SharedKeySize", Const, 24},
-	},
-	"crypto/pbkdf2": {
-		{"Key", Func, 24},
-	},
 	"crypto/rand": {
 		{"Int", Func, 0},
 		{"Prime", Func, 0},
 		{"Read", Func, 0},
 		{"Reader", Var, 0},
-		{"Text", Func, 24},
 	},
 	"crypto/rc4": {
 		{"(*Cipher).Reset", Method, 0},
@@ -812,39 +765,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Sum224", Func, 2},
 		{"Sum256", Func, 2},
 	},
-	"crypto/sha3": {
-		{"(*SHA3).AppendBinary", Method, 24},
-		{"(*SHA3).BlockSize", Method, 24},
-		{"(*SHA3).MarshalBinary", Method, 24},
-		{"(*SHA3).Reset", Method, 24},
-		{"(*SHA3).Size", Method, 24},
-		{"(*SHA3).Sum", Method, 24},
-		{"(*SHA3).UnmarshalBinary", Method, 24},
-		{"(*SHA3).Write", Method, 24},
-		{"(*SHAKE).AppendBinary", Method, 24},
-		{"(*SHAKE).BlockSize", Method, 24},
-		{"(*SHAKE).MarshalBinary", Method, 24},
-		{"(*SHAKE).Read", Method, 24},
-		{"(*SHAKE).Reset", Method, 24},
-		{"(*SHAKE).UnmarshalBinary", Method, 24},
-		{"(*SHAKE).Write", Method, 24},
-		{"New224", Func, 24},
-		{"New256", Func, 24},
-		{"New384", Func, 24},
-		{"New512", Func, 24},
-		{"NewCSHAKE128", Func, 24},
-		{"NewCSHAKE256", Func, 24},
-		{"NewSHAKE128", Func, 24},
-		{"NewSHAKE256", Func, 24},
-		{"SHA3", Type, 24},
-		{"SHAKE", Type, 24},
-		{"Sum224", Func, 24},
-		{"Sum256", Func, 24},
-		{"Sum384", Func, 24},
-		{"Sum512", Func, 24},
-		{"SumSHAKE128", Func, 24},
-		{"SumSHAKE256", Func, 24},
-	},
 	"crypto/sha512": {
 		{"BlockSize", Const, 0},
 		{"New", Func, 0},
@@ -867,7 +787,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"ConstantTimeEq", Func, 0},
 		{"ConstantTimeLessOrEq", Func, 2},
 		{"ConstantTimeSelect", Func, 0},
-		{"WithDataIndependentTiming", Func, 24},
 		{"XORBytes", Func, 20},
 	},
 	"crypto/tls": {
@@ -901,7 +820,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
 		{"(*Dialer).Dial", Method, 15},
 		{"(*Dialer).DialContext", Method, 15},
-		{"(*ECHRejectionError).Error", Method, 23},
 		{"(*QUICConn).Close", Method, 21},
 		{"(*QUICConn).ConnectionState", Method, 21},
 		{"(*QUICConn).HandleData", Method, 21},
@@ -909,7 +827,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*QUICConn).SendSessionTicket", Method, 21},
 		{"(*QUICConn).SetTransportParameters", Method, 21},
 		{"(*QUICConn).Start", Method, 21},
-		{"(*QUICConn).StoreSession", Method, 23},
 		{"(*SessionState).Bytes", Method, 21},
 		{"(AlertError).Error", Method, 21},
 		{"(ClientAuthType).String", Method, 15},
@@ -944,7 +861,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"ClientHelloInfo", Type, 4},
 		{"ClientHelloInfo.CipherSuites", Field, 4},
 		{"ClientHelloInfo.Conn", Field, 8},
-		{"ClientHelloInfo.Extensions", Field, 24},
 		{"ClientHelloInfo.ServerName", Field, 4},
 		{"ClientHelloInfo.SignatureSchemes", Field, 8},
 		{"ClientHelloInfo.SupportedCurves", Field, 4},
@@ -961,9 +877,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Config.ClientSessionCache", Field, 3},
 		{"Config.CurvePreferences", Field, 3},
 		{"Config.DynamicRecordSizingDisabled", Field, 7},
-		{"Config.EncryptedClientHelloConfigList", Field, 23},
-		{"Config.EncryptedClientHelloKeys", Field, 24},
-		{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
 		{"Config.GetCertificate", Field, 4},
 		{"Config.GetClientCertificate", Field, 8},
 		{"Config.GetConfigForClient", Field, 8},
@@ -989,7 +902,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"ConnectionState", Type, 0},
 		{"ConnectionState.CipherSuite", Field, 0},
 		{"ConnectionState.DidResume", Field, 1},
-		{"ConnectionState.ECHAccepted", Field, 23},
 		{"ConnectionState.HandshakeComplete", Field, 0},
 		{"ConnectionState.NegotiatedProtocol", Field, 0},
 		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
@@ -1013,13 +925,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ECDSAWithP384AndSHA384", Const, 8},
 		{"ECDSAWithP521AndSHA512", Const, 8},
 		{"ECDSAWithSHA1", Const, 10},
-		{"ECHRejectionError", Type, 23},
-		{"ECHRejectionError.RetryConfigList", Field, 23},
 		{"Ed25519", Const, 13},
-		{"EncryptedClientHelloKey", Type, 24},
-		{"EncryptedClientHelloKey.Config", Field, 24},
-		{"EncryptedClientHelloKey.PrivateKey", Field, 24},
-		{"EncryptedClientHelloKey.SendAsRetry", Field, 24},
 		{"InsecureCipherSuites", Func, 14},
 		{"Listen", Func, 0},
 		{"LoadX509KeyPair", Func, 0},
@@ -1037,7 +943,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParseSessionState", Func, 21},
 		{"QUICClient", Func, 21},
 		{"QUICConfig", Type, 21},
-		{"QUICConfig.EnableSessionEvents", Field, 23},
 		{"QUICConfig.TLSConfig", Field, 21},
 		{"QUICConn", Type, 21},
 		{"QUICEncryptionLevel", Type, 21},
@@ -1049,20 +954,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"QUICEvent.Data", Field, 21},
 		{"QUICEvent.Kind", Field, 21},
 		{"QUICEvent.Level", Field, 21},
-		{"QUICEvent.SessionState", Field, 23},
 		{"QUICEvent.Suite", Field, 21},
 		{"QUICEventKind", Type, 21},
 		{"QUICHandshakeDone", Const, 21},
 		{"QUICNoEvent", Const, 21},
 		{"QUICRejectedEarlyData", Const, 21},
-		{"QUICResumeSession", Const, 23},
 		{"QUICServer", Func, 21},
 		{"QUICSessionTicketOptions", Type, 21},
 		{"QUICSessionTicketOptions.EarlyData", Field, 21},
-		{"QUICSessionTicketOptions.Extra", Field, 23},
 		{"QUICSetReadSecret", Const, 21},
 		{"QUICSetWriteSecret", Const, 21},
-		{"QUICStoreSession", Const, 23},
 		{"QUICTransportParameters", Const, 21},
 		{"QUICTransportParametersRequired", Const, 21},
 		{"QUICWriteData", Const, 21},
@@ -1118,7 +1019,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"VersionTLS12", Const, 2},
 		{"VersionTLS13", Const, 12},
 		{"X25519", Const, 8},
-		{"X25519MLKEM768", Const, 24},
 		{"X509KeyPair", Func, 0},
 	},
 	"crypto/x509": {
@@ -1136,19 +1036,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Certificate).Verify", Method, 0},
 		{"(*Certificate).VerifyHostname", Method, 0},
 		{"(*CertificateRequest).CheckSignature", Method, 5},
-		{"(*OID).UnmarshalBinary", Method, 23},
-		{"(*OID).UnmarshalText", Method, 23},
 		{"(*RevocationList).CheckSignatureFrom", Method, 19},
 		{"(CertificateInvalidError).Error", Method, 0},
 		{"(ConstraintViolationError).Error", Method, 0},
 		{"(HostnameError).Error", Method, 0},
 		{"(InsecureAlgorithmError).Error", Method, 6},
-		{"(OID).AppendBinary", Method, 24},
-		{"(OID).AppendText", Method, 24},
 		{"(OID).Equal", Method, 22},
 		{"(OID).EqualASN1OID", Method, 22},
-		{"(OID).MarshalBinary", Method, 23},
-		{"(OID).MarshalText", Method, 23},
 		{"(OID).String", Method, 22},
 		{"(PublicKeyAlgorithm).String", Method, 10},
 		{"(SignatureAlgorithm).String", Method, 6},
@@ -1173,10 +1067,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.Extensions", Field, 2},
 		{"Certificate.ExtraExtensions", Field, 2},
 		{"Certificate.IPAddresses", Field, 1},
-		{"Certificate.InhibitAnyPolicy", Field, 24},
-		{"Certificate.InhibitAnyPolicyZero", Field, 24},
-		{"Certificate.InhibitPolicyMapping", Field, 24},
-		{"Certificate.InhibitPolicyMappingZero", Field, 24},
 		{"Certificate.IsCA", Field, 0},
 		{"Certificate.Issuer", Field, 0},
 		{"Certificate.IssuingCertificateURL", Field, 2},
@@ -1193,7 +1083,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.PermittedURIDomains", Field, 10},
 		{"Certificate.Policies", Field, 22},
 		{"Certificate.PolicyIdentifiers", Field, 0},
-		{"Certificate.PolicyMappings", Field, 24},
 		{"Certificate.PublicKey", Field, 0},
 		{"Certificate.PublicKeyAlgorithm", Field, 0},
 		{"Certificate.Raw", Field, 0},
@@ -1201,8 +1090,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.RawSubject", Field, 0},
 		{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
 		{"Certificate.RawTBSCertificate", Field, 0},
-		{"Certificate.RequireExplicitPolicy", Field, 24},
-		{"Certificate.RequireExplicitPolicyZero", Field, 24},
 		{"Certificate.SerialNumber", Field, 0},
 		{"Certificate.Signature", Field, 0},
 		{"Certificate.SignatureAlgorithm", Field, 0},
@@ -1294,7 +1181,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"NameConstraintsWithoutSANs", Const, 10},
 		{"NameMismatch", Const, 8},
 		{"NewCertPool", Func, 0},
-		{"NoValidChains", Const, 24},
 		{"NotAuthorizedToSign", Const, 0},
 		{"OID", Type, 22},
 		{"OIDFromInts", Func, 22},
@@ -1310,15 +1196,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParseCertificates", Func, 0},
 		{"ParseDERCRL", Func, 0},
 		{"ParseECPrivateKey", Func, 1},
-		{"ParseOID", Func, 23},
 		{"ParsePKCS1PrivateKey", Func, 0},
 		{"ParsePKCS1PublicKey", Func, 10},
 		{"ParsePKCS8PrivateKey", Func, 0},
 		{"ParsePKIXPublicKey", Func, 0},
 		{"ParseRevocationList", Func, 19},
-		{"PolicyMapping", Type, 24},
-		{"PolicyMapping.IssuerDomainPolicy", Field, 24},
-		{"PolicyMapping.SubjectDomainPolicy", Field, 24},
 		{"PublicKeyAlgorithm", Type, 0},
 		{"PureEd25519", Const, 13},
 		{"RSA", Const, 0},
@@ -1365,7 +1247,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"UnknownPublicKeyAlgorithm", Const, 0},
 		{"UnknownSignatureAlgorithm", Const, 0},
 		{"VerifyOptions", Type, 0},
-		{"VerifyOptions.CertificatePolicies", Field, 24},
 		{"VerifyOptions.CurrentTime", Field, 0},
 		{"VerifyOptions.DNSName", Field, 0},
 		{"VerifyOptions.Intermediates", Field, 0},
@@ -2076,8 +1957,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*File).DynString", Method, 1},
 		{"(*File).DynValue", Method, 21},
 		{"(*File).DynamicSymbols", Method, 4},
-		{"(*File).DynamicVersionNeeds", Method, 24},
-		{"(*File).DynamicVersions", Method, 24},
 		{"(*File).ImportedLibraries", Method, 0},
 		{"(*File).ImportedSymbols", Method, 0},
 		{"(*File).Section", Method, 0},
@@ -2343,19 +2222,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"DynFlag", Type, 0},
 		{"DynFlag1", Type, 21},
 		{"DynTag", Type, 0},
-		{"DynamicVersion", Type, 24},
-		{"DynamicVersion.Deps", Field, 24},
-		{"DynamicVersion.Flags", Field, 24},
-		{"DynamicVersion.Index", Field, 24},
-		{"DynamicVersion.Name", Field, 24},
-		{"DynamicVersionDep", Type, 24},
-		{"DynamicVersionDep.Dep", Field, 24},
-		{"DynamicVersionDep.Flags", Field, 24},
-		{"DynamicVersionDep.Index", Field, 24},
-		{"DynamicVersionFlag", Type, 24},
-		{"DynamicVersionNeed", Type, 24},
-		{"DynamicVersionNeed.Name", Field, 24},
-		{"DynamicVersionNeed.Needs", Field, 24},
 		{"EI_ABIVERSION", Const, 0},
 		{"EI_CLASS", Const, 0},
 		{"EI_DATA", Const, 0},
@@ -2675,7 +2541,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"PT_NOTE", Const, 0},
 		{"PT_NULL", Const, 0},
 		{"PT_OPENBSD_BOOTDATA", Const, 16},
-		{"PT_OPENBSD_NOBTCFI", Const, 23},
 		{"PT_OPENBSD_RANDOMIZE", Const, 16},
 		{"PT_OPENBSD_WXNEEDED", Const, 16},
 		{"PT_PAX_FLAGS", Const, 16},
@@ -3755,16 +3620,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"STT_COMMON", Const, 0},
 		{"STT_FILE", Const, 0},
 		{"STT_FUNC", Const, 0},
-		{"STT_GNU_IFUNC", Const, 23},
 		{"STT_HIOS", Const, 0},
 		{"STT_HIPROC", Const, 0},
 		{"STT_LOOS", Const, 0},
 		{"STT_LOPROC", Const, 0},
 		{"STT_NOTYPE", Const, 0},
 		{"STT_OBJECT", Const, 0},
-		{"STT_RELC", Const, 23},
 		{"STT_SECTION", Const, 0},
-		{"STT_SRELC", Const, 23},
 		{"STT_TLS", Const, 0},
 		{"STV_DEFAULT", Const, 0},
 		{"STV_HIDDEN", Const, 0},
@@ -3842,19 +3704,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"Symbol.Size", Field, 0},
 		{"Symbol.Value", Field, 0},
 		{"Symbol.Version", Field, 13},
-		{"Symbol.VersionIndex", Field, 24},
-		{"Symbol.VersionScope", Field, 24},
-		{"SymbolVersionScope", Type, 24},
 		{"Type", Type, 0},
-		{"VER_FLG_BASE", Const, 24},
-		{"VER_FLG_INFO", Const, 24},
-		{"VER_FLG_WEAK", Const, 24},
 		{"Version", Type, 0},
-		{"VersionScopeGlobal", Const, 24},
-		{"VersionScopeHidden", Const, 24},
-		{"VersionScopeLocal", Const, 24},
-		{"VersionScopeNone", Const, 24},
-		{"VersionScopeSpecific", Const, 24},
 	},
 	"debug/gosym": {
 		{"(*DecodingError).Error", Method, 0},
@@ -4580,10 +4431,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"FS", Type, 16},
 	},
 	"encoding": {
-		{"BinaryAppender", Type, 24},
 		{"BinaryMarshaler", Type, 2},
 		{"BinaryUnmarshaler", Type, 2},
-		{"TextAppender", Type, 24},
 		{"TextMarshaler", Type, 2},
 		{"TextUnmarshaler", Type, 2},
 	},
@@ -4695,14 +4544,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"URLEncoding", Var, 0},
 	},
 	"encoding/binary": {
-		{"Append", Func, 23},
 		{"AppendByteOrder", Type, 19},
 		{"AppendUvarint", Func, 19},
 		{"AppendVarint", Func, 19},
 		{"BigEndian", Var, 0},
 		{"ByteOrder", Type, 0},
-		{"Decode", Func, 23},
-		{"Encode", Func, 23},
 		{"LittleEndian", Var, 0},
 		{"MaxVarintLen16", Const, 0},
 		{"MaxVarintLen32", Const, 0},
@@ -5462,7 +5308,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParenExpr.Rparen", Field, 0},
 		{"ParenExpr.X", Field, 0},
 		{"Pkg", Const, 0},
-		{"Preorder", Func, 23},
 		{"Print", Func, 0},
 		{"RECV", Const, 0},
 		{"RangeStmt", Type, 0},
@@ -6053,12 +5898,7 @@ var PackageSymbols = map[string][]Symbol{
 	},
 	"go/types": {
 		{"(*Alias).Obj", Method, 22},
-		{"(*Alias).Origin", Method, 23},
-		{"(*Alias).Rhs", Method, 23},
-		{"(*Alias).SetTypeParams", Method, 23},
 		{"(*Alias).String", Method, 22},
-		{"(*Alias).TypeArgs", Method, 23},
-		{"(*Alias).TypeParams", Method, 23},
 		{"(*Alias).Underlying", Method, 22},
 		{"(*ArgumentError).Error", Method, 18},
 		{"(*ArgumentError).Unwrap", Method, 18},
@@ -6103,7 +5943,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Func).Pkg", Method, 5},
 		{"(*Func).Pos", Method, 5},
 		{"(*Func).Scope", Method, 5},
-		{"(*Func).Signature", Method, 23},
 		{"(*Func).String", Method, 5},
 		{"(*Func).Type", Method, 5},
 		{"(*Info).ObjectOf", Method, 5},
@@ -6113,16 +5952,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Interface).Complete", Method, 5},
 		{"(*Interface).Embedded", Method, 5},
 		{"(*Interface).EmbeddedType", Method, 11},
-		{"(*Interface).EmbeddedTypes", Method, 24},
 		{"(*Interface).Empty", Method, 5},
 		{"(*Interface).ExplicitMethod", Method, 5},
-		{"(*Interface).ExplicitMethods", Method, 24},
 		{"(*Interface).IsComparable", Method, 18},
 		{"(*Interface).IsImplicit", Method, 18},
 		{"(*Interface).IsMethodSet", Method, 18},
 		{"(*Interface).MarkImplicit", Method, 18},
 		{"(*Interface).Method", Method, 5},
-		{"(*Interface).Methods", Method, 24},
 		{"(*Interface).NumEmbeddeds", Method, 5},
 		{"(*Interface).NumExplicitMethods", Method, 5},
 		{"(*Interface).NumMethods", Method, 5},
@@ -6143,11 +5979,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*MethodSet).At", Method, 5},
 		{"(*MethodSet).Len", Method, 5},
 		{"(*MethodSet).Lookup", Method, 5},
-		{"(*MethodSet).Methods", Method, 24},
 		{"(*MethodSet).String", Method, 5},
 		{"(*Named).AddMethod", Method, 5},
 		{"(*Named).Method", Method, 5},
-		{"(*Named).Methods", Method, 24},
 		{"(*Named).NumMethods", Method, 5},
 		{"(*Named).Obj", Method, 5},
 		{"(*Named).Origin", Method, 18},
@@ -6188,7 +6022,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Pointer).String", Method, 5},
 		{"(*Pointer).Underlying", Method, 5},
 		{"(*Scope).Child", Method, 5},
-		{"(*Scope).Children", Method, 24},
 		{"(*Scope).Contains", Method, 5},
 		{"(*Scope).End", Method, 5},
 		{"(*Scope).Innermost", Method, 5},
@@ -6224,7 +6057,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*StdSizes).Offsetsof", Method, 5},
 		{"(*StdSizes).Sizeof", Method, 5},
 		{"(*Struct).Field", Method, 5},
-		{"(*Struct).Fields", Method, 24},
 		{"(*Struct).NumFields", Method, 5},
 		{"(*Struct).String", Method, 5},
 		{"(*Struct).Tag", Method, 5},
@@ -6236,10 +6068,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Tuple).Len", Method, 5},
 		{"(*Tuple).String", Method, 5},
 		{"(*Tuple).Underlying", Method, 5},
-		{"(*Tuple).Variables", Method, 24},
 		{"(*TypeList).At", Method, 18},
 		{"(*TypeList).Len", Method, 18},
-		{"(*TypeList).Types", Method, 24},
 		{"(*TypeName).Exported", Method, 5},
 		{"(*TypeName).Id", Method, 5},
 		{"(*TypeName).IsAlias", Method, 9},
@@ -6257,11 +6087,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*TypeParam).Underlying", Method, 18},
 		{"(*TypeParamList).At", Method, 18},
 		{"(*TypeParamList).Len", Method, 18},
-		{"(*TypeParamList).TypeParams", Method, 24},
 		{"(*Union).Len", Method, 18},
 		{"(*Union).String", Method, 18},
 		{"(*Union).Term", Method, 18},
-		{"(*Union).Terms", Method, 24},
 		{"(*Union).Underlying", Method, 18},
 		{"(*Var).Anonymous", Method, 5},
 		{"(*Var).Embedded", Method, 11},
@@ -6532,12 +6360,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Hash).WriteByte", Method, 14},
 		{"(*Hash).WriteString", Method, 14},
 		{"Bytes", Func, 19},
-		{"Comparable", Func, 24},
 		{"Hash", Type, 14},
 		{"MakeSeed", Func, 14},
 		{"Seed", Type, 14},
 		{"String", Func, 19},
-		{"WriteComparable", Func, 24},
 	},
 	"html": {
 		{"EscapeString", Func, 0},
@@ -7166,12 +6992,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"TempFile", Func, 0},
 		{"WriteFile", Func, 0},
 	},
-	"iter": {
-		{"Pull", Func, 23},
-		{"Pull2", Func, 23},
-		{"Seq", Type, 23},
-		{"Seq2", Type, 23},
-	},
 	"log": {
 		{"(*Logger).Fatal", Method, 0},
 		{"(*Logger).Fatalf", Method, 0},
@@ -7224,7 +7044,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*JSONHandler).WithGroup", Method, 21},
 		{"(*Level).UnmarshalJSON", Method, 21},
 		{"(*Level).UnmarshalText", Method, 21},
-		{"(*LevelVar).AppendText", Method, 24},
 		{"(*LevelVar).Level", Method, 21},
 		{"(*LevelVar).MarshalText", Method, 21},
 		{"(*LevelVar).Set", Method, 21},
@@ -7253,7 +7072,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Attr).Equal", Method, 21},
 		{"(Attr).String", Method, 21},
 		{"(Kind).String", Method, 21},
-		{"(Level).AppendText", Method, 24},
 		{"(Level).Level", Method, 21},
 		{"(Level).MarshalJSON", Method, 21},
 		{"(Level).MarshalText", Method, 21},
@@ -7284,7 +7102,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Debug", Func, 21},
 		{"DebugContext", Func, 21},
 		{"Default", Func, 21},
-		{"DiscardHandler", Var, 24},
 		{"Duration", Func, 21},
 		{"DurationValue", Func, 21},
 		{"Error", Func, 21},
@@ -7405,16 +7222,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"Writer", Type, 0},
 	},
 	"maps": {
-		{"All", Func, 23},
 		{"Clone", Func, 21},
-		{"Collect", Func, 23},
 		{"Copy", Func, 21},
 		{"DeleteFunc", Func, 21},
 		{"Equal", Func, 21},
 		{"EqualFunc", Func, 21},
-		{"Insert", Func, 23},
-		{"Keys", Func, 23},
-		{"Values", Func, 23},
 	},
 	"math": {
 		{"Abs", Func, 0},
@@ -7520,7 +7332,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Float).Acc", Method, 5},
 		{"(*Float).Add", Method, 5},
 		{"(*Float).Append", Method, 5},
-		{"(*Float).AppendText", Method, 24},
 		{"(*Float).Cmp", Method, 5},
 		{"(*Float).Copy", Method, 5},
 		{"(*Float).Float32", Method, 5},
@@ -7567,7 +7378,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Int).And", Method, 0},
 		{"(*Int).AndNot", Method, 0},
 		{"(*Int).Append", Method, 6},
-		{"(*Int).AppendText", Method, 24},
 		{"(*Int).Binomial", Method, 0},
 		{"(*Int).Bit", Method, 0},
 		{"(*Int).BitLen", Method, 0},
@@ -7624,7 +7434,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Int).Xor", Method, 0},
 		{"(*Rat).Abs", Method, 0},
 		{"(*Rat).Add", Method, 0},
-		{"(*Rat).AppendText", Method, 24},
 		{"(*Rat).Cmp", Method, 0},
 		{"(*Rat).Denom", Method, 0},
 		{"(*Rat).Float32", Method, 4},
@@ -7807,13 +7616,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"Zipf", Type, 0},
 	},
 	"math/rand/v2": {
-		{"(*ChaCha8).AppendBinary", Method, 24},
 		{"(*ChaCha8).MarshalBinary", Method, 22},
-		{"(*ChaCha8).Read", Method, 23},
 		{"(*ChaCha8).Seed", Method, 22},
 		{"(*ChaCha8).Uint64", Method, 22},
 		{"(*ChaCha8).UnmarshalBinary", Method, 22},
-		{"(*PCG).AppendBinary", Method, 24},
 		{"(*PCG).MarshalBinary", Method, 22},
 		{"(*PCG).Seed", Method, 22},
 		{"(*PCG).Uint64", Method, 22},
@@ -7830,7 +7636,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Rand).NormFloat64", Method, 22},
 		{"(*Rand).Perm", Method, 22},
 		{"(*Rand).Shuffle", Method, 22},
-		{"(*Rand).Uint", Method, 23},
 		{"(*Rand).Uint32", Method, 22},
 		{"(*Rand).Uint32N", Method, 22},
 		{"(*Rand).Uint64", Method, 22},
@@ -7858,7 +7663,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Rand", Type, 22},
 		{"Shuffle", Func, 22},
 		{"Source", Type, 22},
-		{"Uint", Func, 23},
 		{"Uint32", Func, 22},
 		{"Uint32N", Func, 22},
 		{"Uint64", Func, 22},
@@ -7939,7 +7743,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*DNSError).Error", Method, 0},
 		{"(*DNSError).Temporary", Method, 0},
 		{"(*DNSError).Timeout", Method, 0},
-		{"(*DNSError).Unwrap", Method, 23},
 		{"(*Dialer).Dial", Method, 1},
 		{"(*Dialer).DialContext", Method, 7},
 		{"(*Dialer).MultipathTCP", Method, 21},
@@ -8006,7 +7809,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*TCPConn).RemoteAddr", Method, 0},
 		{"(*TCPConn).SetDeadline", Method, 0},
 		{"(*TCPConn).SetKeepAlive", Method, 0},
-		{"(*TCPConn).SetKeepAliveConfig", Method, 23},
 		{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
 		{"(*TCPConn).SetLinger", Method, 0},
 		{"(*TCPConn).SetNoDelay", Method, 0},
@@ -8081,7 +7883,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*UnixListener).SyscallConn", Method, 10},
 		{"(Flags).String", Method, 0},
 		{"(HardwareAddr).String", Method, 0},
-		{"(IP).AppendText", Method, 24},
 		{"(IP).DefaultMask", Method, 0},
 		{"(IP).Equal", Method, 0},
 		{"(IP).IsGlobalUnicast", Method, 0},
@@ -8121,7 +7922,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"DNSError.IsTimeout", Field, 0},
 		{"DNSError.Name", Field, 0},
 		{"DNSError.Server", Field, 0},
-		{"DNSError.UnwrapErr", Field, 23},
 		{"DefaultResolver", Var, 8},
 		{"Dial", Func, 0},
 		{"DialIP", Func, 0},
@@ -8137,7 +7937,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Dialer.DualStack", Field, 2},
 		{"Dialer.FallbackDelay", Field, 5},
 		{"Dialer.KeepAlive", Field, 3},
-		{"Dialer.KeepAliveConfig", Field, 23},
 		{"Dialer.LocalAddr", Field, 1},
 		{"Dialer.Resolver", Field, 8},
 		{"Dialer.Timeout", Field, 1},
@@ -8190,16 +7989,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"Interfaces", Func, 0},
 		{"InvalidAddrError", Type, 0},
 		{"JoinHostPort", Func, 0},
-		{"KeepAliveConfig", Type, 23},
-		{"KeepAliveConfig.Count", Field, 23},
-		{"KeepAliveConfig.Enable", Field, 23},
-		{"KeepAliveConfig.Idle", Field, 23},
-		{"KeepAliveConfig.Interval", Field, 23},
 		{"Listen", Func, 0},
 		{"ListenConfig", Type, 11},
 		{"ListenConfig.Control", Field, 11},
 		{"ListenConfig.KeepAlive", Field, 13},
-		{"ListenConfig.KeepAliveConfig", Field, 23},
 		{"ListenIP", Func, 0},
 		{"ListenMulticastUDP", Func, 0},
 		{"ListenPacket", Func, 0},
@@ -8282,16 +8075,12 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*MaxBytesError).Error", Method, 19},
 		{"(*ProtocolError).Error", Method, 0},
 		{"(*ProtocolError).Is", Method, 21},
-		{"(*Protocols).SetHTTP1", Method, 24},
-		{"(*Protocols).SetHTTP2", Method, 24},
-		{"(*Protocols).SetUnencryptedHTTP2", Method, 24},
 		{"(*Request).AddCookie", Method, 0},
 		{"(*Request).BasicAuth", Method, 4},
 		{"(*Request).Clone", Method, 13},
 		{"(*Request).Context", Method, 7},
 		{"(*Request).Cookie", Method, 0},
 		{"(*Request).Cookies", Method, 0},
-		{"(*Request).CookiesNamed", Method, 23},
 		{"(*Request).FormFile", Method, 0},
 		{"(*Request).FormValue", Method, 0},
 		{"(*Request).MultipartReader", Method, 0},
@@ -8344,10 +8133,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Header).Values", Method, 14},
 		{"(Header).Write", Method, 0},
 		{"(Header).WriteSubset", Method, 0},
-		{"(Protocols).HTTP1", Method, 24},
-		{"(Protocols).HTTP2", Method, 24},
-		{"(Protocols).String", Method, 24},
-		{"(Protocols).UnencryptedHTTP2", Method, 24},
 		{"AllowQuerySemicolons", Func, 17},
 		{"CanonicalHeaderKey", Func, 0},
 		{"Client", Type, 0},
@@ -8363,9 +8148,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Cookie.HttpOnly", Field, 0},
 		{"Cookie.MaxAge", Field, 0},
 		{"Cookie.Name", Field, 0},
-		{"Cookie.Partitioned", Field, 23},
 		{"Cookie.Path", Field, 0},
-		{"Cookie.Quoted", Field, 23},
 		{"Cookie.Raw", Field, 0},
 		{"Cookie.RawExpires", Field, 0},
 		{"Cookie.SameSite", Field, 11},
@@ -8410,18 +8193,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"FileSystem", Type, 0},
 		{"Flusher", Type, 0},
 		{"Get", Func, 0},
-		{"HTTP2Config", Type, 24},
-		{"HTTP2Config.CountError", Field, 24},
-		{"HTTP2Config.MaxConcurrentStreams", Field, 24},
-		{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
-		{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
-		{"HTTP2Config.MaxReadFrameSize", Field, 24},
-		{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
-		{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
-		{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
-		{"HTTP2Config.PingTimeout", Field, 24},
-		{"HTTP2Config.SendPingTimeout", Field, 24},
-		{"HTTP2Config.WriteByteTimeout", Field, 24},
 		{"Handle", Func, 0},
 		{"HandleFunc", Func, 0},
 		{"Handler", Type, 0},
@@ -8454,15 +8225,12 @@ var PackageSymbols = map[string][]Symbol{
 		{"NoBody", Var, 8},
 		{"NotFound", Func, 0},
 		{"NotFoundHandler", Func, 0},
-		{"ParseCookie", Func, 23},
 		{"ParseHTTPVersion", Func, 0},
-		{"ParseSetCookie", Func, 23},
 		{"ParseTime", Func, 1},
 		{"Post", Func, 0},
 		{"PostForm", Func, 0},
 		{"ProtocolError", Type, 0},
 		{"ProtocolError.ErrorString", Field, 0},
-		{"Protocols", Type, 24},
 		{"ProxyFromEnvironment", Func, 0},
 		{"ProxyURL", Func, 0},
 		{"PushOptions", Type, 8},
@@ -8484,7 +8252,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Request.Host", Field, 0},
 		{"Request.Method", Field, 0},
 		{"Request.MultipartForm", Field, 0},
-		{"Request.Pattern", Field, 23},
 		{"Request.PostForm", Field, 1},
 		{"Request.Proto", Field, 0},
 		{"Request.ProtoMajor", Field, 0},
@@ -8532,11 +8299,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Server.ConnState", Field, 3},
 		{"Server.DisableGeneralOptionsHandler", Field, 20},
 		{"Server.ErrorLog", Field, 3},
-		{"Server.HTTP2", Field, 24},
 		{"Server.Handler", Field, 0},
 		{"Server.IdleTimeout", Field, 8},
 		{"Server.MaxHeaderBytes", Field, 0},
-		{"Server.Protocols", Field, 24},
 		{"Server.ReadHeaderTimeout", Field, 8},
 		{"Server.ReadTimeout", Field, 0},
 		{"Server.TLSConfig", Field, 0},
@@ -8626,14 +8391,12 @@ var PackageSymbols = map[string][]Symbol{
 		{"Transport.ExpectContinueTimeout", Field, 6},
 		{"Transport.ForceAttemptHTTP2", Field, 13},
 		{"Transport.GetProxyConnectHeader", Field, 16},
-		{"Transport.HTTP2", Field, 24},
 		{"Transport.IdleConnTimeout", Field, 7},
 		{"Transport.MaxConnsPerHost", Field, 11},
 		{"Transport.MaxIdleConns", Field, 7},
 		{"Transport.MaxIdleConnsPerHost", Field, 0},
 		{"Transport.MaxResponseHeaderBytes", Field, 7},
 		{"Transport.OnProxyConnectResponse", Field, 20},
-		{"Transport.Protocols", Field, 24},
 		{"Transport.Proxy", Field, 0},
 		{"Transport.ProxyConnectHeader", Field, 8},
 		{"Transport.ReadBufferSize", Field, 13},
@@ -8690,7 +8453,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"DefaultRemoteAddr", Const, 0},
 		{"NewRecorder", Func, 0},
 		{"NewRequest", Func, 7},
-		{"NewRequestWithContext", Func, 23},
 		{"NewServer", Func, 0},
 		{"NewTLSServer", Func, 0},
 		{"NewUnstartedServer", Func, 0},
@@ -8821,8 +8583,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*AddrPort).UnmarshalText", Method, 18},
 		{"(*Prefix).UnmarshalBinary", Method, 18},
 		{"(*Prefix).UnmarshalText", Method, 18},
-		{"(Addr).AppendBinary", Method, 24},
-		{"(Addr).AppendText", Method, 24},
 		{"(Addr).AppendTo", Method, 18},
 		{"(Addr).As16", Method, 18},
 		{"(Addr).As4", Method, 18},
@@ -8853,8 +8613,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Addr).WithZone", Method, 18},
 		{"(Addr).Zone", Method, 18},
 		{"(AddrPort).Addr", Method, 18},
-		{"(AddrPort).AppendBinary", Method, 24},
-		{"(AddrPort).AppendText", Method, 24},
 		{"(AddrPort).AppendTo", Method, 18},
 		{"(AddrPort).Compare", Method, 22},
 		{"(AddrPort).IsValid", Method, 18},
@@ -8863,8 +8621,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(AddrPort).Port", Method, 18},
 		{"(AddrPort).String", Method, 18},
 		{"(Prefix).Addr", Method, 18},
-		{"(Prefix).AppendBinary", Method, 24},
-		{"(Prefix).AppendText", Method, 24},
 		{"(Prefix).AppendTo", Method, 18},
 		{"(Prefix).Bits", Method, 18},
 		{"(Prefix).Contains", Method, 18},
@@ -9049,7 +8805,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Error).Temporary", Method, 6},
 		{"(*Error).Timeout", Method, 6},
 		{"(*Error).Unwrap", Method, 13},
-		{"(*URL).AppendBinary", Method, 24},
 		{"(*URL).EscapedFragment", Method, 15},
 		{"(*URL).EscapedPath", Method, 5},
 		{"(*URL).Hostname", Method, 8},
@@ -9149,17 +8904,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*ProcessState).SysUsage", Method, 0},
 		{"(*ProcessState).SystemTime", Method, 0},
 		{"(*ProcessState).UserTime", Method, 0},
-		{"(*Root).Close", Method, 24},
-		{"(*Root).Create", Method, 24},
-		{"(*Root).FS", Method, 24},
-		{"(*Root).Lstat", Method, 24},
-		{"(*Root).Mkdir", Method, 24},
-		{"(*Root).Name", Method, 24},
-		{"(*Root).Open", Method, 24},
-		{"(*Root).OpenFile", Method, 24},
-		{"(*Root).OpenRoot", Method, 24},
-		{"(*Root).Remove", Method, 24},
-		{"(*Root).Stat", Method, 24},
 		{"(*SyscallError).Error", Method, 0},
 		{"(*SyscallError).Timeout", Method, 10},
 		{"(*SyscallError).Unwrap", Method, 13},
@@ -9173,7 +8917,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Chown", Func, 0},
 		{"Chtimes", Func, 0},
 		{"Clearenv", Func, 0},
-		{"CopyFS", Func, 23},
 		{"Create", Func, 0},
 		{"CreateTemp", Func, 16},
 		{"DevNull", Const, 0},
@@ -9253,8 +8996,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"O_WRONLY", Const, 0},
 		{"Open", Func, 0},
 		{"OpenFile", Func, 0},
-		{"OpenInRoot", Func, 24},
-		{"OpenRoot", Func, 24},
 		{"PathError", Type, 0},
 		{"PathError.Err", Field, 0},
 		{"PathError.Op", Field, 0},
@@ -9276,7 +9017,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Remove", Func, 0},
 		{"RemoveAll", Func, 0},
 		{"Rename", Func, 0},
-		{"Root", Type, 24},
 		{"SEEK_CUR", Const, 0},
 		{"SEEK_END", Const, 0},
 		{"SEEK_SET", Const, 0},
@@ -9410,7 +9150,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"IsLocal", Func, 20},
 		{"Join", Func, 0},
 		{"ListSeparator", Const, 0},
-		{"Localize", Func, 23},
 		{"Match", Func, 0},
 		{"Rel", Func, 0},
 		{"Separator", Const, 0},
@@ -9493,8 +9232,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Value).Pointer", Method, 0},
 		{"(Value).Recv", Method, 0},
 		{"(Value).Send", Method, 0},
-		{"(Value).Seq", Method, 23},
-		{"(Value).Seq2", Method, 23},
 		{"(Value).Set", Method, 0},
 		{"(Value).SetBool", Method, 0},
 		{"(Value).SetBytes", Method, 0},
@@ -9577,7 +9314,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"SelectSend", Const, 1},
 		{"SendDir", Const, 0},
 		{"Slice", Const, 0},
-		{"SliceAt", Func, 23},
 		{"SliceHeader", Type, 0},
 		{"SliceHeader.Cap", Field, 0},
 		{"SliceHeader.Data", Field, 0},
@@ -9618,7 +9354,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Zero", Func, 0},
 	},
 	"regexp": {
-		{"(*Regexp).AppendText", Method, 24},
 		{"(*Regexp).Copy", Method, 6},
 		{"(*Regexp).Expand", Method, 0},
 		{"(*Regexp).ExpandString", Method, 0},
@@ -9799,8 +9534,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*StackRecord).Stack", Method, 0},
 		{"(*TypeAssertionError).Error", Method, 0},
 		{"(*TypeAssertionError).RuntimeError", Method, 0},
-		{"(Cleanup).Stop", Method, 24},
-		{"AddCleanup", Func, 24},
 		{"BlockProfile", Func, 1},
 		{"BlockProfileRecord", Type, 1},
 		{"BlockProfileRecord.Count", Field, 1},
@@ -9811,7 +9544,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Caller", Func, 0},
 		{"Callers", Func, 0},
 		{"CallersFrames", Func, 7},
-		{"Cleanup", Type, 24},
 		{"Compiler", Const, 0},
 		{"Error", Type, 0},
 		{"Frame", Type, 7},
@@ -9923,7 +9655,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"BuildSetting", Type, 18},
 		{"BuildSetting.Key", Field, 18},
 		{"BuildSetting.Value", Field, 18},
-		{"CrashOptions", Type, 23},
 		{"FreeOSMemory", Func, 1},
 		{"GCStats", Type, 1},
 		{"GCStats.LastGC", Field, 1},
@@ -9941,7 +9672,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"PrintStack", Func, 0},
 		{"ReadBuildInfo", Func, 12},
 		{"ReadGCStats", Func, 1},
-		{"SetCrashOutput", Func, 23},
 		{"SetGCPercent", Func, 1},
 		{"SetMaxStack", Func, 2},
 		{"SetMaxThreads", Func, 2},
@@ -10012,15 +9742,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"WithRegion", Func, 11},
 	},
 	"slices": {
-		{"All", Func, 23},
-		{"AppendSeq", Func, 23},
-		{"Backward", Func, 23},
 		{"BinarySearch", Func, 21},
 		{"BinarySearchFunc", Func, 21},
-		{"Chunk", Func, 23},
 		{"Clip", Func, 21},
 		{"Clone", Func, 21},
-		{"Collect", Func, 23},
 		{"Compact", Func, 21},
 		{"CompactFunc", Func, 21},
 		{"Compare", Func, 21},
@@ -10042,16 +9767,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"MaxFunc", Func, 21},
 		{"Min", Func, 21},
 		{"MinFunc", Func, 21},
-		{"Repeat", Func, 23},
 		{"Replace", Func, 21},
 		{"Reverse", Func, 21},
 		{"Sort", Func, 21},
 		{"SortFunc", Func, 21},
 		{"SortStableFunc", Func, 21},
-		{"Sorted", Func, 23},
-		{"SortedFunc", Func, 23},
-		{"SortedStableFunc", Func, 23},
-		{"Values", Func, 23},
 	},
 	"sort": {
 		{"(Float64Slice).Len", Method, 0},
@@ -10174,8 +9894,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"EqualFold", Func, 0},
 		{"Fields", Func, 0},
 		{"FieldsFunc", Func, 0},
-		{"FieldsFuncSeq", Func, 24},
-		{"FieldsSeq", Func, 24},
 		{"HasPrefix", Func, 0},
 		{"HasSuffix", Func, 0},
 		{"Index", Func, 0},
@@ -10188,7 +9906,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"LastIndexAny", Func, 0},
 		{"LastIndexByte", Func, 5},
 		{"LastIndexFunc", Func, 0},
-		{"Lines", Func, 24},
 		{"Map", Func, 0},
 		{"NewReader", Func, 0},
 		{"NewReplacer", Func, 0},
@@ -10200,9 +9917,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Split", Func, 0},
 		{"SplitAfter", Func, 0},
 		{"SplitAfterN", Func, 0},
-		{"SplitAfterSeq", Func, 24},
 		{"SplitN", Func, 0},
-		{"SplitSeq", Func, 24},
 		{"Title", Func, 0},
 		{"ToLower", Func, 0},
 		{"ToLowerSpecial", Func, 0},
@@ -10221,14 +9936,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"TrimSpace", Func, 0},
 		{"TrimSuffix", Func, 1},
 	},
-	"structs": {
-		{"HostLayout", Type, 23},
-	},
 	"sync": {
 		{"(*Cond).Broadcast", Method, 0},
 		{"(*Cond).Signal", Method, 0},
 		{"(*Cond).Wait", Method, 0},
-		{"(*Map).Clear", Method, 23},
 		{"(*Map).CompareAndDelete", Method, 20},
 		{"(*Map).CompareAndSwap", Method, 20},
 		{"(*Map).Delete", Method, 9},
@@ -10275,17 +9986,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Bool).Store", Method, 19},
 		{"(*Bool).Swap", Method, 19},
 		{"(*Int32).Add", Method, 19},
-		{"(*Int32).And", Method, 23},
 		{"(*Int32).CompareAndSwap", Method, 19},
 		{"(*Int32).Load", Method, 19},
-		{"(*Int32).Or", Method, 23},
 		{"(*Int32).Store", Method, 19},
 		{"(*Int32).Swap", Method, 19},
 		{"(*Int64).Add", Method, 19},
-		{"(*Int64).And", Method, 23},
 		{"(*Int64).CompareAndSwap", Method, 19},
 		{"(*Int64).Load", Method, 19},
-		{"(*Int64).Or", Method, 23},
 		{"(*Int64).Store", Method, 19},
 		{"(*Int64).Swap", Method, 19},
 		{"(*Pointer).CompareAndSwap", Method, 19},
@@ -10293,24 +10000,18 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Pointer).Store", Method, 19},
 		{"(*Pointer).Swap", Method, 19},
 		{"(*Uint32).Add", Method, 19},
-		{"(*Uint32).And", Method, 23},
 		{"(*Uint32).CompareAndSwap", Method, 19},
 		{"(*Uint32).Load", Method, 19},
-		{"(*Uint32).Or", Method, 23},
 		{"(*Uint32).Store", Method, 19},
 		{"(*Uint32).Swap", Method, 19},
 		{"(*Uint64).Add", Method, 19},
-		{"(*Uint64).And", Method, 23},
 		{"(*Uint64).CompareAndSwap", Method, 19},
 		{"(*Uint64).Load", Method, 19},
-		{"(*Uint64).Or", Method, 23},
 		{"(*Uint64).Store", Method, 19},
 		{"(*Uint64).Swap", Method, 19},
 		{"(*Uintptr).Add", Method, 19},
-		{"(*Uintptr).And", Method, 23},
 		{"(*Uintptr).CompareAndSwap", Method, 19},
 		{"(*Uintptr).Load", Method, 19},
-		{"(*Uintptr).Or", Method, 23},
 		{"(*Uintptr).Store", Method, 19},
 		{"(*Uintptr).Swap", Method, 19},
 		{"(*Value).CompareAndSwap", Method, 17},
@@ -10322,11 +10023,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"AddUint32", Func, 0},
 		{"AddUint64", Func, 0},
 		{"AddUintptr", Func, 0},
-		{"AndInt32", Func, 23},
-		{"AndInt64", Func, 23},
-		{"AndUint32", Func, 23},
-		{"AndUint64", Func, 23},
-		{"AndUintptr", Func, 23},
 		{"Bool", Type, 19},
 		{"CompareAndSwapInt32", Func, 0},
 		{"CompareAndSwapInt64", Func, 0},
@@ -10342,11 +10038,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"LoadUint32", Func, 0},
 		{"LoadUint64", Func, 0},
 		{"LoadUintptr", Func, 0},
-		{"OrInt32", Func, 23},
-		{"OrInt64", Func, 23},
-		{"OrUint32", Func, 23},
-		{"OrUint64", Func, 23},
-		{"OrUintptr", Func, 23},
 		{"Pointer", Type, 19},
 		{"StoreInt32", Func, 0},
 		{"StoreInt64", Func, 0},
@@ -16509,7 +16200,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"WSAEACCES", Const, 2},
 		{"WSAECONNABORTED", Const, 9},
 		{"WSAECONNRESET", Const, 3},
-		{"WSAENOPROTOOPT", Const, 23},
 		{"WSAEnumProtocols", Func, 2},
 		{"WSAID_CONNECTEX", Var, 1},
 		{"WSAIoctl", Func, 0},
@@ -16618,9 +16308,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ValueOf", Func, 0},
 	},
 	"testing": {
-		{"(*B).Chdir", Method, 24},
 		{"(*B).Cleanup", Method, 14},
-		{"(*B).Context", Method, 24},
 		{"(*B).Elapsed", Method, 20},
 		{"(*B).Error", Method, 0},
 		{"(*B).Errorf", Method, 0},
@@ -16632,7 +16320,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*B).Helper", Method, 9},
 		{"(*B).Log", Method, 0},
 		{"(*B).Logf", Method, 0},
-		{"(*B).Loop", Method, 24},
 		{"(*B).Name", Method, 8},
 		{"(*B).ReportAllocs", Method, 1},
 		{"(*B).ReportMetric", Method, 13},
@@ -16650,9 +16337,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*B).StopTimer", Method, 0},
 		{"(*B).TempDir", Method, 15},
 		{"(*F).Add", Method, 18},
-		{"(*F).Chdir", Method, 24},
 		{"(*F).Cleanup", Method, 18},
-		{"(*F).Context", Method, 24},
 		{"(*F).Error", Method, 18},
 		{"(*F).Errorf", Method, 18},
 		{"(*F).Fail", Method, 18},
@@ -16673,9 +16358,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*F).TempDir", Method, 18},
 		{"(*M).Run", Method, 4},
 		{"(*PB).Next", Method, 3},
-		{"(*T).Chdir", Method, 24},
 		{"(*T).Cleanup", Method, 14},
-		{"(*T).Context", Method, 24},
 		{"(*T).Deadline", Method, 15},
 		{"(*T).Error", Method, 0},
 		{"(*T).Errorf", Method, 0},
@@ -17166,9 +16849,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Time).Add", Method, 0},
 		{"(Time).AddDate", Method, 0},
 		{"(Time).After", Method, 0},
-		{"(Time).AppendBinary", Method, 24},
 		{"(Time).AppendFormat", Method, 5},
-		{"(Time).AppendText", Method, 24},
 		{"(Time).Before", Method, 0},
 		{"(Time).Clock", Method, 0},
 		{"(Time).Compare", Method, 20},
@@ -17603,7 +17284,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"Encode", Func, 0},
 		{"EncodeRune", Func, 0},
 		{"IsSurrogate", Func, 0},
-		{"RuneLen", Func, 23},
 	},
 	"unicode/utf8": {
 		{"AppendRune", Func, 18},
@@ -17626,11 +17306,6 @@ var PackageSymbols = map[string][]Symbol{
 		{"ValidRune", Func, 1},
 		{"ValidString", Func, 0},
 	},
-	"unique": {
-		{"(Handle).Value", Method, 23},
-		{"Handle", Type, 23},
-		{"Make", Func, 23},
-	},
 	"unsafe": {
 		{"Add", Func, 0},
 		{"Alignof", Func, 0},
@@ -17642,9 +17317,4 @@ var PackageSymbols = map[string][]Symbol{
 		{"String", Func, 0},
 		{"StringData", Func, 0},
 	},
-	"weak": {
-		{"(Pointer).Value", Method, 24},
-		{"Make", Func, 24},
-		{"Pointer", Type, 24},
-	},
 }
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
new file mode 100644
index 00000000..ff9437a3
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
@@ -0,0 +1,137 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package tokeninternal provides access to some internal features of the token
+// package.
+package tokeninternal
+
+import (
+	"fmt"
+	"go/token"
+	"sort"
+	"sync"
+	"unsafe"
+)
+
+// GetLines returns the table of line-start offsets from a token.File.
+func GetLines(file *token.File) []int {
+	// token.File has a Lines method on Go 1.21 and later.
+	if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
+		return file.Lines()
+	}
+
+	// This declaration must match that of token.File.
+	// This creates a risk of dependency skew.
+	// For now we check that the size of the two
+	// declarations is the same, on the (fragile) assumption
+	// that future changes would add fields.
+	type tokenFile119 struct {
+		_     string
+		_     int
+		_     int
+		mu    sync.Mutex // we're not complete monsters
+		lines []int
+		_     []struct{}
+	}
+
+	if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
+		panic("unexpected token.File size")
+	}
+	var ptr *tokenFile119
+	type uP = unsafe.Pointer
+	*(*uP)(uP(&ptr)) = uP(file)
+	ptr.mu.Lock()
+	defer ptr.mu.Unlock()
+	return ptr.lines
+}
+
+// AddExistingFiles adds the specified files to the FileSet if they
+// are not already present. It panics if any pair of files in the
+// resulting FileSet would overlap.
+func AddExistingFiles(fset *token.FileSet, files []*token.File) {
+	// Punch through the FileSet encapsulation.
+	type tokenFileSet struct {
+		// This type remained essentially consistent from go1.16 to go1.21.
+		mutex sync.RWMutex
+		base  int
+		files []*token.File
+		_     *token.File // changed to atomic.Pointer[token.File] in go1.19
+	}
+
+	// If the size of token.FileSet changes, this will fail to compile.
+	const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
+	var _ [-delta * delta]int
+
+	type uP = unsafe.Pointer
+	var ptr *tokenFileSet
+	*(*uP)(uP(&ptr)) = uP(fset)
+	ptr.mutex.Lock()
+	defer ptr.mutex.Unlock()
+
+	// Merge and sort.
+	newFiles := append(ptr.files, files...)
+	sort.Slice(newFiles, func(i, j int) bool {
+		return newFiles[i].Base() < newFiles[j].Base()
+	})
+
+	// Reject overlapping files.
+	// Discard adjacent identical files.
+	out := newFiles[:0]
+	for i, file := range newFiles {
+		if i > 0 {
+			prev := newFiles[i-1]
+			if file == prev {
+				continue
+			}
+			if prev.Base()+prev.Size()+1 > file.Base() {
+				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
+					prev.Name(), prev.Base(), prev.Base()+prev.Size(),
+					file.Name(), file.Base(), file.Base()+file.Size()))
+			}
+		}
+		out = append(out, file)
+	}
+	newFiles = out
+
+	ptr.files = newFiles
+
+	// Advance FileSet.Base().
+	if len(newFiles) > 0 {
+		last := newFiles[len(newFiles)-1]
+		newBase := last.Base() + last.Size() + 1
+		if ptr.base < newBase {
+			ptr.base = newBase
+		}
+	}
+}
+
+// FileSetFor returns a new FileSet containing a sequence of new Files with
+// the same base, size, and line as the input files, for use in APIs that
+// require a FileSet.
+//
+// Precondition: the input files must be non-overlapping, and sorted in order
+// of their Base.
+func FileSetFor(files ...*token.File) *token.FileSet {
+	fset := token.NewFileSet()
+	for _, f := range files {
+		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
+		lines := GetLines(f)
+		f2.SetLines(lines)
+	}
+	return fset
+}
+
+// CloneFileSet creates a new FileSet holding all files in fset. It does not
+// create copies of the token.Files in fset: they are added to the resulting
+// FileSet unmodified.
+func CloneFileSet(fset *token.FileSet) *token.FileSet {
+	var files []*token.File
+	fset.Iterate(func(f *token.File) bool {
+		files = append(files, f)
+		return true
+	})
+	newFileSet := token.NewFileSet()
+	AddExistingFiles(newFileSet, files)
+	return newFileSet
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
deleted file mode 100644
index cdae2b8e..00000000
--- a/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package typeparams contains common utilities for writing tools that
-// interact with generic Go code, as introduced with Go 1.18. It
-// supplements the standard library APIs. Notably, the StructuralTerms
-// API computes a minimal representation of the structural
-// restrictions on a type parameter.
-//
-// An external version of these APIs is available in the
-// golang.org/x/exp/typeparams module.
-package typeparams
-
-import (
-	"go/ast"
-	"go/token"
-	"go/types"
-)
-
-// UnpackIndexExpr extracts data from AST nodes that represent index
-// expressions.
-//
-// For an ast.IndexExpr, the resulting indices slice will contain exactly one
-// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
-// number of index expressions.
-//
-// For nodes that don't represent index expressions, the first return value of
-// UnpackIndexExpr will be nil.
-func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
-	switch e := n.(type) {
-	case *ast.IndexExpr:
-		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
-	case *ast.IndexListExpr:
-		return e.X, e.Lbrack, e.Indices, e.Rbrack
-	}
-	return nil, token.NoPos, nil, token.NoPos
-}
-
-// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
-// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
-// will panic.
-func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
-	switch len(indices) {
-	case 0:
-		panic("empty indices")
-	case 1:
-		return &ast.IndexExpr{
-			X:      x,
-			Lbrack: lbrack,
-			Index:  indices[0],
-			Rbrack: rbrack,
-		}
-	default:
-		return &ast.IndexListExpr{
-			X:       x,
-			Lbrack:  lbrack,
-			Indices: indices,
-			Rbrack:  rbrack,
-		}
-	}
-}
-
-// IsTypeParam reports whether t is a type parameter (or an alias of one).
-func IsTypeParam(t types.Type) bool {
-	_, ok := types.Unalias(t).(*types.TypeParam)
-	return ok
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
deleted file mode 100644
index 6e83c6fb..00000000
--- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeparams
-
-import (
-	"fmt"
-	"go/types"
-)
-
-// CoreType returns the core type of T or nil if T does not have a core type.
-//
-// See https://go.dev/ref/spec#Core_types for the definition of a core type.
-func CoreType(T types.Type) types.Type {
-	U := T.Underlying()
-	if _, ok := U.(*types.Interface); !ok {
-		return U // for non-interface types,
-	}
-
-	terms, err := NormalTerms(U)
-	if len(terms) == 0 || err != nil {
-		// len(terms) -> empty type set of interface.
-		// err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
-		return nil // no core type.
-	}
-
-	U = terms[0].Type().Underlying()
-	var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
-	for identical = 1; identical < len(terms); identical++ {
-		if !types.Identical(U, terms[identical].Type().Underlying()) {
-			break
-		}
-	}
-
-	if identical == len(terms) {
-		// https://go.dev/ref/spec#Core_types
-		// "There is a single type U which is the underlying type of all types in the type set of T"
-		return U
-	}
-	ch, ok := U.(*types.Chan)
-	if !ok {
-		return nil // no core type as identical < len(terms) and U is not a channel.
-	}
-	// https://go.dev/ref/spec#Core_types
-	// "the type chan E if T contains only bidirectional channels, or the type chan<- E or
-	// <-chan E depending on the direction of the directional channels present."
-	for chans := identical; chans < len(terms); chans++ {
-		curr, ok := terms[chans].Type().Underlying().(*types.Chan)
-		if !ok {
-			return nil
-		}
-		if !types.Identical(ch.Elem(), curr.Elem()) {
-			return nil // channel elements are not identical.
-		}
-		if ch.Dir() == types.SendRecv {
-			// ch is bidirectional. We can safely always use curr's direction.
-			ch = curr
-		} else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
-			// ch and curr are not bidirectional and not the same direction.
-			return nil
-		}
-	}
-	return ch
-}
-
-// NormalTerms returns a slice of terms representing the normalized structural
-// type restrictions of a type, if any.
-//
-// For all types other than *types.TypeParam, *types.Interface, and
-// *types.Union, this is just a single term with Tilde() == false and
-// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
-// below.
-//
-// Structural type restrictions of a type parameter are created via
-// non-interface types embedded in its constraint interface (directly, or via a
-// chain of interface embeddings). For example, in the declaration type
-// T[P interface{~int; m()}] int the structural restriction of the type
-// parameter P is ~int.
-//
-// With interface embedding and unions, the specification of structural type
-// restrictions may be arbitrarily complex. For example, consider the
-// following:
-//
-//	type A interface{ ~string|~[]byte }
-//
-//	type B interface{ int|string }
-//
-//	type C interface { ~string|~int }
-//
-//	type T[P interface{ A|B; C }] int
-//
-// In this example, the structural type restriction of P is ~string|int: A|B
-// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
-// which when intersected with C (~string|~int) yields ~string|int.
-//
-// NormalTerms computes these expansions and reductions, producing a
-// "normalized" form of the embeddings. A structural restriction is normalized
-// if it is a single union containing no interface terms, and is minimal in the
-// sense that removing any term changes the set of types satisfying the
-// constraint. It is left as a proof for the reader that, modulo sorting, there
-// is exactly one such normalized form.
-//
-// Because the minimal representation always takes this form, NormalTerms
-// returns a slice of tilde terms corresponding to the terms of the union in
-// the normalized structural restriction. An error is returned if the type is
-// invalid, exceeds complexity bounds, or has an empty type set. In the latter
-// case, NormalTerms returns ErrEmptyTypeSet.
-//
-// NormalTerms makes no guarantees about the order of terms, except that it
-// is deterministic.
-func NormalTerms(typ types.Type) ([]*types.Term, error) {
-	switch typ := typ.Underlying().(type) {
-	case *types.TypeParam:
-		return StructuralTerms(typ)
-	case *types.Union:
-		return UnionTermSet(typ)
-	case *types.Interface:
-		return InterfaceTermSet(typ)
-	default:
-		return []*types.Term{types.NewTerm(false, typ)}, nil
-	}
-}
-
-// Deref returns the type of the variable pointed to by t,
-// if t's core type is a pointer; otherwise it returns t.
-//
-// Do not assume that Deref(T)==T implies T is not a pointer:
-// consider "type T *T", for example.
-//
-// TODO(adonovan): ideally this would live in typesinternal, but that
-// creates an import cycle. Move there when we melt this package down.
-func Deref(t types.Type) types.Type {
-	if ptr, ok := CoreType(t).(*types.Pointer); ok {
-		return ptr.Elem()
-	}
-	return t
-}
-
-// MustDeref returns the type of the variable pointed to by t.
-// It panics if t's core type is not a pointer.
-//
-// TODO(adonovan): ideally this would live in typesinternal, but that
-// creates an import cycle. Move there when we melt this package down.
-func MustDeref(t types.Type) types.Type {
-	if ptr, ok := CoreType(t).(*types.Pointer); ok {
-		return ptr.Elem()
-	}
-	panic(fmt.Sprintf("%v is not a pointer", t))
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
deleted file mode 100644
index 0ade5c29..00000000
--- a/vendor/golang.org/x/tools/internal/typeparams/free.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeparams
-
-import (
-	"go/types"
-
-	"golang.org/x/tools/internal/aliases"
-)
-
-// Free is a memoization of the set of free type parameters within a
-// type. It makes a sequence of calls to [Free.Has] for overlapping
-// types more efficient. The zero value is ready for use.
-//
-// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
-type Free struct {
-	seen map[types.Type]bool
-}
-
-// Has reports whether the specified type has a free type parameter.
-func (w *Free) Has(typ types.Type) (res bool) {
-	// detect cycles
-	if x, ok := w.seen[typ]; ok {
-		return x
-	}
-	if w.seen == nil {
-		w.seen = make(map[types.Type]bool)
-	}
-	w.seen[typ] = false
-	defer func() {
-		w.seen[typ] = res
-	}()
-
-	switch t := typ.(type) {
-	case nil, *types.Basic: // TODO(gri) should nil be handled here?
-		break
-
-	case *types.Alias:
-		if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
-			return true // This is an uninstantiated Alias.
-		}
-		// The expansion of an alias can have free type parameters,
-		// whether or not the alias itself has type parameters:
-		//
-		//   func _[K comparable]() {
-		//     type Set      = map[K]bool // free(Set)      = {K}
-		//     type MapTo[V] = map[K]V    // free(Map[foo]) = {V}
-		//   }
-		//
-		// So, we must Unalias.
-		return w.Has(types.Unalias(t))
-
-	case *types.Array:
-		return w.Has(t.Elem())
-
-	case *types.Slice:
-		return w.Has(t.Elem())
-
-	case *types.Struct:
-		for i, n := 0, t.NumFields(); i < n; i++ {
-			if w.Has(t.Field(i).Type()) {
-				return true
-			}
-		}
-
-	case *types.Pointer:
-		return w.Has(t.Elem())
-
-	case *types.Tuple:
-		n := t.Len()
-		for i := 0; i < n; i++ {
-			if w.Has(t.At(i).Type()) {
-				return true
-			}
-		}
-
-	case *types.Signature:
-		// t.tparams may not be nil if we are looking at a signature
-		// of a generic function type (or an interface method) that is
-		// part of the type we're testing. We don't care about these type
-		// parameters.
-		// Similarly, the receiver of a method may declare (rather than
-		// use) type parameters, we don't care about those either.
-		// Thus, we only need to look at the input and result parameters.
-		return w.Has(t.Params()) || w.Has(t.Results())
-
-	case *types.Interface:
-		for i, n := 0, t.NumMethods(); i < n; i++ {
-			if w.Has(t.Method(i).Type()) {
-				return true
-			}
-		}
-		terms, err := InterfaceTermSet(t)
-		if err != nil {
-			return false // ill typed
-		}
-		for _, term := range terms {
-			if w.Has(term.Type()) {
-				return true
-			}
-		}
-
-	case *types.Map:
-		return w.Has(t.Key()) || w.Has(t.Elem())
-
-	case *types.Chan:
-		return w.Has(t.Elem())
-
-	case *types.Named:
-		args := t.TypeArgs()
-		if params := t.TypeParams(); params.Len() > args.Len() {
-			return true // this is an uninstantiated named type.
-		}
-		for i, n := 0, args.Len(); i < n; i++ {
-			if w.Has(args.At(i)) {
-				return true
-			}
-		}
-		return w.Has(t.Underlying()) // recurse for types local to parameterized functions
-
-	case *types.TypeParam:
-		return true
-
-	default:
-		panic(t) // unreachable
-	}
-
-	return false
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
deleted file mode 100644
index 93c80fdc..00000000
--- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeparams
-
-import (
-	"errors"
-	"fmt"
-	"go/types"
-	"os"
-	"strings"
-)
-
-//go:generate go run copytermlist.go
-
-const debug = false
-
-var ErrEmptyTypeSet = errors.New("empty type set")
-
-// StructuralTerms returns a slice of terms representing the normalized
-// structural type restrictions of a type parameter, if any.
-//
-// Structural type restrictions of a type parameter are created via
-// non-interface types embedded in its constraint interface (directly, or via a
-// chain of interface embeddings). For example, in the declaration
-//
-//	type T[P interface{~int; m()}] int
-//
-// the structural restriction of the type parameter P is ~int.
-//
-// With interface embedding and unions, the specification of structural type
-// restrictions may be arbitrarily complex. For example, consider the
-// following:
-//
-//	type A interface{ ~string|~[]byte }
-//
-//	type B interface{ int|string }
-//
-//	type C interface { ~string|~int }
-//
-//	type T[P interface{ A|B; C }] int
-//
-// In this example, the structural type restriction of P is ~string|int: A|B
-// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
-// which when intersected with C (~string|~int) yields ~string|int.
-//
-// StructuralTerms computes these expansions and reductions, producing a
-// "normalized" form of the embeddings. A structural restriction is normalized
-// if it is a single union containing no interface terms, and is minimal in the
-// sense that removing any term changes the set of types satisfying the
-// constraint. It is left as a proof for the reader that, modulo sorting, there
-// is exactly one such normalized form.
-//
-// Because the minimal representation always takes this form, StructuralTerms
-// returns a slice of tilde terms corresponding to the terms of the union in
-// the normalized structural restriction. An error is returned if the
-// constraint interface is invalid, exceeds complexity bounds, or has an empty
-// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
-//
-// StructuralTerms makes no guarantees about the order of terms, except that it
-// is deterministic.
-func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
-	constraint := tparam.Constraint()
-	if constraint == nil {
-		return nil, fmt.Errorf("%s has nil constraint", tparam)
-	}
-	iface, _ := constraint.Underlying().(*types.Interface)
-	if iface == nil {
-		return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
-	}
-	return InterfaceTermSet(iface)
-}
-
-// InterfaceTermSet computes the normalized terms for a constraint interface,
-// returning an error if the term set cannot be computed or is empty. In the
-// latter case, the error will be ErrEmptyTypeSet.
-//
-// See the documentation of StructuralTerms for more information on
-// normalization.
-func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
-	return computeTermSet(iface)
-}
-
-// UnionTermSet computes the normalized terms for a union, returning an error
-// if the term set cannot be computed or is empty. In the latter case, the
-// error will be ErrEmptyTypeSet.
-//
-// See the documentation of StructuralTerms for more information on
-// normalization.
-func UnionTermSet(union *types.Union) ([]*types.Term, error) {
-	return computeTermSet(union)
-}
-
-func computeTermSet(typ types.Type) ([]*types.Term, error) {
-	tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
-	if err != nil {
-		return nil, err
-	}
-	if tset.terms.isEmpty() {
-		return nil, ErrEmptyTypeSet
-	}
-	if tset.terms.isAll() {
-		return nil, nil
-	}
-	var terms []*types.Term
-	for _, term := range tset.terms {
-		terms = append(terms, types.NewTerm(term.tilde, term.typ))
-	}
-	return terms, nil
-}
-
-// A termSet holds the normalized set of terms for a given type.
-//
-// The name termSet is intentionally distinct from 'type set': a type set is
-// all types that implement a type (and includes method restrictions), whereas
-// a term set just represents the structural restrictions on a type.
-type termSet struct {
-	complete bool
-	terms    termlist
-}
-
-func indentf(depth int, format string, args ...interface{}) {
-	fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
-}
-
-func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
-	if t == nil {
-		panic("nil type")
-	}
-
-	if debug {
-		indentf(depth, "%s", t.String())
-		defer func() {
-			if err != nil {
-				indentf(depth, "=> %s", err)
-			} else {
-				indentf(depth, "=> %s", res.terms.String())
-			}
-		}()
-	}
-
-	const maxTermCount = 100
-	if tset, ok := seen[t]; ok {
-		if !tset.complete {
-			return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
-		}
-		return tset, nil
-	}
-
-	// Mark the current type as seen to avoid infinite recursion.
-	tset := new(termSet)
-	defer func() {
-		tset.complete = true
-	}()
-	seen[t] = tset
-
-	switch u := t.Underlying().(type) {
-	case *types.Interface:
-		// The term set of an interface is the intersection of the term sets of its
-		// embedded types.
-		tset.terms = allTermlist
-		for i := 0; i < u.NumEmbeddeds(); i++ {
-			embedded := u.EmbeddedType(i)
-			if _, ok := embedded.Underlying().(*types.TypeParam); ok {
-				return nil, fmt.Errorf("invalid embedded type %T", embedded)
-			}
-			tset2, err := computeTermSetInternal(embedded, seen, depth+1)
-			if err != nil {
-				return nil, err
-			}
-			tset.terms = tset.terms.intersect(tset2.terms)
-		}
-	case *types.Union:
-		// The term set of a union is the union of term sets of its terms.
-		tset.terms = nil
-		for i := 0; i < u.Len(); i++ {
-			t := u.Term(i)
-			var terms termlist
-			switch t.Type().Underlying().(type) {
-			case *types.Interface:
-				tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
-				if err != nil {
-					return nil, err
-				}
-				terms = tset2.terms
-			case *types.TypeParam, *types.Union:
-				// A stand-alone type parameter or union is not permitted as union
-				// term.
-				return nil, fmt.Errorf("invalid union term %T", t)
-			default:
-				if t.Type() == types.Typ[types.Invalid] {
-					continue
-				}
-				terms = termlist{{t.Tilde(), t.Type()}}
-			}
-			tset.terms = tset.terms.union(terms)
-			if len(tset.terms) > maxTermCount {
-				return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
-			}
-		}
-	case *types.TypeParam:
-		panic("unreachable")
-	default:
-		// For all other types, the term set is just a single non-tilde term
-		// holding the type itself.
-		if u != types.Typ[types.Invalid] {
-			tset.terms = termlist{{false, t}}
-		}
-	}
-	return tset, nil
-}
-
-// under is a facade for the go/types internal function of the same name. It is
-// used by typeterm.go.
-func under(t types.Type) types.Type {
-	return t.Underlying()
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
deleted file mode 100644
index cbd12f80..00000000
--- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by copytermlist.go DO NOT EDIT.
-
-package typeparams
-
-import (
-	"bytes"
-	"go/types"
-)
-
-// A termlist represents the type set represented by the union
-// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn.
-// A termlist is in normal form if all terms are disjoint.
-// termlist operations don't require the operands to be in
-// normal form.
-type termlist []*term
-
-// allTermlist represents the set of all types.
-// It is in normal form.
-var allTermlist = termlist{new(term)}
-
-// String prints the termlist exactly (without normalization).
-func (xl termlist) String() string {
-	if len(xl) == 0 {
-		return "∅"
-	}
-	var buf bytes.Buffer
-	for i, x := range xl {
-		if i > 0 {
-			buf.WriteString(" | ")
-		}
-		buf.WriteString(x.String())
-	}
-	return buf.String()
-}
-
-// isEmpty reports whether the termlist xl represents the empty set of types.
-func (xl termlist) isEmpty() bool {
-	// If there's a non-nil term, the entire list is not empty.
-	// If the termlist is in normal form, this requires at most
-	// one iteration.
-	for _, x := range xl {
-		if x != nil {
-			return false
-		}
-	}
-	return true
-}
-
-// isAll reports whether the termlist xl represents the set of all types.
-func (xl termlist) isAll() bool {
-	// If there's a 𝓤 term, the entire list is 𝓤.
-	// If the termlist is in normal form, this requires at most
-	// one iteration.
-	for _, x := range xl {
-		if x != nil && x.typ == nil {
-			return true
-		}
-	}
-	return false
-}
-
-// norm returns the normal form of xl.
-func (xl termlist) norm() termlist {
-	// Quadratic algorithm, but good enough for now.
-	// TODO(gri) fix asymptotic performance
-	used := make([]bool, len(xl))
-	var rl termlist
-	for i, xi := range xl {
-		if xi == nil || used[i] {
-			continue
-		}
-		for j := i + 1; j < len(xl); j++ {
-			xj := xl[j]
-			if xj == nil || used[j] {
-				continue
-			}
-			if u1, u2 := xi.union(xj); u2 == nil {
-				// If we encounter a 𝓤 term, the entire list is 𝓤.
-				// Exit early.
-				// (Note that this is not just an optimization;
-				// if we continue, we may end up with a 𝓤 term
-				// and other terms and the result would not be
-				// in normal form.)
-				if u1.typ == nil {
-					return allTermlist
-				}
-				xi = u1
-				used[j] = true // xj is now unioned into xi - ignore it in future iterations
-			}
-		}
-		rl = append(rl, xi)
-	}
-	return rl
-}
-
-// union returns the union xl ∪ yl.
-func (xl termlist) union(yl termlist) termlist {
-	return append(xl, yl...).norm()
-}
-
-// intersect returns the intersection xl ∩ yl.
-func (xl termlist) intersect(yl termlist) termlist {
-	if xl.isEmpty() || yl.isEmpty() {
-		return nil
-	}
-
-	// Quadratic algorithm, but good enough for now.
-	// TODO(gri) fix asymptotic performance
-	var rl termlist
-	for _, x := range xl {
-		for _, y := range yl {
-			if r := x.intersect(y); r != nil {
-				rl = append(rl, r)
-			}
-		}
-	}
-	return rl.norm()
-}
-
-// equal reports whether xl and yl represent the same type set.
-func (xl termlist) equal(yl termlist) bool {
-	// TODO(gri) this should be more efficient
-	return xl.subsetOf(yl) && yl.subsetOf(xl)
-}
-
-// includes reports whether t ∈ xl.
-func (xl termlist) includes(t types.Type) bool {
-	for _, x := range xl {
-		if x.includes(t) {
-			return true
-		}
-	}
-	return false
-}
-
-// supersetOf reports whether y ⊆ xl.
-func (xl termlist) supersetOf(y *term) bool {
-	for _, x := range xl {
-		if y.subsetOf(x) {
-			return true
-		}
-	}
-	return false
-}
-
-// subsetOf reports whether xl ⊆ yl.
-func (xl termlist) subsetOf(yl termlist) bool {
-	if yl.isEmpty() {
-		return xl.isEmpty()
-	}
-
-	// each term x of xl must be a subset of yl
-	for _, x := range xl {
-		if !yl.supersetOf(x) {
-			return false // x is not a subset yl
-		}
-	}
-	return true
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
deleted file mode 100644
index 7350bb70..00000000
--- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by copytermlist.go DO NOT EDIT.
-
-package typeparams
-
-import "go/types"
-
-// A term describes elementary type sets:
-//
-//	 ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
-//	 𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
-//	 T:  &term{false, T}  == {T}                    // set of type T
-//	~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
-type term struct {
-	tilde bool // valid if typ != nil
-	typ   types.Type
-}
-
-func (x *term) String() string {
-	switch {
-	case x == nil:
-		return "∅"
-	case x.typ == nil:
-		return "𝓤"
-	case x.tilde:
-		return "~" + x.typ.String()
-	default:
-		return x.typ.String()
-	}
-}
-
-// equal reports whether x and y represent the same type set.
-func (x *term) equal(y *term) bool {
-	// easy cases
-	switch {
-	case x == nil || y == nil:
-		return x == y
-	case x.typ == nil || y.typ == nil:
-		return x.typ == y.typ
-	}
-	// ∅ ⊂ x, y ⊂ 𝓤
-
-	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
-}
-
-// union returns the union x ∪ y: zero, one, or two non-nil terms.
-func (x *term) union(y *term) (_, _ *term) {
-	// easy cases
-	switch {
-	case x == nil && y == nil:
-		return nil, nil // ∅ ∪ ∅ == ∅
-	case x == nil:
-		return y, nil // ∅ ∪ y == y
-	case y == nil:
-		return x, nil // x ∪ ∅ == x
-	case x.typ == nil:
-		return x, nil // 𝓤 ∪ y == 𝓤
-	case y.typ == nil:
-		return y, nil // x ∪ 𝓤 == 𝓤
-	}
-	// ∅ ⊂ x, y ⊂ 𝓤
-
-	if x.disjoint(y) {
-		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
-	}
-	// x.typ == y.typ
-
-	// ~t ∪ ~t == ~t
-	// ~t ∪  T == ~t
-	//  T ∪ ~t == ~t
-	//  T ∪  T ==  T
-	if x.tilde || !y.tilde {
-		return x, nil
-	}
-	return y, nil
-}
-
-// intersect returns the intersection x ∩ y.
-func (x *term) intersect(y *term) *term {
-	// easy cases
-	switch {
-	case x == nil || y == nil:
-		return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅
-	case x.typ == nil:
-		return y // 𝓤 ∩ y == y
-	case y.typ == nil:
-		return x // x ∩ 𝓤 == x
-	}
-	// ∅ ⊂ x, y ⊂ 𝓤
-
-	if x.disjoint(y) {
-		return nil // x ∩ y == ∅ if x ∩ y == ∅
-	}
-	// x.typ == y.typ
-
-	// ~t ∩ ~t == ~t
-	// ~t ∩  T ==  T
-	//  T ∩ ~t ==  T
-	//  T ∩  T ==  T
-	if !x.tilde || y.tilde {
-		return x
-	}
-	return y
-}
-
-// includes reports whether t ∈ x.
-func (x *term) includes(t types.Type) bool {
-	// easy cases
-	switch {
-	case x == nil:
-		return false // t ∈ ∅ == false
-	case x.typ == nil:
-		return true // t ∈ 𝓤 == true
-	}
-	// ∅ ⊂ x ⊂ 𝓤
-
-	u := t
-	if x.tilde {
-		u = under(u)
-	}
-	return types.Identical(x.typ, u)
-}
-
-// subsetOf reports whether x ⊆ y.
-func (x *term) subsetOf(y *term) bool {
-	// easy cases
-	switch {
-	case x == nil:
-		return true // ∅ ⊆ y == true
-	case y == nil:
-		return false // x ⊆ ∅ == false since x != ∅
-	case y.typ == nil:
-		return true // x ⊆ 𝓤 == true
-	case x.typ == nil:
-		return false // 𝓤 ⊆ y == false since y != 𝓤
-	}
-	// ∅ ⊂ x, y ⊂ 𝓤
-
-	if x.disjoint(y) {
-		return false // x ⊆ y == false if x ∩ y == ∅
-	}
-	// x.typ == y.typ
-
-	// ~t ⊆ ~t == true
-	// ~t ⊆ T == false
-	//  T ⊆ ~t == true
-	//  T ⊆  T == true
-	return !x.tilde || y.tilde
-}
-
-// disjoint reports whether x ∩ y == ∅.
-// x.typ and y.typ must not be nil.
-func (x *term) disjoint(y *term) bool {
-	if debug && (x.typ == nil || y.typ == nil) {
-		panic("invalid argument(s)")
-	}
-	ux := x.typ
-	if y.tilde {
-		ux = under(ux)
-	}
-	uy := y.typ
-	if x.tilde {
-		uy = under(uy)
-	}
-	return !types.Identical(ux, uy)
-}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go
deleted file mode 100644
index 4957f021..00000000
--- a/vendor/golang.org/x/tools/internal/typesinternal/element.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typesinternal
-
-import (
-	"fmt"
-	"go/types"
-
-	"golang.org/x/tools/go/types/typeutil"
-)
-
-// ForEachElement calls f for type T and each type reachable from its
-// type through reflection. It does this by recursively stripping off
-// type constructors; in addition, for each named type N, the type *N
-// is added to the result as it may have additional methods.
-//
-// The caller must provide an initially empty set used to de-duplicate
-// identical types, potentially across multiple calls to ForEachElement.
-// (Its final value holds all the elements seen, matching the arguments
-// passed to f.)
-//
-// TODO(adonovan): share/harmonize with go/callgraph/rta.
-func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
-	var visit func(T types.Type, skip bool)
-	visit = func(T types.Type, skip bool) {
-		if !skip {
-			if seen, _ := rtypes.Set(T, true).(bool); seen {
-				return // de-dup
-			}
-
-			f(T) // notify caller of new element type
-		}
-
-		// Recursion over signatures of each method.
-		tmset := msets.MethodSet(T)
-		for i := 0; i < tmset.Len(); i++ {
-			sig := tmset.At(i).Type().(*types.Signature)
-			// It is tempting to call visit(sig, false)
-			// but, as noted in golang.org/cl/65450043,
-			// the Signature.Recv field is ignored by
-			// types.Identical and typeutil.Map, which
-			// is confusing at best.
-			//
-			// More importantly, the true signature rtype
-			// reachable from a method using reflection
-			// has no receiver but an extra ordinary parameter.
-			// For the Read method of io.Reader we want:
-			//   func(Reader, []byte) (int, error)
-			// but here sig is:
-			//   func([]byte) (int, error)
-			// with .Recv = Reader (though it is hard to
-			// notice because it doesn't affect Signature.String
-			// or types.Identical).
-			//
-			// TODO(adonovan): construct and visit the correct
-			// non-method signature with an extra parameter
-			// (though since unnamed func types have no methods
-			// there is essentially no actual demand for this).
-			//
-			// TODO(adonovan): document whether or not it is
-			// safe to skip non-exported methods (as RTA does).
-			visit(sig.Params(), true)  // skip the Tuple
-			visit(sig.Results(), true) // skip the Tuple
-		}
-
-		switch T := T.(type) {
-		case *types.Alias:
-			visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
-
-		case *types.Basic:
-			// nop
-
-		case *types.Interface:
-			// nop---handled by recursion over method set.
-
-		case *types.Pointer:
-			visit(T.Elem(), false)
-
-		case *types.Slice:
-			visit(T.Elem(), false)
-
-		case *types.Chan:
-			visit(T.Elem(), false)
-
-		case *types.Map:
-			visit(T.Key(), false)
-			visit(T.Elem(), false)
-
-		case *types.Signature:
-			if T.Recv() != nil {
-				panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
-			}
-			visit(T.Params(), true)  // skip the Tuple
-			visit(T.Results(), true) // skip the Tuple
-
-		case *types.Named:
-			// A pointer-to-named type can be derived from a named
-			// type via reflection.  It may have methods too.
-			visit(types.NewPointer(T), false)
-
-			// Consider 'type T struct{S}' where S has methods.
-			// Reflection provides no way to get from T to struct{S},
-			// only to S, so the method set of struct{S} is unwanted,
-			// so set 'skip' flag during recursion.
-			visit(T.Underlying(), true) // skip the unnamed type
-
-		case *types.Array:
-			visit(T.Elem(), false)
-
-		case *types.Struct:
-			for i, n := 0, T.NumFields(); i < n; i++ {
-				// TODO(adonovan): document whether or not
-				// it is safe to skip non-exported fields.
-				visit(T.Field(i).Type(), false)
-			}
-
-		case *types.Tuple:
-			for i, n := 0, T.Len(); i < n; i++ {
-				visit(T.At(i).Type(), false)
-			}
-
-		case *types.TypeParam, *types.Union:
-			// forEachReachable must not be called on parameterized types.
-			panic(T)
-
-		default:
-			panic(T)
-		}
-	}
-	visit(T, false)
-}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 131caab2..834e0538 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -838,7 +838,7 @@ const (
 	// InvalidCap occurs when an argument to the cap built-in function is not of
 	// supported type.
 	//
-	// See https://golang.org/ref/spec#Length_and_capacity for information on
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
 	// which underlying types are supported as arguments to cap and len.
 	//
 	// Example:
@@ -859,7 +859,7 @@ const (
 	// InvalidCopy occurs when the arguments are not of slice type or do not
 	// have compatible type.
 	//
-	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
+	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
 	// information on the type requirements for the copy built-in.
 	//
 	// Example:
@@ -897,7 +897,7 @@ const (
 	// InvalidLen occurs when an argument to the len built-in function is not of
 	// supported type.
 	//
-	// See https://golang.org/ref/spec#Length_and_capacity for information on
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
 	// which underlying types are supported as arguments to cap and len.
 	//
 	// Example:
@@ -914,7 +914,7 @@ const (
 
 	// InvalidMake occurs when make is called with an unsupported type argument.
 	//
-	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
+	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
 	// information on the types that may be created using make.
 	//
 	// Example:
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
deleted file mode 100644
index b64f714e..00000000
--- a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typesinternal
-
-import (
-	"go/ast"
-	"go/types"
-	"strconv"
-)
-
-// FileQualifier returns a [types.Qualifier] function that qualifies
-// imported symbols appropriately based on the import environment of a given
-// file.
-// If the same package is imported multiple times, the last appearance is
-// recorded.
-func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
-	// Construct mapping of import paths to their defined names.
-	// It is only necessary to look at renaming imports.
-	imports := make(map[string]string)
-	for _, imp := range f.Imports {
-		if imp.Name != nil && imp.Name.Name != "_" {
-			path, _ := strconv.Unquote(imp.Path.Value)
-			imports[path] = imp.Name.Name
-		}
-	}
-
-	// Define qualifier to replace full package paths with names of the imports.
-	return func(p *types.Package) string {
-		if p == nil || p == pkg {
-			return ""
-		}
-
-		if name, ok := imports[p.Path()]; ok {
-			if name == "." {
-				return ""
-			} else {
-				return name
-			}
-		}
-
-		// If there is no local renaming, fall back to the package name.
-		return p.Name()
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
index e54accc6..fea7c8b7 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -6,20 +6,20 @@ package typesinternal
 
 import (
 	"go/types"
+
+	"golang.org/x/tools/internal/aliases"
 )
 
 // ReceiverNamed returns the named type (if any) associated with the
 // type of recv, which may be of the form N or *N, or aliases thereof.
 // It also reports whether a Pointer was present.
-//
-// The named result may be nil in ill-typed code.
 func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
 	t := recv.Type()
-	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
+	if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
 		isPtr = true
 		t = ptr.Elem()
 	}
-	named, _ = types.Unalias(t).(*types.Named)
+	named, _ = aliases.Unalias(t).(*types.Named)
 	return
 }
 
@@ -36,7 +36,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
 // indirection from the type, regardless of named types (analogous to
 // a LOAD instruction).
 func Unpointer(t types.Type) types.Type {
-	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
+	if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
 		return ptr.Elem()
 	}
 	return t
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index a93d51f9..7c77c2fb 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -11,8 +11,6 @@ import (
 	"go/types"
 	"reflect"
 	"unsafe"
-
-	"golang.org/x/tools/internal/aliases"
 )
 
 func SetUsesCgo(conf *types.Config) bool {
@@ -50,73 +48,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
 	}
 	return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
 }
-
-// NameRelativeTo returns a types.Qualifier that qualifies members of
-// all packages other than pkg, using only the package name.
-// (By contrast, [types.RelativeTo] uses the complete package path,
-// which is often excessive.)
-//
-// If pkg is nil, it is equivalent to [*types.Package.Name].
-func NameRelativeTo(pkg *types.Package) types.Qualifier {
-	return func(other *types.Package) string {
-		if pkg != nil && pkg == other {
-			return "" // same package; unqualified
-		}
-		return other.Name()
-	}
-}
-
-// A NamedOrAlias is a [types.Type] that is named (as
-// defined by the spec) and capable of bearing type parameters: it
-// abstracts aliases ([types.Alias]) and defined types
-// ([types.Named]).
-//
-// Every type declared by an explicit "type" declaration is a
-// NamedOrAlias. (Built-in type symbols may additionally
-// have type [types.Basic], which is not a NamedOrAlias,
-// though the spec regards them as "named".)
-//
-// NamedOrAlias cannot expose the Origin method, because
-// [types.Alias.Origin] and [types.Named.Origin] have different
-// (covariant) result types; use [Origin] instead.
-type NamedOrAlias interface {
-	types.Type
-	Obj() *types.TypeName
-	// TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22.
-}
-
-// TypeParams is a light shim around t.TypeParams().
-// (go/types.Alias).TypeParams requires >= 1.23.
-func TypeParams(t NamedOrAlias) *types.TypeParamList {
-	switch t := t.(type) {
-	case *types.Alias:
-		return aliases.TypeParams(t)
-	case *types.Named:
-		return t.TypeParams()
-	}
-	return nil
-}
-
-// TypeArgs is a light shim around t.TypeArgs().
-// (go/types.Alias).TypeArgs requires >= 1.23.
-func TypeArgs(t NamedOrAlias) *types.TypeList {
-	switch t := t.(type) {
-	case *types.Alias:
-		return aliases.TypeArgs(t)
-	case *types.Named:
-		return t.TypeArgs()
-	}
-	return nil
-}
-
-// Origin returns the generic type of the Named or Alias type t if it
-// is instantiated, otherwise it returns t.
-func Origin(t NamedOrAlias) NamedOrAlias {
-	switch t := t.(type) {
-	case *types.Alias:
-		return aliases.Origin(t)
-	case *types.Named:
-		return t.Origin()
-	}
-	return t
-}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
deleted file mode 100644
index d272949c..00000000
--- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typesinternal
-
-import (
-	"fmt"
-	"go/ast"
-	"go/token"
-	"go/types"
-	"strings"
-)
-
-// ZeroString returns the string representation of the zero value for any type t.
-// The boolean result indicates whether the type is or contains an invalid type
-// or a non-basic (constraint) interface type.
-//
-// Even for invalid input types, ZeroString may return a partially correct
-// string representation. The caller should use the returned isValid boolean
-// to determine the validity of the expression.
-//
-// When assigning to a wider type (such as 'any'), it's the caller's
-// responsibility to handle any necessary type conversions.
-//
-// This string can be used on the right-hand side of an assignment where the
-// left-hand side has that explicit type.
-// References to named types are qualified by an appropriate (optional)
-// qualifier function.
-// Exception: This does not apply to tuples. Their string representation is
-// informational only and cannot be used in an assignment.
-//
-// See [ZeroExpr] for a variant that returns an [ast.Expr].
-func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
-	switch t := t.(type) {
-	case *types.Basic:
-		switch {
-		case t.Info()&types.IsBoolean != 0:
-			return "false", true
-		case t.Info()&types.IsNumeric != 0:
-			return "0", true
-		case t.Info()&types.IsString != 0:
-			return `""`, true
-		case t.Kind() == types.UnsafePointer:
-			fallthrough
-		case t.Kind() == types.UntypedNil:
-			return "nil", true
-		case t.Kind() == types.Invalid:
-			return "invalid", false
-		default:
-			panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
-		}
-
-	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
-		return "nil", true
-
-	case *types.Interface:
-		if !t.IsMethodSet() {
-			return "invalid", false
-		}
-		return "nil", true
-
-	case *types.Named:
-		switch under := t.Underlying().(type) {
-		case *types.Struct, *types.Array:
-			return types.TypeString(t, qual) + "{}", true
-		default:
-			return ZeroString(under, qual)
-		}
-
-	case *types.Alias:
-		switch t.Underlying().(type) {
-		case *types.Struct, *types.Array:
-			return types.TypeString(t, qual) + "{}", true
-		default:
-			// A type parameter can have alias but alias type's underlying type
-			// can never be a type parameter.
-			// Use types.Unalias to preserve the info of type parameter instead
-			// of call Underlying() going right through and get the underlying
-			// type of the type parameter which is always an interface.
-			return ZeroString(types.Unalias(t), qual)
-		}
-
-	case *types.Array, *types.Struct:
-		return types.TypeString(t, qual) + "{}", true
-
-	case *types.TypeParam:
-		// Assumes func new is not shadowed.
-		return "*new(" + types.TypeString(t, qual) + ")", true
-
-	case *types.Tuple:
-		// Tuples are not normal values.
-		// We are currently format as "(t[0], ..., t[n])". Could be something else.
-		isValid := true
-		components := make([]string, t.Len())
-		for i := 0; i < t.Len(); i++ {
-			comp, ok := ZeroString(t.At(i).Type(), qual)
-
-			components[i] = comp
-			isValid = isValid && ok
-		}
-		return "(" + strings.Join(components, ", ") + ")", isValid
-
-	case *types.Union:
-		// Variables of these types cannot be created, so it makes
-		// no sense to ask for their zero value.
-		panic(fmt.Sprintf("invalid type for a variable: %v", t))
-
-	default:
-		panic(t) // unreachable.
-	}
-}
-
-// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
-// The boolean result indicates whether the type is or contains an invalid type
-// or a non-basic (constraint) interface type.
-//
-// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
-// representation. The caller should use the returned isValid boolean to determine
-// the validity of the expression.
-//
-// This function is designed for types suitable for variables and should not be
-// used with Tuple or Union types.References to named types are qualified by an
-// appropriate (optional) qualifier function.
-//
-// See [ZeroString] for a variant that returns a string.
-func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
-	switch t := t.(type) {
-	case *types.Basic:
-		switch {
-		case t.Info()&types.IsBoolean != 0:
-			return &ast.Ident{Name: "false"}, true
-		case t.Info()&types.IsNumeric != 0:
-			return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
-		case t.Info()&types.IsString != 0:
-			return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
-		case t.Kind() == types.UnsafePointer:
-			fallthrough
-		case t.Kind() == types.UntypedNil:
-			return ast.NewIdent("nil"), true
-		case t.Kind() == types.Invalid:
-			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
-		default:
-			panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
-		}
-
-	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
-		return ast.NewIdent("nil"), true
-
-	case *types.Interface:
-		if !t.IsMethodSet() {
-			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
-		}
-		return ast.NewIdent("nil"), true
-
-	case *types.Named:
-		switch under := t.Underlying().(type) {
-		case *types.Struct, *types.Array:
-			return &ast.CompositeLit{
-				Type: TypeExpr(t, qual),
-			}, true
-		default:
-			return ZeroExpr(under, qual)
-		}
-
-	case *types.Alias:
-		switch t.Underlying().(type) {
-		case *types.Struct, *types.Array:
-			return &ast.CompositeLit{
-				Type: TypeExpr(t, qual),
-			}, true
-		default:
-			return ZeroExpr(types.Unalias(t), qual)
-		}
-
-	case *types.Array, *types.Struct:
-		return &ast.CompositeLit{
-			Type: TypeExpr(t, qual),
-		}, true
-
-	case *types.TypeParam:
-		return &ast.StarExpr{ // *new(T)
-			X: &ast.CallExpr{
-				// Assumes func new is not shadowed.
-				Fun: ast.NewIdent("new"),
-				Args: []ast.Expr{
-					ast.NewIdent(t.Obj().Name()),
-				},
-			},
-		}, true
-
-	case *types.Tuple:
-		// Unlike ZeroString, there is no ast.Expr can express tuple by
-		// "(t[0], ..., t[n])".
-		panic(fmt.Sprintf("invalid type for a variable: %v", t))
-
-	case *types.Union:
-		// Variables of these types cannot be created, so it makes
-		// no sense to ask for their zero value.
-		panic(fmt.Sprintf("invalid type for a variable: %v", t))
-
-	default:
-		panic(t) // unreachable.
-	}
-}
-
-// IsZeroExpr uses simple syntactic heuristics to report whether expr
-// is a obvious zero value, such as 0, "", nil, or false.
-// It cannot do better without type information.
-func IsZeroExpr(expr ast.Expr) bool {
-	switch e := expr.(type) {
-	case *ast.BasicLit:
-		return e.Value == "0" || e.Value == `""`
-	case *ast.Ident:
-		return e.Name == "nil" || e.Name == "false"
-	default:
-		return false
-	}
-}
-
-// TypeExpr returns syntax for the specified type. References to named types
-// are qualified by an appropriate (optional) qualifier function.
-// It may panic for types such as Tuple or Union.
-func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
-	switch t := t.(type) {
-	case *types.Basic:
-		switch t.Kind() {
-		case types.UnsafePointer:
-			return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
-		default:
-			return ast.NewIdent(t.Name())
-		}
-
-	case *types.Pointer:
-		return &ast.UnaryExpr{
-			Op: token.MUL,
-			X:  TypeExpr(t.Elem(), qual),
-		}
-
-	case *types.Array:
-		return &ast.ArrayType{
-			Len: &ast.BasicLit{
-				Kind:  token.INT,
-				Value: fmt.Sprintf("%d", t.Len()),
-			},
-			Elt: TypeExpr(t.Elem(), qual),
-		}
-
-	case *types.Slice:
-		return &ast.ArrayType{
-			Elt: TypeExpr(t.Elem(), qual),
-		}
-
-	case *types.Map:
-		return &ast.MapType{
-			Key:   TypeExpr(t.Key(), qual),
-			Value: TypeExpr(t.Elem(), qual),
-		}
-
-	case *types.Chan:
-		dir := ast.ChanDir(t.Dir())
-		if t.Dir() == types.SendRecv {
-			dir = ast.SEND | ast.RECV
-		}
-		return &ast.ChanType{
-			Dir:   dir,
-			Value: TypeExpr(t.Elem(), qual),
-		}
-
-	case *types.Signature:
-		var params []*ast.Field
-		for i := 0; i < t.Params().Len(); i++ {
-			params = append(params, &ast.Field{
-				Type: TypeExpr(t.Params().At(i).Type(), qual),
-				Names: []*ast.Ident{
-					{
-						Name: t.Params().At(i).Name(),
-					},
-				},
-			})
-		}
-		if t.Variadic() {
-			last := params[len(params)-1]
-			last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
-		}
-		var returns []*ast.Field
-		for i := 0; i < t.Results().Len(); i++ {
-			returns = append(returns, &ast.Field{
-				Type: TypeExpr(t.Results().At(i).Type(), qual),
-			})
-		}
-		return &ast.FuncType{
-			Params: &ast.FieldList{
-				List: params,
-			},
-			Results: &ast.FieldList{
-				List: returns,
-			},
-		}
-
-	case *types.TypeParam:
-		pkgName := qual(t.Obj().Pkg())
-		if pkgName == "" || t.Obj().Pkg() == nil {
-			return ast.NewIdent(t.Obj().Name())
-		}
-		return &ast.SelectorExpr{
-			X:   ast.NewIdent(pkgName),
-			Sel: ast.NewIdent(t.Obj().Name()),
-		}
-
-	// types.TypeParam also implements interface NamedOrAlias. To differentiate,
-	// case TypeParam need to be present before case NamedOrAlias.
-	// TODO(hxjiang): remove this comment once TypeArgs() is added to interface
-	// NamedOrAlias.
-	case NamedOrAlias:
-		var expr ast.Expr = ast.NewIdent(t.Obj().Name())
-		if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
-			expr = &ast.SelectorExpr{
-				X:   ast.NewIdent(pkgName),
-				Sel: expr.(*ast.Ident),
-			}
-		}
-
-		// TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
-		// typesinternal.NamedOrAlias.
-		if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
-			if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
-				var indices []ast.Expr
-				for i := range typeArgs.Len() {
-					indices = append(indices, TypeExpr(typeArgs.At(i), qual))
-				}
-				expr = &ast.IndexListExpr{
-					X:       expr,
-					Indices: indices,
-				}
-			}
-		}
-
-		return expr
-
-	case *types.Struct:
-		return ast.NewIdent(t.String())
-
-	case *types.Interface:
-		return ast.NewIdent(t.String())
-
-	case *types.Union:
-		if t.Len() == 0 {
-			panic("Union type should have at least one term")
-		}
-		// Same as go/ast, the return expression will put last term in the
-		// Y field at topmost level of BinaryExpr.
-		// For union of type "float32 | float64 | int64", the structure looks
-		// similar to:
-		// {
-		// 	X: {
-		// 		X: float32,
-		// 		Op: |
-		// 		Y: float64,
-		// 	}
-		// 	Op: |,
-		// 	Y: int64,
-		// }
-		var union ast.Expr
-		for i := range t.Len() {
-			term := t.Term(i)
-			termExpr := TypeExpr(term.Type(), qual)
-			if term.Tilde() {
-				termExpr = &ast.UnaryExpr{
-					Op: token.TILDE,
-					X:  termExpr,
-				}
-			}
-			if i == 0 {
-				union = termExpr
-			} else {
-				union = &ast.BinaryExpr{
-					X:  union,
-					Op: token.OR,
-					Y:  termExpr,
-				}
-			}
-		}
-		return union
-
-	case *types.Tuple:
-		panic("invalid input type types.Tuple")
-
-	default:
-		panic("unreachable")
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
new file mode 100644
index 00000000..377bf7a5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// toolchain is maximum version (<1.22) that the go toolchain used
+// to build the current tool is known to support.
+//
+// When a tool is built with >=1.22, the value of toolchain is unused.
+//
+// x/tools does not support building with go <1.18. So we take this
+// as the minimum possible maximum.
+var toolchain string = Go1_18
diff --git a/vendor/google.golang.org/protobuf/internal/genid/name.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
similarity index 50%
rename from vendor/google.golang.org/protobuf/internal/genid/name.go
rename to vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
index 224f3393..f65beed9 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/name.go
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package genid
+//go:build go1.19
+// +build go1.19
 
-const (
-	NoUnkeyedLiteral_goname  = "noUnkeyedLiteral"
-	NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral"
+package versions
 
-	BuilderSuffix_goname = "_builder"
-)
+func init() {
+	if Compare(toolchain, Go1_19) < 0 {
+		toolchain = Go1_19
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
new file mode 100644
index 00000000..1a9efa12
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package versions
+
+func init() {
+	if Compare(toolchain, Go1_20) < 0 {
+		toolchain = Go1_20
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
new file mode 100644
index 00000000..b7ef216d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+// +build go1.21
+
+package versions
+
+func init() {
+	if Compare(toolchain, Go1_21) < 0 {
+		toolchain = Go1_21
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
index 0fc10ce4..562eef21 100644
--- a/vendor/golang.org/x/tools/internal/versions/types.go
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -5,29 +5,15 @@
 package versions
 
 import (
-	"go/ast"
 	"go/types"
 )
 
-// FileVersion returns a file's Go version.
-// The reported version is an unknown Future version if a
-// version cannot be determined.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In tools built with Go >= 1.22, the Go version of a file
-	// follow a cascades of sources:
-	// 1) types.Info.FileVersion, which follows the cascade:
-	//   1.a) file version (ast.File.GoVersion),
-	//   1.b) the package version (types.Config.GoVersion), or
-	// 2) is some unknown Future version.
-	//
-	// File versions require a valid package version to be provided to types
-	// in Config.GoVersion. Config.GoVersion is either from the package's module
-	// or the toolchain (go run). This value should be provided by go/packages
-	// or unitchecker.Config.GoVersion.
-	if v := info.FileVersions[file]; IsValid(v) {
-		return v
+// GoVersion returns the Go version of the type package.
+// It returns zero if no version can be determined.
+func GoVersion(pkg *types.Package) string {
+	// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
+	if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
+		return pkg.GoVersion()
 	}
-	// Note: we could instead return runtime.Version() [if valid].
-	// This would act as a max version on what a tool can support.
-	return Future
+	return ""
 }
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
new file mode 100644
index 00000000..b4345d33
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersion returns a language version (<=1.21) derived from runtime.Version()
+// or an unknown future version.
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
+	// available. We use a go version derived from the toolchain used to
+	// compile the tool by default.
+	// This will be <= go1.21. We take this as the maximum version that
+	// this tool can support.
+	//
+	// There are no features currently in x/tools that need to tell fine grained
+	// differences for versions <1.22.
+	return toolchain
+}
+
+// InitFileVersions is a noop when compiled with this Go version.
+func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
new file mode 100644
index 00000000..aac5db62
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go
@@ -0,0 +1,41 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+// +build go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In tools built with Go >= 1.22, the Go version of a file
+	// follow a cascades of sources:
+	// 1) types.Info.FileVersion, which follows the cascade:
+	//   1.a) file version (ast.File.GoVersion),
+	//   1.b) the package version (types.Config.GoVersion), or
+	// 2) is some unknown Future version.
+	//
+	// File versions require a valid package version to be provided to types
+	// in Config.GoVersion. Config.GoVersion is either from the package's module
+	// or the toolchain (go run). This value should be provided by go/packages
+	// or unitchecker.Config.GoVersion.
+	if v := info.FileVersions[file]; IsValid(v) {
+		return v
+	}
+	// Note: we could instead return runtime.Version() [if valid].
+	// This would act as a max version on what a tool can support.
+	return Future
+}
+
+// InitFileVersions initializes info to record Go versions for Go files.
+func InitFileVersions(info *types.Info) {
+	info.FileVersions = make(map[*ast.File]string)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 024ffebd..8401be8c 100644
--- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
 // dependency on the descriptor proto package).
 package descopts
 
-import "google.golang.org/protobuf/reflect/protoreflect"
+import pref "google.golang.org/protobuf/reflect/protoreflect"
 
 // These variables are set by the init function in descriptor.pb.go via logic
 // in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@ import "google.golang.org/protobuf/reflect/protoreflect"
 //
 // Each variable is populated with a nil pointer to the options struct.
 var (
-	File           protoreflect.ProtoMessage
-	Enum           protoreflect.ProtoMessage
-	EnumValue      protoreflect.ProtoMessage
-	Message        protoreflect.ProtoMessage
-	Field          protoreflect.ProtoMessage
-	Oneof          protoreflect.ProtoMessage
-	ExtensionRange protoreflect.ProtoMessage
-	Service        protoreflect.ProtoMessage
-	Method         protoreflect.ProtoMessage
+	File           pref.ProtoMessage
+	Enum           pref.ProtoMessage
+	EnumValue      pref.ProtoMessage
+	Message        pref.ProtoMessage
+	Field          pref.ProtoMessage
+	Oneof          pref.ProtoMessage
+	ExtensionRange pref.ProtoMessage
+	Service        pref.ProtoMessage
+	Method         pref.ProtoMessage
 )
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 5a57ef6f3c80a4a930b7bdb33b039ea94d1eb5f2..ff6a38360add36f53d48bb0863b701696e0d7b2d 100644
GIT binary patch
literal 93
zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j
Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H

literal 138
zcmd;*muO*EV!mX@pe4$|D8MAaq`<7fXux#Ijt$6VkYMDJmv|0Wz$CyZ!KlClRKN&Q
wzyMY7f?Y`%s2WL*1th1%ddZFnY{E-+C6MVz3P75fB^b3pHY+@1*LcYe04AXnGXMYp

diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index bf1aba0e..029a6a12 100644
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -5,14 +5,9 @@
 // Package editionssupport defines constants for editions that are supported.
 package editionssupport
 
-import "google.golang.org/protobuf/types/descriptorpb"
+import descriptorpb "google.golang.org/protobuf/types/descriptorpb"
 
 const (
 	Minimum = descriptorpb.Edition_EDITION_PROTO2
 	Maximum = descriptorpb.Edition_EDITION_2023
-
-	// MaximumKnown is the maximum edition that is known to Go Protobuf, but not
-	// declared as supported. In other words: end users cannot use it, but
-	// testprotos inside Go Protobuf can.
-	MaximumKnown = descriptorpb.Edition_EDITION_2024
 )
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
new file mode 100644
index 00000000..fbcd3492
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
@@ -0,0 +1,40 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.13
+// +build !go1.13
+
+package errors
+
+import "reflect"
+
+// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
+func Is(err, target error) bool {
+	if target == nil {
+		return err == target
+	}
+
+	isComparable := reflect.TypeOf(target).Comparable()
+	for {
+		if isComparable && err == target {
+			return true
+		}
+		if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
+			return true
+		}
+		if err = unwrap(err); err == nil {
+			return false
+		}
+	}
+}
+
+func unwrap(err error) error {
+	u, ok := err.(interface {
+		Unwrap() error
+	})
+	if !ok {
+		return nil
+	}
+	return u.Unwrap()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
new file mode 100644
index 00000000..5e72f1cd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.13
+// +build go1.13
+
+package errors
+
+import "errors"
+
+// Is is errors.Is.
+func Is(err, target error) bool { return errors.Is(err, target) }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 378b826f..df53ff40 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -32,7 +32,6 @@ const (
 	EditionProto2      Edition = 998
 	EditionProto3      Edition = 999
 	Edition2023        Edition = 1000
-	Edition2024        Edition = 1001
 	EditionUnsupported Edition = 100000
 )
 
@@ -78,48 +77,31 @@ type (
 		Locations SourceLocations
 	}
 
-	// EditionFeatures is a frequently-instantiated struct, so please take care
-	// to minimize padding when adding new fields to this struct (add them in
-	// the right place/order).
 	EditionFeatures struct {
-		// StripEnumPrefix determines if the plugin generates enum value
-		// constants as-is, with their prefix stripped, or both variants.
-		StripEnumPrefix int
-
 		// IsFieldPresence is true if field_presence is EXPLICIT
 		// https://protobuf.dev/editions/features/#field_presence
 		IsFieldPresence bool
-
 		// IsFieldPresence is true if field_presence is LEGACY_REQUIRED
 		// https://protobuf.dev/editions/features/#field_presence
 		IsLegacyRequired bool
-
 		// IsOpenEnum is true if enum_type is OPEN
 		// https://protobuf.dev/editions/features/#enum_type
 		IsOpenEnum bool
-
 		// IsPacked is true if repeated_field_encoding is PACKED
 		// https://protobuf.dev/editions/features/#repeated_field_encoding
 		IsPacked bool
-
 		// IsUTF8Validated is true if utf_validation is VERIFY
 		// https://protobuf.dev/editions/features/#utf8_validation
 		IsUTF8Validated bool
-
 		// IsDelimitedEncoded is true if message_encoding is DELIMITED
 		// https://protobuf.dev/editions/features/#message_encoding
 		IsDelimitedEncoded bool
-
 		// IsJSONCompliant is true if json_format is ALLOW
 		// https://protobuf.dev/editions/features/#json_format
 		IsJSONCompliant bool
-
 		// GenerateLegacyUnmarshalJSON determines if the plugin generates the
 		// UnmarshalJSON([]byte) error method for enums.
 		GenerateLegacyUnmarshalJSON bool
-		// APILevel controls which API (Open, Hybrid or Opaque) should be used
-		// for generated code (.pb.go files).
-		APILevel int
 	}
 )
 
@@ -276,7 +258,6 @@ type (
 		StringName       stringName
 		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
 		IsWeak           bool // promoted from google.protobuf.FieldOptions
-		IsLazy           bool // promoted from google.protobuf.FieldOptions
 		Default          defaultValue
 		ContainingOneof  protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
 		Enum             protoreflect.EnumDescriptor
@@ -370,7 +351,6 @@ func (fd *Field) IsPacked() bool {
 }
 func (fd *Field) IsExtension() bool { return false }
 func (fd *Field) IsWeak() bool      { return fd.L1.IsWeak }
-func (fd *Field) IsLazy() bool      { return fd.L1.IsLazy }
 func (fd *Field) IsList() bool      { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
 func (fd *Field) IsMap() bool       { return fd.Message() != nil && fd.Message().IsMapEntry() }
 func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -445,7 +425,6 @@ type (
 		Extendee        protoreflect.MessageDescriptor
 		Cardinality     protoreflect.Cardinality
 		Kind            protoreflect.Kind
-		IsLazy          bool
 		EditionFeatures EditionFeatures
 	}
 	ExtensionL2 struct {
@@ -486,7 +465,6 @@ func (xd *Extension) IsPacked() bool {
 }
 func (xd *Extension) IsExtension() bool                      { return true }
 func (xd *Extension) IsWeak() bool                           { return false }
-func (xd *Extension) IsLazy() bool                           { return xd.L1.IsLazy }
 func (xd *Extension) IsList() bool                           { return xd.Cardinality() == protoreflect.Repeated }
 func (xd *Extension) IsMap() bool                            { return false }
 func (xd *Extension) MapKey() protoreflect.FieldDescriptor   { return nil }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index d2f54949..8a57d60b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -495,8 +495,6 @@ func (xd *Extension) unmarshalOptions(b []byte) {
 			switch num {
 			case genid.FieldOptions_Packed_field_number:
 				xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
-			case genid.FieldOptions_Lazy_field_number:
-				xd.L1.IsLazy = protowire.DecodeBool(v)
 			}
 		case protowire.BytesType:
 			v, m := protowire.ConsumeBytes(b)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index 67a51b32..e56c91a8 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -504,8 +504,6 @@ func (fd *Field) unmarshalOptions(b []byte) {
 				fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
 			case genid.FieldOptions_Weak_field_number:
 				fd.L1.IsWeak = protowire.DecodeBool(v)
-			case genid.FieldOptions_Lazy_field_number:
-				fd.L1.IsLazy = protowire.DecodeBool(v)
 			case FieldOptions_EnforceUTF8:
 				fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
 			}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 10132c9b..11f5f356 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -32,14 +32,6 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeVarint(b)
 			b = b[m:]
 			parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
-		case genid.GoFeatures_ApiLevel_field_number:
-			v, m := protowire.ConsumeVarint(b)
-			b = b[m:]
-			parent.APILevel = int(v)
-		case genid.GoFeatures_StripEnumPrefix_field_number:
-			v, m := protowire.ConsumeVarint(b)
-			b = b[m:]
-			parent.StripEnumPrefix = int(v)
 		default:
 			panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
 		}
@@ -76,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeBytes(b)
 			b = b[m:]
 			switch num {
-			case genid.FeatureSet_Go_ext_number:
+			case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
 				parent = unmarshalGoFeature(v, parent)
 			}
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
index d9b9d916..45ccd012 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
 // and the well-known types.
 package genid
 
-import "google.golang.org/protobuf/reflect/protoreflect"
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 
 const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index f5ee7f5c..9a652a2b 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -12,59 +12,20 @@ import (
 
 const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
 
-// Names for pb.GoFeatures.
+// Names for google.protobuf.GoFeatures.
 const (
 	GoFeatures_message_name     protoreflect.Name     = "GoFeatures"
-	GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
+	GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
 )
 
-// Field names for pb.GoFeatures.
+// Field names for google.protobuf.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
-	GoFeatures_ApiLevel_field_name                protoreflect.Name = "api_level"
-	GoFeatures_StripEnumPrefix_field_name         protoreflect.Name = "strip_enum_prefix"
 
-	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
-	GoFeatures_ApiLevel_field_fullname                protoreflect.FullName = "pb.GoFeatures.api_level"
-	GoFeatures_StripEnumPrefix_field_fullname         protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
+	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
 )
 
-// Field numbers for pb.GoFeatures.
+// Field numbers for google.protobuf.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
-	GoFeatures_ApiLevel_field_number                protoreflect.FieldNumber = 2
-	GoFeatures_StripEnumPrefix_field_number         protoreflect.FieldNumber = 3
-)
-
-// Full and short names for pb.GoFeatures.APILevel.
-const (
-	GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel"
-	GoFeatures_APILevel_enum_name     = "APILevel"
-)
-
-// Enum values for pb.GoFeatures.APILevel.
-const (
-	GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0
-	GoFeatures_API_OPEN_enum_value              = 1
-	GoFeatures_API_HYBRID_enum_value            = 2
-	GoFeatures_API_OPAQUE_enum_value            = 3
-)
-
-// Full and short names for pb.GoFeatures.StripEnumPrefix.
-const (
-	GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
-	GoFeatures_StripEnumPrefix_enum_name     = "StripEnumPrefix"
-)
-
-// Enum values for pb.GoFeatures.StripEnumPrefix.
-const (
-	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value   = 0
-	GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value          = 1
-	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2
-	GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value         = 3
-)
-
-// Extension numbers
-const (
-	FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
 )
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index bef5a25f..8f9ea02f 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
 
 package genid
 
-import "google.golang.org/protobuf/reflect/protoreflect"
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 
 // Generic field names and numbers for synthetic map entry messages.
 const (
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 9404270d..429384b8 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
 
 package genid
 
-import "google.golang.org/protobuf/reflect/protoreflect"
+import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 
 // Generic field name and number for messages in wrappers.proto.
 const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
deleted file mode 100644
index 6075d6f6..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"strconv"
-	"sync/atomic"
-	"unsafe"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-func (Export) UnmarshalField(msg any, fieldNum int32) {
-	UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
-}
-
-// Present checks the presence set for a certain field number (zero
-// based, ordered by appearance in original proto file). part is
-// a pointer to the correct element in the bitmask array, num is the
-// field number unaltered.  Example (field number 70 -> part =
-// &m.XXX_presence[1], num = 70)
-func (Export) Present(part *uint32, num uint32) bool {
-	// This hook will read an unprotected shadow presence set if
-	// we're unning under the race detector
-	raceDetectHookPresent(part, num)
-	return atomic.LoadUint32(part)&(1<<(num%32)) > 0
-}
-
-// SetPresent adds a field to the presence set. part is a pointer to
-// the relevant element in the array and num is the field number
-// unaltered.  size is the number of fields in the protocol
-// buffer.
-func (Export) SetPresent(part *uint32, num uint32, size uint32) {
-	// This hook will mutate an unprotected shadow presence set if
-	// we're running under the race detector
-	raceDetectHookSetPresent(part, num, presenceSize(size))
-	for {
-		old := atomic.LoadUint32(part)
-		if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
-			return
-		}
-	}
-}
-
-// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
-// It is meant for use by builder methods, where the message is known not
-// to be accessible yet by other goroutines.
-func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
-	// This hook will mutate an unprotected shadow presence set if
-	// we're running under the race detector
-	raceDetectHookSetPresent(part, num, presenceSize(size))
-	*part |= 1 << (num % 32)
-}
-
-// ClearPresence removes a field from the presence set. part is a
-// pointer to the relevant element in the presence array and num is
-// the field number unaltered.
-func (Export) ClearPresent(part *uint32, num uint32) {
-	// This hook will mutate an unprotected shadow presence set if
-	// we're running under the race detector
-	raceDetectHookClearPresent(part, num)
-	for {
-		old := atomic.LoadUint32(part)
-		if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
-			return
-		}
-	}
-}
-
-// interfaceToPointer takes a pointer to an empty interface whose value is a
-// pointer type, and converts it into a "pointer" that points to the same
-// target
-func interfaceToPointer(i *any) pointer {
-	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-func (p pointer) atomicGetPointer() pointer {
-	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
-}
-
-func (p pointer) atomicSetPointer(q pointer) {
-	atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
-}
-
-// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
-// pointer) and returns true if the pointed-to pointer is nil (using an
-// atomic load).  This function is inlineable and, on x86, just becomes a
-// simple load and compare.
-func (Export) AtomicCheckPointerIsNil(ptr any) bool {
-	return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
-}
-
-// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
-// second is a pointer) and atomically sets the second pointer into location
-// referenced by first pointer.  Unfortunately, atomicSetPointer() does not inline
-// (even on x86), so this does not become a simple store on x86.
-func (Export) AtomicSetPointer(dstPtr, valPtr any) {
-	interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
-}
-
-// AtomicLoadPointer loads the pointer at the location pointed at by src,
-// and stores that pointer value into the location pointed at by dst.
-func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
-	*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
-}
-
-// AtomicInitializePointer makes ptr and dst point to the same value.
-//
-// If *ptr is a nil pointer, it sets *ptr = *dst.
-//
-// If *ptr is a non-nil pointer, it sets *dst = *ptr.
-func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
-	if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
-		*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
-	}
-}
-
-// MessageFieldStringOf returns the field formatted as a string,
-// either as the field name if resolvable otherwise as a decimal string.
-func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
-	fd := md.Fields().ByNumber(n)
-	if fd != nil {
-		return string(fd.Name())
-	}
-	return strconv.Itoa(int(n))
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
deleted file mode 100644
index ea276547..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !race
-
-package impl
-
-// There is no additional data as we're not running under race detector.
-type RaceDetectHookData struct{}
-
-// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
-func (presence) raceDetectHookPresent(num uint32)                       {}
-func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
-func (presence) raceDetectHookClearPresent(num uint32)                  {}
-func (presence) raceDetectHookAllocAndCopy(src presence)                {}
-
-// raceDetectHookPresent is called by the generated file interface
-// (*proto.internalFuncs) Present to optionally read an unprotected
-// shadow bitmap when race detection is enabled. In regular code it is
-// a noop.
-func raceDetectHookPresent(field *uint32, num uint32) {}
-
-// raceDetectHookSetPresent is called by the generated file interface
-// (*proto.internalFuncs) SetPresent to optionally write an unprotected
-// shadow bitmap when race detection is enabled. In regular code it is
-// a noop.
-func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
-
-// raceDetectHookClearPresent is called by the generated file interface
-// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
-// shadow bitmap when race detection is enabled. In regular code it is
-// a noop.
-func raceDetectHookClearPresent(field *uint32, num uint32) {}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
deleted file mode 100644
index e9a27583..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build race
-
-package impl
-
-// When running under race detector, we add a presence map of bytes, that we can access
-// in the hook functions so that we trigger the race detection whenever we have concurrent
-// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent
-// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations.
-type RaceDetectHookData struct {
-	shadowPresence *[]byte
-}
-
-// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
-// using non-atomic operations.
-func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
-	sp := make([]byte, size)
-	atomicStoreShadowPresence(&data.shadowPresence, &sp)
-}
-
-func (p presence) raceDetectHookPresent(num uint32) {
-	data := p.toRaceDetectData()
-	if data == nil {
-		return
-	}
-	sp := atomicLoadShadowPresence(&data.shadowPresence)
-	if sp != nil {
-		_ = (*sp)[num]
-	}
-}
-
-func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
-	data := p.toRaceDetectData()
-	if data == nil {
-		return
-	}
-	sp := atomicLoadShadowPresence(&data.shadowPresence)
-	if sp == nil {
-		data.raceDetectHookAlloc(size)
-		sp = atomicLoadShadowPresence(&data.shadowPresence)
-	}
-	(*sp)[num] = 1
-}
-
-func (p presence) raceDetectHookClearPresent(num uint32) {
-	data := p.toRaceDetectData()
-	if data == nil {
-		return
-	}
-	sp := atomicLoadShadowPresence(&data.shadowPresence)
-	if sp != nil {
-		(*sp)[num] = 0
-
-	}
-}
-
-// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies
-// shadowPresence bytes from src to lazy.
-func (p presence) raceDetectHookAllocAndCopy(q presence) {
-	sData := q.toRaceDetectData()
-	dData := p.toRaceDetectData()
-	if sData == nil {
-		return
-	}
-	srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
-	if srcSp == nil {
-		atomicStoreShadowPresence(&dData.shadowPresence, nil)
-		return
-	}
-	n := len(*srcSp)
-	dSlice := make([]byte, n)
-	atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
-	for i := 0; i < n; i++ {
-		dSlice[i] = (*srcSp)[i]
-	}
-}
-
-// raceDetectHookPresent is called by the generated file interface
-// (*proto.internalFuncs) Present to optionally read an unprotected
-// shadow bitmap when race detection is enabled. In regular code it is
-// a noop.
-func raceDetectHookPresent(field *uint32, num uint32) {
-	data := findPointerToRaceDetectData(field, num)
-	if data == nil {
-		return
-	}
-	sp := atomicLoadShadowPresence(&data.shadowPresence)
-	if sp != nil {
-		_ = (*sp)[num]
-	}
-}
-
-// raceDetectHookSetPresent is called by the generated file interface
-// (*proto.internalFuncs) SetPresent to optionally write an unprotected
-// shadow bitmap when race detection is enabled. In regular code it is
-// a noop.
-func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
-	data := findPointerToRaceDetectData(field, num)
-	if data == nil {
-		return
-	}
-	sp := atomicLoadShadowPresence(&data.shadowPresence)
-	if sp == nil {
-		data.raceDetectHookAlloc(size)
-		sp = atomicLoadShadowPresence(&data.shadowPresence)
-	}
-	(*sp)[num] = 1
-}
-
-// raceDetectHookClearPresent is called by the generated file interface
-// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
-// shadow bitmap when race detection is enabled. In regular code it is
-// a noop.
-func raceDetectHookClearPresent(field *uint32, num uint32) {
-	data := findPointerToRaceDetectData(field, num)
-	if data == nil {
-		return
-	}
-	sp := atomicLoadShadowPresence(&data.shadowPresence)
-	if sp != nil {
-		(*sp)[num] = 0
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index fe2c719c..f29e6a8f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -35,12 +35,6 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
 		}
 		return nil
 	}
-
-	var presence presence
-	if mi.presenceOffset.IsValid() {
-		presence = p.Apply(mi.presenceOffset).PresenceInfo()
-	}
-
 	if mi.extensionOffset.IsValid() {
 		e := p.Apply(mi.extensionOffset).Extensions()
 		if err := mi.isInitExtensions(e); err != nil {
@@ -51,33 +45,6 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
 		if !f.isRequired && f.funcs.isInit == nil {
 			continue
 		}
-
-		if f.presenceIndex != noPresence {
-			if !presence.Present(f.presenceIndex) {
-				if f.isRequired {
-					return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
-				}
-				continue
-			}
-			if f.funcs.isInit != nil {
-				f.mi.init()
-				if f.mi.needsInitCheck {
-					if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
-						lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
-						if !lazy.AllowedPartial() {
-							// Nothing to see here, it was checked on unmarshal
-							continue
-						}
-						mi.lazyUnmarshal(p, f.num)
-					}
-					if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
-						return err
-					}
-				}
-			}
-			continue
-		}
-
 		fptr := p.Apply(f.offset)
 		if f.isPointer && fptr.Elem().IsNil() {
 			if f.isRequired {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 0d5b546e..4bb0a7a2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,6 +67,7 @@ type lazyExtensionValue struct {
 	xi         *extensionFieldInfo
 	value      protoreflect.Value
 	b          []byte
+	fn         func() protoreflect.Value
 }
 
 type ExtensionField struct {
@@ -157,9 +158,10 @@ func (f *ExtensionField) lazyInit() {
 		}
 		f.lazy.value = val
 	} else {
-		panic("No support for lazy fns for ExtensionField")
+		f.lazy.value = f.lazy.fn()
 	}
 	f.lazy.xi = nil
+	f.lazy.fn = nil
 	f.lazy.b = nil
 	atomic.StoreUint32(&f.lazy.atomicOnce, 1)
 }
@@ -172,6 +174,13 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
 	f.lazy = nil
 }
 
+// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
+// This must not be called concurrently.
+func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
+	f.typ = t
+	f.lazy = &lazyExtensionValue{fn: fn}
+}
+
 // Value returns the value of the extension field.
 // This may be called concurrently.
 func (f *ExtensionField) Value() protoreflect.Value {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 7c1f66c8..78ee47e4 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -65,9 +65,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
 			if err != nil {
 				return out, err
 			}
-			if cf.funcs.isInit == nil {
-				out.initialized = true
-			}
 			vi.Set(vw)
 			return out, nil
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
deleted file mode 100644
index 76818ea2..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"fmt"
-	"reflect"
-
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/internal/errors"
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
-	mi := getMessageInfo(ft)
-	if mi == nil {
-		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
-	}
-	switch fd.Kind() {
-	case protoreflect.MessageKind:
-		return mi, pointerCoderFuncs{
-			size:      sizeOpaqueMessage,
-			marshal:   appendOpaqueMessage,
-			unmarshal: consumeOpaqueMessage,
-			isInit:    isInitOpaqueMessage,
-			merge:     mergeOpaqueMessage,
-		}
-	case protoreflect.GroupKind:
-		return mi, pointerCoderFuncs{
-			size:      sizeOpaqueGroup,
-			marshal:   appendOpaqueGroup,
-			unmarshal: consumeOpaqueGroup,
-			isInit:    isInitOpaqueMessage,
-			merge:     mergeOpaqueMessage,
-		}
-	}
-	panic("unexpected field kind")
-}
-
-func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
-}
-
-func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	mp := p.AtomicGetPointer()
-	calculatedSize := f.mi.sizePointer(mp, opts)
-	b = protowire.AppendVarint(b, f.wiretag)
-	b = protowire.AppendVarint(b, uint64(calculatedSize))
-	before := len(b)
-	b, err := f.mi.marshalAppendPointer(b, mp, opts)
-	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
-		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
-	}
-	return b, err
-}
-
-func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.BytesType {
-		return out, errUnknown
-	}
-	v, n := protowire.ConsumeBytes(b)
-	if n < 0 {
-		return out, errDecode
-	}
-	mp := p.AtomicGetPointer()
-	if mp.IsNil() {
-		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
-	}
-	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
-	if err != nil {
-		return out, err
-	}
-	out.n = n
-	out.initialized = o.initialized
-	return out, nil
-}
-
-func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
-	mp := p.AtomicGetPointer()
-	if mp.IsNil() {
-		return nil
-	}
-	return f.mi.checkInitializedPointer(mp)
-}
-
-func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
-	dstmp := dst.AtomicGetPointer()
-	if dstmp.IsNil() {
-		dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
-	}
-	f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
-}
-
-func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
-}
-
-func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	b = protowire.AppendVarint(b, f.wiretag) // start group
-	b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
-	b = protowire.AppendVarint(b, f.wiretag+1) // end group
-	return b, err
-}
-
-func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.StartGroupType {
-		return out, errUnknown
-	}
-	mp := p.AtomicGetPointer()
-	if mp.IsNil() {
-		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
-	}
-	o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
-	return o, e
-}
-
-func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
-	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
-		panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
-	}
-	mt := ft.Elem().Elem() // *[]*T -> *T
-	mi := getMessageInfo(mt)
-	if mi == nil {
-		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
-	}
-	switch fd.Kind() {
-	case protoreflect.MessageKind:
-		return mi, pointerCoderFuncs{
-			size:      sizeOpaqueMessageSlice,
-			marshal:   appendOpaqueMessageSlice,
-			unmarshal: consumeOpaqueMessageSlice,
-			isInit:    isInitOpaqueMessageSlice,
-			merge:     mergeOpaqueMessageSlice,
-		}
-	case protoreflect.GroupKind:
-		return mi, pointerCoderFuncs{
-			size:      sizeOpaqueGroupSlice,
-			marshal:   appendOpaqueGroupSlice,
-			unmarshal: consumeOpaqueGroupSlice,
-			isInit:    isInitOpaqueMessageSlice,
-			merge:     mergeOpaqueMessageSlice,
-		}
-	}
-	panic("unexpected field kind")
-}
-
-func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	s := p.AtomicGetPointer().PointerSlice()
-	n := 0
-	for _, v := range s {
-		n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
-	}
-	return n
-}
-
-func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	s := p.AtomicGetPointer().PointerSlice()
-	var err error
-	for _, v := range s {
-		b = protowire.AppendVarint(b, f.wiretag)
-		siz := f.mi.sizePointer(v, opts)
-		b = protowire.AppendVarint(b, uint64(siz))
-		before := len(b)
-		b, err = f.mi.marshalAppendPointer(b, v, opts)
-		if err != nil {
-			return b, err
-		}
-		if measuredSize := len(b) - before; siz != measuredSize {
-			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
-		}
-	}
-	return b, nil
-}
-
-func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.BytesType {
-		return out, errUnknown
-	}
-	v, n := protowire.ConsumeBytes(b)
-	if n < 0 {
-		return out, errDecode
-	}
-	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
-	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
-	if err != nil {
-		return out, err
-	}
-	sp := p.AtomicGetPointer()
-	if sp.IsNil() {
-		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
-	}
-	sp.AppendPointerSlice(mp)
-	out.n = n
-	out.initialized = o.initialized
-	return out, nil
-}
-
-func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
-	sp := p.AtomicGetPointer()
-	if sp.IsNil() {
-		return nil
-	}
-	s := sp.PointerSlice()
-	for _, v := range s {
-		if err := f.mi.checkInitializedPointer(v); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
-	ds := dst.AtomicGetPointer()
-	if ds.IsNil() {
-		ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
-	}
-	for _, sp := range src.AtomicGetPointer().PointerSlice() {
-		dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
-		f.mi.mergePointer(dm, sp, opts)
-		ds.AppendPointerSlice(dm)
-	}
-}
-
-func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	s := p.AtomicGetPointer().PointerSlice()
-	n := 0
-	for _, v := range s {
-		n += 2*f.tagsize + f.mi.sizePointer(v, opts)
-	}
-	return n
-}
-
-func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	s := p.AtomicGetPointer().PointerSlice()
-	var err error
-	for _, v := range s {
-		b = protowire.AppendVarint(b, f.wiretag) // start group
-		b, err = f.mi.marshalAppendPointer(b, v, opts)
-		if err != nil {
-			return b, err
-		}
-		b = protowire.AppendVarint(b, f.wiretag+1) // end group
-	}
-	return b, nil
-}
-
-func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.StartGroupType {
-		return out, errUnknown
-	}
-	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
-	out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
-	if err != nil {
-		return out, err
-	}
-	sp := p.AtomicGetPointer()
-	if sp.IsNil() {
-		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
-	}
-	sp.AppendPointerSlice(mp)
-	return out, err
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 2f7b363e..6b2fdbb7 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -32,10 +32,6 @@ type coderMessageInfo struct {
 	needsInitCheck     bool
 	isMessageSet       bool
 	numRequiredFields  uint8
-
-	lazyOffset     offset
-	presenceOffset offset
-	presenceSize   presenceSize
 }
 
 type coderFieldInfo struct {
@@ -49,19 +45,12 @@ type coderFieldInfo struct {
 	tagsize    int                      // size of the varint-encoded tag
 	isPointer  bool                     // true if IsNil may be called on the struct field
 	isRequired bool                     // true if field is required
-
-	isLazy        bool
-	presenceIndex uint32
 }
 
-const noPresence = 0xffffffff
-
 func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 	mi.sizecacheOffset = invalidOffset
 	mi.unknownOffset = invalidOffset
 	mi.extensionOffset = invalidOffset
-	mi.lazyOffset = invalidOffset
-	mi.presenceOffset = si.presenceOffset
 
 	if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
 		mi.sizecacheOffset = si.sizecacheOffset
@@ -138,8 +127,6 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 			validation: newFieldValidationInfo(mi, si, fd, ft),
 			isPointer:  fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
 			isRequired: fd.Cardinality() == protoreflect.Required,
-
-			presenceIndex: noPresence,
 		}
 		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
 		mi.coderFields[cf.num] = cf
@@ -202,9 +189,6 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 	if mi.methods.Merge == nil {
 		mi.methods.Merge = mi.merge
 	}
-	if mi.methods.Equal == nil {
-		mi.methods.Equal = equal
-	}
 }
 
 // getUnknownBytes returns a *[]byte for the unknown fields.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
deleted file mode 100644
index 88c16ae5..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"fmt"
-	"reflect"
-	"sort"
-
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/internal/encoding/messageset"
-	"google.golang.org/protobuf/internal/order"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	piface "google.golang.org/protobuf/runtime/protoiface"
-)
-
-func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
-	mi.sizecacheOffset = si.sizecacheOffset
-	mi.unknownOffset = si.unknownOffset
-	mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
-	mi.extensionOffset = si.extensionOffset
-	mi.lazyOffset = si.lazyOffset
-	mi.presenceOffset = si.presenceOffset
-
-	mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
-	fields := mi.Desc.Fields()
-	for i := 0; i < fields.Len(); i++ {
-		fd := fields.Get(i)
-
-		fs := si.fieldsByNumber[fd.Number()]
-		if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
-			fs = si.oneofsByName[fd.ContainingOneof().Name()]
-		}
-		ft := fs.Type
-		var wiretag uint64
-		if !fd.IsPacked() {
-			wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
-		} else {
-			wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
-		}
-		var fieldOffset offset
-		var funcs pointerCoderFuncs
-		var childMessage *MessageInfo
-		switch {
-		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-			fieldOffset = offsetOf(fs, mi.Exporter)
-		case fd.IsWeak():
-			fieldOffset = si.weakOffset
-			funcs = makeWeakMessageFieldCoder(fd)
-		case fd.Message() != nil && !fd.IsMap():
-			fieldOffset = offsetOf(fs, mi.Exporter)
-			if fd.IsList() {
-				childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
-			} else {
-				childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
-			}
-		default:
-			fieldOffset = offsetOf(fs, mi.Exporter)
-			childMessage, funcs = fieldCoder(fd, ft)
-		}
-		cf := &coderFieldInfo{
-			num:        fd.Number(),
-			offset:     fieldOffset,
-			wiretag:    wiretag,
-			ft:         ft,
-			tagsize:    protowire.SizeVarint(wiretag),
-			funcs:      funcs,
-			mi:         childMessage,
-			validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
-			isPointer: (fd.Cardinality() == protoreflect.Repeated ||
-				fd.Kind() == protoreflect.MessageKind ||
-				fd.Kind() == protoreflect.GroupKind),
-			isRequired:    fd.Cardinality() == protoreflect.Required,
-			presenceIndex: noPresence,
-		}
-
-		// TODO: Use presence for all fields.
-		//
-		// In some cases, such as maps, presence means only "might be set" rather
-		// than "is definitely set", but every field should have a presence bit to
-		// permit us to skip over definitely-unset fields at marshal time.
-
-		var hasPresence bool
-		hasPresence, cf.isLazy = usePresenceForField(si, fd)
-
-		if hasPresence {
-			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
-		}
-
-		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
-		mi.coderFields[cf.num] = cf
-	}
-	for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
-		if od := oneofs.Get(i); !od.IsSynthetic() {
-			mi.initOneofFieldCoders(od, si.structInfo)
-		}
-	}
-	if messageset.IsMessageSet(mi.Desc) {
-		if !mi.extensionOffset.IsValid() {
-			panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
-		}
-		if !mi.unknownOffset.IsValid() {
-			panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
-		}
-		mi.isMessageSet = true
-	}
-	sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
-		return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
-	})
-
-	var maxDense protoreflect.FieldNumber
-	for _, cf := range mi.orderedCoderFields {
-		if cf.num >= 16 && cf.num >= 2*maxDense {
-			break
-		}
-		maxDense = cf.num
-	}
-	mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
-	for _, cf := range mi.orderedCoderFields {
-		if int(cf.num) > len(mi.denseCoderFields) {
-			break
-		}
-		mi.denseCoderFields[cf.num] = cf
-	}
-
-	// To preserve compatibility with historic wire output, marshal oneofs last.
-	if mi.Desc.Oneofs().Len() > 0 {
-		sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
-			fi := fields.ByNumber(mi.orderedCoderFields[i].num)
-			fj := fields.ByNumber(mi.orderedCoderFields[j].num)
-			return order.LegacyFieldOrder(fi, fj)
-		})
-	}
-
-	mi.needsInitCheck = needsInitCheck(mi.Desc)
-	if mi.methods.Marshal == nil && mi.methods.Size == nil {
-		mi.methods.Flags |= piface.SupportMarshalDeterministic
-		mi.methods.Marshal = mi.marshal
-		mi.methods.Size = mi.size
-	}
-	if mi.methods.Unmarshal == nil {
-		mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
-		mi.methods.Unmarshal = mi.unmarshal
-	}
-	if mi.methods.CheckInitialized == nil {
-		mi.methods.CheckInitialized = mi.checkInitialized
-	}
-	if mi.methods.Merge == nil {
-		mi.methods.Merge = mi.merge
-	}
-	if mi.methods.Equal == nil {
-		mi.methods.Equal = equal
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
new file mode 100644
index 00000000..145c577b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
@@ -0,0 +1,210 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build purego || appengine
+// +build purego appengine
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/encoding/protowire"
+)
+
+func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
+	v := p.v.Elem().Int()
+	return f.tagsize + protowire.SizeVarint(uint64(v))
+}
+
+func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	v := p.v.Elem().Int()
+	b = protowire.AppendVarint(b, f.wiretag)
+	b = protowire.AppendVarint(b, uint64(v))
+	return b, nil
+}
+
+func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.VarintType {
+		return out, errUnknown
+	}
+	v, n := protowire.ConsumeVarint(b)
+	if n < 0 {
+		return out, errDecode
+	}
+	p.v.Elem().SetInt(int64(v))
+	out.n = n
+	return out, nil
+}
+
+func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+	dst.v.Elem().Set(src.v.Elem())
+}
+
+var coderEnum = pointerCoderFuncs{
+	size:      sizeEnum,
+	marshal:   appendEnum,
+	unmarshal: consumeEnum,
+	merge:     mergeEnum,
+}
+
+func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	if p.v.Elem().Int() == 0 {
+		return 0
+	}
+	return sizeEnum(p, f, opts)
+}
+
+func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	if p.v.Elem().Int() == 0 {
+		return b, nil
+	}
+	return appendEnum(b, p, f, opts)
+}
+
+func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+	if src.v.Elem().Int() != 0 {
+		dst.v.Elem().Set(src.v.Elem())
+	}
+}
+
+var coderEnumNoZero = pointerCoderFuncs{
+	size:      sizeEnumNoZero,
+	marshal:   appendEnumNoZero,
+	unmarshal: consumeEnum,
+	merge:     mergeEnumNoZero,
+}
+
+func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	return sizeEnum(pointer{p.v.Elem()}, f, opts)
+}
+
+func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	return appendEnum(b, pointer{p.v.Elem()}, f, opts)
+}
+
+func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.VarintType {
+		return out, errUnknown
+	}
+	if p.v.Elem().IsNil() {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
+	}
+	return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
+}
+
+func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+	if !src.v.Elem().IsNil() {
+		v := reflect.New(dst.v.Type().Elem().Elem())
+		v.Elem().Set(src.v.Elem().Elem())
+		dst.v.Elem().Set(v)
+	}
+}
+
+var coderEnumPtr = pointerCoderFuncs{
+	size:      sizeEnumPtr,
+	marshal:   appendEnumPtr,
+	unmarshal: consumeEnumPtr,
+	merge:     mergeEnumPtr,
+}
+
+func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	s := p.v.Elem()
+	for i, llen := 0, s.Len(); i < llen; i++ {
+		size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
+	}
+	return size
+}
+
+func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	s := p.v.Elem()
+	for i, llen := 0, s.Len(); i < llen; i++ {
+		b = protowire.AppendVarint(b, f.wiretag)
+		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
+	}
+	return b, nil
+}
+
+func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	s := p.v.Elem()
+	if wtyp == protowire.BytesType {
+		b, n := protowire.ConsumeBytes(b)
+		if n < 0 {
+			return out, errDecode
+		}
+		for len(b) > 0 {
+			v, n := protowire.ConsumeVarint(b)
+			if n < 0 {
+				return out, errDecode
+			}
+			rv := reflect.New(s.Type().Elem()).Elem()
+			rv.SetInt(int64(v))
+			s.Set(reflect.Append(s, rv))
+			b = b[n:]
+		}
+		out.n = n
+		return out, nil
+	}
+	if wtyp != protowire.VarintType {
+		return out, errUnknown
+	}
+	v, n := protowire.ConsumeVarint(b)
+	if n < 0 {
+		return out, errDecode
+	}
+	rv := reflect.New(s.Type().Elem()).Elem()
+	rv.SetInt(int64(v))
+	s.Set(reflect.Append(s, rv))
+	out.n = n
+	return out, nil
+}
+
+func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
+	dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
+}
+
+var coderEnumSlice = pointerCoderFuncs{
+	size:      sizeEnumSlice,
+	marshal:   appendEnumSlice,
+	unmarshal: consumeEnumSlice,
+	merge:     mergeEnumSlice,
+}
+
+func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	s := p.v.Elem()
+	llen := s.Len()
+	if llen == 0 {
+		return 0
+	}
+	n := 0
+	for i := 0; i < llen; i++ {
+		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
+	}
+	return f.tagsize + protowire.SizeBytes(n)
+}
+
+func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	s := p.v.Elem()
+	llen := s.Len()
+	if llen == 0 {
+		return b, nil
+	}
+	b = protowire.AppendVarint(b, f.wiretag)
+	n := 0
+	for i := 0; i < llen; i++ {
+		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
+	}
+	b = protowire.AppendVarint(b, uint64(n))
+	for i := 0; i < llen; i++ {
+		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
+	}
+	return b, nil
+}
+
+var coderEnumPackedSlice = pointerCoderFuncs{
+	size:      sizeEnumPackedSlice,
+	marshal:   appendEnumPackedSlice,
+	unmarshal: consumeEnumSlice,
+	merge:     mergeEnumSlice,
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 077712c2..757642e2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !purego && !appengine
+// +build !purego,!appengine
+
 package impl
 
 // When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index f72ddd88..e06ece55 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
 	return protoreflect.ValueOfString(v.Convert(stringType).String())
 }
 func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
-	// protoreflect.Value.String never panics, so we go through an interface
+	// pref.Value.String never panics, so we go through an interface
 	// conversion here to check the type.
 	s := v.Interface().(string)
 	if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index e0dd21fa..cda0520c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -34,8 +34,6 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
 		AllowPartial:   true,
 		DiscardUnknown: o.DiscardUnknown(),
 		Resolver:       o.resolver,
-
-		NoLazyDecoding: o.NoLazyDecoding(),
 	}
 }
 
@@ -43,26 +41,13 @@ func (o unmarshalOptions) DiscardUnknown() bool {
 	return o.flags&protoiface.UnmarshalDiscardUnknown != 0
 }
 
-func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
-func (o unmarshalOptions) Validated() bool   { return o.flags&protoiface.UnmarshalValidated != 0 }
-func (o unmarshalOptions) NoLazyDecoding() bool {
-	return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
-}
-
-func (o unmarshalOptions) CanBeLazy() bool {
-	if o.resolver != protoregistry.GlobalTypes {
-		return false
-	}
-	// We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set
-	return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
+func (o unmarshalOptions) IsDefault() bool {
+	return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
 }
 
 var lazyUnmarshalOptions = unmarshalOptions{
 	resolver: protoregistry.GlobalTypes,
-
-	flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
-
-	depth: protowire.DefaultRecursionLimit,
+	depth:    protowire.DefaultRecursionLimit,
 }
 
 type unmarshalOutput struct {
@@ -109,30 +94,9 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 	if flags.ProtoLegacy && mi.isMessageSet {
 		return unmarshalMessageSet(mi, b, p, opts)
 	}
-
-	lazyDecoding := LazyEnabled() // default
-	if opts.NoLazyDecoding() {
-		lazyDecoding = false // explicitly disabled
-	}
-	if mi.lazyOffset.IsValid() && lazyDecoding {
-		return mi.unmarshalPointerLazy(b, p, groupTag, opts)
-	}
-	return mi.unmarshalPointerEager(b, p, groupTag, opts)
-}
-
-// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy.
-// The corresponding function for Lazy is in google_lazy.go.
-func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
-
 	initialized := true
 	var requiredMask uint64
 	var exts *map[int32]ExtensionField
-
-	var presence presence
-	if mi.presenceOffset.IsValid() {
-		presence = p.Apply(mi.presenceOffset).PresenceInfo()
-	}
-
 	start := len(b)
 	for len(b) > 0 {
 		// Parse the tag (field number and wire type).
@@ -190,11 +154,6 @@ func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag proto
 			if f.funcs.isInit != nil && !o.initialized {
 				initialized = false
 			}
-
-			if f.presenceIndex != noPresence {
-				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
-			}
-
 		default:
 			// Possible extension.
 			if exts == nil && mi.extensionOffset.IsValid() {
@@ -263,7 +222,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
 		return out, errUnknown
 	}
 	if flags.LazyUnmarshalExtensions {
-		if opts.CanBeLazy() && x.canLazy(xt) {
+		if opts.IsDefault() && x.canLazy(xt) {
 			out, valid := skipExtension(b, xi, num, wtyp, opts)
 			switch valid {
 			case ValidationValid:
@@ -311,13 +270,6 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp
 		if n < 0 {
 			return out, ValidationUnknown
 		}
-
-		if opts.Validated() {
-			out.initialized = true
-			out.n = n
-			return out, ValidationValid
-		}
-
 		out, st := xi.validation.mi.validate(v, 0, opts)
 		out.n = n
 		return out, st
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index b2e21229..febd2122 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,8 +10,7 @@ import (
 	"sync/atomic"
 
 	"google.golang.org/protobuf/internal/flags"
-	"google.golang.org/protobuf/internal/protolazy"
-	"google.golang.org/protobuf/proto"
+	proto "google.golang.org/protobuf/proto"
 	piface "google.golang.org/protobuf/runtime/protoiface"
 )
 
@@ -72,39 +71,11 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 		e := p.Apply(mi.extensionOffset).Extensions()
 		size += mi.sizeExtensions(e, opts)
 	}
-
-	var lazy **protolazy.XXX_lazyUnmarshalInfo
-	var presence presence
-	if mi.presenceOffset.IsValid() {
-		presence = p.Apply(mi.presenceOffset).PresenceInfo()
-		if mi.lazyOffset.IsValid() {
-			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
-		}
-	}
-
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.size == nil {
 			continue
 		}
 		fptr := p.Apply(f.offset)
-
-		if f.presenceIndex != noPresence {
-			if !presence.Present(f.presenceIndex) {
-				continue
-			}
-
-			if f.isLazy && fptr.AtomicGetPointer().IsNil() {
-				if lazyFields(opts) {
-					size += (*lazy).SizeField(uint32(f.num))
-					continue
-				} else {
-					mi.lazyUnmarshal(p, f.num)
-				}
-			}
-			size += f.funcs.size(fptr, f, opts)
-			continue
-		}
-
 		if f.isPointer && fptr.Elem().IsNil() {
 			continue
 		}
@@ -163,52 +134,11 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
 			return b, err
 		}
 	}
-
-	var lazy **protolazy.XXX_lazyUnmarshalInfo
-	var presence presence
-	if mi.presenceOffset.IsValid() {
-		presence = p.Apply(mi.presenceOffset).PresenceInfo()
-		if mi.lazyOffset.IsValid() {
-			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
-		}
-	}
-
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.marshal == nil {
 			continue
 		}
 		fptr := p.Apply(f.offset)
-
-		if f.presenceIndex != noPresence {
-			if !presence.Present(f.presenceIndex) {
-				continue
-			}
-			if f.isLazy {
-				// Be careful, this field needs to be read atomically, like for a get
-				if f.isPointer && fptr.AtomicGetPointer().IsNil() {
-					if lazyFields(opts) {
-						b, _ = (*lazy).AppendField(b, uint32(f.num))
-						continue
-					} else {
-						mi.lazyUnmarshal(p, f.num)
-					}
-				}
-
-				b, err = f.funcs.marshal(b, fptr, f, opts)
-				if err != nil {
-					return b, err
-				}
-				continue
-			} else if f.isPointer && fptr.Elem().IsNil() {
-				continue
-			}
-			b, err = f.funcs.marshal(b, fptr, f, opts)
-			if err != nil {
-				return b, err
-			}
-			continue
-		}
-
 		if f.isPointer && fptr.Elem().IsNil() {
 			continue
 		}
@@ -233,14 +163,6 @@ func fullyLazyExtensions(opts marshalOptions) bool {
 	return opts.flags&piface.MarshalDeterministic == 0
 }
 
-// lazyFields returns true if we should attempt to keep fields lazy over size and marshal.
-func lazyFields(opts marshalOptions) bool {
-	// When deterministic marshaling is requested, force an unmarshal for lazy
-	// fields to produce a deterministic result, instead of passing through
-	// bytes lazily that may or may not match what Go Protobuf would produce.
-	return opts.flags&piface.MarshalDeterministic == 0
-}
-
 func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
 	if ext == nil {
 		return 0
diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
deleted file mode 100644
index 9f6c32a7..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/equal.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"bytes"
-
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/runtime/protoiface"
-)
-
-func equal(in protoiface.EqualInput) protoiface.EqualOutput {
-	return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
-}
-
-// equalMessage is a fast-path variant of protoreflect.equalMessage.
-// It takes advantage of the internal messageState type to avoid
-// unnecessary allocations, type assertions.
-func equalMessage(mx, my protoreflect.Message) bool {
-	if mx == nil || my == nil {
-		return mx == my
-	}
-	if mx.Descriptor() != my.Descriptor() {
-		return false
-	}
-
-	msx, ok := mx.(*messageState)
-	if !ok {
-		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
-	}
-	msy, ok := my.(*messageState)
-	if !ok {
-		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
-	}
-
-	mi := msx.messageInfo()
-	miy := msy.messageInfo()
-	if mi != miy {
-		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
-	}
-	mi.init()
-	// Compares regular fields
-	// Modified Message.Range code that compares two messages of the same type
-	// while going over the fields.
-	for _, ri := range mi.rangeInfos {
-		var fd protoreflect.FieldDescriptor
-		var vx, vy protoreflect.Value
-
-		switch ri := ri.(type) {
-		case *fieldInfo:
-			hx := ri.has(msx.pointer())
-			hy := ri.has(msy.pointer())
-			if hx != hy {
-				return false
-			}
-			if !hx {
-				continue
-			}
-			fd = ri.fieldDesc
-			vx = ri.get(msx.pointer())
-			vy = ri.get(msy.pointer())
-		case *oneofInfo:
-			fnx := ri.which(msx.pointer())
-			fny := ri.which(msy.pointer())
-			if fnx != fny {
-				return false
-			}
-			if fnx <= 0 {
-				continue
-			}
-			fi := mi.fields[fnx]
-			fd = fi.fieldDesc
-			vx = fi.get(msx.pointer())
-			vy = fi.get(msy.pointer())
-		}
-
-		if !equalValue(fd, vx, vy) {
-			return false
-		}
-	}
-
-	// Compare extensions.
-	// This is more complicated because mx or my could have empty/nil extension maps,
-	// however some populated extension map values are equal to nil extension maps.
-	emx := mi.extensionMap(msx.pointer())
-	emy := mi.extensionMap(msy.pointer())
-	if emx != nil {
-		for k, x := range *emx {
-			xd := x.Type().TypeDescriptor()
-			xv := x.Value()
-			var y ExtensionField
-			ok := false
-			if emy != nil {
-				y, ok = (*emy)[k]
-			}
-			// We need to treat empty lists as equal to nil values
-			if emy == nil || !ok {
-				if xd.IsList() && xv.List().Len() == 0 {
-					continue
-				}
-				return false
-			}
-
-			if !equalValue(xd, xv, y.Value()) {
-				return false
-			}
-		}
-	}
-	if emy != nil {
-		// emy may have extensions emx does not have, need to check them as well
-		for k, y := range *emy {
-			if emx != nil {
-				// emx has the field, so we already checked it
-				if _, ok := (*emx)[k]; ok {
-					continue
-				}
-			}
-			// Empty lists are equal to nil
-			if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
-				continue
-			}
-
-			// Cant be equal if the extension is populated
-			return false
-		}
-	}
-
-	return equalUnknown(mx.GetUnknown(), my.GetUnknown())
-}
-
-func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
-	// slow path
-	if fd.Kind() != protoreflect.MessageKind {
-		return vx.Equal(vy)
-	}
-
-	// fast path special cases
-	if fd.IsMap() {
-		if fd.MapValue().Kind() == protoreflect.MessageKind {
-			return equalMessageMap(vx.Map(), vy.Map())
-		}
-		return vx.Equal(vy)
-	}
-
-	if fd.IsList() {
-		return equalMessageList(vx.List(), vy.List())
-	}
-
-	return equalMessage(vx.Message(), vy.Message())
-}
-
-// Mostly copied from protoreflect.equalMap.
-// This variant only works for messages as map types.
-// All other map types should be handled via Value.Equal.
-func equalMessageMap(mx, my protoreflect.Map) bool {
-	if mx.Len() != my.Len() {
-		return false
-	}
-	equal := true
-	mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
-		if !my.Has(k) {
-			equal = false
-			return false
-		}
-		vy := my.Get(k)
-		equal = equalMessage(vx.Message(), vy.Message())
-		return equal
-	})
-	return equal
-}
-
-// Mostly copied from protoreflect.equalList.
-// The only change is the usage of equalImpl instead of protoreflect.equalValue.
-func equalMessageList(lx, ly protoreflect.List) bool {
-	if lx.Len() != ly.Len() {
-		return false
-	}
-	for i := 0; i < lx.Len(); i++ {
-		// We only operate on messages here since equalImpl will not call us in any other case.
-		if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
-			return false
-		}
-	}
-	return true
-}
-
-// equalUnknown compares unknown fields by direct comparison on the raw bytes
-// of each individual field number.
-// Copied from protoreflect.equalUnknown.
-func equalUnknown(x, y protoreflect.RawFields) bool {
-	if len(x) != len(y) {
-		return false
-	}
-	if bytes.Equal([]byte(x), []byte(y)) {
-		return true
-	}
-
-	mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
-	my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
-	for len(x) > 0 {
-		fnum, _, n := protowire.ConsumeField(x)
-		mx[fnum] = append(mx[fnum], x[:n]...)
-		x = x[n:]
-	}
-	for len(y) > 0 {
-		fnum, _, n := protowire.ConsumeField(y)
-		my[fnum] = append(my[fnum], y[:n]...)
-		y = y[n:]
-	}
-	if len(mx) != len(my) {
-		return false
-	}
-
-	for k, v1 := range mx {
-		if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
-			return false
-		}
-	}
-
-	return true
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
deleted file mode 100644
index e8fb6c35..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/lazy.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"fmt"
-	"math/bits"
-	"os"
-	"reflect"
-	"sort"
-	"sync/atomic"
-
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/internal/errors"
-	"google.golang.org/protobuf/internal/protolazy"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	preg "google.golang.org/protobuf/reflect/protoregistry"
-	piface "google.golang.org/protobuf/runtime/protoiface"
-)
-
-var enableLazy int32 = func() int32 {
-	if os.Getenv("GOPROTODEBUG") == "nolazy" {
-		return 0
-	}
-	return 1
-}()
-
-// EnableLazyUnmarshal enables lazy unmarshaling.
-func EnableLazyUnmarshal(enable bool) {
-	if enable {
-		atomic.StoreInt32(&enableLazy, 1)
-		return
-	}
-	atomic.StoreInt32(&enableLazy, 0)
-}
-
-// LazyEnabled reports whether lazy unmarshalling is currently enabled.
-func LazyEnabled() bool {
-	return atomic.LoadInt32(&enableLazy) != 0
-}
-
-// UnmarshalField unmarshals a field in a message.
-func UnmarshalField(m interface{}, num protowire.Number) {
-	switch m := m.(type) {
-	case *messageState:
-		m.messageInfo().lazyUnmarshal(m.pointer(), num)
-	case *messageReflectWrapper:
-		m.messageInfo().lazyUnmarshal(m.pointer(), num)
-	default:
-		panic(fmt.Sprintf("unsupported wrapper type %T", m))
-	}
-}
-
-func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
-	var f *coderFieldInfo
-	if int(num) < len(mi.denseCoderFields) {
-		f = mi.denseCoderFields[num]
-	} else {
-		f = mi.coderFields[num]
-	}
-	if f == nil {
-		panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
-	}
-	lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
-	start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
-	if !found && multipleEntries == nil {
-		panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
-	}
-	// The actual pointer in the message can not be set until the whole struct is filled in, otherwise we will have races.
-	// Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil.
-	fp := pointerOfValue(reflect.New(f.ft))
-	if multipleEntries != nil {
-		for _, entry := range multipleEntries {
-			mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
-		}
-	} else {
-		mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
-	}
-	p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
-}
-
-func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
-	opts := lazyUnmarshalOptions
-	opts.flags |= flags
-	for len(b) > 0 {
-		// Parse the tag (field number and wire type).
-		var tag uint64
-		if b[0] < 0x80 {
-			tag = uint64(b[0])
-			b = b[1:]
-		} else if len(b) >= 2 && b[1] < 128 {
-			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
-			b = b[2:]
-		} else {
-			var n int
-			tag, n = protowire.ConsumeVarint(b)
-			if n < 0 {
-				return errors.New("invalid wire data")
-			}
-			b = b[n:]
-		}
-		var num protowire.Number
-		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
-			return errors.New("invalid wire data")
-		} else {
-			num = protowire.Number(n)
-		}
-		wtyp := protowire.Type(tag & 7)
-		if num == f.num {
-			o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
-			if err == nil {
-				b = b[o.n:]
-				continue
-			}
-			if err != errUnknown {
-				return err
-			}
-		}
-		n := protowire.ConsumeFieldValue(num, wtyp, b)
-		if n < 0 {
-			return errors.New("invalid wire data")
-		}
-		b = b[n:]
-	}
-	return nil
-}
-
-func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
-	fmi := f.validation.mi
-	if fmi == nil {
-		fd := mi.Desc.Fields().ByNumber(f.num)
-		if fd == nil || !fd.IsWeak() {
-			return out, ValidationUnknown
-		}
-		messageName := fd.Message().FullName()
-		messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
-		if err != nil {
-			return out, ValidationUnknown
-		}
-		var ok bool
-		fmi, ok = messageType.(*MessageInfo)
-		if !ok {
-			return out, ValidationUnknown
-		}
-	}
-	fmi.init()
-	switch f.validation.typ {
-	case validationTypeMessage:
-		if wtyp != protowire.BytesType {
-			return out, ValidationWrongWireType
-		}
-		v, n := protowire.ConsumeBytes(b)
-		if n < 0 {
-			return out, ValidationInvalid
-		}
-		out, st := fmi.validate(v, 0, opts)
-		out.n = n
-		return out, st
-	case validationTypeGroup:
-		if wtyp != protowire.StartGroupType {
-			return out, ValidationWrongWireType
-		}
-		out, st := fmi.validate(b, f.num, opts)
-		return out, st
-	default:
-		return out, ValidationUnknown
-	}
-}
-
-// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
-// specifically handles lazy unmarshalling.  it expects lazyOffset and
-// presenceOffset to both be valid.
-func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	initialized := true
-	var requiredMask uint64
-	var lazy **protolazy.XXX_lazyUnmarshalInfo
-	var presence presence
-	var lazyIndex []protolazy.IndexEntry
-	var lastNum protowire.Number
-	outOfOrder := false
-	lazyDecode := false
-	presence = p.Apply(mi.presenceOffset).PresenceInfo()
-	lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
-	if !presence.AnyPresent(mi.presenceSize) {
-		if opts.CanBeLazy() {
-			// If the message contains existing data, we need to merge into it.
-			// Lazy unmarshaling doesn't merge, so only enable it when the
-			// message is empty (has no presence bitmap).
-			lazyDecode = true
-			if *lazy == nil {
-				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
-			}
-			(*lazy).SetUnmarshalFlags(opts.flags)
-			if !opts.AliasBuffer() {
-				// Make a copy of the buffer for lazy unmarshaling.
-				// Set the AliasBuffer flag so recursive unmarshal
-				// operations reuse the copy.
-				b = append([]byte{}, b...)
-				opts.flags |= piface.UnmarshalAliasBuffer
-			}
-			(*lazy).SetBuffer(b)
-		}
-	}
-	// Track special handling of lazy fields.
-	//
-	// In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
-	// In the event that validation for a field fails, this map tracks handling of the field.
-	type lazyAction uint8
-	const (
-		lazyValidateOnly   lazyAction = iota // validate the field only
-		lazyUnmarshalNow                     // eagerly unmarshal the field
-		lazyUnmarshalLater                   // unmarshal the field after the message is fully processed
-	)
-	var lazyFields map[*coderFieldInfo]lazyAction
-	var exts *map[int32]ExtensionField
-	start := len(b)
-	pos := 0
-	for len(b) > 0 {
-		// Parse the tag (field number and wire type).
-		var tag uint64
-		if b[0] < 0x80 {
-			tag = uint64(b[0])
-			b = b[1:]
-		} else if len(b) >= 2 && b[1] < 128 {
-			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
-			b = b[2:]
-		} else {
-			var n int
-			tag, n = protowire.ConsumeVarint(b)
-			if n < 0 {
-				return out, errDecode
-			}
-			b = b[n:]
-		}
-		var num protowire.Number
-		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
-			return out, errors.New("invalid field number")
-		} else {
-			num = protowire.Number(n)
-		}
-		wtyp := protowire.Type(tag & 7)
-
-		if wtyp == protowire.EndGroupType {
-			if num != groupTag {
-				return out, errors.New("mismatching end group marker")
-			}
-			groupTag = 0
-			break
-		}
-
-		var f *coderFieldInfo
-		if int(num) < len(mi.denseCoderFields) {
-			f = mi.denseCoderFields[num]
-		} else {
-			f = mi.coderFields[num]
-		}
-		var n int
-		err := errUnknown
-		discardUnknown := false
-	Field:
-		switch {
-		case f != nil:
-			if f.funcs.unmarshal == nil {
-				break
-			}
-			if f.isLazy && lazyDecode {
-				switch {
-				case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
-					// Attempt to validate this field and leave it for later lazy unmarshaling.
-					o, valid := mi.skipField(b, f, wtyp, opts)
-					switch valid {
-					case ValidationValid:
-						// Skip over the valid field and continue.
-						err = nil
-						presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
-						requiredMask |= f.validation.requiredBit
-						if !o.initialized {
-							initialized = false
-						}
-						n = o.n
-						break Field
-					case ValidationInvalid:
-						return out, errors.New("invalid proto wire format")
-					case ValidationWrongWireType:
-						break Field
-					case ValidationUnknown:
-						if lazyFields == nil {
-							lazyFields = make(map[*coderFieldInfo]lazyAction)
-						}
-						if presence.Present(f.presenceIndex) {
-							// We were unable to determine if the field is valid or not,
-							// and we've already skipped over at least one instance of this
-							// field. Clear the presence bit (so if we stop decoding early,
-							// we don't leave a partially-initialized field around) and flag
-							// the field for unmarshaling before we return.
-							presence.ClearPresent(f.presenceIndex)
-							lazyFields[f] = lazyUnmarshalLater
-							discardUnknown = true
-							break Field
-						} else {
-							// We were unable to determine if the field is valid or not,
-							// but this is the first time we've seen it. Flag it as needing
-							// eager unmarshaling and fall through to the eager unmarshal case below.
-							lazyFields[f] = lazyUnmarshalNow
-						}
-					}
-				case lazyFields[f] == lazyUnmarshalLater:
-					// This field will be unmarshaled in a separate pass below.
-					// Skip over it here.
-					discardUnknown = true
-					break Field
-				default:
-					// Eagerly unmarshal the field.
-				}
-			}
-			if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
-				if p.Apply(f.offset).AtomicGetPointer().IsNil() {
-					mi.lazyUnmarshal(p, f.num)
-				}
-			}
-			var o unmarshalOutput
-			o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
-			n = o.n
-			if err != nil {
-				break
-			}
-			requiredMask |= f.validation.requiredBit
-			if f.funcs.isInit != nil && !o.initialized {
-				initialized = false
-			}
-			if f.presenceIndex != noPresence {
-				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
-			}
-		default:
-			// Possible extension.
-			if exts == nil && mi.extensionOffset.IsValid() {
-				exts = p.Apply(mi.extensionOffset).Extensions()
-				if *exts == nil {
-					*exts = make(map[int32]ExtensionField)
-				}
-			}
-			if exts == nil {
-				break
-			}
-			var o unmarshalOutput
-			o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
-			if err != nil {
-				break
-			}
-			n = o.n
-			if !o.initialized {
-				initialized = false
-			}
-		}
-		if err != nil {
-			if err != errUnknown {
-				return out, err
-			}
-			n = protowire.ConsumeFieldValue(num, wtyp, b)
-			if n < 0 {
-				return out, errDecode
-			}
-			if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
-				u := mi.mutableUnknownBytes(p)
-				*u = protowire.AppendTag(*u, num, wtyp)
-				*u = append(*u, b[:n]...)
-			}
-		}
-		b = b[n:]
-		end := start - len(b)
-		if lazyDecode && f != nil && f.isLazy {
-			if num != lastNum {
-				lazyIndex = append(lazyIndex, protolazy.IndexEntry{
-					FieldNum: uint32(num),
-					Start:    uint32(pos),
-					End:      uint32(end),
-				})
-			} else {
-				i := len(lazyIndex) - 1
-				lazyIndex[i].End = uint32(end)
-				lazyIndex[i].MultipleContiguous = true
-			}
-		}
-		if num < lastNum {
-			outOfOrder = true
-		}
-		pos = end
-		lastNum = num
-	}
-	if groupTag != 0 {
-		return out, errors.New("missing end group marker")
-	}
-	if lazyFields != nil {
-		// Some fields failed validation, and now need to be unmarshaled.
-		for f, action := range lazyFields {
-			if action != lazyUnmarshalLater {
-				continue
-			}
-			initialized = false
-			if *lazy == nil {
-				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
-			}
-			if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
-				return out, err
-			}
-			presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
-		}
-	}
-	if lazyDecode {
-		if outOfOrder {
-			sort.Slice(lazyIndex, func(i, j int) bool {
-				return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
-					(lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
-						lazyIndex[i].Start < lazyIndex[j].Start)
-			})
-		}
-		if *lazy == nil {
-			*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
-		}
-
-		(*lazy).SetIndex(lazyIndex)
-	}
-	if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
-		initialized = false
-	}
-	if initialized {
-		out.initialized = true
-	}
-	out.n = start - len(b)
-	return out, nil
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index b6849d66..6e8677ee 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -160,7 +160,6 @@ func (x placeholderExtension) HasPresence() bool
 func (x placeholderExtension) HasOptionalKeyword() bool                           { return false }
 func (x placeholderExtension) IsExtension() bool                                  { return true }
 func (x placeholderExtension) IsWeak() bool                                       { return false }
-func (x placeholderExtension) IsLazy() bool                                       { return false }
 func (x placeholderExtension) IsPacked() bool                                     { return false }
 func (x placeholderExtension) IsList() bool                                       { return false }
 func (x placeholderExtension) IsMap() bool                                        { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index 8ffdce67..7e65f64f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -41,38 +41,11 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
 	if src.IsNil() {
 		return
 	}
-
-	var presenceSrc presence
-	var presenceDst presence
-	if mi.presenceOffset.IsValid() {
-		presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
-		presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
-	}
-
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.merge == nil {
 			continue
 		}
 		sfptr := src.Apply(f.offset)
-
-		if f.presenceIndex != noPresence {
-			if !presenceSrc.Present(f.presenceIndex) {
-				continue
-			}
-			dfptr := dst.Apply(f.offset)
-			if f.isLazy {
-				if sfptr.AtomicGetPointer().IsNil() {
-					mi.lazyUnmarshal(src, f.num)
-				}
-				if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
-					mi.lazyUnmarshal(dst, f.num)
-				}
-			}
-			f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
-			presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
-			continue
-		}
-
 		if f.isPointer && sfptr.Elem().IsNil() {
 			continue
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index fa10a0f5..019399d4 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -30,8 +30,8 @@ type MessageInfo struct {
 	// Desc is the underlying message descriptor type and must be populated.
 	Desc protoreflect.MessageDescriptor
 
-	// Deprecated: Exporter will be removed the next time we bump
-	// protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
+	// Exporter must be provided in a purego environment in order to provide
+	// access to unexported fields.
 	Exporter exporter
 
 	// OneofWrappers is list of pointers to oneof wrapper struct types.
@@ -79,9 +79,6 @@ func (mi *MessageInfo) initOnce() {
 	if mi.initDone == 1 {
 		return
 	}
-	if opaqueInitHook(mi) {
-		return
-	}
 
 	t := mi.GoReflectType
 	if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
@@ -136,9 +133,6 @@ type structInfo struct {
 	extensionOffset offset
 	extensionType   reflect.Type
 
-	lazyOffset     offset
-	presenceOffset offset
-
 	fieldsByNumber        map[protoreflect.FieldNumber]reflect.StructField
 	oneofsByName          map[protoreflect.Name]reflect.StructField
 	oneofWrappersByType   map[reflect.Type]protoreflect.FieldNumber
@@ -151,8 +145,6 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
 		weakOffset:      invalidOffset,
 		unknownOffset:   invalidOffset,
 		extensionOffset: invalidOffset,
-		lazyOffset:      invalidOffset,
-		presenceOffset:  invalidOffset,
 
 		fieldsByNumber:        map[protoreflect.FieldNumber]reflect.StructField{},
 		oneofsByName:          map[protoreflect.Name]reflect.StructField{},
@@ -183,10 +175,6 @@ fieldLoop:
 				si.extensionOffset = offsetOf(f, mi.Exporter)
 				si.extensionType = f.Type
 			}
-		case "lazyFields", "XXX_lazyUnmarshalInfo":
-			si.lazyOffset = offsetOf(f, mi.Exporter)
-		case "XXX_presence":
-			si.presenceOffset = offsetOf(f, mi.Exporter)
 		default:
 			for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
 				if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
deleted file mode 100644
index d407dd79..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"fmt"
-	"math"
-	"reflect"
-	"strings"
-	"sync/atomic"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type opaqueStructInfo struct {
-	structInfo
-}
-
-// isOpaque determines whether a protobuf message type is on the Opaque API.  It
-// checks whether the type is a Go struct that protoc-gen-go would generate.
-//
-// This function only detects newly generated messages from the v2
-// implementation of protoc-gen-go. It is unable to classify generated messages
-// that are too old or those that are generated by a different generator
-// such as protoc-gen-gogo.
-func isOpaque(t reflect.Type) bool {
-	// The current detection mechanism is to simply check the first field
-	// for a struct tag with the "protogen" key.
-	if t.Kind() == reflect.Struct && t.NumField() > 0 {
-		pgt := t.Field(0).Tag.Get("protogen")
-		return strings.HasPrefix(pgt, "opaque.")
-	}
-	return false
-}
-
-func opaqueInitHook(mi *MessageInfo) bool {
-	mt := mi.GoReflectType.Elem()
-	si := opaqueStructInfo{
-		structInfo: mi.makeStructInfo(mt),
-	}
-
-	if !isOpaque(mt) {
-		return false
-	}
-
-	defer atomic.StoreUint32(&mi.initDone, 1)
-
-	mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
-	fds := mi.Desc.Fields()
-	for i := 0; i < fds.Len(); i++ {
-		fd := fds.Get(i)
-		fs := si.fieldsByNumber[fd.Number()]
-		var fi fieldInfo
-		usePresence, _ := usePresenceForField(si, fd)
-
-		switch {
-		case fd.IsWeak():
-			// Weak fields are no different for opaque.
-			fi = fieldInfoForWeakMessage(fd, si.weakOffset)
-		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-			// Oneofs are no different for opaque.
-			fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
-		case fd.IsMap():
-			fi = mi.fieldInfoForMapOpaque(si, fd, fs)
-		case fd.IsList() && fd.Message() == nil && usePresence:
-			fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
-		case fd.IsList() && fd.Message() == nil:
-			// Proto3 lists without presence can use same access methods as open
-			fi = fieldInfoForList(fd, fs, mi.Exporter)
-		case fd.IsList() && usePresence:
-			fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
-		case fd.IsList():
-			// Proto3 opaque messages that do not need a presence bitmap.
-			// Different representation than open struct, but same logic
-			fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
-		case fd.Message() != nil && usePresence:
-			fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
-		case fd.Message() != nil:
-			// Proto3 messages without presence can use same access methods as open
-			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
-		default:
-			fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
-		}
-		mi.fields[fd.Number()] = &fi
-	}
-	mi.oneofs = map[protoreflect.Name]*oneofInfo{}
-	for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
-		od := mi.Desc.Oneofs().Get(i)
-		if !od.IsSynthetic() {
-			mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter)
-		}
-	}
-
-	mi.denseFields = make([]*fieldInfo, fds.Len()*2)
-	for i := 0; i < fds.Len(); i++ {
-		if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
-			mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
-		}
-	}
-
-	for i := 0; i < fds.Len(); {
-		fd := fds.Get(i)
-		if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
-			mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
-			i += od.Fields().Len()
-		} else {
-			mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
-			i++
-		}
-	}
-
-	mi.makeExtensionFieldsFunc(mt, si.structInfo)
-	mi.makeUnknownFieldsFunc(mt, si.structInfo)
-	mi.makeOpaqueCoderMethods(mt, si)
-	mi.makeFieldTypes(si.structInfo)
-
-	return true
-}
-
-func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
-	ft := fs.Type
-	if ft.Kind() != reflect.Map {
-		panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
-	}
-	fieldOffset := offsetOf(fs, mi.Exporter)
-	conv := NewConverter(ft, fd)
-	return fieldInfo{
-		fieldDesc: fd,
-		has: func(p pointer) bool {
-			if p.IsNil() {
-				return false
-			}
-			// Don't bother checking presence bits, since we need to
-			// look at the map length even if the presence bit is set.
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			return rv.Len() > 0
-		},
-		clear: func(p pointer) {
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			rv.Set(reflect.Zero(rv.Type()))
-		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if rv.Len() == 0 {
-				return conv.Zero()
-			}
-			return conv.PBValueOf(rv)
-		},
-		set: func(p pointer, v protoreflect.Value) {
-			pv := conv.GoValueOf(v)
-			if pv.IsNil() {
-				panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			rv.Set(pv)
-		},
-		mutable: func(p pointer) protoreflect.Value {
-			v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if v.IsNil() {
-				v.Set(reflect.MakeMap(fs.Type))
-			}
-			return conv.PBValueOf(v)
-		},
-		newField: func() protoreflect.Value {
-			return conv.New()
-		},
-	}
-}
-
-func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
-	ft := fs.Type
-	if ft.Kind() != reflect.Slice {
-		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
-	}
-	conv := NewConverter(reflect.PtrTo(ft), fd)
-	fieldOffset := offsetOf(fs, mi.Exporter)
-	index, _ := presenceIndex(mi.Desc, fd)
-	return fieldInfo{
-		fieldDesc: fd,
-		has: func(p pointer) bool {
-			if p.IsNil() {
-				return false
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			return rv.Len() > 0
-		},
-		clear: func(p pointer) {
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			rv.Set(reflect.Zero(rv.Type()))
-		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
-			if rv.Elem().Len() == 0 {
-				return conv.Zero()
-			}
-			return conv.PBValueOf(rv)
-		},
-		set: func(p pointer, v protoreflect.Value) {
-			pv := conv.GoValueOf(v)
-			if pv.IsNil() {
-				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
-			}
-			mi.setPresent(p, index)
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			rv.Set(pv.Elem())
-		},
-		mutable: func(p pointer) protoreflect.Value {
-			mi.setPresent(p, index)
-			return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
-		},
-		newField: func() protoreflect.Value {
-			return conv.New()
-		},
-	}
-}
-
-func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
-	ft := fs.Type
-	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
-		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
-	}
-	conv := NewConverter(ft, fd)
-	fieldOffset := offsetOf(fs, mi.Exporter)
-	index, _ := presenceIndex(mi.Desc, fd)
-	fieldNumber := fd.Number()
-	return fieldInfo{
-		fieldDesc: fd,
-		has: func(p pointer) bool {
-			if p.IsNil() {
-				return false
-			}
-			if !mi.present(p, index) {
-				return false
-			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
-				// Lazily unmarshal this field.
-				mi.lazyUnmarshal(p, fieldNumber)
-				sp = p.Apply(fieldOffset).AtomicGetPointer()
-			}
-			rv := sp.AsValueOf(fs.Type.Elem())
-			return rv.Elem().Len() > 0
-		},
-		clear: func(p pointer) {
-			fp := p.Apply(fieldOffset)
-			sp := fp.AtomicGetPointer()
-			if sp.IsNil() {
-				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
-				mi.setPresent(p, index)
-			}
-			rv := sp.AsValueOf(fs.Type.Elem())
-			rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
-		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			if !mi.present(p, index) {
-				return conv.Zero()
-			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
-				// Lazily unmarshal this field.
-				mi.lazyUnmarshal(p, fieldNumber)
-				sp = p.Apply(fieldOffset).AtomicGetPointer()
-			}
-			rv := sp.AsValueOf(fs.Type.Elem())
-			if rv.Elem().Len() == 0 {
-				return conv.Zero()
-			}
-			return conv.PBValueOf(rv)
-		},
-		set: func(p pointer, v protoreflect.Value) {
-			fp := p.Apply(fieldOffset)
-			sp := fp.AtomicGetPointer()
-			if sp.IsNil() {
-				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
-				mi.setPresent(p, index)
-			}
-			rv := sp.AsValueOf(fs.Type.Elem())
-			val := conv.GoValueOf(v)
-			if val.IsNil() {
-				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
-			} else {
-				rv.Elem().Set(val.Elem())
-			}
-		},
-		mutable: func(p pointer) protoreflect.Value {
-			fp := p.Apply(fieldOffset)
-			sp := fp.AtomicGetPointer()
-			if sp.IsNil() {
-				if mi.present(p, index) {
-					// Lazily unmarshal this field.
-					mi.lazyUnmarshal(p, fieldNumber)
-					sp = p.Apply(fieldOffset).AtomicGetPointer()
-				} else {
-					sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
-					mi.setPresent(p, index)
-				}
-			}
-			rv := sp.AsValueOf(fs.Type.Elem())
-			return conv.PBValueOf(rv)
-		},
-		newField: func() protoreflect.Value {
-			return conv.New()
-		},
-	}
-}
-
-func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
-	ft := fs.Type
-	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
-		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
-	}
-	conv := NewConverter(ft, fd)
-	fieldOffset := offsetOf(fs, mi.Exporter)
-	return fieldInfo{
-		fieldDesc: fd,
-		has: func(p pointer) bool {
-			if p.IsNil() {
-				return false
-			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
-				return false
-			}
-			rv := sp.AsValueOf(fs.Type.Elem())
-			return rv.Elem().Len() > 0
-		},
-		clear: func(p pointer) {
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if !sp.IsNil() {
-				rv := sp.AsValueOf(fs.Type.Elem())
-				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
-			}
-		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
-				return conv.Zero()
-			}
-			rv := sp.AsValueOf(fs.Type.Elem())
-			if rv.Elem().Len() == 0 {
-				return conv.Zero()
-			}
-			return conv.PBValueOf(rv)
-		},
-		set: func(p pointer, v protoreflect.Value) {
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if rv.IsNil() {
-				rv.Set(reflect.New(fs.Type.Elem()))
-			}
-			val := conv.GoValueOf(v)
-			if val.IsNil() {
-				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
-			} else {
-				rv.Elem().Set(val.Elem())
-			}
-		},
-		mutable: func(p pointer) protoreflect.Value {
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if rv.IsNil() {
-				rv.Set(reflect.New(fs.Type.Elem()))
-			}
-			return conv.PBValueOf(rv)
-		},
-		newField: func() protoreflect.Value {
-			return conv.New()
-		},
-	}
-}
-
-func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
-	ft := fs.Type
-	nullable := fd.HasPresence()
-	if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
-		nullable = true
-	}
-	deref := false
-	if nullable && ft.Kind() == reflect.Ptr {
-		ft = ft.Elem()
-		deref = true
-	}
-	conv := NewConverter(ft, fd)
-	fieldOffset := offsetOf(fs, mi.Exporter)
-	index, _ := presenceIndex(mi.Desc, fd)
-	var getter func(p pointer) protoreflect.Value
-	if !nullable {
-		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
-	} else {
-		getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
-	}
-	return fieldInfo{
-		fieldDesc: fd,
-		has: func(p pointer) bool {
-			if p.IsNil() {
-				return false
-			}
-			if nullable {
-				return mi.present(p, index)
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			switch rv.Kind() {
-			case reflect.Bool:
-				return rv.Bool()
-			case reflect.Int32, reflect.Int64:
-				return rv.Int() != 0
-			case reflect.Uint32, reflect.Uint64:
-				return rv.Uint() != 0
-			case reflect.Float32, reflect.Float64:
-				return rv.Float() != 0 || math.Signbit(rv.Float())
-			case reflect.String, reflect.Slice:
-				return rv.Len() > 0
-			default:
-				panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
-			}
-		},
-		clear: func(p pointer) {
-			if nullable {
-				mi.clearPresent(p, index)
-			}
-			// This is only valuable for bytes and strings, but we do it unconditionally.
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			rv.Set(reflect.Zero(rv.Type()))
-		},
-		get: getter,
-		// TODO: Implement unsafe fast path for set?
-		set: func(p pointer, v protoreflect.Value) {
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if deref {
-				if rv.IsNil() {
-					rv.Set(reflect.New(ft))
-				}
-				rv = rv.Elem()
-			}
-
-			rv.Set(conv.GoValueOf(v))
-			if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
-				rv.Set(emptyBytes)
-			}
-			if nullable {
-				mi.setPresent(p, index)
-			}
-		},
-		newField: func() protoreflect.Value {
-			return conv.New()
-		},
-	}
-}
-
-func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
-	ft := fs.Type
-	conv := NewConverter(ft, fd)
-	fieldOffset := offsetOf(fs, mi.Exporter)
-	index, _ := presenceIndex(mi.Desc, fd)
-	fieldNumber := fd.Number()
-	elemType := fs.Type.Elem()
-	return fieldInfo{
-		fieldDesc: fd,
-		has: func(p pointer) bool {
-			if p.IsNil() {
-				return false
-			}
-			return mi.present(p, index)
-		},
-		clear: func(p pointer) {
-			mi.clearPresent(p, index)
-			p.Apply(fieldOffset).AtomicSetNilPointer()
-		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			fp := p.Apply(fieldOffset)
-			mp := fp.AtomicGetPointer()
-			if mp.IsNil() {
-				// Lazily unmarshal this field.
-				mi.lazyUnmarshal(p, fieldNumber)
-				mp = fp.AtomicGetPointer()
-			}
-			rv := mp.AsValueOf(elemType)
-			return conv.PBValueOf(rv)
-		},
-		set: func(p pointer, v protoreflect.Value) {
-			val := pointerOfValue(conv.GoValueOf(v))
-			if val.IsNil() {
-				panic("invalid nil pointer")
-			}
-			p.Apply(fieldOffset).AtomicSetPointer(val)
-			mi.setPresent(p, index)
-		},
-		mutable: func(p pointer) protoreflect.Value {
-			fp := p.Apply(fieldOffset)
-			mp := fp.AtomicGetPointer()
-			if mp.IsNil() {
-				if mi.present(p, index) {
-					// Lazily unmarshal this field.
-					mi.lazyUnmarshal(p, fieldNumber)
-					mp = fp.AtomicGetPointer()
-				} else {
-					mp = pointerOfValue(conv.GoValueOf(conv.New()))
-					fp.AtomicSetPointer(mp)
-					mi.setPresent(p, index)
-				}
-			}
-			return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
-		},
-		newMessage: func() protoreflect.Message {
-			return conv.New().Message()
-		},
-		newField: func() protoreflect.Value {
-			return conv.New()
-		},
-	}
-}
-
-// A presenceList wraps a List, updating presence bits as necessary when the
-// list contents change.
-type presenceList struct {
-	pvalueList
-	setPresence func(bool)
-}
-type pvalueList interface {
-	protoreflect.List
-	//Unwrapper
-}
-
-func (list presenceList) Append(v protoreflect.Value) {
-	list.pvalueList.Append(v)
-	list.setPresence(true)
-}
-func (list presenceList) Truncate(i int) {
-	list.pvalueList.Truncate(i)
-	list.setPresence(i > 0)
-}
-
-// presenceIndex returns the index to pass to presence functions.
-//
-// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
-func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
-	found := false
-	var index, numIndices uint32
-	for i := 0; i < md.Fields().Len(); i++ {
-		f := md.Fields().Get(i)
-		if f == fd {
-			found = true
-			index = numIndices
-		}
-		if f.ContainingOneof() == nil || isLastOneofField(f) {
-			numIndices++
-		}
-	}
-	if !found {
-		panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
-	}
-	return index, presenceSize(numIndices)
-}
-
-func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
-	fields := fd.ContainingOneof().Fields()
-	return fields.Get(fields.Len()-1) == fd
-}
-
-func (mi *MessageInfo) setPresent(p pointer, index uint32) {
-	p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
-}
-
-func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
-	p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
-}
-
-func (mi *MessageInfo) present(p pointer, index uint32) bool {
-	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
-}
-
-// usePresenceForField implements the somewhat intricate logic of when
-// the presence bitmap is used for a field.  The main logic is that a
-// field that is optional or that can be lazy will use the presence
-// bit, but for proto2, also maps have a presence bit. It also records
-// if the field can ever be lazy, which is true if we have a
-// lazyOffset and the field is a message or a slice of messages. A
-// field that is lazy will always need a presence bit.  Oneofs are not
-// lazy and do not use presence, unless they are a synthetic oneof,
-// which is a proto3 optional field. For proto3 optionals, we use the
-// presence and they can also be lazy when applicable (a message).
-func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
-	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
-
-	// Non-oneof scalar fields with explicit field presence use the presence array.
-	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
-	switch {
-	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-		return false, false
-	case fd.IsWeak():
-		return false, false
-	case fd.IsMap():
-		return false, false
-	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
-		return hasLazyField, hasLazyField
-	default:
-		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
deleted file mode 100644
index a6982569..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-types. DO NOT EDIT.
-
-package impl
-
-import (
-	"reflect"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
-	ft := fs.Type
-	if ft.Kind() == reflect.Ptr {
-		ft = ft.Elem()
-	}
-	if fd.Kind() == protoreflect.EnumKind {
-		// Enums for nullable opaque types.
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			return conv.PBValueOf(rv)
-		}
-	}
-	switch ft.Kind() {
-	case reflect.Bool:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Bool()
-			return protoreflect.ValueOfBool(*x)
-		}
-	case reflect.Int32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Int32()
-			return protoreflect.ValueOfInt32(*x)
-		}
-	case reflect.Uint32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Uint32()
-			return protoreflect.ValueOfUint32(*x)
-		}
-	case reflect.Int64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Int64()
-			return protoreflect.ValueOfInt64(*x)
-		}
-	case reflect.Uint64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Uint64()
-			return protoreflect.ValueOfUint64(*x)
-		}
-	case reflect.Float32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Float32()
-			return protoreflect.ValueOfFloat32(*x)
-		}
-	case reflect.Float64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Float64()
-			return protoreflect.ValueOfFloat64(*x)
-		}
-	case reflect.String:
-		if fd.Kind() == protoreflect.BytesKind {
-			return func(p pointer) protoreflect.Value {
-				if p.IsNil() || !mi.present(p, index) {
-					return conv.Zero()
-				}
-				x := p.Apply(fieldOffset).StringPtr()
-				if *x == nil {
-					return conv.Zero()
-				}
-				if len(**x) == 0 {
-					return protoreflect.ValueOfBytes(nil)
-				}
-				return protoreflect.ValueOfBytes([]byte(**x))
-			}
-		}
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).StringPtr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfString(**x)
-		}
-	case reflect.Slice:
-		if fd.Kind() == protoreflect.StringKind {
-			return func(p pointer) protoreflect.Value {
-				if p.IsNil() || !mi.present(p, index) {
-					return conv.Zero()
-				}
-				x := p.Apply(fieldOffset).Bytes()
-				return protoreflect.ValueOfString(string(*x))
-			}
-		}
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() || !mi.present(p, index) {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Bytes()
-			return protoreflect.ValueOfBytes(*x)
-		}
-	}
-	panic("unexpected protobuf kind: " + ft.Kind().String())
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index 31c19b54..ecb4623d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -205,11 +205,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
 		case fd.IsList():
 			if fd.Enum() != nil || fd.Message() != nil {
 				ft = fs.Type.Elem()
-
-				if ft.Kind() == reflect.Slice {
-					ft = ft.Elem()
-				}
-
 			}
 			isMessage = fd.Message() != nil
 		case fd.Enum() != nil:
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index a7406462..986322b1 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -256,7 +256,6 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	ft := fs.Type
 	nullable := fd.HasPresence()
 	isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
-	var getter func(p pointer) protoreflect.Value
 	if nullable {
 		if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
 			// This never occurs for generated message types.
@@ -269,25 +268,19 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 		}
 	}
 	conv := NewConverter(ft, fd)
+
+	// TODO: Implement unsafe fast path?
 	fieldOffset := offsetOf(fs, x)
-
-	// Generate specialized getter functions to avoid going through reflect.Value
-	if nullable {
-		getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
-	} else {
-		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
-	}
-
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
 			if p.IsNil() {
 				return false
 			}
-			if nullable {
-				return !p.Apply(fieldOffset).Elem().IsNil()
-			}
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if nullable {
+				return !rv.IsNil()
+			}
 			switch rv.Kind() {
 			case reflect.Bool:
 				return rv.Bool()
@@ -307,8 +300,21 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			rv.Set(reflect.Zero(rv.Type()))
 		},
-		get: getter,
-		// TODO: Implement unsafe fast path for set?
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if nullable {
+				if rv.IsNil() {
+					return conv.Zero()
+				}
+				if rv.Kind() == reflect.Ptr {
+					rv = rv.Elem()
+				}
+			}
+			return conv.PBValueOf(rv)
+		},
 		set: func(p pointer, v protoreflect.Value) {
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable && rv.Kind() == reflect.Ptr {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
deleted file mode 100644
index af5e063a..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by generate-types. DO NOT EDIT.
-
-package impl
-
-import (
-	"reflect"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
-	ft := fs.Type
-	if ft.Kind() == reflect.Ptr {
-		ft = ft.Elem()
-	}
-	if fd.Kind() == protoreflect.EnumKind {
-		elemType := fs.Type.Elem()
-		// Enums for nullable types.
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
-			if rv.IsNil() {
-				return conv.Zero()
-			}
-			return conv.PBValueOf(rv.Elem())
-		}
-	}
-	switch ft.Kind() {
-	case reflect.Bool:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).BoolPtr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfBool(**x)
-		}
-	case reflect.Int32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Int32Ptr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfInt32(**x)
-		}
-	case reflect.Uint32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Uint32Ptr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfUint32(**x)
-		}
-	case reflect.Int64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Int64Ptr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfInt64(**x)
-		}
-	case reflect.Uint64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Uint64Ptr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfUint64(**x)
-		}
-	case reflect.Float32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Float32Ptr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfFloat32(**x)
-		}
-	case reflect.Float64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Float64Ptr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfFloat64(**x)
-		}
-	case reflect.String:
-		if fd.Kind() == protoreflect.BytesKind {
-			return func(p pointer) protoreflect.Value {
-				if p.IsNil() {
-					return conv.Zero()
-				}
-				x := p.Apply(fieldOffset).StringPtr()
-				if *x == nil {
-					return conv.Zero()
-				}
-				if len(**x) == 0 {
-					return protoreflect.ValueOfBytes(nil)
-				}
-				return protoreflect.ValueOfBytes([]byte(**x))
-			}
-		}
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).StringPtr()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfString(**x)
-		}
-	case reflect.Slice:
-		if fd.Kind() == protoreflect.StringKind {
-			return func(p pointer) protoreflect.Value {
-				if p.IsNil() {
-					return conv.Zero()
-				}
-				x := p.Apply(fieldOffset).Bytes()
-				if len(*x) == 0 {
-					return conv.Zero()
-				}
-				return protoreflect.ValueOfString(string(*x))
-			}
-		}
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Bytes()
-			if *x == nil {
-				return conv.Zero()
-			}
-			return protoreflect.ValueOfBytes(*x)
-		}
-	}
-	panic("unexpected protobuf kind: " + ft.Kind().String())
-}
-
-func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
-	ft := fs.Type
-	if fd.Kind() == protoreflect.EnumKind {
-		// Enums for non nullable types.
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			return conv.PBValueOf(rv)
-		}
-	}
-	switch ft.Kind() {
-	case reflect.Bool:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Bool()
-			return protoreflect.ValueOfBool(*x)
-		}
-	case reflect.Int32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Int32()
-			return protoreflect.ValueOfInt32(*x)
-		}
-	case reflect.Uint32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Uint32()
-			return protoreflect.ValueOfUint32(*x)
-		}
-	case reflect.Int64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Int64()
-			return protoreflect.ValueOfInt64(*x)
-		}
-	case reflect.Uint64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Uint64()
-			return protoreflect.ValueOfUint64(*x)
-		}
-	case reflect.Float32:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Float32()
-			return protoreflect.ValueOfFloat32(*x)
-		}
-	case reflect.Float64:
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Float64()
-			return protoreflect.ValueOfFloat64(*x)
-		}
-	case reflect.String:
-		if fd.Kind() == protoreflect.BytesKind {
-			return func(p pointer) protoreflect.Value {
-				if p.IsNil() {
-					return conv.Zero()
-				}
-				x := p.Apply(fieldOffset).String()
-				if len(*x) == 0 {
-					return protoreflect.ValueOfBytes(nil)
-				}
-				return protoreflect.ValueOfBytes([]byte(*x))
-			}
-		}
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).String()
-			return protoreflect.ValueOfString(*x)
-		}
-	case reflect.Slice:
-		if fd.Kind() == protoreflect.StringKind {
-			return func(p pointer) protoreflect.Value {
-				if p.IsNil() {
-					return conv.Zero()
-				}
-				x := p.Apply(fieldOffset).Bytes()
-				return protoreflect.ValueOfString(string(*x))
-			}
-		}
-		return func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			x := p.Apply(fieldOffset).Bytes()
-			return protoreflect.ValueOfBytes(*x)
-		}
-	}
-	panic("unexpected protobuf kind: " + ft.Kind().String())
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
new file mode 100644
index 00000000..da685e8a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -0,0 +1,215 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build purego || appengine
+// +build purego appengine
+
+package impl
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+)
+
+const UnsafeEnabled = false
+
+// Pointer is an opaque pointer type.
+type Pointer any
+
+// offset represents the offset to a struct field, accessible from a pointer.
+// The offset is the field index into a struct.
+type offset struct {
+	index  int
+	export exporter
+}
+
+// offsetOf returns a field offset for the struct field.
+func offsetOf(f reflect.StructField, x exporter) offset {
+	if len(f.Index) != 1 {
+		panic("embedded structs are not supported")
+	}
+	if f.PkgPath == "" {
+		return offset{index: f.Index[0]} // field is already exported
+	}
+	if x == nil {
+		panic("exporter must be provided for unexported field")
+	}
+	return offset{index: f.Index[0], export: x}
+}
+
+// IsValid reports whether the offset is valid.
+func (f offset) IsValid() bool { return f.index >= 0 }
+
+// invalidOffset is an invalid field offset.
+var invalidOffset = offset{index: -1}
+
+// zeroOffset is a noop when calling pointer.Apply.
+var zeroOffset = offset{index: 0}
+
+// pointer is an abstract representation of a pointer to a struct or field.
+type pointer struct{ v reflect.Value }
+
+// pointerOf returns p as a pointer.
+func pointerOf(p Pointer) pointer {
+	return pointerOfIface(p)
+}
+
+// pointerOfValue returns v as a pointer.
+func pointerOfValue(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// pointerOfIface returns the pointer portion of an interface.
+func pointerOfIface(v any) pointer {
+	return pointer{v: reflect.ValueOf(v)}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p pointer) IsNil() bool {
+	return p.v.IsNil()
+}
+
+// Apply adds an offset to the pointer to derive a new pointer
+// to a specified field. The current pointer must be pointing at a struct.
+func (p pointer) Apply(f offset) pointer {
+	if f.export != nil {
+		if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
+			return pointer{v: v}
+		}
+	}
+	return pointer{v: p.v.Elem().Field(f.index).Addr()}
+}
+
+// AsValueOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
+func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
+	if got := p.v.Type().Elem(); got != t {
+		panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
+	}
+	return p.v
+}
+
+// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
+// It is equivalent to p.AsValueOf(t).Interface()
+func (p pointer) AsIfaceOf(t reflect.Type) any {
+	return p.AsValueOf(t).Interface()
+}
+
+func (p pointer) Bool() *bool              { return p.v.Interface().(*bool) }
+func (p pointer) BoolPtr() **bool          { return p.v.Interface().(**bool) }
+func (p pointer) BoolSlice() *[]bool       { return p.v.Interface().(*[]bool) }
+func (p pointer) Int32() *int32            { return p.v.Interface().(*int32) }
+func (p pointer) Int32Ptr() **int32        { return p.v.Interface().(**int32) }
+func (p pointer) Int32Slice() *[]int32     { return p.v.Interface().(*[]int32) }
+func (p pointer) Int64() *int64            { return p.v.Interface().(*int64) }
+func (p pointer) Int64Ptr() **int64        { return p.v.Interface().(**int64) }
+func (p pointer) Int64Slice() *[]int64     { return p.v.Interface().(*[]int64) }
+func (p pointer) Uint32() *uint32          { return p.v.Interface().(*uint32) }
+func (p pointer) Uint32Ptr() **uint32      { return p.v.Interface().(**uint32) }
+func (p pointer) Uint32Slice() *[]uint32   { return p.v.Interface().(*[]uint32) }
+func (p pointer) Uint64() *uint64          { return p.v.Interface().(*uint64) }
+func (p pointer) Uint64Ptr() **uint64      { return p.v.Interface().(**uint64) }
+func (p pointer) Uint64Slice() *[]uint64   { return p.v.Interface().(*[]uint64) }
+func (p pointer) Float32() *float32        { return p.v.Interface().(*float32) }
+func (p pointer) Float32Ptr() **float32    { return p.v.Interface().(**float32) }
+func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
+func (p pointer) Float64() *float64        { return p.v.Interface().(*float64) }
+func (p pointer) Float64Ptr() **float64    { return p.v.Interface().(**float64) }
+func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
+func (p pointer) String() *string          { return p.v.Interface().(*string) }
+func (p pointer) StringPtr() **string      { return p.v.Interface().(**string) }
+func (p pointer) StringSlice() *[]string   { return p.v.Interface().(*[]string) }
+func (p pointer) Bytes() *[]byte           { return p.v.Interface().(*[]byte) }
+func (p pointer) BytesPtr() **[]byte       { return p.v.Interface().(**[]byte) }
+func (p pointer) BytesSlice() *[][]byte    { return p.v.Interface().(*[][]byte) }
+func (p pointer) WeakFields() *weakFields  { return (*weakFields)(p.v.Interface().(*WeakFields)) }
+func (p pointer) Extensions() *map[int32]ExtensionField {
+	return p.v.Interface().(*map[int32]ExtensionField)
+}
+
+func (p pointer) Elem() pointer {
+	return pointer{v: p.v.Elem()}
+}
+
+// PointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) PointerSlice() []pointer {
+	// TODO: reconsider this
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// AppendPointerSlice appends v to p, which must be a []*T.
+func (p pointer) AppendPointerSlice(v pointer) {
+	sp := p.v.Elem()
+	sp.Set(reflect.Append(sp, v.v))
+}
+
+// SetPointer sets *p to v.
+func (p pointer) SetPointer(v pointer) {
+	p.v.Elem().Set(v.v)
+}
+
+func growSlice(p pointer, addCap int) {
+	// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
+	in := p.v.Elem()
+	out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
+	reflect.Copy(out, in)
+	p.v.Elem().Set(out)
+}
+
+func (p pointer) growBoolSlice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growInt32Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growUint32Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growInt64Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growUint64Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growFloat64Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growFloat32Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (Export) MessageStateOf(p Pointer) *messageState     { panic("not supported") }
+func (ms *messageState) pointer() pointer                 { panic("not supported") }
+func (ms *messageState) messageInfo() *MessageInfo        { panic("not supported") }
+func (ms *messageState) LoadMessageInfo() *MessageInfo    { panic("not supported") }
+func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
+
+type atomicNilMessage struct {
+	once sync.Once
+	m    messageReflectWrapper
+}
+
+func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
+	m.once.Do(func() {
+		m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
+		m.m.mi = mi
+	})
+	return &m.m
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 041ebde2..5f20ca5d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,14 +2,15 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !purego && !appengine
+// +build !purego,!appengine
+
 package impl
 
 import (
 	"reflect"
 	"sync/atomic"
 	"unsafe"
-
-	"google.golang.org/protobuf/internal/protolazy"
 )
 
 const UnsafeEnabled = true
@@ -113,13 +114,6 @@ func (p pointer) BytesPtr() **[]byte                    { return (**[]byte)(p.p)
 func (p pointer) BytesSlice() *[][]byte                 { return (*[][]byte)(p.p) }
 func (p pointer) WeakFields() *weakFields               { return (*weakFields)(p.p) }
 func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
-func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
-	return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
-}
-
-func (p pointer) PresenceInfo() presence {
-	return presence{P: p.p}
-}
 
 func (p pointer) Elem() pointer {
 	return pointer{p: *(*unsafe.Pointer)(p.p)}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
deleted file mode 100644
index 38aa7b7d..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"sync/atomic"
-	"unsafe"
-)
-
-func (p pointer) AtomicGetPointer() pointer {
-	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
-}
-
-func (p pointer) AtomicSetPointer(v pointer) {
-	atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
-}
-
-func (p pointer) AtomicSetNilPointer() {
-	atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
-}
-
-func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
-	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
-		return v
-	}
-	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
-}
-
-type atomicV1MessageInfo struct{ p Pointer }
-
-func (mi *atomicV1MessageInfo) Get() Pointer {
-	return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
-}
-
-func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
-	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
-		return p
-	}
-	return mi.Get()
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
deleted file mode 100644
index 914cb1de..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/presence.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
-	"sync/atomic"
-	"unsafe"
-)
-
-// presenceSize represents the size of a presence set, which should be the largest index of the set+1
-type presenceSize uint32
-
-// presence is the internal representation of the bitmap array in a generated protobuf
-type presence struct {
-	// This is a pointer to the beginning of an array of uint32
-	P unsafe.Pointer
-}
-
-func (p presence) toElem(num uint32) (ret *uint32) {
-	const (
-		bitsPerByte = 8
-		siz         = unsafe.Sizeof(*ret)
-	)
-	// p.P points to an array of uint32, num is the bit in this array that the
-	// caller wants to check/manipulate. Calculate the index in the array that
-	// contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
-	offset := uintptr(num) / (siz * bitsPerByte) * siz
-	return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
-}
-
-// Present checks for the presence of a specific field number in a presence set.
-func (p presence) Present(num uint32) bool {
-	if p.P == nil {
-		return false
-	}
-	return Export{}.Present(p.toElem(num), num)
-}
-
-// SetPresent adds presence for a specific field number in a presence set.
-func (p presence) SetPresent(num uint32, size presenceSize) {
-	Export{}.SetPresent(p.toElem(num), num, uint32(size))
-}
-
-// SetPresentUnatomic adds presence for a specific field number in a presence set without using
-// atomic operations. Only to be called during unmarshaling.
-func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
-	Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
-}
-
-// ClearPresent removes presence for a specific field number in a presence set.
-func (p presence) ClearPresent(num uint32) {
-	Export{}.ClearPresent(p.toElem(num), num)
-}
-
-// LoadPresenceCache (together with PresentInCache) allows for a
-// cached version of checking for presence without re-reading the word
-// for every field. It is optimized for efficiency and assumes no
-// simultaneous mutation of the presence set (or at least does not have
-// a problem with simultaneous mutation giving inconsistent results).
-func (p presence) LoadPresenceCache() (current uint32) {
-	if p.P == nil {
-		return 0
-	}
-	return atomic.LoadUint32((*uint32)(p.P))
-}
-
-// PresentInCache reads presence from a cached word in the presence
-// bitmap. It caches up a new word if the bit is outside the
-// word. This is for really fast iteration through bitmaps in cases
-// where we either know that the bitmap will not be altered, or we
-// don't care about inconsistencies caused by simultaneous writes.
-func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
-	if num/32 != *cachedElement {
-		o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
-		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
-		*current = atomic.LoadUint32(q)
-		*cachedElement = num / 32
-	}
-	return (*current & (1 << (num % 32))) > 0
-}
-
-// AnyPresent checks if any field is marked as present in the bitmap.
-func (p presence) AnyPresent(size presenceSize) bool {
-	n := uintptr((size + 31) / 32)
-	for j := uintptr(0); j < n; j++ {
-		o := j * unsafe.Sizeof(uint32(0))
-		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
-		b := atomic.LoadUint32(q)
-		if b > 0 {
-			return true
-		}
-	}
-	return false
-}
-
-// toRaceDetectData finds the preceding RaceDetectHookData in a
-// message by using pointer arithmetic. As the type of the presence
-// set (bitmap) varies with the number of fields in the protobuf, we
-// can not have a struct type containing the array and the
-// RaceDetectHookData.  Instead the RaceDetectHookData is placed
-// immediately before the bitmap array, and we find it by walking
-// backwards in the struct.
-//
-// This method is only called from the race-detect version of the code,
-// so RaceDetectHookData is never an empty struct.
-func (p presence) toRaceDetectData() *RaceDetectHookData {
-	var template struct {
-		d RaceDetectHookData
-		a [1]uint32
-	}
-	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
-	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
-}
-
-func atomicLoadShadowPresence(p **[]byte) *[]byte {
-	return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
-	atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
-}
-
-// findPointerToRaceDetectData finds the preceding RaceDetectHookData
-// in a message by using pointer arithmetic. For the methods called
-// directly from generated code, we don't have a pointer to the
-// beginning of the presence set, but a pointer inside the array. As
-// we know the index of the bit we're manipulating (num), we can
-// calculate which element of the array ptr is pointing to. With that
-// information we find the preceding RaceDetectHookData and can
-// manipulate the shadow bitmap.
-//
-// This method is only called from the race-detect version of the
-// code, so RaceDetectHookData is never an empty struct.
-func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
-	var template struct {
-		d RaceDetectHookData
-		a [1]uint32
-	}
-	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
-	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index b534a3d6..a24e6bbd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -37,10 +37,6 @@ const (
 
 	// ValidationValid indicates that unmarshaling the message will succeed.
 	ValidationValid
-
-	// ValidationWrongWireType indicates that a validated field does not have
-	// the expected wire type.
-	ValidationWrongWireType
 )
 
 func (v ValidationStatus) String() string {
@@ -153,23 +149,11 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
 		switch fd.Kind() {
 		case protoreflect.MessageKind:
 			vi.typ = validationTypeMessage
-
-			if ft.Kind() == reflect.Ptr {
-				// Repeated opaque message fields are *[]*T.
-				ft = ft.Elem()
-			}
-
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
 		case protoreflect.GroupKind:
 			vi.typ = validationTypeGroup
-
-			if ft.Kind() == reflect.Ptr {
-				// Repeated opaque message fields are *[]*T.
-				ft = ft.Elem()
-			}
-
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
deleted file mode 100644
index 82e5cab4..00000000
--- a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Helper code for parsing a protocol buffer
-
-package protolazy
-
-import (
-	"errors"
-	"fmt"
-	"io"
-
-	"google.golang.org/protobuf/encoding/protowire"
-)
-
-// BufferReader is a structure encapsulating a protobuf and a current position
-type BufferReader struct {
-	Buf []byte
-	Pos int
-}
-
-// NewBufferReader creates a new BufferReader from a protobuf
-func NewBufferReader(buf []byte) BufferReader {
-	return BufferReader{Buf: buf, Pos: 0}
-}
-
-var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
-var errOverflow = errors.New("proto: integer overflow")
-
-func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
-	i := b.Pos
-	l := len(b.Buf)
-
-	for shift := uint(0); shift < 64; shift += 7 {
-		if i >= l {
-			err = io.ErrUnexpectedEOF
-			return
-		}
-		v := b.Buf[i]
-		i++
-		x |= (uint64(v) & 0x7F) << shift
-		if v < 0x80 {
-			b.Pos = i
-			return
-		}
-	}
-
-	// The number is too large to represent in a 64-bit value.
-	err = errOverflow
-	return
-}
-
-// decodeVarint decodes a varint at the current position
-func (b *BufferReader) DecodeVarint() (x uint64, err error) {
-	i := b.Pos
-	buf := b.Buf
-
-	if i >= len(buf) {
-		return 0, io.ErrUnexpectedEOF
-	} else if buf[i] < 0x80 {
-		b.Pos++
-		return uint64(buf[i]), nil
-	} else if len(buf)-i < 10 {
-		return b.DecodeVarintSlow()
-	}
-
-	var v uint64
-	// we already checked the first byte
-	x = uint64(buf[i]) & 127
-	i++
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 7
-	if v < 128 {
-		goto done
-	}
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 14
-	if v < 128 {
-		goto done
-	}
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 21
-	if v < 128 {
-		goto done
-	}
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 28
-	if v < 128 {
-		goto done
-	}
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 35
-	if v < 128 {
-		goto done
-	}
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 42
-	if v < 128 {
-		goto done
-	}
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 49
-	if v < 128 {
-		goto done
-	}
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 56
-	if v < 128 {
-		goto done
-	}
-
-	v = uint64(buf[i])
-	i++
-	x |= (v & 127) << 63
-	if v < 128 {
-		goto done
-	}
-
-	return 0, errOverflow
-
-done:
-	b.Pos = i
-	return
-}
-
-// decodeVarint32 decodes a varint32 at the current position
-func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
-	i := b.Pos
-	buf := b.Buf
-
-	if i >= len(buf) {
-		return 0, io.ErrUnexpectedEOF
-	} else if buf[i] < 0x80 {
-		b.Pos++
-		return uint32(buf[i]), nil
-	} else if len(buf)-i < 5 {
-		v, err := b.DecodeVarintSlow()
-		return uint32(v), err
-	}
-
-	var v uint32
-	// we already checked the first byte
-	x = uint32(buf[i]) & 127
-	i++
-
-	v = uint32(buf[i])
-	i++
-	x |= (v & 127) << 7
-	if v < 128 {
-		goto done
-	}
-
-	v = uint32(buf[i])
-	i++
-	x |= (v & 127) << 14
-	if v < 128 {
-		goto done
-	}
-
-	v = uint32(buf[i])
-	i++
-	x |= (v & 127) << 21
-	if v < 128 {
-		goto done
-	}
-
-	v = uint32(buf[i])
-	i++
-	x |= (v & 127) << 28
-	if v < 128 {
-		goto done
-	}
-
-	return 0, errOverflow
-
-done:
-	b.Pos = i
-	return
-}
-
-// skipValue skips a value in the protobuf, based on the specified tag
-func (b *BufferReader) SkipValue(tag uint32) (err error) {
-	wireType := tag & 0x7
-	switch protowire.Type(wireType) {
-	case protowire.VarintType:
-		err = b.SkipVarint()
-	case protowire.Fixed64Type:
-		err = b.SkipFixed64()
-	case protowire.BytesType:
-		var n uint32
-		n, err = b.DecodeVarint32()
-		if err == nil {
-			err = b.Skip(int(n))
-		}
-	case protowire.StartGroupType:
-		err = b.SkipGroup(tag)
-	case protowire.Fixed32Type:
-		err = b.SkipFixed32()
-	default:
-		err = fmt.Errorf("Unexpected wire type (%d)", wireType)
-	}
-	return
-}
-
-// skipGroup skips a group with the specified tag.  It executes efficiently using a tag stack
-func (b *BufferReader) SkipGroup(tag uint32) (err error) {
-	tagStack := make([]uint32, 0, 16)
-	tagStack = append(tagStack, tag)
-	var n uint32
-	for len(tagStack) > 0 {
-		tag, err = b.DecodeVarint32()
-		if err != nil {
-			return err
-		}
-		switch protowire.Type(tag & 0x7) {
-		case protowire.VarintType:
-			err = b.SkipVarint()
-		case protowire.Fixed64Type:
-			err = b.Skip(8)
-		case protowire.BytesType:
-			n, err = b.DecodeVarint32()
-			if err == nil {
-				err = b.Skip(int(n))
-			}
-		case protowire.StartGroupType:
-			tagStack = append(tagStack, tag)
-		case protowire.Fixed32Type:
-			err = b.SkipFixed32()
-		case protowire.EndGroupType:
-			if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
-				tagStack = tagStack[:len(tagStack)-1]
-			} else {
-				err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
-					protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
-			}
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// skipVarint efficiently skips a varint
-func (b *BufferReader) SkipVarint() (err error) {
-	i := b.Pos
-
-	if len(b.Buf)-i < 10 {
-		// Use DecodeVarintSlow() to check for buffer overflow, but ignore result
-		if _, err := b.DecodeVarintSlow(); err != nil {
-			return err
-		}
-		return nil
-	}
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	i++
-
-	if b.Buf[i] < 0x80 {
-		goto out
-	}
-	return errOverflow
-
-out:
-	b.Pos = i + 1
-	return nil
-}
-
-// skip skips the specified number of bytes
-func (b *BufferReader) Skip(n int) (err error) {
-	if len(b.Buf) < b.Pos+n {
-		return io.ErrUnexpectedEOF
-	}
-	b.Pos += n
-	return
-}
-
-// skipFixed64 skips a fixed64
-func (b *BufferReader) SkipFixed64() (err error) {
-	return b.Skip(8)
-}
-
-// skipFixed32 skips a fixed32
-func (b *BufferReader) SkipFixed32() (err error) {
-	return b.Skip(4)
-}
-
-// skipBytes skips a set of bytes
-func (b *BufferReader) SkipBytes() (err error) {
-	n, err := b.DecodeVarint32()
-	if err != nil {
-		return err
-	}
-	return b.Skip(int(n))
-}
-
-// Done returns whether we are at the end of the protobuf
-func (b *BufferReader) Done() bool {
-	return b.Pos == len(b.Buf)
-}
-
-// Remaining returns how many bytes remain
-func (b *BufferReader) Remaining() int {
-	return len(b.Buf) - b.Pos
-}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
deleted file mode 100644
index ff4d4834..00000000
--- a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
+++ /dev/null
@@ -1,359 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package protolazy contains internal data structures for lazy message decoding.
-package protolazy
-
-import (
-	"fmt"
-	"sort"
-
-	"google.golang.org/protobuf/encoding/protowire"
-	piface "google.golang.org/protobuf/runtime/protoiface"
-)
-
-// IndexEntry is the structure for an index of the fields in a message of a
-// proto (not descending to sub-messages)
-type IndexEntry struct {
-	FieldNum uint32
-	// first byte of this tag/field
-	Start uint32
-	// first byte after a contiguous sequence of bytes for this tag/field, which could
-	// include a single encoding of the field, or multiple encodings for the field
-	End uint32
-	// True if this protobuf segment includes multiple encodings of the field
-	MultipleContiguous bool
-}
-
-// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
-//
-// Deprecated: Do not use. This will be deleted in the near future.
-type XXX_lazyUnmarshalInfo struct {
-	// Index of fields and their positions in the protobuf for this
-	// message.  Make index be a pointer to a slice so it can be updated
-	// atomically.  The index pointer is only set once (lazily when/if
-	// the index is first needed), and must always be SET and LOADED
-	// ATOMICALLY.
-	index *[]IndexEntry
-	// The protobuf associated with this lazily decoded message.  It is
-	// only set during proto.Unmarshal().  It doesn't need to be set and
-	// loaded atomically, since any simultaneous set (Unmarshal) and read
-	// (during a get) would already be a race in the app code.
-	Protobuf []byte
-	// The flags present when Unmarshal was originally called for this particular message
-	unmarshalFlags piface.UnmarshalInputFlags
-}
-
-// The Buffer and SetBuffer methods let v2/internal/impl interact with
-// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.
-
-// Buffer returns the lazy unmarshal buffer.
-//
-// Deprecated: Do not use. This will be deleted in the near future.
-func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
-	return lazy.Protobuf
-}
-
-// SetBuffer sets the lazy unmarshal buffer.
-//
-// Deprecated: Do not use. This will be deleted in the near future.
-func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
-	lazy.Protobuf = b
-}
-
-// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
-// The flags should reflect how Unmarshal was called.
-func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
-	lazy.unmarshalFlags = f
-}
-
-// UnmarshalFlags returns the original unmarshalInputFlags.
-func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
-	return lazy.unmarshalFlags
-}
-
-// AllowedPartial returns true if the user originally unmarshalled this message with
-// AllowPartial set to true
-func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
-	return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
-}
-
-func protoFieldNumber(tag uint32) uint32 {
-	return tag >> 3
-}
-
-// buildIndex builds an index of the specified protobuf, returning the index
-// array and an error.
-func buildIndex(buf []byte) ([]IndexEntry, error) {
-	index := make([]IndexEntry, 0, 16)
-	var lastProtoFieldNum uint32
-	var outOfOrder bool
-
-	var r BufferReader = NewBufferReader(buf)
-
-	for !r.Done() {
-		var tag uint32
-		var err error
-		var curPos = r.Pos
-		// INLINED: tag, err = r.DecodeVarint32()
-		{
-			i := r.Pos
-			buf := r.Buf
-
-			if i >= len(buf) {
-				return nil, errOutOfBounds
-			} else if buf[i] < 0x80 {
-				r.Pos++
-				tag = uint32(buf[i])
-			} else if r.Remaining() < 5 {
-				var v uint64
-				v, err = r.DecodeVarintSlow()
-				tag = uint32(v)
-			} else {
-				var v uint32
-				// we already checked the first byte
-				tag = uint32(buf[i]) & 127
-				i++
-
-				v = uint32(buf[i])
-				i++
-				tag |= (v & 127) << 7
-				if v < 128 {
-					goto done
-				}
-
-				v = uint32(buf[i])
-				i++
-				tag |= (v & 127) << 14
-				if v < 128 {
-					goto done
-				}
-
-				v = uint32(buf[i])
-				i++
-				tag |= (v & 127) << 21
-				if v < 128 {
-					goto done
-				}
-
-				v = uint32(buf[i])
-				i++
-				tag |= (v & 127) << 28
-				if v < 128 {
-					goto done
-				}
-
-				return nil, errOutOfBounds
-
-			done:
-				r.Pos = i
-			}
-		}
-		// DONE: tag, err = r.DecodeVarint32()
-
-		fieldNum := protoFieldNumber(tag)
-		if fieldNum < lastProtoFieldNum {
-			outOfOrder = true
-		}
-
-		// Skip the current value -- will skip over an entire group as well.
-		// INLINED: err = r.SkipValue(tag)
-		wireType := tag & 0x7
-		switch protowire.Type(wireType) {
-		case protowire.VarintType:
-			// INLINED: err = r.SkipVarint()
-			i := r.Pos
-
-			if len(r.Buf)-i < 10 {
-				// Use DecodeVarintSlow() to skip while
-				// checking for buffer overflow, but ignore result
-				_, err = r.DecodeVarintSlow()
-				goto out2
-			}
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			i++
-
-			if r.Buf[i] < 0x80 {
-				goto out
-			}
-			return nil, errOverflow
-		out:
-			r.Pos = i + 1
-			// DONE: err = r.SkipVarint()
-		case protowire.Fixed64Type:
-			err = r.SkipFixed64()
-		case protowire.BytesType:
-			var n uint32
-			n, err = r.DecodeVarint32()
-			if err == nil {
-				err = r.Skip(int(n))
-			}
-		case protowire.StartGroupType:
-			err = r.SkipGroup(tag)
-		case protowire.Fixed32Type:
-			err = r.SkipFixed32()
-		default:
-			err = fmt.Errorf("Unexpected wire type (%d)", wireType)
-		}
-		// DONE: err = r.SkipValue(tag)
-
-	out2:
-		if err != nil {
-			return nil, err
-		}
-		if fieldNum != lastProtoFieldNum {
-			index = append(index, IndexEntry{FieldNum: fieldNum,
-				Start: uint32(curPos),
-				End:   uint32(r.Pos)},
-			)
-		} else {
-			index[len(index)-1].End = uint32(r.Pos)
-			index[len(index)-1].MultipleContiguous = true
-		}
-		lastProtoFieldNum = fieldNum
-	}
-	if outOfOrder {
-		sort.Slice(index, func(i, j int) bool {
-			return index[i].FieldNum < index[j].FieldNum ||
-				(index[i].FieldNum == index[j].FieldNum &&
-					index[i].Start < index[j].Start)
-		})
-	}
-	return index, nil
-}
-
-func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) {
-	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
-	if multipleEntries != nil {
-		for _, entry := range multipleEntries {
-			size += int(entry.End - entry.Start)
-		}
-		return size
-	}
-	if !found {
-		return 0
-	}
-	return int(end - start)
-}
-
-func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) {
-	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
-	if multipleEntries != nil {
-		for _, entry := range multipleEntries {
-			b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
-		}
-		return b, true
-	}
-	if !found {
-		return nil, false
-	}
-	b = append(b, lazy.Protobuf[start:end]...)
-	return b, true
-}
-
-func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
-	atomicStoreIndex(&lazy.index, &index)
-}
-
-// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information
-// (including protobuf), returns startOffset/endOffset/found.
-func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
-	if lazy.Protobuf == nil {
-		// There is no backing protobuf for this message -- it was made from a builder
-		return 0, 0, false, false, nil
-	}
-	index := atomicLoadIndex(&lazy.index)
-	if index == nil {
-		r, err := buildIndex(lazy.Protobuf)
-		if err != nil {
-			panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
-		}
-		// lazy.index is a pointer to the slice returned by BuildIndex
-		index = &r
-		atomicStoreIndex(&lazy.index, index)
-	}
-	return lookupField(index, fieldNum)
-}
-
-// lookupField returns the offset at which the indicated field starts using
-// the index, offset immediately after field ends (including all instances of
-// a repeated field), and bools indicating if field was found and if there
-// are multiple encodings of the field in the byte range.
-//
-// To handle the uncommon case where there are repeated encodings for the same
-// field which are not consecutive in the protobuf (so we need to return
-// multiple start/end offsets), we also return a slice multipleEntries.  If
-// multipleEntries is non-nil, then multiple entries were found, and the
-// values in the slice should be used, rather than start/end/found.
-func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
-	// The pointer indexp to the index was already loaded atomically.
-	// The slice is uniquely associated with the pointer, so it doesn't
-	// need to be loaded atomically.
-	index := *indexp
-	for i, entry := range index {
-		if fieldNum == entry.FieldNum {
-			if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
-				// Handle the uncommon case where there are
-				// repeated entries for the same field which
-				// are not contiguous in the protobuf.
-				multiple := make([]IndexEntry, 1, 2)
-				multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
-				i++
-				for i < len(index) && index[i].FieldNum == fieldNum {
-					multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
-					i++
-				}
-				return 0, 0, false, false, multiple
-
-			}
-			return entry.Start, entry.End, true, entry.MultipleContiguous, nil
-		}
-		if fieldNum < entry.FieldNum {
-			return 0, 0, false, false, nil
-		}
-	}
-	return 0, 0, false, false, nil
-}
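
For orientation, the removed lazy.go above centers on building a byte-range index over a message's wire format (IndexEntry/buildIndex/lookupField) and answering field lookups from it. The following standalone sketch shows the same indexing idea using only the public protowire package; fieldSpan and indexFields are illustrative names for this sketch, not the vendored implementation:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// fieldSpan mirrors the idea of the removed IndexEntry: the byte range that
// one field occurrence occupies inside the wire-format buffer.
type fieldSpan struct {
	Num        protowire.Number
	Start, End int
}

// indexFields walks the buffer once and records a span per field occurrence,
// similar in spirit to the removed buildIndex (without the inlined fast paths).
func indexFields(buf []byte) ([]fieldSpan, error) {
	var spans []fieldSpan
	for pos := 0; pos < len(buf); {
		num, typ, n := protowire.ConsumeTag(buf[pos:])
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		m := protowire.ConsumeFieldValue(num, typ, buf[pos+n:])
		if m < 0 {
			return nil, protowire.ParseError(m)
		}
		spans = append(spans, fieldSpan{Num: num, Start: pos, End: pos + n + m})
		pos += n + m
	}
	return spans, nil
}

func main() {
	// Hand-build a tiny message: field 1 = varint 150, field 2 = bytes "hi".
	var buf []byte
	buf = protowire.AppendTag(buf, 1, protowire.VarintType)
	buf = protowire.AppendVarint(buf, 150)
	buf = protowire.AppendTag(buf, 2, protowire.BytesType)
	buf = protowire.AppendString(buf, "hi")

	spans, err := indexFields(buf)
	if err != nil {
		panic(err)
	}
	for _, s := range spans {
		fmt.Printf("field %d spans bytes [%d, %d)\n", s.Num, s.Start, s.End)
	}
}
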
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
deleted file mode 100644
index dc2a64ca..00000000
--- a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protolazy
-
-import (
-	"sync/atomic"
-	"unsafe"
-)
-
-func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
-	return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
-	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
new file mode 100644
index 00000000..a1f6f333
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build purego || appengine
+// +build purego appengine
+
+package strs
+
+import pref "google.golang.org/protobuf/reflect/protoreflect"
+
+func UnsafeString(b []byte) string {
+	return string(b)
+}
+
+func UnsafeBytes(s string) []byte {
+	return []byte(s)
+}
+
+type Builder struct{}
+
+func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
+	return prefix.Append(name)
+}
+
+func (*Builder) MakeString(b []byte) string {
+	return string(b)
+}
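
The strings_pure.go file added above is the copying fallback selected by the purego/appengine build tags; its unsafe siblings avoid the copy by aliasing memory. A minimal sketch of that zero-copy side, assuming Go 1.20+ (unsafe.String / unsafe.SliceData); it only illustrates the trade-off and is not the vendored code:

package main

import (
	"fmt"
	"unsafe"
)

// zeroCopyString reinterprets b as a string without copying. It is only safe
// if b is never mutated afterwards; that is the trade-off the build-tagged
// strs files select between (the purego variant above copies, the unsafe
// variants alias the memory).
func zeroCopyString(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	return unsafe.String(unsafe.SliceData(b), len(b))
}

func main() {
	b := []byte("hello")
	s := zeroCopyString(b)
	fmt.Println(s) // hello
	b[0] = 'H'     // the mutation is visible through s, hence "unsafe"
	fmt.Println(s) // Hello
}
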
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index 832a7988..a008acd0 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,7 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !go1.21
+//go:build !purego && !appengine && !go1.21
+// +build !purego,!appengine,!go1.21
 
 package strs
 
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
index 1ffddf68..60166f2b 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -2,7 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.21
+//go:build !purego && !appengine && go1.21
+// +build !purego,!appengine,go1.21
 
 package strs
 
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 3018450d..dbbf1f68 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
 //  10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 36
-	Patch      = 1
+	Minor      = 34
+	Patch      = 2
 	PreRelease = ""
 )
 
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index a3b5e142..d75a6534 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -47,12 +47,6 @@ type UnmarshalOptions struct {
 	// RecursionLimit limits how deeply messages may be nested.
 	// If zero, a default limit is applied.
 	RecursionLimit int
-
-	//
-	// NoLazyDecoding turns off lazy decoding, which otherwise is enabled by
-	// default. Lazy decoding only affects submessages (annotated with [lazy =
-	// true] in the .proto file) within messages that use the Opaque API.
-	NoLazyDecoding bool
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
@@ -110,16 +104,6 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
 		if o.DiscardUnknown {
 			in.Flags |= protoiface.UnmarshalDiscardUnknown
 		}
-
-		if !allowPartial {
-			// This does not affect how current unmarshal functions work, it just allows them
-			// to record this for lazy the decoding case.
-			in.Flags |= protoiface.UnmarshalCheckRequired
-		}
-		if o.NoLazyDecoding {
-			in.Flags |= protoiface.UnmarshalNoLazyDecoding
-		}
-
 		out, err = methods.Unmarshal(in)
 	} else {
 		o.RecursionLimit--
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index f0473c58..1f847bcc 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -63,8 +63,7 @@ type MarshalOptions struct {
 	// options (except for UseCachedSize itself).
 	//
 	// 2. The message and all its submessages have not changed in any
-	// way since the Size call. For lazily decoded messages, accessing
-	// a message results in decoding the message, which is a change.
+	// way since the Size call.
 	//
 	// If either of these invariants is violated,
 	// the results are undefined and may include panics or corrupted output.
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index c36d4a9c..1a0be1b0 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,7 +8,6 @@ import (
 	"reflect"
 
 	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/runtime/protoiface"
 )
 
 // Equal reports whether two messages are equal,
@@ -52,14 +51,6 @@ func Equal(x, y Message) bool {
 	if mx.IsValid() != my.IsValid() {
 		return false
 	}
-
-	// Only one of the messages needs to implement the fast-path for it to work.
-	pmx := protoMethods(mx)
-	pmy := protoMethods(my)
-	if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
-		return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
-	}
-
 	vx := protoreflect.ValueOfMessage(mx)
 	vy := protoreflect.ValueOfMessage(my)
 	return vx.Equal(vy)
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index 78445d11..d248f292 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -39,48 +39,6 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
 // If the field is unpopulated, it returns the default value for
 // scalars and an immutable, empty value for lists or messages.
 // It panics if xt does not extend m.
-//
-// The type of the value is dependent on the field type of the extension.
-// For extensions generated by protoc-gen-go, the Go type is as follows:
-//
-//	╔═══════════════════╤═════════════════════════╗
-//	║ Go type           │ Protobuf kind           ║
-//	╠═══════════════════╪═════════════════════════╣
-//	║ bool              │ bool                    ║
-//	║ int32             │ int32, sint32, sfixed32 ║
-//	║ int64             │ int64, sint64, sfixed64 ║
-//	║ uint32            │ uint32, fixed32         ║
-//	║ uint64            │ uint64, fixed64         ║
-//	║ float32           │ float                   ║
-//	║ float64           │ double                  ║
-//	║ string            │ string                  ║
-//	║ []byte            │ bytes                   ║
-//	║ protoreflect.Enum │ enum                    ║
-//	║ proto.Message     │ message, group          ║
-//	╚═══════════════════╧═════════════════════════╝
-//
-// The protoreflect.Enum and proto.Message types are the concrete Go type
-// associated with the named enum or message. Repeated fields are represented
-// using a Go slice of the base element type.
-//
-// If a generated extension descriptor variable is directly passed to
-// GetExtension, then the call should be followed immediately by a
-// type assertion to the expected output value. For example:
-//
-//	mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
-//
-// This pattern enables static analysis tools to verify that the asserted type
-// matches the Go type associated with the extension field and
-// also enables a possible future migration to a type-safe extension API.
-//
-// Since singular messages are the most common extension type, the pattern of
-// calling HasExtension followed by GetExtension may be simplified to:
-//
-//	if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
-//	    ... // make use of mm
-//	}
-//
-// The mm variable is non-nil if and only if HasExtension reports true.
 func GetExtension(m Message, xt protoreflect.ExtensionType) any {
 	// Treat nil message interface as an empty message; return the default.
 	if m == nil {
@@ -93,35 +51,6 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any {
 // SetExtension stores the value of an extension field.
 // It panics if m is invalid, xt does not extend m, or if type of v
 // is invalid for the specified extension field.
-//
-// The type of the value is dependent on the field type of the extension.
-// For extensions generated by protoc-gen-go, the Go type is as follows:
-//
-//	╔═══════════════════╤═════════════════════════╗
-//	║ Go type           │ Protobuf kind           ║
-//	╠═══════════════════╪═════════════════════════╣
-//	║ bool              │ bool                    ║
-//	║ int32             │ int32, sint32, sfixed32 ║
-//	║ int64             │ int64, sint64, sfixed64 ║
-//	║ uint32            │ uint32, fixed32         ║
-//	║ uint64            │ uint64, fixed64         ║
-//	║ float32           │ float                   ║
-//	║ float64           │ double                  ║
-//	║ string            │ string                  ║
-//	║ []byte            │ bytes                   ║
-//	║ protoreflect.Enum │ enum                    ║
-//	║ proto.Message     │ message, group          ║
-//	╚═══════════════════╧═════════════════════════╝
-//
-// The protoreflect.Enum and proto.Message types are the concrete Go type
-// associated with the named enum or message. Repeated fields are represented
-// using a Go slice of the base element type.
-//
-// If a generated extension descriptor variable is directly passed to
-// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
-// concrete type that matches the expected Go type for the extension descriptor
-// so that static analysis tools can verify type correctness.
-// This also enables a possible future migration to a type-safe extension API.
 func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
 	xd := xt.TypeDescriptor()
 	pv := xt.ValueOf(v)
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index c8675806..052fb5ae 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -12,19 +12,11 @@ import (
 )
 
 // Size returns the size in bytes of the wire-format encoding of m.
-//
-// Note that Size might return more bytes than Marshal will write in the case of
-// lazily decoded messages that arrive in non-minimal wire format: see
-// https://protobuf.dev/reference/go/size/ for more details.
 func Size(m Message) int {
 	return MarshalOptions{}.Size(m)
 }
 
 // Size returns the size in bytes of the wire-format encoding of m.
-//
-// Note that Size might return more bytes than Marshal will write in the case of
-// lazily decoded messages that arrive in non-minimal wire format: see
-// https://protobuf.dev/reference/go/size/ for more details.
 func (o MarshalOptions) Size(m Message) int {
 	// Treat a nil message interface as an empty message; nothing to output.
 	if m == nil {
diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
deleted file mode 100644
index 267fd0f1..00000000
--- a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-// ValueOrNil returns nil if has is false, or a pointer to a new variable
-// containing the value returned by the specified getter.
-//
-// This function is similar to the wrappers (proto.Int32(), proto.String(),
-// etc.), but is generic (works for any field type) and works with the hasser
-// and getter of a field, as opposed to a value.
-//
-// This is convenient when populating builder fields.
-//
-// Example:
-//
-//	hop := attr.GetDirectHop()
-//	injectedRoute := ripb.InjectedRoute_builder{
-//	  Prefixes: route.GetPrefixes(),
-//	  NextHop:  proto.ValueOrNil(hop.HasAddress(), hop.GetAddress),
-//	}
-func ValueOrNil[T any](has bool, getter func() T) *T {
-	if !has {
-		return nil
-	}
-	v := getter()
-	return &v
-}
-
-// ValueOrDefault returns the protobuf message val if val is not nil, otherwise
-// it returns a pointer to an empty val message.
-//
-// This function allows for translating code from the old Open Struct API to the
-// new Opaque API.
-//
-// The old Open Struct API represented oneof fields with a wrapper struct:
-//
-//	var signedImg *accountpb.SignedImage
-//	profile := &accountpb.Profile{
-//		// The Avatar oneof will be set, with an empty SignedImage.
-//		Avatar: &accountpb.Profile_SignedImage{signedImg},
-//	}
-//
-// The new Opaque API treats oneof fields like regular fields, there are no more
-// wrapper structs:
-//
-//	var signedImg *accountpb.SignedImage
-//	profile := &accountpb.Profile{}
-//	profile.SetSignedImage(signedImg)
-//
-// For convenience, the Opaque API also offers Builders, which allow for a
-// direct translation of struct initialization. However, because Builders use
-// nilness to represent field presence (but there is no non-nil wrapper struct
-// anymore), Builders cannot distinguish between an unset oneof and a set oneof
-// with nil message. The above code would need to be translated with help of the
-// ValueOrDefault function to retain the same behavior:
-//
-//	var signedImg *accountpb.SignedImage
-//	return &accountpb.Profile_builder{
-//		SignedImage: proto.ValueOrDefault(signedImg),
-//	}.Build()
-func ValueOrDefault[T interface {
-	*P
-	Message
-}, P any](val T) T {
-	if val == nil {
-		return T(new(P))
-	}
-	return val
-}
-
-// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of
-// type []byte.
-func ValueOrDefaultBytes(val []byte) []byte {
-	if val == nil {
-		return []byte{}
-	}
-	return val
-}
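
The removed wrapperopaque.go provided generic builder helpers; the core pattern behind ValueOrNil (turning a has/getter pair into an optional pointer) can be sketched standalone as below. The hop type and its methods are hypothetical stand-ins for a generated message:

package main

import "fmt"

// valueOrNil mirrors the removed proto.ValueOrNil helper: it turns a
// has/getter pair into an optional pointer, which is convenient when
// populating builder-style structs.
func valueOrNil[T any](has bool, getter func() T) *T {
	if !has {
		return nil
	}
	v := getter()
	return &v
}

// hop stands in for a generated message; the field and method names are made
// up for this sketch.
type hop struct {
	addr    string
	hasAddr bool
}

func (h hop) HasAddress() bool   { return h.hasAddr }
func (h hop) GetAddress() string { return h.addr }

func main() {
	set := hop{addr: "10.0.0.1", hasAddr: true}
	var unset hop

	if p := valueOrNil(set.HasAddress(), set.GetAddress); p != nil {
		fmt.Println("set:", *p) // set: 10.0.0.1
	}
	fmt.Println("unset is nil:", valueOrNil(unset.HasAddress(), unset.GetAddress) == nil) // true
}
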
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 69a05050..8fbecb4f 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -13,8 +13,6 @@
 package protodesc
 
 import (
-	"strings"
-
 	"google.golang.org/protobuf/internal/editionssupport"
 	"google.golang.org/protobuf/internal/errors"
 	"google.golang.org/protobuf/internal/filedesc"
@@ -104,17 +102,13 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
 	default:
 		return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
 	}
+	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
+		return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
+	}
 	f.L1.Path = fd.GetName()
 	if f.L1.Path == "" {
 		return nil, errors.New("file path must be populated")
 	}
-	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
-		// Allow cmd/protoc-gen-go/testdata to use any edition for easier
-		// testing of upcoming edition features.
-		if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
-			return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
-		}
-	}
 	f.L1.Package = protoreflect.FullName(fd.GetPackage())
 	if !f.L1.Package.IsValid() && f.L1.Package != "" {
 		return nil, errors.New("invalid package: %q", f.L1.Package)
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index ebcb4a8a..85617554 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -150,7 +150,6 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
 			opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
 			f.L1.Options = func() protoreflect.ProtoMessage { return opts }
 			f.L1.IsWeak = opts.GetWeak()
-			f.L1.IsLazy = opts.GetLazy()
 			if opts.Packed != nil {
 				f.L1.EditionFeatures.IsPacked = opts.GetPacked()
 			}
@@ -215,9 +214,6 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
 		if xd.JsonName != nil {
 			x.L2.StringName.InitJSON(xd.GetJsonName())
 		}
-		if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
-			x.L1.Kind = protoreflect.GroupKind
-		}
 	}
 	return xs, nil
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index bf0a0ccd..804830ed 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -14,7 +14,7 @@ import (
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/types/descriptorpb"
-	"google.golang.org/protobuf/types/gofeaturespb"
+	gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
 )
 
 var defaults = &descriptorpb.FeatureSetDefaults{}
@@ -43,8 +43,6 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
 		return descriptorpb.Edition_EDITION_PROTO3
 	case filedesc.Edition2023:
 		return descriptorpb.Edition_EDITION_2023
-	case filedesc.Edition2024:
-		return descriptorpb.Edition_EDITION_2024
 	default:
 		panic(fmt.Sprintf("unknown value for edition: %v", ed))
 	}
@@ -129,12 +127,6 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp
 		if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil {
 			parentFS.GenerateLegacyUnmarshalJSON = *luje
 		}
-		if sep := goFeatures.StripEnumPrefix; sep != nil {
-			parentFS.StripEnumPrefix = int(*sep)
-		}
-		if al := goFeatures.ApiLevel; al != nil {
-			parentFS.APILevel = int(*al)
-		}
 	}
 
 	return parentFS
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index 742cb518..d5d5af6e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,7 +23,6 @@ type (
 		Unmarshal        func(unmarshalInput) (unmarshalOutput, error)
 		Merge            func(mergeInput) mergeOutput
 		CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
-		Equal            func(equalInput) equalOutput
 	}
 	supportFlags = uint64
 	sizeInput    = struct {
@@ -76,13 +75,4 @@ type (
 	checkInitializedOutput = struct {
 		pragma.NoUnkeyedLiterals
 	}
-	equalInput = struct {
-		pragma.NoUnkeyedLiterals
-		MessageA Message
-		MessageB Message
-	}
-	equalOutput = struct {
-		pragma.NoUnkeyedLiterals
-		Equal bool
-	}
 )
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a4b78ace..a7b0d06f 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -152,7 +152,7 @@ type Message interface {
 	// This method may return nil.
 	//
 	// The returned methods type is identical to
-	// [google.golang.org/protobuf/runtime/protoiface.Methods].
+	// google.golang.org/protobuf/runtime/protoiface.Methods.
 	// Consult the protoiface package documentation for details.
 	ProtoMethods() *methods
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
new file mode 100644
index 00000000..75f83a2a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build purego || appengine
+// +build purego appengine
+
+package protoreflect
+
+import "google.golang.org/protobuf/internal/pragma"
+
+type valueType int
+
+const (
+	nilType valueType = iota
+	boolType
+	int32Type
+	int64Type
+	uint32Type
+	uint64Type
+	float32Type
+	float64Type
+	stringType
+	bytesType
+	enumType
+	ifaceType
+)
+
+// value is a union where only one type can be represented at a time.
+// This uses a distinct field for each type. This is type safe in Go, but
+// occupies more memory than necessary (72B).
+type value struct {
+	pragma.DoNotCompare // 0B
+
+	typ   valueType // 8B
+	num   uint64    // 8B
+	str   string    // 16B
+	bin   []byte    // 24B
+	iface any       // 16B
+}
+
+func valueOfString(v string) Value {
+	return Value{typ: stringType, str: v}
+}
+func valueOfBytes(v []byte) Value {
+	return Value{typ: bytesType, bin: v}
+}
+func valueOfIface(v any) Value {
+	return Value{typ: ifaceType, iface: v}
+}
+
+func (v Value) getString() string {
+	return v.str
+}
+func (v Value) getBytes() []byte {
+	return v.bin
+}
+func (v Value) getIface() any {
+	return v.iface
+}
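
The added value_pure.go gives protoreflect.Value a copy-based representation for purego/appengine builds; callers are unaffected because they only go through the exported Value API. A tiny illustrative sketch of that caller-side view:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Values are constructed and read through the public API; whether the
	// purego representation above or the unsafe one backs them is an
	// internal, build-tag-selected detail.
	s := protoreflect.ValueOfString("example")
	n := protoreflect.ValueOfInt64(42)

	fmt.Println(s.String()) // example
	fmt.Println(n.Int())    // 42
}
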
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index 0015fcb3..7f3583ea 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,7 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !go1.21
+//go:build !purego && !appengine && !go1.21
+// +build !purego,!appengine,!go1.21
 
 package protoreflect
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index 479527b5..f7d38699 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -2,7 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.21
+//go:build !purego && !appengine && go1.21
+// +build !purego,!appengine,go1.21
 
 package protoreflect
 
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 28e9e9f0..44cf467d 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,9 +39,6 @@ type Methods = struct {
 
 	// CheckInitialized returns an error if any required fields in the message are not set.
 	CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
-
-	// Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
-	Equal func(EqualInput) EqualOutput
 }
 
 // SupportFlags indicate support for optional features.
@@ -122,22 +119,6 @@ type UnmarshalInputFlags = uint8
 
 const (
 	UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
-
-	// UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer.
-	// The unmarshaller must not modify the contents of the buffer.
-	UnmarshalAliasBuffer
-
-	// UnmarshalValidated indicates that validation has already been
-	// performed on the input buffer.
-	UnmarshalValidated
-
-	// UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are
-	// initialized.
-	UnmarshalCheckRequired
-
-	// UnmarshalNoLazyDecoding is set if this unmarshal operation should not use
-	// lazy decoding, even when otherwise available.
-	UnmarshalNoLazyDecoding
 )
 
 // UnmarshalOutputFlags are output from the Unmarshal method.
@@ -185,18 +166,3 @@ type CheckInitializedInput = struct {
 type CheckInitializedOutput = struct {
 	pragma.NoUnkeyedLiterals
 }
-
-// EqualInput is input to the Equal method.
-type EqualInput = struct {
-	pragma.NoUnkeyedLiterals
-
-	MessageA protoreflect.Message
-	MessageB protoreflect.Message
-}
-
-// EqualOutput is output from the Equal method.
-type EqualOutput = struct {
-	pragma.NoUnkeyedLiterals
-
-	Equal bool
-}
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
index 93df1b56..4a1ab7fb 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -15,7 +15,6 @@ import (
 	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/filetype"
 	"google.golang.org/protobuf/internal/impl"
-	"google.golang.org/protobuf/internal/protolazy"
 )
 
 // UnsafeEnabled specifies whether package unsafe can be used.
@@ -40,9 +39,6 @@ type (
 	ExtensionFieldV1 = impl.ExtensionField
 
 	Pointer = impl.Pointer
-
-	LazyUnmarshalInfo  = *protolazy.XXX_lazyUnmarshalInfo
-	RaceDetectHookData = impl.RaceDetectHookData
 )
 
 var X impl.Export
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index a551e7ae..9403eb07 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -69,7 +69,7 @@ const (
 	Edition_EDITION_2023 Edition = 1000
 	Edition_EDITION_2024 Edition = 1001
 	// Placeholder editions for testing feature resolution.  These should not be
-	// used or relied on outside of tests.
+	// used or relied on outside of tests.
 	Edition_EDITION_1_TEST_ONLY     Edition = 1
 	Edition_EDITION_2_TEST_ONLY     Edition = 2
 	Edition_EDITION_99997_TEST_ONLY Edition = 99997
@@ -577,6 +577,8 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
 }
 
 // If set to RETENTION_SOURCE, the option will be omitted from the binary.
+// Note: as of January 2023, support for this is in progress and does not yet
+// have an effect (b/264593489).
 type FieldOptions_OptionRetention int32
 
 const (
@@ -638,7 +640,8 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) {
 
 // This indicates the types of entities that the field may apply to when used
 // as an option. If it is unset, then the field may be freely used as an
-// option on any kind of entity.
+// option on any kind of entity. Note: as of January 2023, support for this is
+// in progress and does not yet have an effect (b/264593489).
 type FieldOptions_OptionTargetType int32
 
 const (
@@ -1205,18 +1208,20 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) {
 // The protocol compiler can output a FileDescriptorSet containing the .proto
 // files it parses.
 type FileDescriptorSet struct {
-	state           protoimpl.MessageState `protogen:"open.v1"`
-	File            []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
-	extensionFields protoimpl.ExtensionFields
-	unknownFields   protoimpl.UnknownFields
-	sizeCache       protoimpl.SizeCache
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
 }
 
 func (x *FileDescriptorSet) Reset() {
 	*x = FileDescriptorSet{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FileDescriptorSet) String() string {
@@ -1227,7 +1232,7 @@ func (*FileDescriptorSet) ProtoMessage() {}
 
 func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1251,9 +1256,12 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto {
 
 // Describes a complete .proto file.
 type FileDescriptorProto struct {
-	state   protoimpl.MessageState `protogen:"open.v1"`
-	Name    *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`       // file name, relative to root of source tree
-	Package *string                `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`       // file name, relative to root of source tree
+	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
 	// Names of files imported by this file.
 	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
 	// Indexes of the public imported files in the dependency list above.
@@ -1278,16 +1286,16 @@ type FileDescriptorProto struct {
 	// If `edition` is present, this value must be "editions".
 	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
 	// The edition of the proto file.
-	Edition       *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
 }
 
 func (x *FileDescriptorProto) Reset() {
 	*x = FileDescriptorProto{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FileDescriptorProto) String() string {
@@ -1298,7 +1306,7 @@ func (*FileDescriptorProto) ProtoMessage() {}
 
 func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1406,7 +1414,10 @@ func (x *FileDescriptorProto) GetEdition() Edition {
 
 // Describes a message type.
 type DescriptorProto struct {
-	state          protoimpl.MessageState            `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
 	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
@@ -1418,16 +1429,16 @@ type DescriptorProto struct {
 	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved field names, which may not be used by fields in the same message.
 	// A given name may only be reserved once.
-	ReservedName  []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
 }
 
 func (x *DescriptorProto) Reset() {
 	*x = DescriptorProto{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *DescriptorProto) String() string {
@@ -1438,7 +1449,7 @@ func (*DescriptorProto) ProtoMessage() {}
 
 func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1524,7 +1535,11 @@ func (x *DescriptorProto) GetReservedName() []string {
 }
 
 type ExtensionRangeOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 	// For external users: DO NOT USE. We are in the process of open sourcing
@@ -1536,10 +1551,7 @@ type ExtensionRangeOptions struct {
 	// The verification state of the range.
 	// TODO: flip the default to DECLARATION once all empty ranges
 	// are marked as UNVERIFIED.
-	Verification    *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
-	extensionFields protoimpl.ExtensionFields
-	unknownFields   protoimpl.UnknownFields
-	sizeCache       protoimpl.SizeCache
+	Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
 }
 
 // Default values for ExtensionRangeOptions fields.
@@ -1549,9 +1561,11 @@ const (
 
 func (x *ExtensionRangeOptions) Reset() {
 	*x = ExtensionRangeOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *ExtensionRangeOptions) String() string {
@@ -1562,7 +1576,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {}
 
 func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1607,7 +1621,10 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica
 
 // Describes a field within a message.
 type FieldDescriptorProto struct {
-	state  protoimpl.MessageState      `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
 	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
@@ -1659,15 +1676,15 @@ type FieldDescriptorProto struct {
 	// Proto2 optional fields do not set this flag, because they already indicate
 	// optional with `LABEL_OPTIONAL`.
 	Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"`
-	unknownFields  protoimpl.UnknownFields
-	sizeCache      protoimpl.SizeCache
 }
 
 func (x *FieldDescriptorProto) Reset() {
 	*x = FieldDescriptorProto{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FieldDescriptorProto) String() string {
@@ -1678,7 +1695,7 @@ func (*FieldDescriptorProto) ProtoMessage() {}
 
 func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1772,18 +1789,21 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool {
 
 // Describes a oneof.
 type OneofDescriptorProto struct {
-	state         protoimpl.MessageState `protogen:"open.v1"`
-	Name          *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Options       *OneofOptions          `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
-	unknownFields protoimpl.UnknownFields
+	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name    *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
 }
 
 func (x *OneofDescriptorProto) Reset() {
 	*x = OneofDescriptorProto{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *OneofDescriptorProto) String() string {
@@ -1794,7 +1814,7 @@ func (*OneofDescriptorProto) ProtoMessage() {}
 
 func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1825,7 +1845,10 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions {
 
 // Describes an enum type.
 type EnumDescriptorProto struct {
-	state   protoimpl.MessageState      `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
 	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
@@ -1835,16 +1858,16 @@ type EnumDescriptorProto struct {
 	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved enum value names, which may not be reused. A given name may only
 	// be reserved once.
-	ReservedName  []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
 }
 
 func (x *EnumDescriptorProto) Reset() {
 	*x = EnumDescriptorProto{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *EnumDescriptorProto) String() string {
@@ -1855,7 +1878,7 @@ func (*EnumDescriptorProto) ProtoMessage() {}
 
 func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1907,19 +1930,22 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
 
 // Describes a value within an enum.
 type EnumValueDescriptorProto struct {
-	state         protoimpl.MessageState `protogen:"open.v1"`
-	Name          *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Number        *int32                 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
-	Options       *EnumValueOptions      `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	unknownFields protoimpl.UnknownFields
+	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name    *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number  *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
 }
 
 func (x *EnumValueDescriptorProto) Reset() {
 	*x = EnumValueDescriptorProto{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *EnumValueDescriptorProto) String() string {
@@ -1930,7 +1956,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {}
 
 func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1968,19 +1994,22 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
 
 // Describes a service.
 type ServiceDescriptorProto struct {
-	state         protoimpl.MessageState   `protogen:"open.v1"`
-	Name          *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Method        []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
-	Options       *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	unknownFields protoimpl.UnknownFields
+	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name    *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Method  []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+	Options *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
 }
 
 func (x *ServiceDescriptorProto) Reset() {
 	*x = ServiceDescriptorProto{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *ServiceDescriptorProto) String() string {
@@ -1991,7 +2020,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {}
 
 func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2029,8 +2058,11 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions {
 
 // Describes a method of a service.
 type MethodDescriptorProto struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
-	Name  *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
 	// Input and output type names.  These are resolved in the same way as
 	// FieldDescriptorProto.type_name, but must refer to a message type.
 	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
@@ -2040,8 +2072,6 @@ type MethodDescriptorProto struct {
 	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
 	// Identifies if server streams multiple server messages
 	ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
-	unknownFields   protoimpl.UnknownFields
-	sizeCache       protoimpl.SizeCache
 }
 
 // Default values for MethodDescriptorProto fields.
@@ -2052,9 +2082,11 @@ const (
 
 func (x *MethodDescriptorProto) Reset() {
 	*x = MethodDescriptorProto{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *MethodDescriptorProto) String() string {
@@ -2065,7 +2097,7 @@ func (*MethodDescriptorProto) ProtoMessage() {}
 
 func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2123,7 +2155,11 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool {
 }
 
 type FileOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// Sets the Java package where classes generated from this .proto will be
 	// placed.  By default, the proto package is used, but this is often
 	// inappropriate because proto packages do not normally start with backwards
@@ -2215,9 +2251,6 @@ type FileOptions struct {
 	// The parser stores options it doesn't recognize here.
 	// See the documentation for the "Options" section above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	extensionFields     protoimpl.ExtensionFields
-	unknownFields       protoimpl.UnknownFields
-	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for FileOptions fields.
@@ -2234,9 +2267,11 @@ const (
 
 func (x *FileOptions) Reset() {
 	*x = FileOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FileOptions) String() string {
@@ -2247,7 +2282,7 @@ func (*FileOptions) ProtoMessage() {}
 
 func (x *FileOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2411,7 +2446,11 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type MessageOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// Set true to use the old proto1 MessageSet wire format for extensions.
 	// This is provided for backwards-compatibility with the MessageSet wire
 	// format.  You should not use this for any other reason:  It's less
@@ -2484,9 +2523,6 @@ type MessageOptions struct {
 	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	extensionFields     protoimpl.ExtensionFields
-	unknownFields       protoimpl.UnknownFields
-	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for MessageOptions fields.
@@ -2498,9 +2534,11 @@ const (
 
 func (x *MessageOptions) Reset() {
 	*x = MessageOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *MessageOptions) String() string {
@@ -2511,7 +2549,7 @@ func (*MessageOptions) ProtoMessage() {}
 
 func (x *MessageOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2577,14 +2615,17 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type FieldOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
-	// NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead.
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// The ctype option instructs the C++ code generator to use a different
 	// representation of the field than it normally would.  See the specific
 	// options below.  This option is only implemented to support use of
 	// [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of
-	// type "bytes" in the open source release.
-	// TODO: make ctype actually deprecated.
+	// type "bytes" in the open source release -- sorry, we'll try to include
+	// other types in a future version!
 	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
 	// The packed option can be enabled for repeated primitive fields to enable
 	// a more efficient representation on the wire. Rather than repeatedly
@@ -2651,9 +2692,6 @@ type FieldOptions struct {
 	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	extensionFields     protoimpl.ExtensionFields
-	unknownFields       protoimpl.UnknownFields
-	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for FieldOptions fields.
@@ -2669,9 +2707,11 @@ const (
 
 func (x *FieldOptions) Reset() {
 	*x = FieldOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FieldOptions) String() string {
@@ -2682,7 +2722,7 @@ func (*FieldOptions) ProtoMessage() {}
 
 func (x *FieldOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2796,21 +2836,24 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type OneofOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// Any features defined in the specific edition.
 	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	extensionFields     protoimpl.ExtensionFields
-	unknownFields       protoimpl.UnknownFields
-	sizeCache           protoimpl.SizeCache
 }
 
 func (x *OneofOptions) Reset() {
 	*x = OneofOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *OneofOptions) String() string {
@@ -2821,7 +2864,7 @@ func (*OneofOptions) ProtoMessage() {}
 
 func (x *OneofOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2851,7 +2894,11 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type EnumOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// Set this option to true to allow mapping different tag names to the same
 	// value.
 	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
@@ -2873,9 +2920,6 @@ type EnumOptions struct {
 	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	extensionFields     protoimpl.ExtensionFields
-	unknownFields       protoimpl.UnknownFields
-	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for EnumOptions fields.
@@ -2885,9 +2929,11 @@ const (
 
 func (x *EnumOptions) Reset() {
 	*x = EnumOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *EnumOptions) String() string {
@@ -2898,7 +2944,7 @@ func (*EnumOptions) ProtoMessage() {}
 
 func (x *EnumOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2950,7 +2996,11 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type EnumValueOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// Is this enum value deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
 	// for the enum value, or it will be completely ignored; in the very least,
@@ -2966,9 +3016,6 @@ type EnumValueOptions struct {
 	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	extensionFields     protoimpl.ExtensionFields
-	unknownFields       protoimpl.UnknownFields
-	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for EnumValueOptions fields.
@@ -2979,9 +3026,11 @@ const (
 
 func (x *EnumValueOptions) Reset() {
 	*x = EnumValueOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *EnumValueOptions) String() string {
@@ -2992,7 +3041,7 @@ func (*EnumValueOptions) ProtoMessage() {}
 
 func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3043,7 +3092,11 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type ServiceOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// Any features defined in the specific edition.
 	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
 	// Is this service deprecated?
@@ -3053,9 +3106,6 @@ type ServiceOptions struct {
 	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	extensionFields     protoimpl.ExtensionFields
-	unknownFields       protoimpl.UnknownFields
-	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for ServiceOptions fields.
@@ -3065,9 +3115,11 @@ const (
 
 func (x *ServiceOptions) Reset() {
 	*x = ServiceOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *ServiceOptions) String() string {
@@ -3078,7 +3130,7 @@ func (*ServiceOptions) ProtoMessage() {}
 
 func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3115,7 +3167,11 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
 }
 
 type MethodOptions struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	// Is this method deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
 	// for the method, or it will be completely ignored; in the very least,
@@ -3126,9 +3182,6 @@ type MethodOptions struct {
 	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	extensionFields     protoimpl.ExtensionFields
-	unknownFields       protoimpl.UnknownFields
-	sizeCache           protoimpl.SizeCache
 }
 
 // Default values for MethodOptions fields.
@@ -3139,9 +3192,11 @@ const (
 
 func (x *MethodOptions) Reset() {
 	*x = MethodOptions{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *MethodOptions) String() string {
@@ -3152,7 +3207,7 @@ func (*MethodOptions) ProtoMessage() {}
 
 func (x *MethodOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3202,8 +3257,11 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
 // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
 // in them.
 type UninterpretedOption struct {
-	state protoimpl.MessageState          `protogen:"open.v1"`
-	Name  []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
 	// The value of the uninterpreted option, in whatever type the tokenizer
 	// identified it as during parsing. Exactly one of these should be set.
 	IdentifierValue  *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
@@ -3212,15 +3270,15 @@ type UninterpretedOption struct {
 	DoubleValue      *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
 	StringValue      []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
 	AggregateValue   *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
-	unknownFields    protoimpl.UnknownFields
-	sizeCache        protoimpl.SizeCache
 }
 
 func (x *UninterpretedOption) Reset() {
 	*x = UninterpretedOption{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *UninterpretedOption) String() string {
@@ -3231,7 +3289,7 @@ func (*UninterpretedOption) ProtoMessage() {}
 
 func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3302,23 +3360,26 @@ func (x *UninterpretedOption) GetAggregateValue() string {
 // be designed and implemented to handle this, hopefully before we ever hit a
 // conflict here.
 type FeatureSet struct {
-	state                 protoimpl.MessageState            `protogen:"open.v1"`
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
 	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
 	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
 	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
 	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
 	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
 	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
-	extensionFields       protoimpl.ExtensionFields
-	unknownFields         protoimpl.UnknownFields
-	sizeCache             protoimpl.SizeCache
 }
 
 func (x *FeatureSet) Reset() {
 	*x = FeatureSet{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FeatureSet) String() string {
@@ -3329,7 +3390,7 @@ func (*FeatureSet) ProtoMessage() {}
 
 func (x *FeatureSet) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3391,7 +3452,10 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
 // feature resolution. The resolution with this object becomes a simple search
 // for the closest matching edition, followed by proto merges.
 type FeatureSetDefaults struct {
-	state    protoimpl.MessageState                         `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"`
 	// The minimum supported edition (inclusive) when this was constructed.
 	// Editions before this will not have defaults.
@@ -3399,15 +3463,15 @@ type FeatureSetDefaults struct {
 	// The maximum known edition (inclusive) when this was constructed. Editions
 	// after this will not have reliable defaults.
 	MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"`
-	unknownFields  protoimpl.UnknownFields
-	sizeCache      protoimpl.SizeCache
 }
 
 func (x *FeatureSetDefaults) Reset() {
 	*x = FeatureSetDefaults{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FeatureSetDefaults) String() string {
@@ -3418,7 +3482,7 @@ func (*FeatureSetDefaults) ProtoMessage() {}
 
 func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3457,7 +3521,10 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition {
 // Encapsulates information about the original source file from which a
 // FileDescriptorProto was generated.
 type SourceCodeInfo struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// A Location identifies a piece of source code in a .proto file which
 	// corresponds to a particular definition.  This information is intended
 	// to be useful to IDEs, code indexers, documentation generators, and similar
@@ -3506,17 +3573,16 @@ type SourceCodeInfo struct {
 	//   - Code which tries to interpret locations should probably be designed to
 	//     ignore those that it doesn't understand, as more types of locations could
 	//     be recorded in the future.
-	Location        []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
-	extensionFields protoimpl.ExtensionFields
-	unknownFields   protoimpl.UnknownFields
-	sizeCache       protoimpl.SizeCache
+	Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
 }
 
 func (x *SourceCodeInfo) Reset() {
 	*x = SourceCodeInfo{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *SourceCodeInfo) String() string {
@@ -3527,7 +3593,7 @@ func (*SourceCodeInfo) ProtoMessage() {}
 
 func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3553,19 +3619,22 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
 // file. A GeneratedCodeInfo message is associated with only one generated
 // source file, but may contain references to different source .proto files.
 type GeneratedCodeInfo struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// An Annotation connects some span of text in generated code to an element
 	// of its generating .proto file.
-	Annotation    []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
 }
 
 func (x *GeneratedCodeInfo) Reset() {
 	*x = GeneratedCodeInfo{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *GeneratedCodeInfo) String() string {
@@ -3576,7 +3645,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3599,19 +3668,22 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
 }
 
 type DescriptorProto_ExtensionRange struct {
-	state         protoimpl.MessageState `protogen:"open.v1"`
-	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
-	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
-	Options       *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
-	unknownFields protoimpl.UnknownFields
+	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Start   *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+	End     *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
+	Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
 }
 
 func (x *DescriptorProto_ExtensionRange) Reset() {
 	*x = DescriptorProto_ExtensionRange{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *DescriptorProto_ExtensionRange) String() string {
@@ -3622,7 +3694,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
 
 func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3662,18 +3734,21 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
 // fields or extension ranges in the same message. Reserved ranges may
 // not overlap.
 type DescriptorProto_ReservedRange struct {
-	state         protoimpl.MessageState `protogen:"open.v1"`
-	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
-	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
-	unknownFields protoimpl.UnknownFields
+	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+	End   *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Exclusive.
 }
 
 func (x *DescriptorProto_ReservedRange) Reset() {
 	*x = DescriptorProto_ReservedRange{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *DescriptorProto_ReservedRange) String() string {
@@ -3684,7 +3759,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {}
 
 func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3714,7 +3789,10 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 {
 }
 
 type ExtensionRangeOptions_Declaration struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// The extension number declared within the extension range.
 	Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"`
 	// The fully-qualified name of the extension field. There must be a leading
@@ -3730,16 +3808,16 @@ type ExtensionRangeOptions_Declaration struct {
 	Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"`
 	// If true, indicates that the extension must be defined as repeated.
 	// Otherwise the extension must be defined as optional.
-	Repeated      *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
 }
 
 func (x *ExtensionRangeOptions_Declaration) Reset() {
 	*x = ExtensionRangeOptions_Declaration{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *ExtensionRangeOptions_Declaration) String() string {
@@ -3750,7 +3828,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
 
 func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3807,18 +3885,21 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool {
 // is inclusive such that it can appropriately represent the entire int32
 // domain.
 type EnumDescriptorProto_EnumReservedRange struct {
-	state         protoimpl.MessageState `protogen:"open.v1"`
-	Start         *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
-	End           *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Inclusive.
-	unknownFields protoimpl.UnknownFields
+	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+	End   *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`     // Inclusive.
 }
 
 func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
 	*x = EnumDescriptorProto_EnumReservedRange{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *EnumDescriptorProto_EnumReservedRange) String() string {
@@ -3829,7 +3910,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
 
 func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3859,18 +3940,21 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
 }
 
 type FieldOptions_EditionDefault struct {
-	state         protoimpl.MessageState `protogen:"open.v1"`
-	Edition       *Edition               `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
-	Value         *string                `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
-	unknownFields protoimpl.UnknownFields
+	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+	Value   *string  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
 }
 
 func (x *FieldOptions_EditionDefault) Reset() {
 	*x = FieldOptions_EditionDefault{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FieldOptions_EditionDefault) String() string {
@@ -3881,7 +3965,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {}
 
 func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3912,7 +3996,10 @@ func (x *FieldOptions_EditionDefault) GetValue() string {
 
 // Information about the support window of a feature.
 type FieldOptions_FeatureSupport struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// The edition that this feature was first available in.  In editions
 	// earlier than this one, the default assigned to EDITION_LEGACY will be
 	// used, and proto files will not be able to override it.
@@ -3927,15 +4014,15 @@ type FieldOptions_FeatureSupport struct {
 	// this one, the last default assigned will be used, and proto files will
 	// not be able to override it.
 	EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"`
-	unknownFields  protoimpl.UnknownFields
-	sizeCache      protoimpl.SizeCache
 }
 
 func (x *FieldOptions_FeatureSupport) Reset() {
 	*x = FieldOptions_FeatureSupport{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FieldOptions_FeatureSupport) String() string {
@@ -3946,7 +4033,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {}
 
 func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3995,18 +4082,21 @@ func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition {
 // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents
 // "foo.(bar.baz).moo".
 type UninterpretedOption_NamePart struct {
-	state         protoimpl.MessageState `protogen:"open.v1"`
-	NamePart      *string                `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
-	IsExtension   *bool                  `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
-	unknownFields protoimpl.UnknownFields
+	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	NamePart    *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension *bool   `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
 }
 
 func (x *UninterpretedOption_NamePart) Reset() {
 	*x = UninterpretedOption_NamePart{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *UninterpretedOption_NamePart) String() string {
@@ -4017,7 +4107,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {}
 
 func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4051,21 +4141,24 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
 // the defaults at the closest matching edition ordered at or before it should
 // be used.  This field must be in strict ascending order by edition.
 type FeatureSetDefaults_FeatureSetEditionDefault struct {
-	state   protoimpl.MessageState `protogen:"open.v1"`
-	Edition *Edition               `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
 	// Defaults of features that can be overridden in this edition.
 	OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"`
 	// Defaults of features that can't be overridden in this edition.
 	FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
 }
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
 	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
@@ -4076,7 +4169,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4113,7 +4206,10 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *Featur
 }
 
 type SourceCodeInfo_Location struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// Identifies which part of the FileDescriptorProto was defined at this
 	// location.
 	//
@@ -4205,15 +4301,15 @@ type SourceCodeInfo_Location struct {
 	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
 	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
 	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
-	unknownFields           protoimpl.UnknownFields
-	sizeCache               protoimpl.SizeCache
 }
 
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *SourceCodeInfo_Location) String() string {
@@ -4224,7 +4320,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4275,7 +4371,10 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
 }
 
 type GeneratedCodeInfo_Annotation struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// Identifies the element in the original source .proto file. This field
 	// is formatted the same as SourceCodeInfo.Location.path.
 	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
@@ -4287,17 +4386,17 @@ type GeneratedCodeInfo_Annotation struct {
 	// Identifies the ending offset in bytes in the generated code that
 	// relates to the identified object. The end offset should be one past
 	// the last relevant byte (so the length of the text = end - begin).
-	End           *int32                                 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
-	Semantic      *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	End      *int32                                 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+	Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
 }
 
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *GeneratedCodeInfo_Annotation) String() string {
@@ -4308,7 +4407,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4364,474 +4463,494 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
 	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
 	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
 	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+	0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
 	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
 	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
 	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
-	0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
-	0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
-	0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
-	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
-	0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
-	0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28,
-	0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
-	0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65,
-	0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c,
-	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70,
-	0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d,
-	0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
-	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07,
-	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63,
-	0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
-	0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
-	0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f,
-	0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65,
-	0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d,
-	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e,
-	0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
+	0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65,
+	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65,
+	0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c,
+	0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20,
+	0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e,
+	0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65,
+	0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e,
+	0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43,
+	0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64,
-	0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
-	0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
-	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d,
-	0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a,
-	0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61,
-	0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
-	0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
-	0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37,
-	0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
-	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
-	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64,
-	0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
-	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61,
-	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
-	0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
-	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65,
-	0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a,
-	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02,
-	0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94,
-	0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16,
-	0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06,
-	0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e,
-	0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
-	0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
-	0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18,
-	0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a,
-	0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45,
-	0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
-	0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c,
-	0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e,
-	0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
+	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54,
+	0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+	0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e,
+	0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+	0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67,
 	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
 	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b,
-	0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75,
-	0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
-	0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
-	0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28,
-	0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a,
-	0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a,
-	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f,
-	0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
-	0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
-	0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
-	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a,
-	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a,
-	0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d,
-	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a,
-	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f,
-	0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36,
-	0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54,
-	0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e,
-	0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
-	0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c,
-	0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45,
-	0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
-	0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e,
+	0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36,
+	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+	0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
+	0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
+	0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06,
+	0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65,
+	0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a,
+	0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e,
+	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a,
+	0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
+	0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
+	0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
 	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
-	0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
-	0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
-	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
-	0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
-	0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61,
-	0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
-	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65,
-	0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d,
-	0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
-	0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
-	0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b,
-	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16,
-	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65,
-	0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
-	0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
-	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54,
-	0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a,
-	0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f,
-	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
-	0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
-	0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63,
-	0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74,
-	0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61,
-	0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d,
-	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61,
-	0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a,
-	0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
-	0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14,
-	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65,
-	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48,
-	0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69,
-	0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61,
-	0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12,
-	0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18,
-	0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65,
-	0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
-	0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
-	0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b,
-	0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
-	0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72,
-	0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61,
-	0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
-	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65,
-	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e,
-	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
-	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
-	0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
-	0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
-	0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
-	0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
-	0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
-	0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
-	0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
-	0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
-	0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
-	0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
-	0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
-	0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
-	0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
-	0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
-	0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
-	0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
-	0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
-	0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
-	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
-	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69,
-	0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45,
-	0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45,
-	0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49,
-	0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
-	0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70,
-	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
-	0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
-	0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65,
-	0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d,
-	0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72,
-	0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63,
-	0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65,
-	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65,
-	0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45,
-	0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
-	0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
-	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b,
-	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
-	0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08,
-	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+	0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a,
+	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
 	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
-	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
-	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
-	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
-	0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08,
-	0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79,
-	0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53,
-	0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
-	0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61,
-	0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
-	0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
-	0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a,
-	0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65,
-	0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
-	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69,
-	0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
-	0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
-	0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
-	0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74,
-	0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
-	0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
-	0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65,
-	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18,
-	0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a,
-	0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
-	0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
+	0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
+	0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
+	0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+	0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
+	0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40,
+	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
+	0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
+	0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63,
+	0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
+	0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+	0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
+	0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88,
+	0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c,
+	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c,
+	0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73,
+	0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73,
+	0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
+	0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
+	0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66,
+	0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b,
+	0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a,
+	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08,
+	0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65,
+	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a,
+	0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+	0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+	0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a,
+	0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66,
+	0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f,
+	0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20,
+	0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12,
+	0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6,
+	0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50,
+	0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
+	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54,
+	0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a,
+	0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b,
+	0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a,
+	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a,
+	0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12,
+	0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12,
+	0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d,
+	0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12,
+	0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32,
+	0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45,
+	0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49,
+	0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
+	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c,
+	0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e,
+	0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45,
+	0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45,
+	0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14,
+	0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
+	0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+	0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36,
+	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
+	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+	0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+	0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
+	0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e,
+	0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
+	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
+	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
+	0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+	0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01,
+	0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06,
+	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
+	0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68,
+	0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74,
+	0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74,
+	0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74,
+	0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75,
+	0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+	0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
+	0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+	0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
+	0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65,
+	0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+	0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+	0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b,
+	0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50,
+	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f,
+	0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43,
+	0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61,
+	0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18,
+	0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61,
+	0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12,
+	0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
+	0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68,
+	0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61,
+	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e,
+	0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74,
+	0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18,
+	0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61,
+	0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66,
+	0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f,
+	0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f,
+	0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d,
+	0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63,
+	0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61,
+	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01,
+	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e,
+	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15,
+	0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+	0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65,
+	0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12,
+	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47,
+	0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25,
+	0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01,
+	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
+	0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62,
+	0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a,
+	0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41,
+	0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c,
+	0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69,
+	0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+	0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68,
+	0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+	0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
+	0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65,
+	0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c,
+	0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70,
+	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34,
+	0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e,
+	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
+	0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+	0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63,
+	0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79,
+	0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
+	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70,
+	0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50,
+	0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49,
+	0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e,
+	0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
+	0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70,
+	0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+	0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+	0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+	0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14,
+	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f,
+	0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64,
+	0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61,
+	0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+	0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64,
+	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+	0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70,
+	0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61,
+	0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+	0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e,
+	0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73,
+	0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72,
+	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37,
+	0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+	0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
+	0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
+	0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04,
+	0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a,
+	0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16,
+	0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06,
+	0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65,
+	0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
-	0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
-	0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53,
+	0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12,
+	0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+	0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e,
+	0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20,
+	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65,
+	0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+	0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+	0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a,
+	0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c,
+	0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01,
+	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67,
+	0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
+	0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+	0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13,
+	0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
+	0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a,
+	0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+	0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+	0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+	0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+	0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
+	0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f,
+	0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+	0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+	0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
+	0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
+	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75,
+	0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65,
+	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a,
+	0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12,
+	0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f,
+	0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
+	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e,
+	0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74,
+	0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11,
+	0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+	0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12,
+	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69,
+	0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65,
+	0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
+	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+	0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a,
+	0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f,
+	0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50,
+	0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65,
+	0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12,
+	0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d,
+	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a,
+	0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
+	0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
+	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e,
+	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14,
+	0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52,
+	0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54,
+	0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
+	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+	0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+	0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47,
+	0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f,
+	0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
+	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
+	0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+	0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52,
+	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05,
+	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
+	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59,
+	0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+	0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54,
+	0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
+	0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
+	0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f,
+	0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
 	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
 	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
 	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
 	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a,
-	0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
-	0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69,
-	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a,
-	0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75,
-	0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72,
-	0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77,
-	0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65,
-	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
-	0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f,
-	0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
-	0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
-	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44,
-	0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45,
-	0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
-	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
-	0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
-	0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15,
-	0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
-	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49,
-	0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10,
-	0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
-	0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72,
-	0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
-	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
-	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
-	0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03,
-	0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14,
-	0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
-	0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07,
-	0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52,
-	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10,
-	0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
-	0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65,
-	0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
+	0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e,
+	0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c,
+	0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
+	0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+	0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+	0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
+	0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
+	0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+	0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+	0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
 	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
 	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
 	0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
@@ -4840,296 +4959,274 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
 	0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
 	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
 	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
-	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
-	0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c,
-	0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
-	0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
-	0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
-	0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
-	0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
-	0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
-	0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10,
-	0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64,
-	0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
-	0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
-	0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02,
+	0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
 	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
 	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
-	0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
-	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
-	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99,
-	0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70,
-	0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
-	0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f,
-	0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+	0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61,
+	0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
+	0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f,
+	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18,
+	0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70,
+	0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70,
+	0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
 	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
 	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
 	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
 	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
-	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a,
-	0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65,
-	0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f,
-	0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
-	0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a,
-	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55,
-	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52,
-	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
-	0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
-	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f,
-	0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c,
-	0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61,
-	0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
-	0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01,
-	0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
-	0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
-	0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
-	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67,
-	0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e,
-	0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
-	0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65,
-	0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78,
-	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
-	0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65,
-	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e,
-	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
+	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08,
+	0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54,
-	0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01,
-	0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12,
-	0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08,
-	0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70,
-	0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
-	0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
-	0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88,
-	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
-	0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43,
-	0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65,
-	0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69,
-	0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61,
-	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04,
-	0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2,
-	0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
-	0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
-	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
-	0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48,
-	0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08,
-	0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
-	0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61,
-	0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2,
-	0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f,
-	0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c,
-	0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73,
-	0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
-	0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
-	0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
-	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
-	0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
-	0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
-	0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
-	0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
-	0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
-	0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
-	0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
-	0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
-	0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
-	0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
-	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
-	0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
-	0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
-	0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01,
-	0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
-	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
-	0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
-	0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
-	0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49,
-	0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
-	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f,
-	0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09,
-	0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47,
-	0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10,
-	0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
-	0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8,
-	0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
-	0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
-	0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
-	0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+	0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
+	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75,
+	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
+	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02,
+	0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+	0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65,
+	0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79,
+	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e,
+	0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d,
+	0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08,
+	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
 	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
-	0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
-	0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65,
-	0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c,
-	0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78,
-	0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d,
-	0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08,
-	0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
-	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
-	0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
-	0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
-	0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
-	0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
-	0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
-	0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
-	0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
-	0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
-	0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08,
-	0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11,
-	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
-	0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
-	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
-	0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10,
-	0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
-	0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
-	0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69,
-	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10,
-	0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64,
-	0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
-	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
-	0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61,
-	0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
-	0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
-	0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7,
-	0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44,
-	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
-	0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43,
-	0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49,
-	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11,
-	0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8,
-	0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32,
-	0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a,
-	0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
-	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
-	0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
-	0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
-	0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
-	0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
-	0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
-	0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
-	0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
-	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
-	0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
-	0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
+	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+	0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
+	0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e,
+	0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f,
+	0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10,
+	0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10,
+	0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a,
+	0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72,
+	0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74,
+	0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69,
+	0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10,
+	0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
+	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65,
+	0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21,
+	0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
+	0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61,
+	0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a,
+	0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d,
+	0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61,
+	0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73,
+	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01,
+	0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c,
+	0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c,
+	0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c,
+	0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66,
+	0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09,
+	0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
+	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75,
+	0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01,
+	0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01,
+	0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07,
+	0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72,
+	0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e,
+	0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74,
+	0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42,
+	0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
+	0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50,
+	0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15,
+	0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63,
+	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61,
+	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38,
+	0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98,
+	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6,
+	0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2,
+	0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+	0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32,
+	0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73,
+	0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01,
+	0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47,
+	0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01,
+	0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
+	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
+	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72,
+	0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01,
+	0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53,
+	0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05,
+	0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a,
+	0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46,
+	0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e,
+	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49,
+	0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49,
+	0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45,
+	0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d,
+	0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50,
+	0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f,
+	0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10,
+	0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65,
+	0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45,
+	0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43,
+	0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+	0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45,
+	0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66,
+	0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55,
+	0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55,
+	0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49,
+	0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04,
+	0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45,
+	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41,
+	0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
+	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f,
+	0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45,
+	0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f,
+	0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f,
+	0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
+	0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c,
+	0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52,
+	0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e,
+	0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07,
+	0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+	0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65,
+	0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+	0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74,
+	0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f,
+	0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d,
+	0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d,
+	0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e,
+	0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69,
+	0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
+	0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
+	0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f,
+	0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61,
+	0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66,
+	0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+	0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a,
+	0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+	0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75,
+	0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce,
+	0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70,
+	0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70,
+	0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28,
+	0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c,
+	0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f,
+	0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
+	0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65,
+	0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64,
+	0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73,
+	0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44,
+	0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22,
+	0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64,
+	0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65,
+	0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e,
+	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28,
+	0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05,
+	0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67,
+	0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52,
+	0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
+	0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+	0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08,
+	0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61,
+	0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07,
+	0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53,
+	0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13,
+	0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
+	0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c,
+	0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54,
+	0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a,
+	0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10,
+	0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30,
+	0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
+	0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54,
+	0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
+	0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54,
+	0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44,
+	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54,
+	0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49,
+	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
+	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54,
+	0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f,
+	0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49,
+	0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13,
+	0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa,
+	0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
 }
 
 var (
@@ -5288,6 +5385,424 @@ func file_google_protobuf_descriptor_proto_init() {
 	if File_google_protobuf_descriptor_proto != nil {
 		return
 	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*FileDescriptorSet); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any {
+			switch v := v.(*FileDescriptorProto); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any {
+			switch v := v.(*DescriptorProto); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any {
+			switch v := v.(*ExtensionRangeOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any {
+			switch v := v.(*FieldDescriptorProto); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any {
+			switch v := v.(*OneofDescriptorProto); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any {
+			switch v := v.(*EnumDescriptorProto); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any {
+			switch v := v.(*EnumValueDescriptorProto); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any {
+			switch v := v.(*ServiceDescriptorProto); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any {
+			switch v := v.(*MethodDescriptorProto); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any {
+			switch v := v.(*FileOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any {
+			switch v := v.(*MessageOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any {
+			switch v := v.(*FieldOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any {
+			switch v := v.(*OneofOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any {
+			switch v := v.(*EnumOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any {
+			switch v := v.(*EnumValueOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any {
+			switch v := v.(*ServiceOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any {
+			switch v := v.(*MethodOptions); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any {
+			switch v := v.(*UninterpretedOption); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any {
+			switch v := v.(*FeatureSet); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any {
+			switch v := v.(*FeatureSetDefaults); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any {
+			switch v := v.(*SourceCodeInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any {
+			switch v := v.(*GeneratedCodeInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any {
+			switch v := v.(*DescriptorProto_ExtensionRange); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any {
+			switch v := v.(*DescriptorProto_ReservedRange); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any {
+			switch v := v.(*ExtensionRangeOptions_Declaration); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any {
+			switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any {
+			switch v := v.(*FieldOptions_EditionDefault); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any {
+			switch v := v.(*FieldOptions_FeatureSupport); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any {
+			switch v := v.(*UninterpretedOption_NamePart); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any {
+			switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any {
+			switch v := v.(*SourceCodeInfo_Location); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any {
+			switch v := v.(*GeneratedCodeInfo_Annotation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index e0b72eaf..a2ca940c 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -18,150 +18,22 @@ import (
 	sync "sync"
 )
 
-type GoFeatures_APILevel int32
-
-const (
-	// API_LEVEL_UNSPECIFIED results in selecting the OPEN API,
-	// but needs to be a separate value to distinguish between
-	// an explicitly set api level or a missing api level.
-	GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0
-	GoFeatures_API_OPEN              GoFeatures_APILevel = 1
-	GoFeatures_API_HYBRID            GoFeatures_APILevel = 2
-	GoFeatures_API_OPAQUE            GoFeatures_APILevel = 3
-)
-
-// Enum value maps for GoFeatures_APILevel.
-var (
-	GoFeatures_APILevel_name = map[int32]string{
-		0: "API_LEVEL_UNSPECIFIED",
-		1: "API_OPEN",
-		2: "API_HYBRID",
-		3: "API_OPAQUE",
-	}
-	GoFeatures_APILevel_value = map[string]int32{
-		"API_LEVEL_UNSPECIFIED": 0,
-		"API_OPEN":              1,
-		"API_HYBRID":            2,
-		"API_OPAQUE":            3,
-	}
-)
-
-func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel {
-	p := new(GoFeatures_APILevel)
-	*p = x
-	return p
-}
-
-func (x GoFeatures_APILevel) String() string {
-	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor()
-}
-
-func (GoFeatures_APILevel) Type() protoreflect.EnumType {
-	return &file_google_protobuf_go_features_proto_enumTypes[0]
-}
-
-func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber {
-	return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error {
-	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
-	if err != nil {
-		return err
-	}
-	*x = GoFeatures_APILevel(num)
-	return nil
-}
-
-// Deprecated: Use GoFeatures_APILevel.Descriptor instead.
-func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) {
-	return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0}
-}
-
-type GoFeatures_StripEnumPrefix int32
-
-const (
-	GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED   GoFeatures_StripEnumPrefix = 0
-	GoFeatures_STRIP_ENUM_PREFIX_KEEP          GoFeatures_StripEnumPrefix = 1
-	GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2
-	GoFeatures_STRIP_ENUM_PREFIX_STRIP         GoFeatures_StripEnumPrefix = 3
-)
-
-// Enum value maps for GoFeatures_StripEnumPrefix.
-var (
-	GoFeatures_StripEnumPrefix_name = map[int32]string{
-		0: "STRIP_ENUM_PREFIX_UNSPECIFIED",
-		1: "STRIP_ENUM_PREFIX_KEEP",
-		2: "STRIP_ENUM_PREFIX_GENERATE_BOTH",
-		3: "STRIP_ENUM_PREFIX_STRIP",
-	}
-	GoFeatures_StripEnumPrefix_value = map[string]int32{
-		"STRIP_ENUM_PREFIX_UNSPECIFIED":   0,
-		"STRIP_ENUM_PREFIX_KEEP":          1,
-		"STRIP_ENUM_PREFIX_GENERATE_BOTH": 2,
-		"STRIP_ENUM_PREFIX_STRIP":         3,
-	}
-)
-
-func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix {
-	p := new(GoFeatures_StripEnumPrefix)
-	*p = x
-	return p
-}
-
-func (x GoFeatures_StripEnumPrefix) String() string {
-	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor()
-}
-
-func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType {
-	return &file_google_protobuf_go_features_proto_enumTypes[1]
-}
-
-func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber {
-	return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error {
-	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
-	if err != nil {
-		return err
-	}
-	*x = GoFeatures_StripEnumPrefix(num)
-	return nil
-}
-
-// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead.
-func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) {
-	return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1}
-}
-
 type GoFeatures struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// Whether or not to generate the deprecated UnmarshalJSON method for enums.
-	// Can only be true for proto using the Open Struct api.
 	LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"`
-	// One of OPEN, HYBRID or OPAQUE.
-	ApiLevel        *GoFeatures_APILevel        `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"`
-	StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"`
-	unknownFields   protoimpl.UnknownFields
-	sizeCache       protoimpl.SizeCache
 }
 
 func (x *GoFeatures) Reset() {
 	*x = GoFeatures{}
-	mi := &file_google_protobuf_go_features_proto_msgTypes[0]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_go_features_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *GoFeatures) String() string {
@@ -172,7 +44,7 @@ func (*GoFeatures) ProtoMessage() {}
 
 func (x *GoFeatures) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_go_features_proto_msgTypes[0]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -194,20 +66,6 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
 	return false
 }
 
-func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel {
-	if x != nil && x.ApiLevel != nil {
-		return *x.ApiLevel
-	}
-	return GoFeatures_API_LEVEL_UNSPECIFIED
-}
-
-func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix {
-	if x != nil && x.StripEnumPrefix != nil {
-		return *x.StripEnumPrefix
-	}
-	return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED
-}
-
 var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
 	{
 		ExtendedType:  (*descriptorpb.FeatureSet)(nil),
@@ -232,7 +90,7 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{
 	0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f,
+	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f,
 	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
 	0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
 	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
@@ -245,44 +103,14 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{
 	0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
 	0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
 	0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
-	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69,
-	0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70,
-	0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49,
-	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01,
-	0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
-	0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f,
-	0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2,
-	0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
-	0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72,
-	0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e,
-	0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70,
-	0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52,
-	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
-	0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74,
-	0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a,
-	0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49,
-	0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
-	0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e,
-	0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44,
-	0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45,
-	0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d,
-	0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f,
-	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50,
-	0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52,
-	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
-	0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45,
-	0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52,
-	0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54,
-	0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f,
-	0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
+	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12,
+	0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
 }
 
 var (
@@ -297,24 +125,19 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
 	return file_google_protobuf_go_features_proto_rawDescData
 }
 
-var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
 var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
 var file_google_protobuf_go_features_proto_goTypes = []any{
-	(GoFeatures_APILevel)(0),        // 0: pb.GoFeatures.APILevel
-	(GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix
-	(*GoFeatures)(nil),              // 2: pb.GoFeatures
-	(*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet
+	(*GoFeatures)(nil),              // 0: pb.GoFeatures
+	(*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet
 }
 var file_google_protobuf_go_features_proto_depIdxs = []int32{
-	0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel
-	1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix
-	3, // 2: pb.go:extendee -> google.protobuf.FeatureSet
-	2, // 3: pb.go:type_name -> pb.GoFeatures
-	4, // [4:4] is the sub-list for method output_type
-	4, // [4:4] is the sub-list for method input_type
-	3, // [3:4] is the sub-list for extension type_name
-	2, // [2:3] is the sub-list for extension extendee
-	0, // [0:2] is the sub-list for field type_name
+	1, // 0: pb.go:extendee -> google.protobuf.FeatureSet
+	0, // 1: pb.go:type_name -> pb.GoFeatures
+	2, // [2:2] is the sub-list for method output_type
+	2, // [2:2] is the sub-list for method input_type
+	1, // [1:2] is the sub-list for extension type_name
+	0, // [0:1] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_go_features_proto_init() }
@@ -322,19 +145,32 @@ func file_google_protobuf_go_features_proto_init() {
 	if File_google_protobuf_go_features_proto != nil {
 		return
 	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*GoFeatures); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_google_protobuf_go_features_proto_rawDesc,
-			NumEnums:      2,
+			NumEnums:      0,
 			NumMessages:   1,
 			NumExtensions: 1,
 			NumServices:   0,
 		},
 		GoTypes:           file_google_protobuf_go_features_proto_goTypes,
 		DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs,
-		EnumInfos:         file_google_protobuf_go_features_proto_enumTypes,
 		MessageInfos:      file_google_protobuf_go_features_proto_msgTypes,
 		ExtensionInfos:    file_google_protobuf_go_features_proto_extTypes,
 	}.Build()
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 191552cc..7172b43d 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -210,7 +210,10 @@ import (
 //	  "value": "1.212s"
 //	}
 type Any struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// A URL/resource name that uniquely identifies the type of the serialized
 	// protocol buffer message. This string must contain at least
 	// one "/" character. The last segment of the URL's path must represent
@@ -241,9 +244,7 @@ type Any struct {
 	// used with implementation specific semantics.
 	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
 	// Must be a valid serialized protocol buffer of the above specified type.
-	Value         []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
 }
 
 // New marshals src into a new Any instance.
@@ -367,9 +368,11 @@ func (x *Any) UnmarshalNew() (proto.Message, error) {
 
 func (x *Any) Reset() {
 	*x = Any{}
-	mi := &file_google_protobuf_any_proto_msgTypes[0]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_any_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *Any) String() string {
@@ -380,7 +383,7 @@ func (*Any) ProtoMessage() {}
 
 func (x *Any) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_any_proto_msgTypes[0]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -458,6 +461,20 @@ func file_google_protobuf_any_proto_init() {
 	if File_google_protobuf_any_proto != nil {
 		return
 	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*Any); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 34d76e6c..1b71bcd9 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -141,7 +141,10 @@ import (
 // be expressed in JSON format as "3.000000001s", and 3 seconds and 1
 // microsecond should be expressed in JSON format as "3.000001s".
 type Duration struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// Signed seconds of the span of time. Must be from -315,576,000,000
 	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
 	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
@@ -152,9 +155,7 @@ type Duration struct {
 	// of one second or more, a non-zero value for the `nanos` field must be
 	// of the same sign as the `seconds` field. Must be from -999,999,999
 	// to +999,999,999 inclusive.
-	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
 }
 
 // New constructs a new Duration from the provided time.Duration.
@@ -244,9 +245,11 @@ func (x *Duration) check() uint {
 
 func (x *Duration) Reset() {
 	*x = Duration{}
-	mi := &file_google_protobuf_duration_proto_msgTypes[0]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_duration_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *Duration) String() string {
@@ -257,7 +260,7 @@ func (*Duration) ProtoMessage() {}
 
 func (x *Duration) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_duration_proto_msgTypes[0]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -336,6 +339,20 @@ func file_google_protobuf_duration_proto_init() {
 	if File_google_protobuf_duration_proto != nil {
 		return
 	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*Duration); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 9550109a..83a5a645 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -170,7 +170,10 @@ import (
 // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
 // ) to obtain a formatter capable of generating timestamps in this format.
 type Timestamp struct {
-	state protoimpl.MessageState `protogen:"open.v1"`
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
 	// Represents seconds of UTC time since Unix epoch
 	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
 	// 9999-12-31T23:59:59Z inclusive.
@@ -179,9 +182,7 @@ type Timestamp struct {
 	// second values with fractions must still have non-negative nanos values
 	// that count forward in time. Must be from 0 to 999,999,999
 	// inclusive.
-	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-	unknownFields protoimpl.UnknownFields
-	sizeCache     protoimpl.SizeCache
+	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
 }
 
 // Now constructs a new Timestamp from the current time.
@@ -253,9 +254,11 @@ func (x *Timestamp) check() uint {
 
 func (x *Timestamp) Reset() {
 	*x = Timestamp{}
-	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
-	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-	ms.StoreMessageInfo(mi)
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
 }
 
 func (x *Timestamp) String() string {
@@ -266,7 +269,7 @@ func (*Timestamp) ProtoMessage() {}
 
 func (x *Timestamp) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
-	if x != nil {
+	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -345,6 +348,20 @@ func file_google_protobuf_timestamp_proto_init() {
 	if File_google_protobuf_timestamp_proto != nil {
 		return
 	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*Timestamp); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/modules.txt b/vendor/modules.txt
index be0766c7..203481eb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -15,9 +15,9 @@ github.com/davecgh/go-spew/spew
 # github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
 ## explicit
 github.com/dchest/safefile
-# github.com/go-task/slim-sprig/v3 v3.0.0
-## explicit; go 1.20
-github.com/go-task/slim-sprig/v3
+# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
+## explicit; go 1.13
+github.com/go-task/slim-sprig
 # github.com/golang/protobuf v1.5.3
 ## explicit; go 1.9
 github.com/golang/protobuf/proto
@@ -25,8 +25,8 @@ github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad
-## explicit; go 1.22
+# github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38
+## explicit; go 1.14
 github.com/google/pprof/profile
 # github.com/hashicorp/go-immutable-radix v1.3.1
 ## explicit
@@ -34,8 +34,8 @@ github.com/hashicorp/go-immutable-radix
 # github.com/hashicorp/go-syslog v1.0.0
 ## explicit
 github.com/hashicorp/go-syslog
-# github.com/hashicorp/golang-lru v1.0.2
-## explicit; go 1.12
+# github.com/hashicorp/golang-lru v0.5.0
+## explicit
 github.com/hashicorp/golang-lru/simplelru
 # github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb
 ## explicit; go 1.12
@@ -68,11 +68,11 @@ github.com/kardianos/service
 # github.com/lifenjoiner/dhcpdns v0.0.6
 ## explicit; go 1.20
 github.com/lifenjoiner/dhcpdns
-# github.com/miekg/dns v1.1.62
+# github.com/miekg/dns v1.1.63
 ## explicit; go 1.19
 github.com/miekg/dns
-# github.com/onsi/ginkgo/v2 v2.22.2
-## explicit; go 1.22.0
+# github.com/onsi/ginkgo/v2 v2.9.5
+## explicit; go 1.18
 github.com/onsi/ginkgo/v2/config
 github.com/onsi/ginkgo/v2/formatter
 github.com/onsi/ginkgo/v2/ginkgo
@@ -107,7 +107,7 @@ github.com/powerman/deepequal
 # github.com/quic-go/qpack v0.5.1
 ## explicit; go 1.22
 github.com/quic-go/qpack
-# github.com/quic-go/quic-go v0.48.2
+# github.com/quic-go/quic-go v0.49.0
 ## explicit; go 1.22
 github.com/quic-go/quic-go
 github.com/quic-go/quic-go/http3
@@ -146,11 +146,11 @@ golang.org/x/crypto/nacl/box
 golang.org/x/crypto/nacl/secretbox
 golang.org/x/crypto/poly1305
 golang.org/x/crypto/salsa20/salsa
-# golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
-## explicit; go 1.22.0
+# golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
+## explicit; go 1.20
 golang.org/x/exp/rand
-# golang.org/x/mod v0.22.0
-## explicit; go 1.22.0
+# golang.org/x/mod v0.18.0
+## explicit; go 1.18
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
@@ -186,15 +186,14 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.29.0
-## explicit; go 1.22.0
-golang.org/x/tools/cover
+# golang.org/x/tools v0.22.0
+## explicit; go 1.19
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/go/gcexportdata
+golang.org/x/tools/go/internal/packagesdriver
 golang.org/x/tools/go/packages
 golang.org/x/tools/go/types/objectpath
-golang.org/x/tools/go/types/typeutil
 golang.org/x/tools/imports
 golang.org/x/tools/internal/aliases
 golang.org/x/tools/internal/event
@@ -205,11 +204,10 @@ golang.org/x/tools/internal/gcimporter
 golang.org/x/tools/internal/gocommand
 golang.org/x/tools/internal/gopathwalk
 golang.org/x/tools/internal/imports
-golang.org/x/tools/internal/modindex
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/stdlib
-golang.org/x/tools/internal/typeparams
+golang.org/x/tools/internal/tokeninternal
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
 # google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
@@ -229,8 +227,8 @@ google.golang.org/grpc/internal/status
 google.golang.org/grpc/resolver
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/status
-# google.golang.org/protobuf v1.36.1
-## explicit; go 1.21
+# google.golang.org/protobuf v1.34.2
+## explicit; go 1.20
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt
@@ -250,7 +248,6 @@ google.golang.org/protobuf/internal/genid
 google.golang.org/protobuf/internal/impl
 google.golang.org/protobuf/internal/order
 google.golang.org/protobuf/internal/pragma
-google.golang.org/protobuf/internal/protolazy
 google.golang.org/protobuf/internal/set
 google.golang.org/protobuf/internal/strs
 google.golang.org/protobuf/internal/version

From 549426ace7c4ce3ac4619af379906bb28fdb7d72 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Sun, 26 Jan 2025 23:59:36 +0100
Subject: [PATCH 17/48] Reverse test for clarity; no behavioral change

---
 dnscrypt-proxy/xtransport.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index 39186f8f..80402746 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -236,7 +236,7 @@ func (xTransport *XTransport) rebuildTransport() {
 					continue
 				}
 				for _, supportedVersion := range suite.SupportedVersions {
-					if supportedVersion != tls.VersionTLS13 {
+					if supportedVersion == tls.VersionTLS12 {
 						for _, expectedSuiteID := range xTransport.tlsCipherSuite {
 							if expectedSuiteID == suite.ID {
 								compatibleSuitesCount += 1

From 80ccb1696abd364cc38fa260cae78e7e9d43a57e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 28 Jan 2025 03:34:52 +0000
Subject: [PATCH 18/48] Bump softprops/action-gh-release

Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 1fd6f3f2b1b859145679a1892abda586ef982a84 to 51cfd90a6d81cfe329568f851fe2236ab4416d17.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](https://github.com/softprops/action-gh-release/compare/1fd6f3f2b1b859145679a1892abda586ef982a84...51cfd90a6d81cfe329568f851fe2236ab4416d17)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/releases.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/releases.yml b/.github/workflows/releases.yml
index a0d8959b..09e76936 100644
--- a/.github/workflows/releases.yml
+++ b/.github/workflows/releases.yml
@@ -83,7 +83,7 @@ jobs:
           prerelease: false
 
       - name: Upload release assets
-        uses: softprops/action-gh-release@1fd6f3f2b1b859145679a1892abda586ef982a84
+        uses: softprops/action-gh-release@51cfd90a6d81cfe329568f851fe2236ab4416d17
         if: startsWith(github.ref, 'refs/tags/')
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

From f2527886ccb06a371b121ecc699836a32024e3c8 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Sun, 2 Feb 2025 23:10:00 +0100
Subject: [PATCH 19/48] Clarify example

---
 dnscrypt-proxy/example-blocked-ips.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/dnscrypt-proxy/example-blocked-ips.txt b/dnscrypt-proxy/example-blocked-ips.txt
index c72d8f50..5c1b8957 100644
--- a/dnscrypt-proxy/example-blocked-ips.txt
+++ b/dnscrypt-proxy/example-blocked-ips.txt
@@ -2,7 +2,8 @@
 #        IP blocklist        #
 ##############################
 
-## Rules for IP-based response blocking
+## Rules for blocking DNS resposnes if they contain
+## IP addresses matching patterns.
 ##
 ## Sample feeds of suspect IP addresses:
 ## - https://github.com/stamparm/ipsum

From 09b0ddd96c6d0ad94562fc72ac9f3760cf21e67d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 5 Feb 2025 03:53:42 +0000
Subject: [PATCH 20/48] Bump softprops/action-gh-release

Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 51cfd90a6d81cfe329568f851fe2236ab4416d17 to a92196038060e0c673cf3ba5b98710dd716731df.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](https://github.com/softprops/action-gh-release/compare/51cfd90a6d81cfe329568f851fe2236ab4416d17...a92196038060e0c673cf3ba5b98710dd716731df)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/releases.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/releases.yml b/.github/workflows/releases.yml
index 09e76936..4be08d84 100644
--- a/.github/workflows/releases.yml
+++ b/.github/workflows/releases.yml
@@ -83,7 +83,7 @@ jobs:
           prerelease: false
 
       - name: Upload release assets
-        uses: softprops/action-gh-release@51cfd90a6d81cfe329568f851fe2236ab4416d17
+        uses: softprops/action-gh-release@a92196038060e0c673cf3ba5b98710dd716731df
         if: startsWith(github.ref, 'refs/tags/')
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

From fbbf90b7025720951ad7ef0cac603d4fc8478bc6 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Thu, 6 Feb 2025 12:00:47 +0100
Subject: [PATCH 21/48] Update deps

---
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 vendor/golang.org/x/sys/cpu/cpu.go            |   3 +
 vendor/golang.org/x/sys/cpu/cpu_x86.go        |  21 +++-
 vendor/golang.org/x/sys/unix/auxv.go          |  36 ++++++
 .../golang.org/x/sys/unix/auxv_unsupported.go |  13 ++
 .../golang.org/x/sys/unix/syscall_solaris.go  |  87 +++++++++++++
 vendor/golang.org/x/sys/unix/zerrors_linux.go |  20 ++-
 .../x/sys/unix/zerrors_linux_386.go           |   3 +
 .../x/sys/unix/zerrors_linux_amd64.go         |   3 +
 .../x/sys/unix/zerrors_linux_arm.go           |   3 +
 .../x/sys/unix/zerrors_linux_arm64.go         |   4 +
 .../x/sys/unix/zerrors_linux_loong64.go       |   3 +
 .../x/sys/unix/zerrors_linux_mips.go          |   3 +
 .../x/sys/unix/zerrors_linux_mips64.go        |   3 +
 .../x/sys/unix/zerrors_linux_mips64le.go      |   3 +
 .../x/sys/unix/zerrors_linux_mipsle.go        |   3 +
 .../x/sys/unix/zerrors_linux_ppc.go           |   3 +
 .../x/sys/unix/zerrors_linux_ppc64.go         |   3 +
 .../x/sys/unix/zerrors_linux_ppc64le.go       |   3 +
 .../x/sys/unix/zerrors_linux_riscv64.go       |   3 +
 .../x/sys/unix/zerrors_linux_s390x.go         |   3 +
 .../x/sys/unix/zerrors_linux_sparc64.go       |   3 +
 .../x/sys/unix/zsyscall_solaris_amd64.go      | 114 ++++++++++++++++++
 .../x/sys/unix/zsysnum_linux_386.go           |   4 +
 .../x/sys/unix/zsysnum_linux_amd64.go         |   4 +
 .../x/sys/unix/zsysnum_linux_arm.go           |   4 +
 .../x/sys/unix/zsysnum_linux_arm64.go         |   4 +
 .../x/sys/unix/zsysnum_linux_loong64.go       |   4 +
 .../x/sys/unix/zsysnum_linux_mips.go          |   4 +
 .../x/sys/unix/zsysnum_linux_mips64.go        |   4 +
 .../x/sys/unix/zsysnum_linux_mips64le.go      |   4 +
 .../x/sys/unix/zsysnum_linux_mipsle.go        |   4 +
 .../x/sys/unix/zsysnum_linux_ppc.go           |   4 +
 .../x/sys/unix/zsysnum_linux_ppc64.go         |   4 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go       |   4 +
 .../x/sys/unix/zsysnum_linux_riscv64.go       |   4 +
 .../x/sys/unix/zsysnum_linux_s390x.go         |   4 +
 .../x/sys/unix/zsysnum_linux_sparc64.go       |   4 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go  |   6 +-
 vendor/modules.txt                            |   2 +-
 41 files changed, 403 insertions(+), 11 deletions(-)
 create mode 100644 vendor/golang.org/x/sys/unix/auxv.go
 create mode 100644 vendor/golang.org/x/sys/unix/auxv_unsupported.go

diff --git a/go.mod b/go.mod
index 0e97f22c..34e6dfc8 100644
--- a/go.mod
+++ b/go.mod
@@ -24,7 +24,7 @@ require (
 	github.com/quic-go/quic-go v0.49.0
 	golang.org/x/crypto v0.32.0
 	golang.org/x/net v0.34.0
-	golang.org/x/sys v0.29.0
+	golang.org/x/sys v0.30.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )
 
diff --git a/go.sum b/go.sum
index e2dcddc8..c78b1206 100644
--- a/go.sum
+++ b/go.sum
@@ -100,8 +100,8 @@ golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
 golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index 02609d5b..9c105f23 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -72,6 +72,9 @@ var X86 struct {
 	HasSSSE3            bool // Supplemental streaming SIMD extension 3
 	HasSSE41            bool // Streaming SIMD extension 4 and 4.1
 	HasSSE42            bool // Streaming SIMD extension 4 and 4.2
+	HasAVXIFMA          bool // Advanced vector extension Integer Fused Multiply Add
+	HasAVXVNNI          bool // Advanced vector extension Vector Neural Network Instructions
+	HasAVXVNNIInt8      bool // Advanced vector extension Vector Neural Network Int8 instructions
 	_                   CacheLinePad
 }
 
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index 600a6807..1e642f33 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -53,6 +53,9 @@ func initOptions() {
 		{Name: "sse41", Feature: &X86.HasSSE41},
 		{Name: "sse42", Feature: &X86.HasSSE42},
 		{Name: "ssse3", Feature: &X86.HasSSSE3},
+		{Name: "avxifma", Feature: &X86.HasAVXIFMA},
+		{Name: "avxvnni", Feature: &X86.HasAVXVNNI},
+		{Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8},
 
 		// These capabilities should always be enabled on amd64:
 		{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
@@ -106,7 +109,7 @@ func archInit() {
 		return
 	}
 
-	_, ebx7, ecx7, edx7 := cpuid(7, 0)
+	eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
 	X86.HasBMI1 = isSet(3, ebx7)
 	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
 	X86.HasBMI2 = isSet(8, ebx7)
@@ -134,14 +137,24 @@ func archInit() {
 		X86.HasAVX512VAES = isSet(9, ecx7)
 		X86.HasAVX512VBMI2 = isSet(6, ecx7)
 		X86.HasAVX512BITALG = isSet(12, ecx7)
-
-		eax71, _, _, _ := cpuid(7, 1)
-		X86.HasAVX512BF16 = isSet(5, eax71)
 	}
 
 	X86.HasAMXTile = isSet(24, edx7)
 	X86.HasAMXInt8 = isSet(25, edx7)
 	X86.HasAMXBF16 = isSet(22, edx7)
+
+	// These features depend on the second level of extended features.
+	if eax7 >= 1 {
+		eax71, _, _, edx71 := cpuid(7, 1)
+		if X86.HasAVX512 {
+			X86.HasAVX512BF16 = isSet(5, eax71)
+		}
+		if X86.HasAVX {
+			X86.HasAVXIFMA = isSet(23, eax71)
+			X86.HasAVXVNNI = isSet(4, eax71)
+			X86.HasAVXVNNIInt8 = isSet(4, edx71)
+		}
+	}
 }
 
 func isSet(bitpos uint, value uint32) bool {
diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go
new file mode 100644
index 00000000..37a82528
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv.go
@@ -0,0 +1,36 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
+// The returned slice is always a fresh copy, owned by the caller.
+// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
+// which happens in some locked-down environments and build modes.
+func Auxv() ([][2]uintptr, error) {
+	vec := runtime_getAuxv()
+	vecLen := len(vec)
+
+	if vecLen == 0 {
+		return nil, syscall.ENOENT
+	}
+
+	if vecLen%2 != 0 {
+		return nil, syscall.EINVAL
+	}
+
+	result := make([]uintptr, vecLen)
+	copy(result, vec)
+	return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
+}
diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
new file mode 100644
index 00000000..1200487f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import "syscall"
+
+func Auxv() ([][2]uintptr, error) {
+	return nil, syscall.ENOTSUP
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 21974af0..abc39554 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) {
 func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) {
 	return ioctlPtrRet(fd, req, unsafe.Pointer(s))
 }
+
+// Ucred Helpers
+// See ucred(3c) and getpeerucred(3c)
+
+//sys	getpeerucred(fd uintptr, ucred *uintptr) (err error)
+//sys	ucredFree(ucred uintptr) = ucred_free
+//sys	ucredGet(pid int) (ucred uintptr, err error) = ucred_get
+//sys	ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid
+//sys	ucredGetegid(ucred uintptr) (gid int) = ucred_getegid
+//sys	ucredGetruid(ucred uintptr) (uid int) = ucred_getruid
+//sys	ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid
+//sys	ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid
+//sys	ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid
+//sys	ucredGetpid(ucred uintptr) (pid int) = ucred_getpid
+
+// Ucred is an opaque struct that holds user credentials.
+type Ucred struct {
+	ucred uintptr
+}
+
+// We need to ensure that ucredFree is called on the underlying ucred
+// when the Ucred is garbage collected.
+func ucredFinalizer(u *Ucred) {
+	ucredFree(u.ucred)
+}
+
+func GetPeerUcred(fd uintptr) (*Ucred, error) {
+	var ucred uintptr
+	err := getpeerucred(fd, &ucred)
+	if err != nil {
+		return nil, err
+	}
+	result := &Ucred{
+		ucred: ucred,
+	}
+	// set the finalizer on the result so that the ucred will be freed
+	runtime.SetFinalizer(result, ucredFinalizer)
+	return result, nil
+}
+
+func UcredGet(pid int) (*Ucred, error) {
+	ucred, err := ucredGet(pid)
+	if err != nil {
+		return nil, err
+	}
+	result := &Ucred{
+		ucred: ucred,
+	}
+	// set the finalizer on the result so that the ucred will be freed
+	runtime.SetFinalizer(result, ucredFinalizer)
+	return result, nil
+}
+
+func (u *Ucred) Geteuid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGeteuid(u.ucred)
+}
+
+func (u *Ucred) Getruid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetruid(u.ucred)
+}
+
+func (u *Ucred) Getsuid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetsuid(u.ucred)
+}
+
+func (u *Ucred) Getegid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetegid(u.ucred)
+}
+
+func (u *Ucred) Getrgid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetrgid(u.ucred)
+}
+
+func (u *Ucred) Getsgid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetsgid(u.ucred)
+}
+
+func (u *Ucred) Getpid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetpid(u.ucred)
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 6ebc48b3..4f432bfe 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -1245,6 +1245,7 @@ const (
 	FAN_REPORT_DFID_NAME                        = 0xc00
 	FAN_REPORT_DFID_NAME_TARGET                 = 0x1e00
 	FAN_REPORT_DIR_FID                          = 0x400
+	FAN_REPORT_FD_ERROR                         = 0x2000
 	FAN_REPORT_FID                              = 0x200
 	FAN_REPORT_NAME                             = 0x800
 	FAN_REPORT_PIDFD                            = 0x80
@@ -1330,8 +1331,10 @@ const (
 	FUSE_SUPER_MAGIC                            = 0x65735546
 	FUTEXFS_SUPER_MAGIC                         = 0xbad1dea
 	F_ADD_SEALS                                 = 0x409
+	F_CREATED_QUERY                             = 0x404
 	F_DUPFD                                     = 0x0
 	F_DUPFD_CLOEXEC                             = 0x406
+	F_DUPFD_QUERY                               = 0x403
 	F_EXLCK                                     = 0x4
 	F_GETFD                                     = 0x1
 	F_GETFL                                     = 0x3
@@ -1551,6 +1554,7 @@ const (
 	IPPROTO_ROUTING                             = 0x2b
 	IPPROTO_RSVP                                = 0x2e
 	IPPROTO_SCTP                                = 0x84
+	IPPROTO_SMC                                 = 0x100
 	IPPROTO_TCP                                 = 0x6
 	IPPROTO_TP                                  = 0x1d
 	IPPROTO_UDP                                 = 0x11
@@ -1623,6 +1627,8 @@ const (
 	IPV6_UNICAST_IF                             = 0x4c
 	IPV6_USER_FLOW                              = 0xe
 	IPV6_V6ONLY                                 = 0x1a
+	IPV6_VERSION                                = 0x60
+	IPV6_VERSION_MASK                           = 0xf0
 	IPV6_XFRM_POLICY                            = 0x23
 	IP_ADD_MEMBERSHIP                           = 0x23
 	IP_ADD_SOURCE_MEMBERSHIP                    = 0x27
@@ -1867,6 +1873,7 @@ const (
 	MADV_UNMERGEABLE                            = 0xd
 	MADV_WILLNEED                               = 0x3
 	MADV_WIPEONFORK                             = 0x12
+	MAP_DROPPABLE                               = 0x8
 	MAP_FILE                                    = 0x0
 	MAP_FIXED                                   = 0x10
 	MAP_FIXED_NOREPLACE                         = 0x100000
@@ -1967,6 +1974,7 @@ const (
 	MSG_PEEK                                    = 0x2
 	MSG_PROXY                                   = 0x10
 	MSG_RST                                     = 0x1000
+	MSG_SOCK_DEVMEM                             = 0x2000000
 	MSG_SYN                                     = 0x400
 	MSG_TRUNC                                   = 0x20
 	MSG_TRYHARD                                 = 0x4
@@ -2083,6 +2091,7 @@ const (
 	NFC_ATR_REQ_MAXSIZE                         = 0x40
 	NFC_ATR_RES_GB_MAXSIZE                      = 0x2f
 	NFC_ATR_RES_MAXSIZE                         = 0x40
+	NFC_ATS_MAXSIZE                             = 0x14
 	NFC_COMM_ACTIVE                             = 0x0
 	NFC_COMM_PASSIVE                            = 0x1
 	NFC_DEVICE_NAME_MAXSIZE                     = 0x8
@@ -2163,6 +2172,7 @@ const (
 	NFNL_SUBSYS_QUEUE                           = 0x3
 	NFNL_SUBSYS_ULOG                            = 0x4
 	NFS_SUPER_MAGIC                             = 0x6969
+	NFT_BITWISE_BOOL                            = 0x0
 	NFT_CHAIN_FLAGS                             = 0x7
 	NFT_CHAIN_MAXNAMELEN                        = 0x100
 	NFT_CT_MAX                                  = 0x17
@@ -2491,6 +2501,7 @@ const (
 	PR_GET_PDEATHSIG                            = 0x2
 	PR_GET_SECCOMP                              = 0x15
 	PR_GET_SECUREBITS                           = 0x1b
+	PR_GET_SHADOW_STACK_STATUS                  = 0x4a
 	PR_GET_SPECULATION_CTRL                     = 0x34
 	PR_GET_TAGGED_ADDR_CTRL                     = 0x38
 	PR_GET_THP_DISABLE                          = 0x2a
@@ -2499,6 +2510,7 @@ const (
 	PR_GET_TIMING                               = 0xd
 	PR_GET_TSC                                  = 0x19
 	PR_GET_UNALIGN                              = 0x5
+	PR_LOCK_SHADOW_STACK_STATUS                 = 0x4c
 	PR_MCE_KILL                                 = 0x21
 	PR_MCE_KILL_CLEAR                           = 0x0
 	PR_MCE_KILL_DEFAULT                         = 0x2
@@ -2525,6 +2537,8 @@ const (
 	PR_PAC_GET_ENABLED_KEYS                     = 0x3d
 	PR_PAC_RESET_KEYS                           = 0x36
 	PR_PAC_SET_ENABLED_KEYS                     = 0x3c
+	PR_PMLEN_MASK                               = 0x7f000000
+	PR_PMLEN_SHIFT                              = 0x18
 	PR_PPC_DEXCR_CTRL_CLEAR                     = 0x4
 	PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC              = 0x10
 	PR_PPC_DEXCR_CTRL_EDITABLE                  = 0x1
@@ -2592,6 +2606,7 @@ const (
 	PR_SET_PTRACER                              = 0x59616d61
 	PR_SET_SECCOMP                              = 0x16
 	PR_SET_SECUREBITS                           = 0x1c
+	PR_SET_SHADOW_STACK_STATUS                  = 0x4b
 	PR_SET_SPECULATION_CTRL                     = 0x35
 	PR_SET_SYSCALL_USER_DISPATCH                = 0x3b
 	PR_SET_TAGGED_ADDR_CTRL                     = 0x37
@@ -2602,6 +2617,9 @@ const (
 	PR_SET_UNALIGN                              = 0x6
 	PR_SET_VMA                                  = 0x53564d41
 	PR_SET_VMA_ANON_NAME                        = 0x0
+	PR_SHADOW_STACK_ENABLE                      = 0x1
+	PR_SHADOW_STACK_PUSH                        = 0x4
+	PR_SHADOW_STACK_WRITE                       = 0x2
 	PR_SME_GET_VL                               = 0x40
 	PR_SME_SET_VL                               = 0x3f
 	PR_SME_SET_VL_ONEXEC                        = 0x40000
@@ -2911,7 +2929,6 @@ const (
 	RTM_NEWNEXTHOP                              = 0x68
 	RTM_NEWNEXTHOPBUCKET                        = 0x74
 	RTM_NEWNSID                                 = 0x58
-	RTM_NEWNVLAN                                = 0x70
 	RTM_NEWPREFIX                               = 0x34
 	RTM_NEWQDISC                                = 0x24
 	RTM_NEWROUTE                                = 0x18
@@ -2920,6 +2937,7 @@ const (
 	RTM_NEWTCLASS                               = 0x28
 	RTM_NEWTFILTER                              = 0x2c
 	RTM_NEWTUNNEL                               = 0x78
+	RTM_NEWVLAN                                 = 0x70
 	RTM_NR_FAMILIES                             = 0x1b
 	RTM_NR_MSGTYPES                             = 0x6c
 	RTM_SETDCB                                  = 0x4f
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index c0d45e32..75207613 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -116,6 +116,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -304,6 +306,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index c731d24f..c68acda5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -116,6 +116,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -305,6 +307,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 680018a4..a8c607ab 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -310,6 +312,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index a63909f3..18563dd8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -109,6 +109,7 @@ const (
 	F_SETOWN                         = 0x8
 	F_UNLCK                          = 0x2
 	F_WRLCK                          = 0x1
+	GCS_MAGIC                        = 0x47435300
 	HIDIOCGRAWINFO                   = 0x80084803
 	HIDIOCGRDESC                     = 0x90044802
 	HIDIOCGRDESCSIZE                 = 0x80044801
@@ -119,6 +120,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -302,6 +305,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9b0a2573..22912cda 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -116,6 +116,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -297,6 +299,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 958e6e06..29344eb3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x80
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -303,6 +305,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 50c7f25b..20d51fb9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x80
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -303,6 +305,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ced21d66..321b6090 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x80
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -303,6 +305,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 226c0441..9bacdf1e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x80
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -303,6 +305,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 3122737c..c2242726 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x80
 	IUCLC                            = 0x1000
 	IXOFF                            = 0x400
@@ -358,6 +360,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index eb5d3467..6270c8ee 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x80
 	IUCLC                            = 0x1000
 	IXOFF                            = 0x400
@@ -362,6 +364,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index e921ebc6..9966c194 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x80
 	IUCLC                            = 0x1000
 	IXOFF                            = 0x400
@@ -362,6 +364,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 38ba81c5..848e5fcc 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -294,6 +296,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 71f04009..669b2adb 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -115,6 +115,8 @@ const (
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -366,6 +368,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index c44a3133..4834e575 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -119,6 +119,8 @@ const (
 	IN_CLOEXEC                       = 0x400000
 	IN_NONBLOCK                      = 0x4000
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -357,6 +359,7 @@ const (
 	SCM_TIMESTAMPING_OPT_STATS       = 0x38
 	SCM_TIMESTAMPING_PKTINFO         = 0x3c
 	SCM_TIMESTAMPNS                  = 0x21
+	SCM_TS_OPT_ID                    = 0x5a
 	SCM_TXTIME                       = 0x3f
 	SCM_WIFI_STATUS                  = 0x25
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 829b87fe..c6545413 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -141,6 +141,16 @@ import (
 //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
 //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
 //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
+//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so"
+//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so"
+//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so"
+//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so"
 //go:cgo_import_dynamic libc_port_create port_create "libc.so"
 //go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
 //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
@@ -280,6 +290,16 @@ import (
 //go:linkname procgetpeername libc_getpeername
 //go:linkname procsetsockopt libc_setsockopt
 //go:linkname procrecvfrom libc_recvfrom
+//go:linkname procgetpeerucred libc_getpeerucred
+//go:linkname procucred_get libc_ucred_get
+//go:linkname procucred_geteuid libc_ucred_geteuid
+//go:linkname procucred_getegid libc_ucred_getegid
+//go:linkname procucred_getruid libc_ucred_getruid
+//go:linkname procucred_getrgid libc_ucred_getrgid
+//go:linkname procucred_getsuid libc_ucred_getsuid
+//go:linkname procucred_getsgid libc_ucred_getsgid
+//go:linkname procucred_getpid libc_ucred_getpid
+//go:linkname procucred_free libc_ucred_free
 //go:linkname procport_create libc_port_create
 //go:linkname procport_associate libc_port_associate
 //go:linkname procport_dissociate libc_port_dissociate
@@ -420,6 +440,16 @@ var (
 	procgetpeername,
 	procsetsockopt,
 	procrecvfrom,
+	procgetpeerucred,
+	procucred_get,
+	procucred_geteuid,
+	procucred_getegid,
+	procucred_getruid,
+	procucred_getrgid,
+	procucred_getsuid,
+	procucred_getsgid,
+	procucred_getpid,
+	procucred_free,
 	procport_create,
 	procport_associate,
 	procport_dissociate,
@@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func getpeerucred(fd uintptr, ucred *uintptr) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGet(pid int) (ucred uintptr, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0)
+	ucred = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGeteuid(ucred uintptr) (uid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetegid(ucred uintptr) (gid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetruid(ucred uintptr) (uid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetrgid(ucred uintptr) (gid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsuid(ucred uintptr) (uid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsgid(ucred uintptr) (gid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetpid(ucred uintptr) (pid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	pid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredFree(ucred uintptr) {
+	sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func port_create() (n int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
 	n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 524b0820..c79aaff3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -458,4 +458,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR            = 460
 	SYS_LSM_LIST_MODULES             = 461
 	SYS_MSEAL                        = 462
+	SYS_SETXATTRAT                   = 463
+	SYS_GETXATTRAT                   = 464
+	SYS_LISTXATTRAT                  = 465
+	SYS_REMOVEXATTRAT                = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index f485dbf4..5eb45069 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -381,4 +381,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 70b35bf3..05e50297 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -422,4 +422,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR            = 460
 	SYS_LSM_LIST_MODULES             = 461
 	SYS_MSEAL                        = 462
+	SYS_SETXATTRAT                   = 463
+	SYS_GETXATTRAT                   = 464
+	SYS_LISTXATTRAT                  = 465
+	SYS_REMOVEXATTRAT                = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 1893e2fe..38c53ec5 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -325,4 +325,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 16a4017d..31d2e71a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -321,4 +321,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 7e567f1e..f4184a33 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -442,4 +442,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR            = 4460
 	SYS_LSM_LIST_MODULES             = 4461
 	SYS_MSEAL                        = 4462
+	SYS_SETXATTRAT                   = 4463
+	SYS_GETXATTRAT                   = 4464
+	SYS_LISTXATTRAT                  = 4465
+	SYS_REMOVEXATTRAT                = 4466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 38ae55e5..05b99622 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -372,4 +372,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 5460
 	SYS_LSM_LIST_MODULES        = 5461
 	SYS_MSEAL                   = 5462
+	SYS_SETXATTRAT              = 5463
+	SYS_GETXATTRAT              = 5464
+	SYS_LISTXATTRAT             = 5465
+	SYS_REMOVEXATTRAT           = 5466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 55e92e60..43a256e9 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -372,4 +372,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 5460
 	SYS_LSM_LIST_MODULES        = 5461
 	SYS_MSEAL                   = 5462
+	SYS_SETXATTRAT              = 5463
+	SYS_GETXATTRAT              = 5464
+	SYS_LISTXATTRAT             = 5465
+	SYS_REMOVEXATTRAT           = 5466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 60658d6a..eea5ddfc 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -442,4 +442,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR            = 4460
 	SYS_LSM_LIST_MODULES             = 4461
 	SYS_MSEAL                        = 4462
+	SYS_SETXATTRAT                   = 4463
+	SYS_GETXATTRAT                   = 4464
+	SYS_LISTXATTRAT                  = 4465
+	SYS_REMOVEXATTRAT                = 4466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index e203e8a7..0d777bfb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -449,4 +449,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR            = 460
 	SYS_LSM_LIST_MODULES             = 461
 	SYS_MSEAL                        = 462
+	SYS_SETXATTRAT                   = 463
+	SYS_GETXATTRAT                   = 464
+	SYS_LISTXATTRAT                  = 465
+	SYS_REMOVEXATTRAT                = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 5944b97d..b4463650 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -421,4 +421,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index c66d416d..0c7d21c1 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -421,4 +421,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index a5459e76..84053916 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -326,4 +326,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 01d86825..fcf1b790 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -387,4 +387,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 7b703e77..52d15b5f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -400,4 +400,8 @@ const (
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
 )
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 5537148d..a46abe64 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -4747,7 +4747,7 @@ const (
 	NL80211_ATTR_MAC_HINT                                   = 0xc8
 	NL80211_ATTR_MAC_MASK                                   = 0xd7
 	NL80211_ATTR_MAX_AP_ASSOC_STA                           = 0xca
-	NL80211_ATTR_MAX                                        = 0x14c
+	NL80211_ATTR_MAX                                        = 0x14d
 	NL80211_ATTR_MAX_CRIT_PROT_DURATION                     = 0xb4
 	NL80211_ATTR_MAX_CSA_COUNTERS                           = 0xce
 	NL80211_ATTR_MAX_MATCH_SETS                             = 0x85
@@ -5519,7 +5519,7 @@ const (
 	NL80211_MNTR_FLAG_CONTROL                               = 0x3
 	NL80211_MNTR_FLAG_COOK_FRAMES                           = 0x5
 	NL80211_MNTR_FLAG_FCSFAIL                               = 0x1
-	NL80211_MNTR_FLAG_MAX                                   = 0x6
+	NL80211_MNTR_FLAG_MAX                                   = 0x7
 	NL80211_MNTR_FLAG_OTHER_BSS                             = 0x4
 	NL80211_MNTR_FLAG_PLCPFAIL                              = 0x2
 	NL80211_MPATH_FLAG_ACTIVE                               = 0x1
@@ -6174,3 +6174,5 @@ type SockDiagReq struct {
 	Family   uint8
 	Protocol uint8
 }
+
+const RTM_NEWNVLAN = 0x70
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 203481eb..6448dc61 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -171,7 +171,7 @@ golang.org/x/net/proxy
 # golang.org/x/sync v0.10.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.29.0
+# golang.org/x/sys v0.30.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/unix

From 2c14511156cd31b16f88a6e0c3e6627167a28864 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20M=C3=A9ndez=20Hern=C3=A1ndez?= 
Date: Sat, 8 Feb 2025 13:43:30 +0100
Subject: [PATCH 22/48] Fix typo in comment

---
 dnscrypt-proxy/example-blocked-ips.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dnscrypt-proxy/example-blocked-ips.txt b/dnscrypt-proxy/example-blocked-ips.txt
index 5c1b8957..e46a5884 100644
--- a/dnscrypt-proxy/example-blocked-ips.txt
+++ b/dnscrypt-proxy/example-blocked-ips.txt
@@ -2,7 +2,7 @@
 #        IP blocklist        #
 ##############################
 
-## Rules for blocking DNS resposnes if they contain
+## Rules for blocking DNS responses if they contain
 ## IP addresses matching patterns.
 ##
 ## Sample feeds of suspect IP addresses:

From 8264b43199a2f1210ba76bebf05a2b108378ad92 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Wed, 19 Feb 2025 10:30:02 +0100
Subject: [PATCH 23/48] Update deps

---
 go.mod                                        |  12 +-
 go.sum                                        |  20 +-
 .../github.com/lifenjoiner/dhcpdns/dhcpdns.go |   7 +-
 .../github.com/lifenjoiner/dhcpdns/readme.md  |   2 +
 vendor/golang.org/x/net/http2/http2.go        |  39 +-
 vendor/golang.org/x/net/http2/server.go       |   4 +-
 vendor/golang.org/x/net/http2/transport.go    | 330 +--------------
 vendor/golang.org/x/net/http2/write.go        |   3 +-
 .../x/net/internal/httpcommon/ascii.go        |  53 +++
 .../httpcommon}/headermap.go                  |  18 +-
 .../x/net/internal/httpcommon/request.go      | 379 ++++++++++++++++++
 vendor/golang.org/x/sync/errgroup/errgroup.go |   1 +
 vendor/modules.txt                            |  11 +-
 13 files changed, 517 insertions(+), 362 deletions(-)
 create mode 100644 vendor/golang.org/x/net/internal/httpcommon/ascii.go
 rename vendor/golang.org/x/net/{http2 => internal/httpcommon}/headermap.go (77%)
 create mode 100644 vendor/golang.org/x/net/internal/httpcommon/request.go

diff --git a/go.mod b/go.mod
index 34e6dfc8..52eb56f4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/dnscrypt/dnscrypt-proxy
 
-go 1.23.5
+go 1.24.0
 
 require (
 	github.com/BurntSushi/toml v1.4.0
@@ -17,13 +17,13 @@ require (
 	github.com/jedisct1/xsecretbox v0.0.0-20241212092125-3afc4917ac41
 	github.com/k-sone/critbitgo v1.4.0
 	github.com/kardianos/service v1.2.2
-	github.com/lifenjoiner/dhcpdns v0.0.6
+	github.com/lifenjoiner/dhcpdns v0.0.7
 	github.com/miekg/dns v1.1.63
 	github.com/opencoff/go-sieve v0.2.1
 	github.com/powerman/check v1.8.0
 	github.com/quic-go/quic-go v0.49.0
-	golang.org/x/crypto v0.32.0
-	golang.org/x/net v0.34.0
+	golang.org/x/crypto v0.33.0
+	golang.org/x/net v0.35.0
 	golang.org/x/sys v0.30.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )
@@ -44,8 +44,8 @@ require (
 	go.uber.org/mock v0.5.0 // indirect
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
 	golang.org/x/mod v0.18.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
+	golang.org/x/sync v0.11.0 // indirect
+	golang.org/x/text v0.22.0 // indirect
 	golang.org/x/tools v0.22.0 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.56.3 // indirect
diff --git a/go.sum b/go.sum
index c78b1206..27ba21e3 100644
--- a/go.sum
+++ b/go.sum
@@ -55,8 +55,8 @@ github.com/k-sone/critbitgo v1.4.0 h1:l71cTyBGeh6X5ATh6Fibgw3+rtNT80BA0uNNWgkPrb
 github.com/k-sone/critbitgo v1.4.0/go.mod h1:7E6pyoyADnFxlUBEKcnfS49b7SUAQGMK+OAp/UQvo0s=
 github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
 github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
-github.com/lifenjoiner/dhcpdns v0.0.6 h1:rn4Y5RRR5sgQ6RjWenwhA7i/uHzHW9hbZpCobA4CAJs=
-github.com/lifenjoiner/dhcpdns v0.0.6/go.mod h1:BixeaGeafYzDIuDCYIUbSOdi4m+TScpzI9cZGYgzgSk=
+github.com/lifenjoiner/dhcpdns v0.0.7 h1:VJM2aFWHU9V7M5v4UYYNaHhIHZkbdvSI6WGGpq6/TNQ=
+github.com/lifenjoiner/dhcpdns v0.0.7/go.mod h1:BixeaGeafYzDIuDCYIUbSOdi4m+TScpzI9cZGYgzgSk=
 github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
 github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
 github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
@@ -87,23 +87,23 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
 go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
-golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
-golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
+golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
 golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
 golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
-golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
+golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
 golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
diff --git a/vendor/github.com/lifenjoiner/dhcpdns/dhcpdns.go b/vendor/github.com/lifenjoiner/dhcpdns/dhcpdns.go
index 10485c87..470d9098 100644
--- a/vendor/github.com/lifenjoiner/dhcpdns/dhcpdns.go
+++ b/vendor/github.com/lifenjoiner/dhcpdns/dhcpdns.go
@@ -241,7 +241,11 @@ func getOutboundParams(ip string) (*net.IPAddr, *net.Interface, error) {
 		var ipUnicast net.IP
 		var got bool
 		for _, addr := range addrs {
-			ipi := addr.(*net.IPNet).IP
+			ipa, ok := addr.(*net.IPNet)
+			if !ok {
+				continue
+			}
+			ipi := ipa.IP
 			if ipi.Equal(ipAddr.IP) {
 				got = true
 			}
@@ -398,6 +402,7 @@ type Detector struct {
 	sync.RWMutex
 	got bool
 	// RemoteIPPort is the remote IPPort to detect within UDP.
+	// Won't send any data to it. `Dial` in UDP only detects if the network is available.
 	RemoteIPPort string
 	lastActiveIP string
 	dns          []net.IP
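
A quick standalone illustration of the comment added above (this sketch is not part of dhcpdns): a UDP `Dial` is a safe reachability probe because `connect()` on a UDP socket transmits nothing; it only consults the routing table and binds a local address, which is exactly the information the detector needs.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// 192.0.2.1 is a documentation address (TEST-NET-1, RFC 5737);
	// Dial alone never sends a datagram to it.
	c, err := net.Dial("udp", "192.0.2.1:53")
	if err != nil {
		fmt.Println("network unavailable:", err)
		return
	}
	defer c.Close()
	// The local address reveals which interface/source IP the kernel
	// would pick for this destination.
	fmt.Println("outbound address:", c.LocalAddr())
}
```
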
diff --git a/vendor/github.com/lifenjoiner/dhcpdns/readme.md b/vendor/github.com/lifenjoiner/dhcpdns/readme.md
index 8ae2e684..cda07099 100644
--- a/vendor/github.com/lifenjoiner/dhcpdns/readme.md
+++ b/vendor/github.com/lifenjoiner/dhcpdns/readme.md
@@ -12,6 +12,8 @@ go build
 cli
 ```
 
+It will need root privileges on operating systems other than Windows.
+
 ## Homepage
 
 https://github.com/lifenjoiner/dhcpdns
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index c7601c90..6c18ea23 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -34,11 +34,19 @@ import (
 )
 
 var (
-	VerboseLogs                    bool
-	logFrameWrites                 bool
-	logFrameReads                  bool
-	inTests                        bool
-	disableExtendedConnectProtocol bool
+	VerboseLogs    bool
+	logFrameWrites bool
+	logFrameReads  bool
+	inTests        bool
+
+	// Enabling extended CONNECT causes browsers to attempt to use
+	// WebSockets-over-HTTP/2. This results in problems when the server's websocket
+	// package doesn't support extended CONNECT.
+	//
+	// Disable extended CONNECT by default for now.
+	//
+	// Issue #71128.
+	disableExtendedConnectProtocol = true
 )
 
 func init() {
@@ -51,8 +59,8 @@ func init() {
 		logFrameWrites = true
 		logFrameReads = true
 	}
-	if strings.Contains(e, "http2xconnect=0") {
-		disableExtendedConnectProtocol = true
+	if strings.Contains(e, "http2xconnect=1") {
+		disableExtendedConnectProtocol = false
 	}
 }
 
@@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) {
 	s.v = save
 }
 
-// validPseudoPath reports whether v is a valid :path pseudo-header
-// value. It must be either:
-//
-//   - a non-empty string starting with '/'
-//   - the string '*', for OPTIONS requests.
-//
-// For now this is only used a quick check for deciding when to clean
-// up Opaque URLs before sending requests from the Transport.
-// See golang.org/issue/16847
-//
-// We used to enforce that the path also didn't start with "//", but
-// Google's GFE accepts such paths and Chrome sends them, so ignore
-// that part of the spec. See golang.org/issue/19103.
-func validPseudoPath(v string) bool {
-	return (len(v) > 0 && v[0] == '/') || v == "*"
-}
-
 // incomparable is a zero-width, non-comparable type. Adding it to a struct
 // makes that struct also non-comparable, and generally doesn't add
 // any size (as long as it's first).
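
For reference, extended CONNECT can still be re-enabled at run time. A standalone sketch (not part of x/net) that mirrors the init() check above, assuming the `e` variable in init() is the GODEBUG environment string:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// New default from the diff above: extended CONNECT is disabled.
	disabled := true
	// Opt back in by running the program with GODEBUG=http2xconnect=1.
	if strings.Contains(os.Getenv("GODEBUG"), "http2xconnect=1") {
		disabled = false
	}
	fmt.Println("extended CONNECT disabled:", disabled)
}
```
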
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b55547ae..7434b878 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -50,6 +50,7 @@ import (
 
 	"golang.org/x/net/http/httpguts"
 	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/internal/httpcommon"
 )
 
 const (
@@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048
 
 func (sc *serverConn) canonicalHeader(v string) string {
 	sc.serveG.check()
-	buildCommonHeaderMapsOnce()
-	cv, ok := commonCanonHeader[v]
+	cv, ok := httpcommon.CachedCanonicalHeader(v)
 	if ok {
 		return cv
 	}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index b2e2ed33..f2c166b6 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -25,7 +25,6 @@ import (
 	"net/http"
 	"net/http/httptrace"
 	"net/textproto"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -35,6 +34,7 @@ import (
 	"golang.org/x/net/http/httpguts"
 	"golang.org/x/net/http2/hpack"
 	"golang.org/x/net/idna"
+	"golang.org/x/net/internal/httpcommon"
 )
 
 const (
@@ -1275,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() {
 // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
 var errRequestCanceled = errors.New("net/http: request canceled")
 
-func commaSeparatedTrailers(req *http.Request) (string, error) {
-	keys := make([]string, 0, len(req.Trailer))
-	for k := range req.Trailer {
-		k = canonicalHeader(k)
-		switch k {
-		case "Transfer-Encoding", "Trailer", "Content-Length":
-			return "", fmt.Errorf("invalid Trailer key %q", k)
-		}
-		keys = append(keys, k)
-	}
-	if len(keys) > 0 {
-		sort.Strings(keys)
-		return strings.Join(keys, ","), nil
-	}
-	return "", nil
-}
-
 func (cc *ClientConn) responseHeaderTimeout() time.Duration {
 	if cc.t.t1 != nil {
 		return cc.t.t1.ResponseHeaderTimeout
@@ -1303,35 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
 	return 0
 }
 
-// checkConnHeaders checks whether req has any invalid connection-level headers.
-// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
-// Certain headers are special-cased as okay but not transmitted later.
-func checkConnHeaders(req *http.Request) error {
-	if v := req.Header.Get("Upgrade"); v != "" {
-		return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
-	}
-	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
-		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
-	}
-	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
-		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
-	}
-	return nil
-}
-
-// actualContentLength returns a sanitized version of
-// req.ContentLength, where 0 actually means zero (not unknown) and -1
-// means unknown.
-func actualContentLength(req *http.Request) int64 {
-	if req.Body == nil || req.Body == http.NoBody {
-		return 0
-	}
-	if req.ContentLength != 0 {
-		return req.ContentLength
-	}
-	return -1
-}
-
 func (cc *ClientConn) decrStreamReservations() {
 	cc.mu.Lock()
 	defer cc.mu.Unlock()
@@ -1356,7 +1310,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
 		reqCancel:            req.Cancel,
 		isHead:               req.Method == "HEAD",
 		reqBody:              req.Body,
-		reqBodyContentLength: actualContentLength(req),
+		reqBodyContentLength: httpcommon.ActualContentLength(req),
 		trace:                httptrace.ContextClientTrace(ctx),
 		peerClosed:           make(chan struct{}),
 		abort:                make(chan struct{}),
@@ -1364,25 +1318,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
 		donec:                make(chan struct{}),
 	}
 
-	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
-	if !cc.t.disableCompression() &&
-		req.Header.Get("Accept-Encoding") == "" &&
-		req.Header.Get("Range") == "" &&
-		!cs.isHead {
-		// Request gzip only, not deflate. Deflate is ambiguous and
-		// not as universally supported anyway.
-		// See: https://zlib.net/zlib_faq.html#faq39
-		//
-		// Note that we don't request this for HEAD requests,
-		// due to a bug in nginx:
-		//   http://trac.nginx.org/nginx/ticket/358
-		//   https://golang.org/issue/5522
-		//
-		// We don't request gzip if the request is for a range, since
-		// auto-decoding a portion of a gzipped document will just fail
-		// anyway. See https://golang.org/issue/8923
-		cs.requestedGzip = true
-	}
+	cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression())
 
 	go cs.doRequest(req, streamf)
 
@@ -1413,7 +1349,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
 		}
 		res.Request = req
 		res.TLS = cc.tlsState
-		if res.Body == noBody && actualContentLength(req) == 0 {
+		if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 {
 			// If there isn't a request or response body still being
 			// written, then wait for the stream to be closed before
 			// RoundTrip returns.
@@ -1496,10 +1432,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
 	cc := cs.cc
 	ctx := cs.ctx
 
-	if err := checkConnHeaders(req); err != nil {
-		return err
-	}
-
 	// wait for setting frames to be received, a server can change this value later,
 	// but we just wait for the first settings frame
 	var isExtendedConnect bool
@@ -1663,20 +1595,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
 	// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
 	// sent by writeRequestBody below, along with any Trailers,
 	// again in form HEADERS{1}, CONTINUATION{0,})
-	trailers, err := commaSeparatedTrailers(req)
+	cc.hbuf.Reset()
+	res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{
+		Request:               req,
+		AddGzipHeader:         cs.requestedGzip,
+		PeerMaxHeaderListSize: cc.peerMaxHeaderListSize,
+		DefaultUserAgent:      defaultUserAgent,
+	}, func(name, value string) {
+		cc.writeHeader(name, value)
+	})
 	if err != nil {
-		return err
-	}
-	hasTrailers := trailers != ""
-	contentLen := actualContentLength(req)
-	hasBody := contentLen != 0
-	hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
-	if err != nil {
-		return err
+		return fmt.Errorf("http2: %w", err)
 	}
+	hdrs := cc.hbuf.Bytes()
 
 	// Write the request.
-	endStream := !hasBody && !hasTrailers
+	endStream := !res.HasBody && !res.HasTrailers
 	cs.sentHeaders = true
 	err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
 	traceWroteHeaders(cs.trace)
@@ -2070,218 +2004,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
 	}
 }
 
-func validateHeaders(hdrs http.Header) string {
-	for k, vv := range hdrs {
-		if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
-			return fmt.Sprintf("name %q", k)
-		}
-		for _, v := range vv {
-			if !httpguts.ValidHeaderFieldValue(v) {
-				// Don't include the value in the error,
-				// because it may be sensitive.
-				return fmt.Sprintf("value for header %q", k)
-			}
-		}
-	}
-	return ""
-}
-
-var errNilRequestURL = errors.New("http2: Request.URI is nil")
-
-func isNormalConnect(req *http.Request) bool {
-	return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
-}
-
-// requires cc.wmu be held.
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
-	cc.hbuf.Reset()
-	if req.URL == nil {
-		return nil, errNilRequestURL
-	}
-
-	host := req.Host
-	if host == "" {
-		host = req.URL.Host
-	}
-	host, err := httpguts.PunycodeHostPort(host)
-	if err != nil {
-		return nil, err
-	}
-	if !httpguts.ValidHostHeader(host) {
-		return nil, errors.New("http2: invalid Host header")
-	}
-
-	var path string
-	if !isNormalConnect(req) {
-		path = req.URL.RequestURI()
-		if !validPseudoPath(path) {
-			orig := path
-			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
-			if !validPseudoPath(path) {
-				if req.URL.Opaque != "" {
-					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
-				} else {
-					return nil, fmt.Errorf("invalid request :path %q", orig)
-				}
-			}
-		}
-	}
-
-	// Check for any invalid headers+trailers and return an error before we
-	// potentially pollute our hpack state. (We want to be able to
-	// continue to reuse the hpack encoder for future requests)
-	if err := validateHeaders(req.Header); err != "" {
-		return nil, fmt.Errorf("invalid HTTP header %s", err)
-	}
-	if err := validateHeaders(req.Trailer); err != "" {
-		return nil, fmt.Errorf("invalid HTTP trailer %s", err)
-	}
-
-	enumerateHeaders := func(f func(name, value string)) {
-		// 8.1.2.3 Request Pseudo-Header Fields
-		// The :path pseudo-header field includes the path and query parts of the
-		// target URI (the path-absolute production and optionally a '?' character
-		// followed by the query production, see Sections 3.3 and 3.4 of
-		// [RFC3986]).
-		f(":authority", host)
-		m := req.Method
-		if m == "" {
-			m = http.MethodGet
-		}
-		f(":method", m)
-		if !isNormalConnect(req) {
-			f(":path", path)
-			f(":scheme", req.URL.Scheme)
-		}
-		if trailers != "" {
-			f("trailer", trailers)
-		}
-
-		var didUA bool
-		for k, vv := range req.Header {
-			if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
-				// Host is :authority, already sent.
-				// Content-Length is automatic, set below.
-				continue
-			} else if asciiEqualFold(k, "connection") ||
-				asciiEqualFold(k, "proxy-connection") ||
-				asciiEqualFold(k, "transfer-encoding") ||
-				asciiEqualFold(k, "upgrade") ||
-				asciiEqualFold(k, "keep-alive") {
-				// Per 8.1.2.2 Connection-Specific Header
-				// Fields, don't send connection-specific
-				// fields. We have already checked if any
-				// are error-worthy so just ignore the rest.
-				continue
-			} else if asciiEqualFold(k, "user-agent") {
-				// Match Go's http1 behavior: at most one
-				// User-Agent. If set to nil or empty string,
-				// then omit it. Otherwise if not mentioned,
-				// include the default (below).
-				didUA = true
-				if len(vv) < 1 {
-					continue
-				}
-				vv = vv[:1]
-				if vv[0] == "" {
-					continue
-				}
-			} else if asciiEqualFold(k, "cookie") {
-				// Per 8.1.2.5 To allow for better compression efficiency, the
-				// Cookie header field MAY be split into separate header fields,
-				// each with one or more cookie-pairs.
-				for _, v := range vv {
-					for {
-						p := strings.IndexByte(v, ';')
-						if p < 0 {
-							break
-						}
-						f("cookie", v[:p])
-						p++
-						// strip space after semicolon if any.
-						for p+1 <= len(v) && v[p] == ' ' {
-							p++
-						}
-						v = v[p:]
-					}
-					if len(v) > 0 {
-						f("cookie", v)
-					}
-				}
-				continue
-			}
-
-			for _, v := range vv {
-				f(k, v)
-			}
-		}
-		if shouldSendReqContentLength(req.Method, contentLength) {
-			f("content-length", strconv.FormatInt(contentLength, 10))
-		}
-		if addGzipHeader {
-			f("accept-encoding", "gzip")
-		}
-		if !didUA {
-			f("user-agent", defaultUserAgent)
-		}
-	}
-
-	// Do a first pass over the headers counting bytes to ensure
-	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
-	// separate pass before encoding the headers to prevent
-	// modifying the hpack state.
-	hlSize := uint64(0)
-	enumerateHeaders(func(name, value string) {
-		hf := hpack.HeaderField{Name: name, Value: value}
-		hlSize += uint64(hf.Size())
-	})
-
-	if hlSize > cc.peerMaxHeaderListSize {
-		return nil, errRequestHeaderListSize
-	}
-
-	trace := httptrace.ContextClientTrace(req.Context())
-	traceHeaders := traceHasWroteHeaderField(trace)
-
-	// Header list size is ok. Write the headers.
-	enumerateHeaders(func(name, value string) {
-		name, ascii := lowerHeader(name)
-		if !ascii {
-			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
-			// field names have to be ASCII characters (just as in HTTP/1.x).
-			return
-		}
-		cc.writeHeader(name, value)
-		if traceHeaders {
-			traceWroteHeaderField(trace, name, value)
-		}
-	})
-
-	return cc.hbuf.Bytes(), nil
-}
-
-// shouldSendReqContentLength reports whether the http2.Transport should send
-// a "content-length" request header. This logic is basically a copy of the net/http
-// transferWriter.shouldSendContentLength.
-// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
-// -1 means unknown.
-func shouldSendReqContentLength(method string, contentLength int64) bool {
-	if contentLength > 0 {
-		return true
-	}
-	if contentLength < 0 {
-		return false
-	}
-	// For zero bodies, whether we send a content-length depends on the method.
-	// It also kinda doesn't matter for http2 either way, with END_STREAM.
-	switch method {
-	case "POST", "PUT", "PATCH":
-		return true
-	default:
-		return false
-	}
-}
-
 // requires cc.wmu be held.
 func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
 	cc.hbuf.Reset()
@@ -2298,7 +2020,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
 	}
 
 	for k, vv := range trailer {
-		lowKey, ascii := lowerHeader(k)
+		lowKey, ascii := httpcommon.LowerHeader(k)
 		if !ascii {
 			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
 			// field names have to be ASCII characters (just as in HTTP/1.x).
@@ -2653,7 +2375,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
 		Status:     status + " " + http.StatusText(statusCode),
 	}
 	for _, hf := range regularFields {
-		key := canonicalHeader(hf.Name)
+		key := httpcommon.CanonicalHeader(hf.Name)
 		if key == "Trailer" {
 			t := res.Trailer
 			if t == nil {
@@ -2661,7 +2383,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
 				res.Trailer = t
 			}
 			foreachHeaderElement(hf.Value, func(v string) {
-				t[canonicalHeader(v)] = nil
+				t[httpcommon.CanonicalHeader(v)] = nil
 			})
 		} else {
 			vv := header[key]
@@ -2785,7 +2507,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
 
 	trailer := make(http.Header)
 	for _, hf := range f.RegularFields() {
-		key := canonicalHeader(hf.Name)
+		key := httpcommon.CanonicalHeader(hf.Name)
 		trailer[key] = append(trailer[key], hf.Value)
 	}
 	cs.trailer = trailer
@@ -3331,7 +3053,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool,
 
 var (
 	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
-	errRequestHeaderListSize  = errors.New("http2: request header list larger than peer's advertised limit")
+	errRequestHeaderListSize  = httpcommon.ErrRequestHeaderListSize
 )
 
 func (cc *ClientConn) logf(format string, args ...interface{}) {
@@ -3515,16 +3237,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
 	}
 }
 
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
-	return trace != nil && trace.WroteHeaderField != nil
-}
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
-	if trace != nil && trace.WroteHeaderField != nil {
-		trace.WroteHeaderField(k, []string{v})
-	}
-}
-
 func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
 	if trace != nil {
 		return trace.Got1xxResponse
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 6ff6bee7..fdb35b94 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -13,6 +13,7 @@ import (
 
 	"golang.org/x/net/http/httpguts"
 	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/internal/httpcommon"
 )
 
 // writeFramer is implemented by any type that is used to write frames.
@@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
 	}
 	for _, k := range keys {
 		vv := h[k]
-		k, ascii := lowerHeader(k)
+		k, ascii := httpcommon.LowerHeader(k)
 		if !ascii {
 			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
 			// field names have to be ASCII characters (just as in HTTP/1.x).
diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
new file mode 100644
index 00000000..ed14da5a
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
@@ -0,0 +1,53 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import "strings"
+
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions which may use Unicode-aware functions which would
+// otherwise be unsafe and could introduce vulnerabilities if used improperly.
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func asciiEqualFold(s, t string) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i := 0; i < len(s); i++ {
+		if lower(s[i]) != lower(t[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// lower returns the ASCII lowercase version of b.
+func lower(b byte) byte {
+	if 'A' <= b && b <= 'Z' {
+		return b + ('a' - 'A')
+	}
+	return b
+}
+
+// isASCIIPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func isASCIIPrint(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] < ' ' || s[i] > '~' {
+			return false
+		}
+	}
+	return true
+}
+
+// asciiToLower returns the lowercase version of s if s is ASCII and printable,
+// and whether or not it was.
+func asciiToLower(s string) (lower string, ok bool) {
+	if !isASCIIPrint(s) {
+		return "", false
+	}
+	return strings.ToLower(s), true
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
similarity index 77%
rename from vendor/golang.org/x/net/http2/headermap.go
rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go
index 149b3dd2..ad3fbacd 100644
--- a/vendor/golang.org/x/net/http2/headermap.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
@@ -1,8 +1,8 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package http2
+package httpcommon
 
 import (
 	"net/http"
@@ -88,7 +88,9 @@ func buildCommonHeaderMaps() {
 	}
 }
 
-func lowerHeader(v string) (lower string, ascii bool) {
+// LowerHeader returns the lowercase form of a header name,
+// used on the wire for HTTP/2 and HTTP/3 requests.
+func LowerHeader(v string) (lower string, ascii bool) {
 	buildCommonHeaderMapsOnce()
 	if s, ok := commonLowerHeader[v]; ok {
 		return s, true
@@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) {
 	return asciiToLower(v)
 }
 
-func canonicalHeader(v string) string {
+// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
+func CanonicalHeader(v string) string {
 	buildCommonHeaderMapsOnce()
 	if s, ok := commonCanonHeader[v]; ok {
 		return s
 	}
 	return http.CanonicalHeaderKey(v)
 }
+
+// CachedCanonicalHeader returns the canonical form of a well-known header name.
+func CachedCanonicalHeader(v string) (string, bool) {
+	buildCommonHeaderMapsOnce()
+	s, ok := commonCanonHeader[v]
+	return s, ok
+}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
new file mode 100644
index 00000000..34391477
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -0,0 +1,379 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/http/httptrace"
+	"sort"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/http/httpguts"
+	"golang.org/x/net/http2/hpack"
+)
+
+var (
+	ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
+)
+
+// EncodeHeadersParam holds the parameters to EncodeHeaders.
+type EncodeHeadersParam struct {
+	Request *http.Request
+
+	// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
+	// added to the request.
+	AddGzipHeader bool
+
+	// PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
+	PeerMaxHeaderListSize uint64
+
+	// DefaultUserAgent is the User-Agent header to send when the request
+	// neither contains a User-Agent nor disables it.
+	DefaultUserAgent string
+}
+
+// EncodeHeadersResult is the result of EncodeHeaders.
+type EncodeHeadersResult struct {
+	HasBody     bool
+	HasTrailers bool
+}
+
+// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
+// It validates a request and calls headerf with each pseudo-header and header
+// for the request.
+// The headerf function is called with the validated, canonicalized header name.
+func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
+	req := param.Request
+
+	// Check for invalid connection-level headers.
+	if err := checkConnHeaders(req); err != nil {
+		return res, err
+	}
+
+	if req.URL == nil {
+		return res, errors.New("Request.URL is nil")
+	}
+
+	host := req.Host
+	if host == "" {
+		host = req.URL.Host
+	}
+	host, err := httpguts.PunycodeHostPort(host)
+	if err != nil {
+		return res, err
+	}
+	if !httpguts.ValidHostHeader(host) {
+		return res, errors.New("invalid Host header")
+	}
+
+	// isNormalConnect is true if this is a non-extended CONNECT request.
+	isNormalConnect := false
+	protocol := req.Header.Get(":protocol")
+	if req.Method == "CONNECT" && protocol == "" {
+		isNormalConnect = true
+	} else if protocol != "" && req.Method != "CONNECT" {
+		return res, errors.New("invalid :protocol header in non-CONNECT request")
+	}
+
+	// Validate the path, except for non-extended CONNECT requests which have no path.
+	var path string
+	if !isNormalConnect {
+		path = req.URL.RequestURI()
+		if !validPseudoPath(path) {
+			orig := path
+			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+			if !validPseudoPath(path) {
+				if req.URL.Opaque != "" {
+					return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+				} else {
+					return res, fmt.Errorf("invalid request :path %q", orig)
+				}
+			}
+		}
+	}
+
+	// Check for any invalid headers+trailers and return an error before we
+	// potentially pollute our hpack state. (We want to be able to
+	// continue to reuse the hpack encoder for future requests)
+	if err := validateHeaders(req.Header); err != "" {
+		return res, fmt.Errorf("invalid HTTP header %s", err)
+	}
+	if err := validateHeaders(req.Trailer); err != "" {
+		return res, fmt.Errorf("invalid HTTP trailer %s", err)
+	}
+
+	contentLength := ActualContentLength(req)
+
+	trailers, err := commaSeparatedTrailers(req)
+	if err != nil {
+		return res, err
+	}
+
+	enumerateHeaders := func(f func(name, value string)) {
+		// 8.1.2.3 Request Pseudo-Header Fields
+		// The :path pseudo-header field includes the path and query parts of the
+		// target URI (the path-absolute production and optionally a '?' character
+		// followed by the query production, see Sections 3.3 and 3.4 of
+		// [RFC3986]).
+		f(":authority", host)
+		m := req.Method
+		if m == "" {
+			m = http.MethodGet
+		}
+		f(":method", m)
+		if !isNormalConnect {
+			f(":path", path)
+			f(":scheme", req.URL.Scheme)
+		}
+		if protocol != "" {
+			f(":protocol", protocol)
+		}
+		if trailers != "" {
+			f("trailer", trailers)
+		}
+
+		var didUA bool
+		for k, vv := range req.Header {
+			if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
+				// Host is :authority, already sent.
+				// Content-Length is automatic, set below.
+				continue
+			} else if asciiEqualFold(k, "connection") ||
+				asciiEqualFold(k, "proxy-connection") ||
+				asciiEqualFold(k, "transfer-encoding") ||
+				asciiEqualFold(k, "upgrade") ||
+				asciiEqualFold(k, "keep-alive") {
+				// Per 8.1.2.2 Connection-Specific Header
+				// Fields, don't send connection-specific
+				// fields. We have already checked if any
+				// are error-worthy so just ignore the rest.
+				continue
+			} else if asciiEqualFold(k, "user-agent") {
+				// Match Go's http1 behavior: at most one
+				// User-Agent. If set to nil or empty string,
+				// then omit it. Otherwise if not mentioned,
+				// include the default (below).
+				didUA = true
+				if len(vv) < 1 {
+					continue
+				}
+				vv = vv[:1]
+				if vv[0] == "" {
+					continue
+				}
+			} else if asciiEqualFold(k, "cookie") {
+				// Per 8.1.2.5 To allow for better compression efficiency, the
+				// Cookie header field MAY be split into separate header fields,
+				// each with one or more cookie-pairs.
+				for _, v := range vv {
+					for {
+						p := strings.IndexByte(v, ';')
+						if p < 0 {
+							break
+						}
+						f("cookie", v[:p])
+						p++
+						// strip space after semicolon if any.
+						for p+1 <= len(v) && v[p] == ' ' {
+							p++
+						}
+						v = v[p:]
+					}
+					if len(v) > 0 {
+						f("cookie", v)
+					}
+				}
+				continue
+			} else if k == ":protocol" {
+				// :protocol pseudo-header was already sent above.
+				continue
+			}
+
+			for _, v := range vv {
+				f(k, v)
+			}
+		}
+		if shouldSendReqContentLength(req.Method, contentLength) {
+			f("content-length", strconv.FormatInt(contentLength, 10))
+		}
+		if param.AddGzipHeader {
+			f("accept-encoding", "gzip")
+		}
+		if !didUA {
+			f("user-agent", param.DefaultUserAgent)
+		}
+	}
+
+	// Do a first pass over the headers counting bytes to ensure
+	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
+	// separate pass before encoding the headers to prevent
+	// modifying the hpack state.
+	if param.PeerMaxHeaderListSize > 0 {
+		hlSize := uint64(0)
+		enumerateHeaders(func(name, value string) {
+			hf := hpack.HeaderField{Name: name, Value: value}
+			hlSize += uint64(hf.Size())
+		})
+
+		if hlSize > param.PeerMaxHeaderListSize {
+			return res, ErrRequestHeaderListSize
+		}
+	}
+
+	trace := httptrace.ContextClientTrace(req.Context())
+
+	// Header list size is ok. Write the headers.
+	enumerateHeaders(func(name, value string) {
+		name, ascii := LowerHeader(name)
+		if !ascii {
+			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+			// field names have to be ASCII characters (just as in HTTP/1.x).
+			return
+		}
+
+		headerf(name, value)
+
+		if trace != nil && trace.WroteHeaderField != nil {
+			trace.WroteHeaderField(name, []string{value})
+		}
+	})
+
+	res.HasBody = contentLength != 0
+	res.HasTrailers = trailers != ""
+	return res, nil
+}
+
+// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
+// for a request.
+func IsRequestGzip(req *http.Request, disableCompression bool) bool {
+	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+	if !disableCompression &&
+		req.Header.Get("Accept-Encoding") == "" &&
+		req.Header.Get("Range") == "" &&
+		req.Method != "HEAD" {
+		// Request gzip only, not deflate. Deflate is ambiguous and
+		// not as universally supported anyway.
+		// See: https://zlib.net/zlib_faq.html#faq39
+		//
+		// Note that we don't request this for HEAD requests,
+		// due to a bug in nginx:
+		//   http://trac.nginx.org/nginx/ticket/358
+		//   https://golang.org/issue/5522
+		//
+		// We don't request gzip if the request is for a range, since
+		// auto-decoding a portion of a gzipped document will just fail
+		// anyway. See https://golang.org/issue/8923
+		return true
+	}
+	return false
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+//
+// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
+// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
+//
+// Certain headers are special-cased as okay but not transmitted later.
+// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
+func checkConnHeaders(req *http.Request) error {
+	if v := req.Header.Get("Upgrade"); v != "" {
+		return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"])
+	}
+	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+		return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
+	}
+	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
+		return fmt.Errorf("invalid Connection request header: %q", vv)
+	}
+	return nil
+}
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+	keys := make([]string, 0, len(req.Trailer))
+	for k := range req.Trailer {
+		k = CanonicalHeader(k)
+		switch k {
+		case "Transfer-Encoding", "Trailer", "Content-Length":
+			return "", fmt.Errorf("invalid Trailer key %q", k)
+		}
+		keys = append(keys, k)
+	}
+	if len(keys) > 0 {
+		sort.Strings(keys)
+		return strings.Join(keys, ","), nil
+	}
+	return "", nil
+}
+
+// ActualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func ActualContentLength(req *http.Request) int64 {
+	if req.Body == nil || req.Body == http.NoBody {
+		return 0
+	}
+	if req.ContentLength != 0 {
+		return req.ContentLength
+	}
+	return -1
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+//   - a non-empty string starting with '/'
+//   - the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+	return (len(v) > 0 && v[0] == '/') || v == "*"
+}
+
+func validateHeaders(hdrs http.Header) string {
+	for k, vv := range hdrs {
+		if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
+			return fmt.Sprintf("name %q", k)
+		}
+		for _, v := range vv {
+			if !httpguts.ValidHeaderFieldValue(v) {
+				// Don't include the value in the error,
+				// because it may be sensitive.
+				return fmt.Sprintf("value for header %q", k)
+			}
+		}
+	}
+	return ""
+}
+
+// shouldSendReqContentLength reports whether we should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+	if contentLength > 0 {
+		return true
+	}
+	if contentLength < 0 {
+		return false
+	}
+	// For zero bodies, whether we send a content-length depends on the method.
+	// It also kinda doesn't matter for http2 either way, with END_STREAM.
+	switch method {
+	case "POST", "PUT", "PATCH":
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index 948a3ee6..b8322598 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool {
 
 // SetLimit limits the number of active goroutines in this group to at most n.
 // A negative value indicates no limit.
+// A limit of zero will prevent any new goroutines from being added.
 //
 // Any subsequent call to the Go method will block until it can add an active
 // goroutine without exceeding the configured limit.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6448dc61..1337f186 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -65,7 +65,7 @@ github.com/k-sone/critbitgo
 # github.com/kardianos/service v1.2.2
 ## explicit; go 1.12
 github.com/kardianos/service
-# github.com/lifenjoiner/dhcpdns v0.0.6
+# github.com/lifenjoiner/dhcpdns v0.0.7
 ## explicit; go 1.20
 github.com/lifenjoiner/dhcpdns
 # github.com/miekg/dns v1.1.63
@@ -132,7 +132,7 @@ github.com/smartystreets/goconvey/convey/reporting
 ## explicit; go 1.22
 go.uber.org/mock/mockgen
 go.uber.org/mock/mockgen/model
-# golang.org/x/crypto v0.32.0
+# golang.org/x/crypto v0.33.0
 ## explicit; go 1.20
 golang.org/x/crypto/blake2b
 golang.org/x/crypto/chacha20
@@ -155,20 +155,21 @@ golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.34.0
+# golang.org/x/net v0.35.0
 ## explicit; go 1.18
 golang.org/x/net/bpf
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
+golang.org/x/net/internal/httpcommon
 golang.org/x/net/internal/iana
 golang.org/x/net/internal/socket
 golang.org/x/net/internal/socks
 golang.org/x/net/ipv4
 golang.org/x/net/ipv6
 golang.org/x/net/proxy
-# golang.org/x/sync v0.10.0
+# golang.org/x/sync v0.11.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 # golang.org/x/sys v0.30.0
@@ -180,7 +181,7 @@ golang.org/x/sys/windows/registry
 golang.org/x/sys/windows/svc
 golang.org/x/sys/windows/svc/eventlog
 golang.org/x/sys/windows/svc/mgr
-# golang.org/x/text v0.21.0
+# golang.org/x/text v0.22.0
 ## explicit; go 1.18
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform

From f49196c6e8966038e6378d9414be9067093a0e13 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Feb 2025 18:07:41 +0100
Subject: [PATCH 24/48] xTransport: avoid updating the host->IP map in multiple
 goroutines

When a goroutine is updating an IP, keep serving the previous IP
to other goroutines.
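
Illustrative sketch only — these are not the actual xTransport types or the dnscrypt-proxy API (the real change is in the diff below). The idea: an expired entry keeps being served while exactly one goroutine holds a time-bounded refresh claim, so concurrent callers neither block nor start duplicate lookups.

```go
package cache

import (
	"net"
	"sync"
	"time"
)

type entry struct {
	ip            net.IP
	expiresAt     time.Time
	updatingUntil time.Time
}

type ipCache struct {
	mu sync.RWMutex
	m  map[string]entry
}

// get returns the cached IP (possibly stale) and whether the caller
// should perform a refresh.
func (c *ipCache) get(host string, now time.Time) (net.IP, bool) {
	c.mu.RLock()
	e, ok := c.m[host]
	c.mu.RUnlock()
	if !ok {
		return nil, true
	}
	expired := now.After(e.expiresAt)
	updating := now.Before(e.updatingUntil)
	// Stale entries are still returned while another goroutine refreshes them.
	return e.ip, expired && !updating
}

// markUpdating records a time-bounded claim so only one goroutine refreshes.
func (c *ipCache) markUpdating(host string, now time.Time, timeout time.Duration) {
	c.mu.Lock()
	if e, ok := c.m[host]; ok {
		e.updatingUntil = now.Add(timeout)
		c.m[host] = e
	}
	c.mu.Unlock()
}
```
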
---
 dnscrypt-proxy/xtransport.go | 37 +++++++++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 9 deletions(-)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index 80402746..0bd1b93c 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -40,8 +40,9 @@ const (
 )
 
 type CachedIPItem struct {
-	ip         net.IP
-	expiration *time.Time
+	ip             net.IP
+	expiration     *time.Time
+	updating_until *time.Time
 }
 
 type CachedIPs struct {
@@ -105,7 +106,7 @@ func ParseIP(ipStr string) net.IP {
 // If ttl < 0, never expire
 // Otherwise, ttl is set to max(ttl, MinResolverIPTTL)
 func (xTransport *XTransport) saveCachedIP(host string, ip net.IP, ttl time.Duration) {
-	item := &CachedIPItem{ip: ip, expiration: nil}
+	item := &CachedIPItem{ip: ip, expiration: nil, updating_until: nil}
 	if ttl >= 0 {
 		if ttl < MinResolverIPTTL {
 			ttl = MinResolverIPTTL
@@ -118,8 +119,21 @@ func (xTransport *XTransport) saveCachedIP(host string, ip net.IP, ttl time.Dura
 	xTransport.cachedIPs.Unlock()
 }
 
-func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool) {
-	ip, expired = nil, false
+// Mark an entry as being updated
+func (xTransport *XTransport) markUpdatingCachedIP(host string) {
+	xTransport.cachedIPs.Lock()
+	item, ok := xTransport.cachedIPs.cache[host]
+	if ok {
+		now := time.Now()
+		until := now.Add(xTransport.timeout)
+		item.updating_until = &until
+		xTransport.cachedIPs.cache[host] = item
+	}
+	xTransport.cachedIPs.Unlock()
+}
+
+func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool, updating bool) {
+	ip, expired, updating = nil, false, false
 	xTransport.cachedIPs.RLock()
 	item, ok := xTransport.cachedIPs.cache[host]
 	xTransport.cachedIPs.RUnlock()
@@ -130,6 +144,9 @@ func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool
 	expiration := item.expiration
 	if expiration != nil && time.Until(*expiration) < 0 {
 		expired = true
+		if item.updating_until != nil && time.Until(*item.updating_until) > 0 {
+			updating = true
+		}
 	}
 	return
 }
@@ -153,7 +170,7 @@ func (xTransport *XTransport) rebuildTransport() {
 			ipOnly := host
 			// resolveAndUpdateCache() is always called in `Fetch()` before the `Dial()`
 			// method is used, so that a cached entry must be present at this point.
-			cachedIP, _ := xTransport.loadCachedIP(host)
+			cachedIP, _, _ := xTransport.loadCachedIP(host)
 			if cachedIP != nil {
 				if ipv4 := cachedIP.To4(); ipv4 != nil {
 					ipOnly = ipv4.String()
@@ -263,7 +280,7 @@ func (xTransport *XTransport) rebuildTransport() {
 			dlog.Debugf("Dialing for H3: [%v]", addrStr)
 			host, port := ExtractHostAndPort(addrStr, stamps.DefaultPort)
 			ipOnly := host
-			cachedIP, _ := xTransport.loadCachedIP(host)
+			cachedIP, _, _ := xTransport.loadCachedIP(host)
 			network := "udp4"
 			if cachedIP != nil {
 				if ipv4 := cachedIP.To4(); ipv4 != nil {
@@ -402,10 +419,12 @@ func (xTransport *XTransport) resolveAndUpdateCache(host string) error {
 	if ParseIP(host) != nil {
 		return nil
 	}
-	cachedIP, expired := xTransport.loadCachedIP(host)
-	if cachedIP != nil && !expired {
+	cachedIP, expired, updating := xTransport.loadCachedIP(host)
+	if cachedIP != nil && (!expired || updating) {
 		return nil
 	}
+	xTransport.markUpdatingCachedIP(host)
+
 	var foundIP net.IP
 	var ttl time.Duration
 	var err error

From 41bc703873a2a22a965edc77d5f2f1a467ea4d0a Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Feb 2025 18:11:29 +0100
Subject: [PATCH 25/48] Update quic-go

---
 go.mod                                        |   2 +-
 go.sum                                        |   4 +-
 .../quic-go/quic-go/conn_id_manager.go        |  59 +-
 .../github.com/quic-go/quic-go/connection.go  | 575 ++++++++++++------
 .../quic-go/quic-go/http3/server.go           |  24 +-
 .../quic-go/internal/ackhandler/interfaces.go |   4 +-
 .../quic-go/internal/ackhandler/packet.go     |   3 +-
 .../ackhandler/sent_packet_handler.go         | 217 +++++--
 .../ackhandler/sent_packet_history.go         |  82 ++-
 .../internal/handshake/crypto_setup.go        |  93 ++-
 .../{qtls/conn.go => handshake/fake_conn.go}  |   2 +-
 .../quic-go/internal/handshake/hkdf.go        |   2 -
 .../internal/handshake/session_ticket.go      |  18 +
 .../quic-go/internal/handshake/tls_config.go  |  39 ++
 .../internal/protocol/encryption_level.go     |  35 ++
 .../quic-go/internal/qtls/cipher_suite.go     |  52 --
 .../internal/qtls/client_session_cache.go     |  70 ---
 .../quic-go/quic-go/internal/qtls/qtls.go     | 150 -----
 .../quic-go/internal/utils/rtt_stats.go       |   9 +
 .../quic-go/quic-go/logging/types.go          |   4 +-
 .../quic-go/quic-go/mtu_discoverer.go         |  43 +-
 .../quic-go/quic-go/packet_packer.go          |  36 +-
 .../quic-go/quic-go/path_manager.go           | 145 +++++
 .../github.com/quic-go/quic-go/send_conn.go   |  50 +-
 .../github.com/quic-go/quic-go/send_queue.go  |  11 +-
 vendor/modules.txt                            |   5 +-
 26 files changed, 1097 insertions(+), 637 deletions(-)
 rename vendor/github.com/quic-go/quic-go/internal/{qtls/conn.go => handshake/fake_conn.go} (97%)
 create mode 100644 vendor/github.com/quic-go/quic-go/internal/handshake/tls_config.go
 delete mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go
 delete mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go
 delete mode 100644 vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go
 create mode 100644 vendor/github.com/quic-go/quic-go/path_manager.go

diff --git a/go.mod b/go.mod
index 52eb56f4..483c532b 100644
--- a/go.mod
+++ b/go.mod
@@ -21,7 +21,7 @@ require (
 	github.com/miekg/dns v1.1.63
 	github.com/opencoff/go-sieve v0.2.1
 	github.com/powerman/check v1.8.0
-	github.com/quic-go/quic-go v0.49.0
+	github.com/quic-go/quic-go v0.50.0
 	golang.org/x/crypto v0.33.0
 	golang.org/x/net v0.35.0
 	golang.org/x/sys v0.30.0
diff --git a/go.sum b/go.sum
index 27ba21e3..627bc5e0 100644
--- a/go.sum
+++ b/go.sum
@@ -75,8 +75,8 @@ github.com/powerman/deepequal v0.1.0 h1:sVwtyTsBuYIvdbLR1O2wzRY63YgPqdGZmk/o80l+
 github.com/powerman/deepequal v0.1.0/go.mod h1:3k7aG/slufBhUANdN67o/UPg8i5YaiJ6FmibWX0cn04=
 github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
 github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
-github.com/quic-go/quic-go v0.49.0 h1:w5iJHXwHxs1QxyBv1EHKuC50GX5to8mJAxvtnttJp94=
-github.com/quic-go/quic-go v0.49.0/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s=
+github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo=
+github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
 github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
 github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
 github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
diff --git a/vendor/github.com/quic-go/quic-go/conn_id_manager.go b/vendor/github.com/quic-go/quic-go/conn_id_manager.go
index 4030913d..a4fbd93c 100644
--- a/vendor/github.com/quic-go/quic-go/conn_id_manager.go
+++ b/vendor/github.com/quic-go/quic-go/conn_id_manager.go
@@ -19,6 +19,9 @@ type newConnID struct {
 type connIDManager struct {
 	queue list.List[newConnID]
 
+	highestProbingID uint64
+	pathProbing      map[pathID]newConnID // initialized lazily
+
 	handshakeComplete         bool
 	activeSequenceNumber      uint64
 	highestRetired            uint64
@@ -76,13 +79,23 @@ func (h *connIDManager) add(f *wire.NewConnectionIDFrame) error {
 	}
 	// If the NEW_CONNECTION_ID frame is reordered, such that its sequence number is smaller than the currently active
 	// connection ID or if it was already retired, send the RETIRE_CONNECTION_ID frame immediately.
-	if f.SequenceNumber < h.activeSequenceNumber || f.SequenceNumber < h.highestRetired {
+	if f.SequenceNumber < max(h.activeSequenceNumber, h.highestProbingID) || f.SequenceNumber < h.highestRetired {
 		h.queueControlFrame(&wire.RetireConnectionIDFrame{
 			SequenceNumber: f.SequenceNumber,
 		})
 		return nil
 	}
 
+	if f.RetirePriorTo != 0 && h.pathProbing != nil {
+		for id, entry := range h.pathProbing {
+			if entry.SequenceNumber < f.RetirePriorTo {
+				h.queueControlFrame(&wire.RetireConnectionIDFrame{
+					SequenceNumber: entry.SequenceNumber,
+				})
+				delete(h.pathProbing, id)
+			}
+		}
+	}
 	// Retire elements in the queue.
 	// Doesn't retire the active connection ID.
 	if f.RetirePriorTo > h.highestRetired {
@@ -225,6 +238,50 @@ func (h *connIDManager) SetHandshakeComplete() {
 	h.handshakeComplete = true
 }
 
+// GetConnIDForPath retrieves a connection ID for a new path (i.e. not the active one).
+// Once a connection ID is allocated for a path, it cannot be used for a different path.
+// When called with the same pathID, it will return the same connection ID,
+// unless the peer requested that this connection ID be retired.
+func (h *connIDManager) GetConnIDForPath(id pathID) (protocol.ConnectionID, bool) {
+	h.assertNotClosed()
+	// if we're using zero-length connection IDs, we don't need to change the connection ID
+	if h.activeConnectionID.Len() == 0 {
+		return protocol.ConnectionID{}, true
+	}
+
+	if h.pathProbing == nil {
+		h.pathProbing = make(map[pathID]newConnID)
+	}
+	entry, ok := h.pathProbing[id]
+	if ok {
+		return entry.ConnectionID, true
+	}
+	if h.queue.Len() == 0 {
+		return protocol.ConnectionID{}, false
+	}
+	front := h.queue.Remove(h.queue.Front())
+	h.pathProbing[id] = front
+	h.highestProbingID = front.SequenceNumber
+	return front.ConnectionID, true
+}
+
+func (h *connIDManager) RetireConnIDForPath(pathID pathID) {
+	h.assertNotClosed()
+	// if we're using zero-length connection IDs, we don't need to change the connection ID
+	if h.activeConnectionID.Len() == 0 {
+		return
+	}
+
+	entry, ok := h.pathProbing[pathID]
+	if !ok {
+		return
+	}
+	h.queueControlFrame(&wire.RetireConnectionIDFrame{
+		SequenceNumber: entry.SequenceNumber,
+	})
+	delete(h.pathProbing, pathID)
+}
+
 // Using the connIDManager after it has been closed can have disastrous effects:
 // If the connection ID is rotated, a new entry would be inserted into the packet handler map,
 // leading to a memory leak of the connection struct.
diff --git a/vendor/github.com/quic-go/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go
index 879faec0..9415584d 100644
--- a/vendor/github.com/quic-go/quic-go/connection.go
+++ b/vendor/github.com/quic-go/quic-go/connection.go
@@ -19,6 +19,7 @@ import (
 	"github.com/quic-go/quic-go/internal/protocol"
 	"github.com/quic-go/quic-go/internal/qerr"
 	"github.com/quic-go/quic-go/internal/utils"
+	"github.com/quic-go/quic-go/internal/utils/ringbuffer"
 	"github.com/quic-go/quic-go/internal/wire"
 	"github.com/quic-go/quic-go/logging"
 )
@@ -94,7 +95,6 @@ type connRunner interface {
 
 type closeError struct {
 	err       error
-	remote    bool
 	immediate bool
 }
 
@@ -128,6 +128,10 @@ type connection struct {
 	conn      sendConn
 	sendQueue sender
 
+	// lazily initialized: most connections never migrate
+	pathManager        *pathManager
+	largestRcvdAppData protocol.PacketNumber
+
 	streamsMap      streamManager
 	connIDManager   *connIDManager
 	connIDGenerator *connIDGenerator
@@ -148,19 +152,21 @@ type connection struct {
 	packer        packer
 	mtuDiscoverer mtuDiscoverer // initialized when the transport parameters are received
 
-	maxPayloadSizeEstimate atomic.Uint32
+	currentMTUEstimate atomic.Uint32
 
 	initialStream       *cryptoStream
 	handshakeStream     *cryptoStream
 	oneRTTStream        *cryptoStream // only set for the server
 	cryptoStreamHandler cryptoStreamHandler
 
-	receivedPackets  chan receivedPacket
-	sendingScheduled chan struct{}
+	notifyReceivedPacket chan struct{}
+	sendingScheduled     chan struct{}
+	receivedPacketMx     sync.Mutex
+	receivedPackets      ringbuffer.RingBuffer[receivedPacket]
 
-	closeOnce sync.Once
 	// closeChan is used to notify the run loop that it should terminate
-	closeChan chan closeError
+	closeChan chan struct{}
+	closeErr  atomic.Pointer[closeError]
 
 	ctx                   context.Context
 	ctxCancel             context.CancelCauseFunc
@@ -280,7 +286,7 @@ var newConnection = func(
 		s.tracer,
 		s.logger,
 	)
-	s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize))))
+	s.currentMTUEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize))))
 	statelessResetToken := statelessResetter.GetStatelessResetToken(srcConnID)
 	params := &wire.TransportParameters{
 		InitialMaxStreamDataBidiLocal:   protocol.ByteCount(s.config.InitialStreamReceiveWindow),
@@ -392,7 +398,7 @@ var newClientConnection = func(
 		s.tracer,
 		s.logger,
 	)
-	s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize))))
+	s.currentMTUEstimate.Store(uint32(estimateMaxPayloadSize(protocol.ByteCount(s.config.InitialPacketSize))))
 	oneRTTStream := newCryptoStream()
 	params := &wire.TransportParameters{
 		InitialMaxStreamDataBidiRemote: protocol.ByteCount(s.config.InitialStreamReceiveWindow),
@@ -450,6 +456,7 @@ var newClientConnection = func(
 }
 
 func (s *connection) preSetup() {
+	s.largestRcvdAppData = protocol.InvalidPacketNumber
 	s.initialStream = newCryptoStream()
 	s.handshakeStream = newCryptoStream()
 	s.sendQueue = newSendQueue(s.conn)
@@ -479,8 +486,9 @@ func (s *connection) preSetup() {
 		s.perspective,
 	)
 	s.framer = newFramer(s.connFlowController)
-	s.receivedPackets = make(chan receivedPacket, protocol.MaxConnUnprocessedPackets)
-	s.closeChan = make(chan closeError, 1)
+	s.receivedPackets.Init(8)
+	s.notifyReceivedPacket = make(chan struct{}, 1)
+	s.closeChan = make(chan struct{}, 1)
 	s.sendingScheduled = make(chan struct{}, 1)
 	s.handshakeCompleteChan = make(chan struct{})
 
@@ -493,23 +501,18 @@ func (s *connection) preSetup() {
 }
 
 // run the connection main loop
-func (s *connection) run() error {
-	var closeErr closeError
-	defer func() { s.ctxCancel(closeErr.err) }()
+func (s *connection) run() (err error) {
+	defer func() { s.ctxCancel(err) }()
 
 	defer func() {
-		// Drain queued packets that will never be processed.
-		for {
-			select {
-			case p, ok := <-s.receivedPackets:
-				if !ok {
-					return
-				}
-				p.buffer.Decrement()
-				p.buffer.MaybeRelease()
-			default:
-				return
-			}
+		// drain queued packets that will never be processed
+		s.receivedPacketMx.Lock()
+		defer s.receivedPacketMx.Unlock()
+
+		for !s.receivedPackets.Empty() {
+			p := s.receivedPackets.PopFront()
+			p.buffer.Decrement()
+			p.buffer.MaybeRelease()
 		}
 	}()
 
@@ -536,91 +539,88 @@ func (s *connection) run() error {
 runLoop:
 	for {
 		if s.framer.QueuedTooManyControlFrames() {
-			s.closeLocal(&qerr.TransportError{ErrorCode: InternalError})
+			s.setCloseError(&closeError{err: &qerr.TransportError{ErrorCode: InternalError}})
+			break runLoop
 		}
 		// Close immediately if requested
 		select {
-		case closeErr = <-s.closeChan:
+		case <-s.closeChan:
 			break runLoop
 		default:
 		}
 
-		s.maybeResetTimer()
+		// no need to set a timer if we can send packets immediately
+		if s.pacingDeadline != deadlineSendImmediately {
+			s.maybeResetTimer()
+		}
 
-		var processedUndecryptablePacket bool
+		// 1st: handle undecryptable packets, if any.
+		// This can only occur before completion of the handshake.
 		if len(s.undecryptablePacketsToProcess) > 0 {
+			var processedUndecryptablePacket bool
 			queue := s.undecryptablePacketsToProcess
 			s.undecryptablePacketsToProcess = nil
 			for _, p := range queue {
-				if processed := s.handlePacketImpl(p); processed {
+				processed, err := s.handleOnePacket(p)
+				if err != nil {
+					s.setCloseError(&closeError{err: err})
+					break runLoop
+				}
+				if processed {
 					processedUndecryptablePacket = true
 				}
-				// Don't set timers and send packets if the packet made us close the connection.
-				select {
-				case closeErr = <-s.closeChan:
-					break runLoop
-				default:
-				}
+			}
+			if processedUndecryptablePacket {
+				// if we processed any undecryptable packets, jump to the resetting of the timers directly
+				continue
 			}
 		}
-		// If we processed any undecryptable packets, jump to the resetting of the timers directly.
-		if !processedUndecryptablePacket {
+
+		// 2nd: receive packets.
+		processed, err := s.handlePackets() // don't check receivedPackets.Len() in the run loop to avoid locking the mutex
+		if err != nil {
+			s.setCloseError(&closeError{err: err})
+			break runLoop
+		}
+
+		// We don't need to wait for new events if:
+		// * we processed packets: we probably need to send an ACK, and potentially more data
+		// * the pacer allows us to send more packets immediately
+		shouldProceedImmediately := sendQueueAvailable == nil && (processed || s.pacingDeadline == deadlineSendImmediately)
+		if !shouldProceedImmediately {
+			// 3rd: wait for something to happen:
+			// * closing of the connection
+			// * timer firing
+			// * sending scheduled
+			// * send queue available
+			// * received packets
 			select {
-			case closeErr = <-s.closeChan:
+			case <-s.closeChan:
 				break runLoop
 			case <-s.timer.Chan():
 				s.timer.SetRead()
-				// We do all the interesting stuff after the switch statement, so
-				// nothing to see here.
 			case <-s.sendingScheduled:
-				// We do all the interesting stuff after the switch statement, so
-				// nothing to see here.
 			case <-sendQueueAvailable:
-			case firstPacket := <-s.receivedPackets:
-				wasProcessed := s.handlePacketImpl(firstPacket)
-				// Don't set timers and send packets if the packet made us close the connection.
-				select {
-				case closeErr = <-s.closeChan:
+			case <-s.notifyReceivedPacket:
+				wasProcessed, err := s.handlePackets()
+				if err != nil {
+					s.setCloseError(&closeError{err: err})
 					break runLoop
-				default:
 				}
-				if s.handshakeComplete {
-					// Now process all packets in the receivedPackets channel.
-					// Limit the number of packets to the length of the receivedPackets channel,
-					// so we eventually get a chance to send out an ACK when receiving a lot of packets.
-					numPackets := len(s.receivedPackets)
-				receiveLoop:
-					for i := 0; i < numPackets; i++ {
-						select {
-						case p := <-s.receivedPackets:
-							if processed := s.handlePacketImpl(p); processed {
-								wasProcessed = true
-							}
-							select {
-							case closeErr = <-s.closeChan:
-								break runLoop
-							default:
-							}
-						default:
-							break receiveLoop
-						}
-					}
-				}
-				// Only reset the timers if this packet was actually processed.
-				// This avoids modifying any state when handling undecryptable packets,
-				// which could be injected by an attacker.
+				// Only reset the timers if a packet was actually processed.
 				if !wasProcessed {
 					continue
 				}
 			}
 		}
 
+		// Check for loss detection timeout.
+		// This could cause packets to be declared lost, and retransmissions to be enqueued.
 		now := time.Now()
 		if timeout := s.sentPacketHandler.GetLossDetectionTimeout(); !timeout.IsZero() && timeout.Before(now) {
-			// This could cause packets to be retransmitted.
-			// Check it before trying to send packets.
 			if err := s.sentPacketHandler.OnLossDetectionTimeout(now); err != nil {
-				s.closeLocal(err)
+				s.setCloseError(&closeError{err: err})
+				break runLoop
 			}
 		}
 
@@ -631,35 +631,46 @@ runLoop:
 			s.keepAlivePingSent = true
 		} else if !s.handshakeComplete && now.Sub(s.creationTime) >= s.config.handshakeTimeout() {
 			s.destroyImpl(qerr.ErrHandshakeTimeout)
-			continue
+			break runLoop
 		} else {
 			idleTimeoutStartTime := s.idleTimeoutStartTime()
 			if (!s.handshakeComplete && now.Sub(idleTimeoutStartTime) >= s.config.HandshakeIdleTimeout) ||
 				(s.handshakeComplete && now.After(s.nextIdleTimeoutTime())) {
 				s.destroyImpl(qerr.ErrIdleTimeout)
-				continue
+				break runLoop
 			}
 		}
 
 		if s.sendQueue.WouldBlock() {
-			// The send queue is still busy sending out packets.
-			// Wait until there's space to enqueue new packets.
+			// The send queue is still busy sending out packets. Wait until there's space to enqueue new packets.
 			sendQueueAvailable = s.sendQueue.Available()
+			// Cancel the pacing timer, as we can't send any more packets until the send queue is available again.
+			s.pacingDeadline = time.Time{}
 			continue
 		}
+
+		if s.closeErr.Load() != nil {
+			break runLoop
+		}
+
 		if err := s.triggerSending(now); err != nil {
-			s.closeLocal(err)
+			s.setCloseError(&closeError{err: err})
+			break runLoop
 		}
 		if s.sendQueue.WouldBlock() {
+			// The send queue is still busy sending out packets. Wait until there's space to enqueue new packets.
 			sendQueueAvailable = s.sendQueue.Available()
+			// Cancel the pacing timer, as we can't send any more packets until the send queue is available again.
+			s.pacingDeadline = time.Time{}
 		} else {
 			sendQueueAvailable = nil
 		}
 	}
 
+	closeErr := s.closeErr.Load()
 	s.cryptoStreamHandler.Close()
 	s.sendQueue.Close() // close the send queue before sending the CONNECTION_CLOSE
-	s.handleCloseError(&closeErr)
+	s.handleCloseError(closeErr)
 	if s.tracer != nil && s.tracer.Close != nil {
 		if e := (&errCloseForRecreating{}); !errors.As(closeErr.err, &e) {
 			s.tracer.Close()
@@ -802,17 +813,60 @@ func (s *connection) handleHandshakeConfirmed(now time.Time) error {
 	return nil
 }
 
-func (s *connection) handlePacketImpl(rp receivedPacket) bool {
+func (s *connection) handlePackets() (wasProcessed bool, _ error) {
+	// Now process all packets queued in the receivedPackets ring buffer.
+	// Limit the number of packets to its current length,
+	// so we eventually get a chance to send out an ACK when receiving a lot of packets.
+	s.receivedPacketMx.Lock()
+	numPackets := s.receivedPackets.Len()
+	if numPackets == 0 {
+		s.receivedPacketMx.Unlock()
+		return false, nil
+	}
+
+	var hasMorePackets bool
+	for i := 0; i < numPackets; i++ {
+		if i > 0 {
+			s.receivedPacketMx.Lock()
+		}
+		p := s.receivedPackets.PopFront()
+		hasMorePackets = !s.receivedPackets.Empty()
+		s.receivedPacketMx.Unlock()
+
+		processed, err := s.handleOnePacket(p)
+		if err != nil {
+			return false, err
+		}
+		if processed {
+			wasProcessed = true
+		}
+		if !hasMorePackets {
+			break
+		}
+		// only process a single packet at a time before handshake completion
+		if !s.handshakeComplete {
+			break
+		}
+	}
+	if hasMorePackets {
+		select {
+		case s.notifyReceivedPacket <- struct{}{}:
+		default:
+		}
+	}
+	return wasProcessed, nil
+}
+
+func (s *connection) handleOnePacket(rp receivedPacket) (wasProcessed bool, _ error) {
 	s.sentPacketHandler.ReceivedBytes(rp.Size(), rp.rcvTime)
 
 	if wire.IsVersionNegotiationPacket(rp.data) {
 		s.handleVersionNegotiationPacket(rp)
-		return false
+		return false, nil
 	}
 
 	var counter uint8
 	var lastConnID protocol.ConnectionID
-	var processed bool
 	data := rp.data
 	p := rp
 	for len(data) > 0 {
@@ -872,26 +926,34 @@ func (s *connection) handlePacketImpl(rp receivedPacket) bool {
 
 			p.data = packetData
 
-			if wasProcessed := s.handleLongHeaderPacket(p, hdr); wasProcessed {
-				processed = true
+			processed, err := s.handleLongHeaderPacket(p, hdr)
+			if err != nil {
+				return false, err
+			}
+			if processed {
+				wasProcessed = true
 			}
 			data = rest
 		} else {
 			if counter > 0 {
 				p.buffer.Split()
 			}
-			if wasProcessed := s.handleShortHeaderPacket(p); wasProcessed {
-				processed = true
+			processed, err := s.handleShortHeaderPacket(p)
+			if err != nil {
+				return false, err
+			}
+			if processed {
+				wasProcessed = true
 			}
 			break
 		}
 	}
 
 	p.buffer.MaybeRelease()
-	return processed
+	return wasProcessed, nil
 }
 
-func (s *connection) handleShortHeaderPacket(p receivedPacket) bool {
+func (s *connection) handleShortHeaderPacket(p receivedPacket) (wasProcessed bool, _ error) {
 	var wasQueued bool
 
 	defer func() {
@@ -904,13 +966,14 @@ func (s *connection) handleShortHeaderPacket(p receivedPacket) bool {
 	destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen)
 	if err != nil {
 		s.tracer.DroppedPacket(logging.PacketType1RTT, protocol.InvalidPacketNumber, protocol.ByteCount(len(p.data)), logging.PacketDropHeaderParseError)
-		return false
+		return false, nil
 	}
 	pn, pnLen, keyPhase, data, err := s.unpacker.UnpackShortHeader(p.rcvTime, p.data)
 	if err != nil {
-		wasQueued = s.handleUnpackError(err, p, logging.PacketType1RTT)
-		return false
+		wasQueued, err = s.handleUnpackError(err, p, logging.PacketType1RTT)
+		return false, err
 	}
+	s.largestRcvdAppData = max(s.largestRcvdAppData, pn)
 
 	if s.logger.Debug() {
 		s.logger.Debugf("<- Reading packet %d (%d bytes) for connection %s, 1-RTT", pn, p.Size(), destConnID)
@@ -922,7 +985,7 @@ func (s *connection) handleShortHeaderPacket(p receivedPacket) bool {
 		if s.tracer != nil && s.tracer.DroppedPacket != nil {
 			s.tracer.DroppedPacket(logging.PacketType1RTT, pn, p.Size(), logging.PacketDropDuplicate)
 		}
-		return false
+		return false, nil
 	}
 
 	var log func([]logging.Frame)
@@ -941,14 +1004,58 @@ func (s *connection) handleShortHeaderPacket(p receivedPacket) bool {
 			)
 		}
 	}
-	if err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log); err != nil {
-		s.closeLocal(err)
-		return false
+	isNonProbing, err := s.handleUnpackedShortHeaderPacket(destConnID, pn, data, p.ecn, p.rcvTime, log)
+	if err != nil {
+		return false, err
 	}
-	return true
+
+	// In RFC 9000, only the client can migrate between paths.
+	if s.perspective == protocol.PerspectiveClient {
+		return true, nil
+	}
+
+	var shouldSwitchPath bool
+	if pn == s.largestRcvdAppData && !addrsEqual(p.remoteAddr, s.RemoteAddr()) {
+		if s.pathManager == nil {
+			s.pathManager = newPathManager(
+				s.connIDManager.GetConnIDForPath,
+				s.connIDManager.RetireConnIDForPath,
+				s.logger,
+			)
+		}
+		var destConnID protocol.ConnectionID
+		var pathChallenge ackhandler.Frame
+		destConnID, pathChallenge, shouldSwitchPath = s.pathManager.HandlePacket(p, isNonProbing)
+		if pathChallenge.Frame != nil {
+			probe, buf, err := s.packer.PackPathProbePacket(destConnID, pathChallenge, s.version)
+			if err != nil {
+				return false, err
+			}
+			s.logger.Debugf("sending path probe packet to %s", p.remoteAddr)
+			s.logShortHeaderPacket(probe.DestConnID, probe.Ack, probe.Frames, probe.StreamFrames, probe.PacketNumber, probe.PacketNumberLen, probe.KeyPhase, protocol.ECNNon, buf.Len(), false)
+			s.registerPackedShortHeaderPacket(probe, protocol.ECNNon, p.rcvTime)
+			s.sendQueue.SendProbe(buf, p.remoteAddr)
+		}
+	}
+
+	if shouldSwitchPath {
+		s.pathManager.SwitchToPath(p.remoteAddr)
+		s.sentPacketHandler.MigratedPath(p.rcvTime, protocol.ByteCount(s.config.InitialPacketSize))
+		maxPacketSize := protocol.ByteCount(protocol.MaxPacketBufferSize)
+		if s.peerParams.MaxUDPPayloadSize > 0 && s.peerParams.MaxUDPPayloadSize < maxPacketSize {
+			maxPacketSize = s.peerParams.MaxUDPPayloadSize
+		}
+		s.mtuDiscoverer.Reset(
+			p.rcvTime,
+			protocol.ByteCount(s.config.InitialPacketSize),
+			maxPacketSize,
+		)
+		s.conn.ChangeRemoteAddr(p.remoteAddr, p.info)
+	}
+	return true, nil
 }
 
-func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ {
+func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) (wasProcessed bool, _ error) {
 	var wasQueued bool
 
 	defer func() {
@@ -959,7 +1066,7 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header)
 	}()
 
 	if hdr.Type == protocol.PacketTypeRetry {
-		return s.handleRetryPacket(hdr, p.data, p.rcvTime)
+		return s.handleRetryPacket(hdr, p.data, p.rcvTime), nil
 	}
 
 	// The server can change the source connection ID with the first Handshake packet.
@@ -969,20 +1076,20 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header)
 			s.tracer.DroppedPacket(logging.PacketTypeInitial, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnknownConnectionID)
 		}
 		s.logger.Debugf("Dropping Initial packet (%d bytes) with unexpected source connection ID: %s (expected %s)", p.Size(), hdr.SrcConnectionID, s.handshakeDestConnID)
-		return false
+		return false, nil
 	}
 	// drop 0-RTT packets, if we are a client
 	if s.perspective == protocol.PerspectiveClient && hdr.Type == protocol.PacketType0RTT {
 		if s.tracer != nil && s.tracer.DroppedPacket != nil {
 			s.tracer.DroppedPacket(logging.PacketType0RTT, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropUnexpectedPacket)
 		}
-		return false
+		return false, nil
 	}
 
 	packet, err := s.unpacker.UnpackLongHeader(hdr, p.data)
 	if err != nil {
-		wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr))
-		return false
+		wasQueued, err = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr))
+		return false, err
 	}
 
 	if s.logger.Debug() {
@@ -995,39 +1102,40 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header)
 		if s.tracer != nil && s.tracer.DroppedPacket != nil {
 			s.tracer.DroppedPacket(logging.PacketTypeFromHeader(hdr), pn, p.Size(), logging.PacketDropDuplicate)
 		}
-		return false
+		return false, nil
 	}
 
 	if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil {
-		s.closeLocal(err)
-		return false
+		return false, err
 	}
-	return true
+	return true, nil
 }
 
-func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.PacketType) (wasQueued bool) {
+func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.PacketType) (wasQueued bool, _ error) {
 	switch err {
 	case handshake.ErrKeysDropped:
 		if s.tracer != nil && s.tracer.DroppedPacket != nil {
 			s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropKeyUnavailable)
 		}
 		s.logger.Debugf("Dropping %s packet (%d bytes) because we already dropped the keys.", pt, p.Size())
+		return false, nil
 	case handshake.ErrKeysNotYetAvailable:
 		// Sealer for this encryption level not yet available.
 		// Try again later.
 		s.tryQueueingUndecryptablePacket(p, pt)
-		return true
+		return true, nil
 	case wire.ErrInvalidReservedBits:
-		s.closeLocal(&qerr.TransportError{
+		return false, &qerr.TransportError{
 			ErrorCode:    qerr.ProtocolViolation,
 			ErrorMessage: err.Error(),
-		})
+		}
 	case handshake.ErrDecryptionFailed:
 		// This might be a packet injected by an attacker. Drop it.
 		if s.tracer != nil && s.tracer.DroppedPacket != nil {
 			s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropPayloadDecryptError)
 		}
 		s.logger.Debugf("Dropping %s packet (%d bytes) that could not be unpacked. Error: %s", pt, p.Size(), err)
+		return false, nil
 	default:
 		var headerErr *headerParseError
 		if errors.As(err, &headerErr) {
@@ -1036,13 +1144,12 @@ func (s *connection) handleUnpackError(err error, p receivedPacket, pt logging.P
 				s.tracer.DroppedPacket(pt, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropHeaderParseError)
 			}
 			s.logger.Debugf("Dropping %s packet (%d bytes) for which we couldn't unpack the header. Error: %s", pt, p.Size(), err)
-		} else {
-			// This is an error returned by the AEAD (other than ErrDecryptionFailed).
-			// For example, a PROTOCOL_VIOLATION due to key updates.
-			s.closeLocal(err)
+			return false, nil
 		}
+		// This is an error returned by the AEAD (other than ErrDecryptionFailed).
+		// For example, a PROTOCOL_VIOLATION due to key updates.
+		return false, err
 	}
-	return false
 }
 
 func (s *connection) handleRetryPacket(hdr *wire.Header, data []byte, rcvTime time.Time) bool /* was this a valid Retry */ {
@@ -1219,13 +1326,17 @@ func (s *connection) handleUnpackedLongHeaderPacket(
 	s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
 	s.keepAlivePingSent = false
 
+	if packet.hdr.Type == protocol.PacketType0RTT {
+		s.largestRcvdAppData = max(s.largestRcvdAppData, packet.hdr.PacketNumber)
+	}
+
 	var log func([]logging.Frame)
 	if s.tracer != nil && s.tracer.ReceivedLongHeaderPacket != nil {
 		log = func(frames []logging.Frame) {
 			s.tracer.ReceivedLongHeaderPacket(packet.hdr, packetSize, ecn, frames)
 		}
 	}
-	isAckEliciting, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log, rcvTime)
+	isAckEliciting, _, err := s.handleFrames(packet.data, packet.hdr.DestConnectionID, packet.encryptionLevel, log, rcvTime)
 	if err != nil {
 		return err
 	}
@@ -1239,16 +1350,19 @@ func (s *connection) handleUnpackedShortHeaderPacket(
 	ecn protocol.ECN,
 	rcvTime time.Time,
 	log func([]logging.Frame),
-) error {
+) (isNonProbing bool, _ error) {
 	s.lastPacketReceivedTime = rcvTime
 	s.firstAckElicitingPacketAfterIdleSentTime = time.Time{}
 	s.keepAlivePingSent = false
 
-	isAckEliciting, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log, rcvTime)
+	isAckEliciting, isNonProbing, err := s.handleFrames(data, destConnID, protocol.Encryption1RTT, log, rcvTime)
 	if err != nil {
-		return err
+		return false, err
 	}
-	return s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting)
+	if err := s.receivedPacketHandler.ReceivedPacket(pn, ecn, protocol.Encryption1RTT, rcvTime, isAckEliciting); err != nil {
+		return false, err
+	}
+	return isNonProbing, nil
 }
 
 func (s *connection) handleFrames(
@@ -1257,7 +1371,7 @@ func (s *connection) handleFrames(
 	encLevel protocol.EncryptionLevel,
 	log func([]logging.Frame),
 	rcvTime time.Time,
-) (isAckEliciting bool, _ error) {
+) (isAckEliciting, isNonProbing bool, _ error) {
 	// Only used for tracing.
 	// If we're not tracing, this slice will always remain empty.
 	var frames []logging.Frame
@@ -1269,7 +1383,7 @@ func (s *connection) handleFrames(
 	for len(data) > 0 {
 		l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version)
 		if err != nil {
-			return false, err
+			return false, false, err
 		}
 		data = data[l:]
 		if frame == nil {
@@ -1278,6 +1392,9 @@ func (s *connection) handleFrames(
 		if ackhandler.IsFrameAckEliciting(frame) {
 			isAckEliciting = true
 		}
+		if !wire.IsProbingFrame(frame) {
+			isNonProbing = true
+		}
 		if log != nil {
 			frames = append(frames, toLoggingFrame(frame))
 		}
@@ -1288,7 +1405,7 @@ func (s *connection) handleFrames(
 		}
 		if err := s.handleFrame(frame, encLevel, destConnID, rcvTime); err != nil {
 			if log == nil {
-				return false, err
+				return false, false, err
 			}
 			// If we're logging, we need to keep parsing (but not handling) all frames.
 			handleErr = err
@@ -1298,7 +1415,7 @@ func (s *connection) handleFrames(
 	if log != nil {
 		log(frames)
 		if handleErr != nil {
-			return false, handleErr
+			return false, false, handleErr
 		}
 	}
 
@@ -1308,10 +1425,9 @@ func (s *connection) handleFrames(
 	// and an ACK serialized after that CRYPTO frame. In this case, we still want to process the ACK frame.
 	if !handshakeWasComplete && s.handshakeComplete {
 		if err := s.handleHandshakeComplete(rcvTime); err != nil {
-			return false, err
+			return false, false, err
 		}
 	}
-
 	return
 }
 
@@ -1331,7 +1447,7 @@ func (s *connection) handleFrame(
 	case *wire.AckFrame:
 		err = s.handleAckFrame(frame, encLevel, rcvTime)
 	case *wire.ConnectionCloseFrame:
-		s.handleConnectionCloseFrame(frame)
+		err = s.handleConnectionCloseFrame(frame)
 	case *wire.ResetStreamFrame:
 		err = s.handleResetStreamFrame(frame, rcvTime)
 	case *wire.MaxDataFrame:
@@ -1350,11 +1466,7 @@ func (s *connection) handleFrame(
 	case *wire.PathChallengeFrame:
 		s.handlePathChallengeFrame(frame)
 	case *wire.PathResponseFrame:
-		// since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs
-		err = &qerr.TransportError{
-			ErrorCode:    qerr.ProtocolViolation,
-			ErrorMessage: "unexpected PATH_RESPONSE frame",
-		}
+		err = s.handlePathResponseFrame(frame)
 	case *wire.NewTokenFrame:
 		err = s.handleNewTokenFrame(frame)
 	case *wire.NewConnectionIDFrame:
@@ -1373,32 +1485,39 @@ func (s *connection) handleFrame(
 
 // handlePacket is called by the server with a new packet
 func (s *connection) handlePacket(p receivedPacket) {
+	s.receivedPacketMx.Lock()
 	// Discard packets once the amount of queued packets is larger than
 	// the channel size, protocol.MaxConnUnprocessedPackets
-	select {
-	case s.receivedPackets <- p:
-	default:
+	if s.receivedPackets.Len() >= protocol.MaxConnUnprocessedPackets {
 		if s.tracer != nil && s.tracer.DroppedPacket != nil {
 			s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, p.Size(), logging.PacketDropDOSPrevention)
 		}
+		s.receivedPacketMx.Unlock()
+		return
+	}
+	s.receivedPackets.PushBack(p)
+	s.receivedPacketMx.Unlock()
+
+	select {
+	case s.notifyReceivedPacket <- struct{}{}:
+	default:
 	}
 }
 
-func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame) {
+func (s *connection) handleConnectionCloseFrame(frame *wire.ConnectionCloseFrame) error {
 	if frame.IsApplicationError {
-		s.closeRemote(&qerr.ApplicationError{
+		return &qerr.ApplicationError{
 			Remote:       true,
 			ErrorCode:    qerr.ApplicationErrorCode(frame.ErrorCode),
 			ErrorMessage: frame.ReasonPhrase,
-		})
-		return
+		}
 	}
-	s.closeRemote(&qerr.TransportError{
+	return &qerr.TransportError{
 		Remote:       true,
 		ErrorCode:    qerr.TransportErrorCode(frame.ErrorCode),
 		FrameType:    frame.FrameType,
 		ErrorMessage: frame.ReasonPhrase,
-	})
+	}
 }
 
 func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) error {
@@ -1434,8 +1553,8 @@ func (s *connection) handleHandshakeEvents(now time.Time) error {
 			s.restoreTransportParameters(ev.TransportParameters)
 			close(s.earlyConnReadyChan)
 		case handshake.EventReceivedReadKeys:
-			// Queue all packets for decryption that have been undecryptable so far.
-			s.undecryptablePacketsToProcess = s.undecryptablePackets
+			// queue all previously undecryptable packets
+			s.undecryptablePacketsToProcess = append(s.undecryptablePacketsToProcess, s.undecryptablePackets...)
 			s.undecryptablePackets = nil
 		case handshake.EventDiscard0RTTKeys:
 			err = s.dropEncryptionLevel(protocol.Encryption0RTT, now)
@@ -1514,8 +1633,21 @@ func (s *connection) handleStopSendingFrame(frame *wire.StopSendingFrame) error
 	return nil
 }
 
-func (s *connection) handlePathChallengeFrame(frame *wire.PathChallengeFrame) {
-	s.queueControlFrame(&wire.PathResponseFrame{Data: frame.Data})
+func (s *connection) handlePathChallengeFrame(f *wire.PathChallengeFrame) {
+	s.queueControlFrame(&wire.PathResponseFrame{Data: f.Data})
+}
+
+func (s *connection) handlePathResponseFrame(f *wire.PathResponseFrame) error {
+	s.logger.Debugf("received PATH_RESPONSE frame: %v", f.Data)
+	if s.pathManager == nil {
+		// since we didn't send PATH_CHALLENGEs yet, we don't expect PATH_RESPONSEs
+		return &qerr.TransportError{
+			ErrorCode:    qerr.ProtocolViolation,
+			ErrorMessage: "unexpected PATH_RESPONSE frame",
+		}
+	}
+	s.pathManager.HandlePathResponseFrame(f)
+	return nil
 }
 
 func (s *connection) handleNewTokenFrame(frame *wire.NewTokenFrame) error {
@@ -1568,6 +1700,13 @@ func (s *connection) handleAckFrame(frame *wire.AckFrame, encLevel protocol.Encr
 			return err
 		}
 	}
+	// If one of the acknowledged packets was a Path MTU probe packet, this might have increased the Path MTU estimate.
+	if s.mtuDiscoverer != nil {
+		if mtu := s.mtuDiscoverer.CurrentSize(); mtu > protocol.ByteCount(s.currentMTUEstimate.Load()) {
+			s.currentMTUEstimate.Store(uint32(mtu))
+			s.sentPacketHandler.SetMaxDatagramSize(mtu)
+		}
+	}
 	return s.cryptoStreamHandler.SetLargest1RTTAcked(frame.LargestAcked())
 }
 
@@ -1582,16 +1721,17 @@ func (s *connection) handleDatagramFrame(f *wire.DatagramFrame) error {
 	return nil
 }
 
+func (s *connection) setCloseError(e *closeError) {
+	s.closeErr.CompareAndSwap(nil, e)
+	select {
+	case s.closeChan <- struct{}{}:
+	default:
+	}
+}
+
 // closeLocal closes the connection and send a CONNECTION_CLOSE containing the error
 func (s *connection) closeLocal(e error) {
-	s.closeOnce.Do(func() {
-		if e == nil {
-			s.logger.Infof("Closing connection.")
-		} else {
-			s.logger.Errorf("Closing connection with error: %s", e)
-		}
-		s.closeChan <- closeError{err: e, immediate: false, remote: false}
-	})
+	s.setCloseError(&closeError{err: e, immediate: false})
 }
 
 // destroy closes the connection without sending the error on the wire
@@ -1601,21 +1741,7 @@ func (s *connection) destroy(e error) {
 }
 
 func (s *connection) destroyImpl(e error) {
-	s.closeOnce.Do(func() {
-		if nerr, ok := e.(net.Error); ok && nerr.Timeout() {
-			s.logger.Errorf("Destroying connection: %s", e)
-		} else {
-			s.logger.Errorf("Destroying connection with error: %s", e)
-		}
-		s.closeChan <- closeError{err: e, immediate: true, remote: false}
-	})
-}
-
-func (s *connection) closeRemote(e error) {
-	s.closeOnce.Do(func() {
-		s.logger.Errorf("Peer closed connection with error: %s", e)
-		s.closeChan <- closeError{err: e, immediate: true, remote: true}
-	})
+	s.setCloseError(&closeError{err: e, immediate: true})
 }
 
 func (s *connection) CloseWithError(code ApplicationErrorCode, desc string) error {
@@ -1633,13 +1759,25 @@ func (s *connection) closeWithTransportError(code TransportErrorCode) {
 }
 
 func (s *connection) handleCloseError(closeErr *closeError) {
+	if closeErr.immediate {
+		if nerr, ok := closeErr.err.(net.Error); ok && nerr.Timeout() {
+			s.logger.Errorf("Destroying connection: %s", closeErr.err)
+		} else {
+			s.logger.Errorf("Destroying connection with error: %s", closeErr.err)
+		}
+	} else {
+		if closeErr.err == nil {
+			s.logger.Infof("Closing connection.")
+		} else {
+			s.logger.Errorf("Closing connection with error: %s", closeErr.err)
+		}
+	}
+
 	e := closeErr.err
 	if e == nil {
 		e = &qerr.ApplicationError{}
 	} else {
-		defer func() {
-			closeErr.err = e
-		}()
+		defer func() { closeErr.err = e }()
 	}
 
 	var (
@@ -1649,14 +1787,17 @@ func (s *connection) handleCloseError(closeErr *closeError) {
 		applicationErr        *ApplicationError
 		transportErr          *TransportError
 	)
+	var isRemoteClose bool
 	switch {
 	case errors.Is(e, qerr.ErrIdleTimeout),
 		errors.Is(e, qerr.ErrHandshakeTimeout),
 		errors.As(e, &statelessResetErr),
 		errors.As(e, &versionNegotiationErr),
-		errors.As(e, &recreateErr),
-		errors.As(e, &applicationErr),
-		errors.As(e, &transportErr):
+		errors.As(e, &recreateErr):
+	case errors.As(e, &applicationErr):
+		isRemoteClose = applicationErr.Remote
+	case errors.As(e, &transportErr):
+		isRemoteClose = transportErr.Remote
 	case closeErr.immediate:
 		e = closeErr.err
 	default:
@@ -1682,7 +1823,7 @@ func (s *connection) handleCloseError(closeErr *closeError) {
 	}
 
 	// If this is a remote close we're done here
-	if closeErr.remote {
+	if isRemoteClose {
 		s.connIDGenerator.ReplaceWithClosed(nil)
 		return
 	}
@@ -1831,7 +1972,6 @@ func (s *connection) applyTransportParameters() {
 		s.rttStats,
 		protocol.ByteCount(s.config.InitialPacketSize),
 		maxPacketSize,
-		s.onMTUIncreased,
 		s.tracer,
 	)
 }
@@ -1952,7 +2092,10 @@ func (s *connection) sendPacketsWithoutGSO(now time.Time) error {
 			return nil
 		}
 		// Prioritize receiving of packets over sending out more packets.
-		if len(s.receivedPackets) > 0 {
+		s.receivedPacketMx.Lock()
+		hasPackets := !s.receivedPackets.Empty()
+		s.receivedPacketMx.Unlock()
+		if hasPackets {
 			s.pacingDeadline = deadlineSendImmediately
 			return nil
 		}
@@ -2010,7 +2153,10 @@ func (s *connection) sendPacketsWithGSO(now time.Time) error {
 		}
 
 		// Prioritize receiving of packets over sending out more packets.
-		if len(s.receivedPackets) > 0 {
+		s.receivedPacketMx.Lock()
+		hasPackets := !s.receivedPackets.Empty()
+		s.receivedPacketMx.Unlock()
+		if hasPackets {
 			s.pacingDeadline = deadlineSendImmediately
 			return nil
 		}
@@ -2076,7 +2222,7 @@ func (s *connection) sendProbePacket(sendMode ackhandler.SendMode, now time.Time
 			break
 		}
 		var err error
-		packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), now, s.version)
+		packet, err = s.packer.MaybePackPTOProbePacket(encLevel, s.maxPacketSize(), now, s.version)
 		if err != nil {
 			return err
 		}
@@ -2087,7 +2233,7 @@ func (s *connection) sendProbePacket(sendMode ackhandler.SendMode, now time.Time
 	if packet == nil {
 		s.retransmissionQueue.AddPing(encLevel)
 		var err error
-		packet, err = s.packer.MaybePackProbePacket(encLevel, s.maxPacketSize(), now, s.version)
+		packet, err = s.packer.MaybePackPTOProbePacket(encLevel, s.maxPacketSize(), now, s.version)
 		if err != nil {
 			return err
 		}
@@ -2113,6 +2259,21 @@ func (s *connection) appendOneShortHeaderPacket(buf *packetBuffer, maxSize proto
 }
 
 func (s *connection) registerPackedShortHeaderPacket(p shortHeaderPacket, ecn protocol.ECN, now time.Time) {
+	if p.IsPathProbePacket {
+		s.sentPacketHandler.SentPacket(
+			now,
+			p.PacketNumber,
+			protocol.InvalidPacketNumber,
+			p.StreamFrames,
+			p.Frames,
+			protocol.Encryption1RTT,
+			ecn,
+			p.Length,
+			p.IsPathMTUProbePacket,
+			true,
+		)
+		return
+	}
 	if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && (len(p.StreamFrames) > 0 || ackhandler.HasAckElicitingFrames(p.Frames)) {
 		s.firstAckElicitingPacketAfterIdleSentTime = now
 	}
@@ -2121,7 +2282,18 @@ func (s *connection) registerPackedShortHeaderPacket(p shortHeaderPacket, ecn pr
 	if p.Ack != nil {
 		largestAcked = p.Ack.LargestAcked()
 	}
-	s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, ecn, p.Length, p.IsPathMTUProbePacket)
+	s.sentPacketHandler.SentPacket(
+		now,
+		p.PacketNumber,
+		largestAcked,
+		p.StreamFrames,
+		p.Frames,
+		protocol.Encryption1RTT,
+		ecn,
+		p.Length,
+		p.IsPathMTUProbePacket,
+		false,
+	)
 	s.connIDManager.SentPacket()
 }
 
@@ -2135,7 +2307,18 @@ func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, ecn prot
 		if p.ack != nil {
 			largestAcked = p.ack.LargestAcked()
 		}
-		s.sentPacketHandler.SentPacket(now, p.header.PacketNumber, largestAcked, p.streamFrames, p.frames, p.EncryptionLevel(), ecn, p.length, false)
+		s.sentPacketHandler.SentPacket(
+			now,
+			p.header.PacketNumber,
+			largestAcked,
+			p.streamFrames,
+			p.frames,
+			p.EncryptionLevel(),
+			ecn,
+			p.length,
+			false,
+			false,
+		)
 		if s.perspective == protocol.PerspectiveClient && p.EncryptionLevel() == protocol.EncryptionHandshake &&
 			!s.droppedInitialKeys {
 			// On the client side, Initial keys are dropped as soon as the first Handshake packet is sent.
@@ -2153,7 +2336,18 @@ func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, ecn prot
 		if p.Ack != nil {
 			largestAcked = p.Ack.LargestAcked()
 		}
-		s.sentPacketHandler.SentPacket(now, p.PacketNumber, largestAcked, p.StreamFrames, p.Frames, protocol.Encryption1RTT, ecn, p.Length, p.IsPathMTUProbePacket)
+		s.sentPacketHandler.SentPacket(
+			now,
+			p.PacketNumber,
+			largestAcked,
+			p.StreamFrames,
+			p.Frames,
+			protocol.Encryption1RTT,
+			ecn,
+			p.Length,
+			p.IsPathMTUProbePacket,
+			false,
+		)
 	}
 	s.connIDManager.SentPacket()
 	s.sendQueue.Send(packet.buffer, 0, ecn)
@@ -2299,11 +2493,6 @@ func (s *connection) onStreamCompleted(id protocol.StreamID) {
 	s.framer.RemoveActiveStream(id)
 }
 
-func (s *connection) onMTUIncreased(mtu protocol.ByteCount) {
-	s.maxPayloadSizeEstimate.Store(uint32(estimateMaxPayloadSize(mtu)))
-	s.sentPacketHandler.SetMaxDatagramSize(mtu)
-}
-
 func (s *connection) SendDatagram(p []byte) error {
 	if !s.supportsDatagrams() {
 		return errors.New("datagram support disabled")
@@ -2314,7 +2503,7 @@ func (s *connection) SendDatagram(p []byte) error {
 	// Under many circumstances we could send a few more bytes.
 	maxDataLen := min(
 		f.MaxDataLen(s.peerParams.MaxDatagramFrameSize, s.version),
-		protocol.ByteCount(s.maxPayloadSizeEstimate.Load()),
+		protocol.ByteCount(s.currentMTUEstimate.Load()),
 	)
 	if protocol.ByteCount(len(p)) > maxDataLen {
 		return &DatagramTooLargeError{MaxDatagramPayloadSize: int64(maxDataLen)}
diff --git a/vendor/github.com/quic-go/quic-go/http3/server.go b/vendor/github.com/quic-go/quic-go/http3/server.go
index 097a8005..1479609c 100644
--- a/vendor/github.com/quic-go/quic-go/http3/server.go
+++ b/vendor/github.com/quic-go/quic-go/http3/server.go
@@ -684,7 +684,7 @@ func (s *Server) handleRequest(conn *connection, str quic.Stream, datagrams *dat
 				if logger == nil {
 					logger = slog.Default()
 				}
-				logger.Error("http: panic serving", "arg", p, "trace", string(buf))
+				logger.Error("http3: panic serving", "arg", p, "trace", string(buf))
 			}
 		}()
 		handler.ServeHTTP(r, req)
@@ -694,18 +694,6 @@ func (s *Server) handleRequest(conn *connection, str quic.Stream, datagrams *dat
 		return
 	}
 
-	// only write response when there is no panic
-	if !panicked {
-		// response not written to the client yet, set Content-Length
-		if !r.headerWritten {
-			if _, haveCL := r.header["Content-Length"]; !haveCL {
-				r.header.Set("Content-Length", strconv.FormatInt(r.numWritten, 10))
-			}
-		}
-		r.Flush()
-		r.flushTrailers()
-	}
-
 	// abort the stream when there is a panic
 	if panicked {
 		str.CancelRead(quic.StreamErrorCode(ErrCodeInternalError))
@@ -713,9 +701,17 @@ func (s *Server) handleRequest(conn *connection, str quic.Stream, datagrams *dat
 		return
 	}
 
+	// response not written to the client yet, set Content-Length
+	if !r.headerWritten {
+		if _, haveCL := r.header["Content-Length"]; !haveCL {
+			r.header.Set("Content-Length", strconv.FormatInt(r.numWritten, 10))
+		}
+	}
+	r.Flush()
+	r.flushTrailers()
+
 	// If the EOF was read by the handler, CancelRead() is a no-op.
 	str.CancelRead(quic.StreamErrorCode(ErrCodeNoError))
-
 	str.Close()
 }
 
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
index acf95426..5fcce44d 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go
@@ -10,7 +10,7 @@ import (
 // SentPacketHandler handles ACKs received for outgoing packets
 type SentPacketHandler interface {
 	// SentPacket may modify the packet
-	SentPacket(t time.Time, pn, largestAcked protocol.PacketNumber, streamFrames []StreamFrame, frames []Frame, encLevel protocol.EncryptionLevel, ecn protocol.ECN, size protocol.ByteCount, isPathMTUProbePacket bool)
+	SentPacket(t time.Time, pn, largestAcked protocol.PacketNumber, streamFrames []StreamFrame, frames []Frame, encLevel protocol.EncryptionLevel, ecn protocol.ECN, size protocol.ByteCount, isPathMTUProbePacket, isPathProbePacket bool)
 	// ReceivedAck processes an ACK frame.
 	// It does not store a copy of the frame.
 	ReceivedAck(f *wire.AckFrame, encLevel protocol.EncryptionLevel, rcvTime time.Time) (bool /* 1-RTT packet acked */, error)
@@ -34,6 +34,8 @@ type SentPacketHandler interface {
 
 	GetLossDetectionTimeout() time.Time
 	OnLossDetectionTimeout(now time.Time) error
+
+	MigratedPath(now time.Time, initialMaxPacketSize protocol.ByteCount)
 }
 
 type sentPacketTracker interface {
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
index 5f43689b..c634939a 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go
@@ -22,10 +22,11 @@ type packet struct {
 	includedInBytesInFlight bool
 	declaredLost            bool
 	skippedPacket           bool
+	isPathProbePacket       bool
 }
 
 func (p *packet) outstanding() bool {
-	return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket
+	return !p.declaredLost && !p.skippedPacket && !p.IsPathMTUProbePacket && !p.isPathProbePacket
 }
 
 var packetPool = sync.Pool{New: func() any { return &packet{} }}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
index 5276fe19..7c3cf892 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
@@ -27,6 +27,9 @@ const (
 	maxPTODuration = 60 * time.Second
 )
 
+// Path probe packets are declared lost after this time.
+const pathProbePacketLossTimeout = time.Second
+
 type packetNumberSpace struct {
 	history sentPacketHistory
 	pns     packetNumberGenerator
@@ -174,10 +177,9 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel, now t
 		if pnSpace == nil {
 			return
 		}
-		pnSpace.history.Iterate(func(p *packet) (bool, error) {
+		for p := range pnSpace.history.Packets() {
 			h.removeFromBytesInFlight(p)
-			return true, nil
-		})
+		}
 	}
 	// drop the packet history
 	//nolint:exhaustive // Not every packet number space can be dropped.
@@ -194,14 +196,13 @@ func (h *sentPacketHandler) DropPackets(encLevel protocol.EncryptionLevel, now t
 		// and not when the client drops 0-RTT keys when the handshake completes.
 		// When 0-RTT is rejected, all application data sent so far becomes invalid.
 		// Delete the packets from the history and remove them from bytes_in_flight.
-		h.appDataPackets.history.Iterate(func(p *packet) (bool, error) {
+		for p := range h.appDataPackets.history.Packets() {
 			if p.EncryptionLevel != protocol.Encryption0RTT && !p.skippedPacket {
-				return false, nil
+				break
 			}
 			h.removeFromBytesInFlight(p)
 			h.appDataPackets.history.Remove(p.PacketNumber)
-			return true, nil
-		})
+		}
 	default:
 		panic(fmt.Sprintf("Cannot drop keys for encryption level %s", encLevel))
 	}
@@ -249,11 +250,12 @@ func (h *sentPacketHandler) SentPacket(
 	ecn protocol.ECN,
 	size protocol.ByteCount,
 	isPathMTUProbePacket bool,
+	isPathProbePacket bool,
 ) {
 	h.bytesSent += size
 
 	pnSpace := h.getPacketNumberSpace(encLevel)
-	if h.logger.Debug() && pnSpace.history.HasOutstandingPackets() {
+	if h.logger.Debug() && (pnSpace.history.HasOutstandingPackets() || pnSpace.history.HasOutstandingPathProbes()) {
 		for p := max(0, pnSpace.largestSent+1); p < pn; p++ {
 			h.logger.Debugf("Skipping packet number %d", p)
 		}
@@ -262,6 +264,18 @@ func (h *sentPacketHandler) SentPacket(
 	pnSpace.largestSent = pn
 	isAckEliciting := len(streamFrames) > 0 || len(frames) > 0
 
+	if isPathProbePacket {
+		p := getPacket()
+		p.SendTime = t
+		p.PacketNumber = pn
+		p.EncryptionLevel = encLevel
+		p.Length = size
+		p.Frames = frames
+		p.isPathProbePacket = true
+		pnSpace.history.SentPathProbePacket(p)
+		h.setLossDetectionTimer(t)
+		return
+	}
 	if isAckEliciting {
 		pnSpace.lastAckElicitingPacketTime = t
 		h.bytesInFlight += size
@@ -341,7 +355,7 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
 	}
 	// update the RTT, if the largest acked is newly acknowledged
 	if len(ackedPackets) > 0 {
-		if p := ackedPackets[len(ackedPackets)-1]; p.PacketNumber == ack.LargestAcked() {
+		if p := ackedPackets[len(ackedPackets)-1]; p.PacketNumber == ack.LargestAcked() && !p.isPathProbePacket {
 			// don't use the ack delay for Initial and Handshake packets
 			var ackDelay time.Duration
 			if encLevel == protocol.Encryption1RTT {
@@ -365,8 +379,9 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
 
 	pnSpace.largestAcked = max(pnSpace.largestAcked, largestAcked)
 
-	if err := h.detectLostPackets(rcvTime, encLevel); err != nil {
-		return false, err
+	h.detectLostPackets(rcvTime, encLevel)
+	if encLevel == protocol.Encryption1RTT {
+		h.detectLostPathProbes(rcvTime)
 	}
 	var acked1RTTPacket bool
 	for _, p := range ackedPackets {
@@ -377,7 +392,9 @@ func (h *sentPacketHandler) ReceivedAck(ack *wire.AckFrame, encLevel protocol.En
 			acked1RTTPacket = true
 		}
 		h.removeFromBytesInFlight(p)
-		putPacket(p)
+		if !p.isPathProbePacket {
+			putPacket(p)
+		}
 	}
 	// After this point, we must not use ackedPackets any longer!
 	// We've already returned the buffers.
@@ -411,14 +428,13 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
 	ackRangeIndex := 0
 	lowestAcked := ack.LowestAcked()
 	largestAcked := ack.LargestAcked()
-	err := pnSpace.history.Iterate(func(p *packet) (bool, error) {
-		// Ignore packets below the lowest acked
+	for p := range pnSpace.history.Packets() {
+		// ignore packets below the lowest acked
 		if p.PacketNumber < lowestAcked {
-			return true, nil
+			continue
 		}
-		// Break after largest acked is reached
 		if p.PacketNumber > largestAcked {
-			return false, nil
+			break
 		}
 
 		if ack.HasMissingRanges() {
@@ -430,21 +446,28 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
 			}
 
 			if p.PacketNumber < ackRange.Smallest { // packet not contained in ACK range
-				return true, nil
+				continue
 			}
 			if p.PacketNumber > ackRange.Largest {
-				return false, fmt.Errorf("BUG: ackhandler would have acked wrong packet %d, while evaluating range %d -> %d", p.PacketNumber, ackRange.Smallest, ackRange.Largest)
+				return nil, fmt.Errorf("BUG: ackhandler would have acked wrong packet %d, while evaluating range %d -> %d", p.PacketNumber, ackRange.Smallest, ackRange.Largest)
 			}
 		}
 		if p.skippedPacket {
-			return false, &qerr.TransportError{
+			return nil, &qerr.TransportError{
 				ErrorCode:    qerr.ProtocolViolation,
 				ErrorMessage: fmt.Sprintf("received an ACK for skipped packet number: %d (%s)", p.PacketNumber, encLevel),
 			}
 		}
+		if p.isPathProbePacket {
+			probePacket := pnSpace.history.RemovePathProbe(p.PacketNumber)
+			if probePacket == nil {
+				panic(fmt.Sprintf("path probe doesn't exist: %d", p.PacketNumber))
+			}
+			h.ackedPackets = append(h.ackedPackets, probePacket)
+			continue
+		}
 		h.ackedPackets = append(h.ackedPackets, p)
-		return true, nil
-	})
+	}
 	if h.logger.Debug() && len(h.ackedPackets) > 0 {
 		pns := make([]protocol.PacketNumber, len(h.ackedPackets))
 		for i, p := range h.ackedPackets {
@@ -475,8 +498,7 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
 			h.tracer.AcknowledgedPacket(encLevel, p.PacketNumber)
 		}
 	}
-
-	return h.ackedPackets, err
+	return h.ackedPackets, nil
 }
 
 func (h *sentPacketHandler) getLossTimeAndSpace() (time.Time, protocol.EncryptionLevel) {
@@ -507,7 +529,7 @@ func (h *sentPacketHandler) getScaledPTO(includeMaxAckDelay bool) time.Duration
 }
 
 // same logic as getLossTimeAndSpace, but for lastAckElicitingPacketTime instead of lossTime
-func (h *sentPacketHandler) getPTOTimeAndSpace(now time.Time) (pto time.Time, encLevel protocol.EncryptionLevel, ok bool) {
+func (h *sentPacketHandler) getPTOTimeAndSpace(now time.Time) (pto time.Time, encLevel protocol.EncryptionLevel) {
 	// We only send application data probe packets once the handshake is confirmed,
 	// because before that, we don't have the keys to decrypt ACKs sent in 1-RTT packets.
 	if !h.handshakeConfirmed && !h.hasOutstandingCryptoPackets() {
@@ -516,32 +538,35 @@ func (h *sentPacketHandler) getPTOTimeAndSpace(now time.Time) (pto time.Time, en
 		}
 		t := now.Add(h.getScaledPTO(false))
 		if h.initialPackets != nil {
-			return t, protocol.EncryptionInitial, true
+			return t, protocol.EncryptionInitial
 		}
-		return t, protocol.EncryptionHandshake, true
+		return t, protocol.EncryptionHandshake
 	}
 
-	if h.initialPackets != nil {
+	if h.initialPackets != nil && h.initialPackets.history.HasOutstandingPackets() &&
+		!h.initialPackets.lastAckElicitingPacketTime.IsZero() {
 		encLevel = protocol.EncryptionInitial
 		if t := h.initialPackets.lastAckElicitingPacketTime; !t.IsZero() {
 			pto = t.Add(h.getScaledPTO(false))
 		}
 	}
-	if h.handshakePackets != nil && !h.handshakePackets.lastAckElicitingPacketTime.IsZero() {
+	if h.handshakePackets != nil && h.handshakePackets.history.HasOutstandingPackets() &&
+		!h.handshakePackets.lastAckElicitingPacketTime.IsZero() {
 		t := h.handshakePackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(false))
 		if pto.IsZero() || (!t.IsZero() && t.Before(pto)) {
 			pto = t
 			encLevel = protocol.EncryptionHandshake
 		}
 	}
-	if h.handshakeConfirmed && !h.appDataPackets.lastAckElicitingPacketTime.IsZero() {
+	if h.handshakeConfirmed && h.appDataPackets.history.HasOutstandingPackets() &&
+		!h.appDataPackets.lastAckElicitingPacketTime.IsZero() {
 		t := h.appDataPackets.lastAckElicitingPacketTime.Add(h.getScaledPTO(true))
 		if pto.IsZero() || (!t.IsZero() && t.Before(pto)) {
 			pto = t
 			encLevel = protocol.Encryption1RTT
 		}
 	}
-	return pto, encLevel, true
+	return pto, encLevel
 }
 
 func (h *sentPacketHandler) hasOutstandingCryptoPackets() bool {
@@ -573,8 +598,8 @@ func (h *sentPacketHandler) setLossDetectionTimer(now time.Time) {
 
 func (h *sentPacketHandler) lossDetectionTime(now time.Time) alarmTimer {
 	// cancel the alarm if no packets are outstanding
-	if h.peerCompletedAddressValidation &&
-		!h.hasOutstandingCryptoPackets() && !h.appDataPackets.history.HasOutstandingPackets() {
+	if h.peerCompletedAddressValidation && !h.hasOutstandingCryptoPackets() &&
+		!h.appDataPackets.history.HasOutstandingPackets() && !h.appDataPackets.history.HasOutstandingPathProbes() {
 		return alarmTimer{}
 	}
 
@@ -583,28 +608,62 @@ func (h *sentPacketHandler) lossDetectionTime(now time.Time) alarmTimer {
 		return alarmTimer{}
 	}
 
+	var pathProbeLossTime time.Time
+	if h.appDataPackets.history.HasOutstandingPathProbes() {
+		if p := h.appDataPackets.history.FirstOutstandingPathProbe(); p != nil {
+			pathProbeLossTime = p.SendTime.Add(pathProbePacketLossTimeout)
+		}
+	}
+
 	// early retransmit timer or time loss detection
 	lossTime, encLevel := h.getLossTimeAndSpace()
-	if !lossTime.IsZero() {
+	if !lossTime.IsZero() && (pathProbeLossTime.IsZero() || lossTime.Before(pathProbeLossTime)) {
 		return alarmTimer{
 			Time:            lossTime,
 			TimerType:       logging.TimerTypeACK,
 			EncryptionLevel: encLevel,
 		}
 	}
-
-	ptoTime, encLevel, ok := h.getPTOTimeAndSpace(now)
-	if !ok {
-		return alarmTimer{}
+	ptoTime, encLevel := h.getPTOTimeAndSpace(now)
+	if !ptoTime.IsZero() && (pathProbeLossTime.IsZero() || ptoTime.Before(pathProbeLossTime)) {
+		return alarmTimer{
+			Time:            ptoTime,
+			TimerType:       logging.TimerTypePTO,
+			EncryptionLevel: encLevel,
+		}
 	}
-	return alarmTimer{
-		Time:            ptoTime,
-		TimerType:       logging.TimerTypePTO,
-		EncryptionLevel: encLevel,
+	if !pathProbeLossTime.IsZero() {
+		return alarmTimer{
+			Time:            pathProbeLossTime,
+			TimerType:       logging.TimerTypePathProbe,
+			EncryptionLevel: encLevel,
+		}
+	}
+	return alarmTimer{}
+}
+
+func (h *sentPacketHandler) detectLostPathProbes(now time.Time) {
+	if !h.appDataPackets.history.HasOutstandingPathProbes() {
+		return
+	}
+	lossTime := now.Add(-pathProbePacketLossTimeout)
+	// RemovePathProbe cannot be called while iterating.
+	var lostPathProbes []*packet
+	for p := range h.appDataPackets.history.PathProbes() {
+		if !p.SendTime.After(lossTime) {
+			lostPathProbes = append(lostPathProbes, p)
+		}
+	}
+	for _, p := range lostPathProbes {
+		for _, f := range p.Frames {
+			f.Handler.OnLost(f.Frame)
+		}
+		h.appDataPackets.history.Remove(p.PacketNumber)
+		h.appDataPackets.history.RemovePathProbe(p.PacketNumber)
 	}
 }
 
-func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.EncryptionLevel) error {
+func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.EncryptionLevel) {
 	pnSpace := h.getPacketNumberSpace(encLevel)
 	pnSpace.lossTime = time.Time{}
 
@@ -618,15 +677,16 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E
 	lostSendTime := now.Add(-lossDelay)
 
 	priorInFlight := h.bytesInFlight
-	return pnSpace.history.Iterate(func(p *packet) (bool, error) {
+	for p := range pnSpace.history.Packets() {
 		if p.PacketNumber > pnSpace.largestAcked {
-			return false, nil
+			break
 		}
 
+		isRegularPacket := !p.skippedPacket && !p.isPathProbePacket
 		var packetLost bool
 		if !p.SendTime.After(lostSendTime) {
 			packetLost = true
-			if !p.skippedPacket {
+			if isRegularPacket {
 				if h.logger.Debug() {
 					h.logger.Debugf("\tlost packet %d (time threshold)", p.PacketNumber)
 				}
@@ -636,7 +696,7 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E
 			}
 		} else if pnSpace.largestAcked >= p.PacketNumber+packetThreshold {
 			packetLost = true
-			if !p.skippedPacket {
+			if isRegularPacket {
 				if h.logger.Debug() {
 					h.logger.Debugf("\tlost packet %d (reordering threshold)", p.PacketNumber)
 				}
@@ -654,7 +714,7 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E
 		}
 		if packetLost {
 			pnSpace.history.DeclareLost(p.PacketNumber)
-			if !p.skippedPacket {
+			if isRegularPacket {
 				// the bytes in flight need to be reduced no matter if the frames in this packet will be retransmitted
 				h.removeFromBytesInFlight(p)
 				h.queueFramesForRetransmission(p)
@@ -666,12 +726,16 @@ func (h *sentPacketHandler) detectLostPackets(now time.Time, encLevel protocol.E
 				}
 			}
 		}
-		return true, nil
-	})
+	}
 }
 
 func (h *sentPacketHandler) OnLossDetectionTimeout(now time.Time) error {
 	defer h.setLossDetectionTimer(now)
+
+	if h.handshakeConfirmed {
+		h.detectLostPathProbes(now)
+	}
+
 	earliestLossTime, encLevel := h.getLossTimeAndSpace()
 	if !earliestLossTime.IsZero() {
 		if h.logger.Debug() {
@@ -681,7 +745,8 @@ func (h *sentPacketHandler) OnLossDetectionTimeout(now time.Time) error {
 			h.tracer.LossTimerExpired(logging.TimerTypeACK, encLevel)
 		}
 		// Early retransmit or time loss detection
-		return h.detectLostPackets(now, encLevel)
+		h.detectLostPackets(now, encLevel)
+		return nil
 	}
 
 	// PTO
@@ -702,11 +767,12 @@ func (h *sentPacketHandler) OnLossDetectionTimeout(now time.Time) error {
 		return nil
 	}
 
-	_, encLevel, ok := h.getPTOTimeAndSpace(now)
-	if !ok {
+	ptoTime, encLevel := h.getPTOTimeAndSpace(now)
+	if ptoTime.IsZero() {
 		return nil
 	}
-	if ps := h.getPacketNumberSpace(encLevel); !ps.history.HasOutstandingPackets() && !h.peerCompletedAddressValidation {
+	ps := h.getPacketNumberSpace(encLevel)
+	if !ps.history.HasOutstandingPackets() && !ps.history.HasOutstandingPathProbes() && !h.peerCompletedAddressValidation {
 		return nil
 	}
 	h.ptoCount++
@@ -868,24 +934,21 @@ func (h *sentPacketHandler) queueFramesForRetransmission(p *packet) {
 func (h *sentPacketHandler) ResetForRetry(now time.Time) {
 	h.bytesInFlight = 0
 	var firstPacketSendTime time.Time
-	h.initialPackets.history.Iterate(func(p *packet) (bool, error) {
+	for p := range h.initialPackets.history.Packets() {
 		if firstPacketSendTime.IsZero() {
 			firstPacketSendTime = p.SendTime
 		}
-		if p.declaredLost || p.skippedPacket {
-			return true, nil
-		}
-		h.queueFramesForRetransmission(p)
-		return true, nil
-	})
-	// All application data packets sent at this point are 0-RTT packets.
-	// In the case of a Retry, we can assume that the server dropped all of them.
-	h.appDataPackets.history.Iterate(func(p *packet) (bool, error) {
 		if !p.declaredLost && !p.skippedPacket {
 			h.queueFramesForRetransmission(p)
 		}
-		return true, nil
-	})
+	}
+	// All application data packets sent at this point are 0-RTT packets.
+	// In the case of a Retry, we can assume that the server dropped all of them.
+	for p := range h.appDataPackets.history.Packets() {
+		if !p.declaredLost && !p.skippedPacket {
+			h.queueFramesForRetransmission(p)
+		}
+	}
 
 	// Only use the Retry to estimate the RTT if we didn't send any retransmission for the Initial.
 	// Otherwise, we don't know which Initial the Retry was sent in response to.
@@ -913,3 +976,25 @@ func (h *sentPacketHandler) ResetForRetry(now time.Time) {
 	}
 	h.ptoCount = 0
 }
+
+func (h *sentPacketHandler) MigratedPath(now time.Time, initialMaxDatagramSize protocol.ByteCount) {
+	h.rttStats.ResetForPathMigration()
+	for p := range h.appDataPackets.history.Packets() {
+		h.appDataPackets.history.DeclareLost(p.PacketNumber)
+		if !p.skippedPacket && !p.isPathProbePacket {
+			h.removeFromBytesInFlight(p)
+			h.queueFramesForRetransmission(p)
+		}
+	}
+	for p := range h.appDataPackets.history.PathProbes() {
+		h.appDataPackets.history.RemovePathProbe(p.PacketNumber)
+	}
+	h.congestion = congestion.NewCubicSender(
+		congestion.DefaultClock{},
+		h.rttStats,
+		initialMaxDatagramSize,
+		true, // use Reno
+		h.tracer,
+	)
+	h.setLossDetectionTimer(now)
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
index 9968df6a..0aabc6d9 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go
@@ -2,12 +2,14 @@ package ackhandler
 
 import (
 	"fmt"
+	"iter"
 
 	"github.com/quic-go/quic-go/internal/protocol"
 )
 
 type sentPacketHistory struct {
-	packets []*packet
+	packets          []*packet
+	pathProbePackets []*packet
 
 	numOutstanding int
 
@@ -32,11 +34,11 @@ func (h *sentPacketHistory) checkSequentialPacketNumberUse(pn protocol.PacketNum
 			panic("non-sequential packet number use")
 		}
 	}
+	h.highestPacketNumber = pn
 }
 
 func (h *sentPacketHistory) SkippedPacket(pn protocol.PacketNumber) {
 	h.checkSequentialPacketNumberUse(pn)
-	h.highestPacketNumber = pn
 	h.packets = append(h.packets, &packet{
 		PacketNumber:  pn,
 		skippedPacket: true,
@@ -45,7 +47,6 @@ func (h *sentPacketHistory) SkippedPacket(pn protocol.PacketNumber) {
 
 func (h *sentPacketHistory) SentNonAckElicitingPacket(pn protocol.PacketNumber) {
 	h.checkSequentialPacketNumberUse(pn)
-	h.highestPacketNumber = pn
 	if len(h.packets) > 0 {
 		h.packets = append(h.packets, nil)
 	}
@@ -53,28 +54,42 @@ func (h *sentPacketHistory) SentNonAckElicitingPacket(pn protocol.PacketNumber)
 
 func (h *sentPacketHistory) SentAckElicitingPacket(p *packet) {
 	h.checkSequentialPacketNumberUse(p.PacketNumber)
-	h.highestPacketNumber = p.PacketNumber
 	h.packets = append(h.packets, p)
 	if p.outstanding() {
 		h.numOutstanding++
 	}
 }
 
-// Iterate iterates through all packets.
-func (h *sentPacketHistory) Iterate(cb func(*packet) (cont bool, err error)) error {
-	for _, p := range h.packets {
-		if p == nil {
-			continue
-		}
-		cont, err := cb(p)
-		if err != nil {
-			return err
-		}
-		if !cont {
-			return nil
+func (h *sentPacketHistory) SentPathProbePacket(p *packet) {
+	h.checkSequentialPacketNumberUse(p.PacketNumber)
+	h.packets = append(h.packets, &packet{
+		PacketNumber:      p.PacketNumber,
+		isPathProbePacket: true,
+	})
+	h.pathProbePackets = append(h.pathProbePackets, p)
+}
+
+func (h *sentPacketHistory) Packets() iter.Seq[*packet] {
+	return func(yield func(*packet) bool) {
+		for _, p := range h.packets {
+			if p == nil {
+				continue
+			}
+			if !yield(p) {
+				return
+			}
+		}
+	}
+}
+
+func (h *sentPacketHistory) PathProbes() iter.Seq[*packet] {
+	return func(yield func(*packet) bool) {
+		for _, p := range h.pathProbePackets {
+			if !yield(p) {
+				return
+			}
 		}
 	}
-	return nil
 }
 
 // FirstOutstanding returns the first outstanding packet.
@@ -90,6 +105,14 @@ func (h *sentPacketHistory) FirstOutstanding() *packet {
 	return nil
 }
 
+// FirstOutstandingPathProbe returns the first outstanding path probe packet
+func (h *sentPacketHistory) FirstOutstandingPathProbe() *packet {
+	if len(h.pathProbePackets) == 0 {
+		return nil
+	}
+	return h.pathProbePackets[0]
+}
+
 func (h *sentPacketHistory) Len() int {
 	return len(h.packets)
 }
@@ -125,6 +148,27 @@ func (h *sentPacketHistory) Remove(pn protocol.PacketNumber) error {
 	return nil
 }
 
+// RemovePathProbe removes a path probe packet.
+// It runs in O(N), but that's ok, since we don't expect to send many path probe packets.
+// It is not valid to call this function while iterating over PathProbes.
+func (h *sentPacketHistory) RemovePathProbe(pn protocol.PacketNumber) *packet {
+	var packetToDelete *packet
+	idx := -1
+	for i, p := range h.pathProbePackets {
+		if p.PacketNumber == pn {
+			packetToDelete = p
+			idx = i
+			break
+		}
+	}
+	if idx != -1 {
+		// don't use slices.Delete, because it zeros the deleted element
+		copy(h.pathProbePackets[idx:], h.pathProbePackets[idx+1:])
+		h.pathProbePackets = h.pathProbePackets[:len(h.pathProbePackets)-1]
+	}
+	return packetToDelete
+}
+
 // getIndex gets the index of packet p in the packets slice.
 func (h *sentPacketHistory) getIndex(p protocol.PacketNumber) (int, bool) {
 	if len(h.packets) == 0 {
@@ -145,6 +189,10 @@ func (h *sentPacketHistory) HasOutstandingPackets() bool {
 	return h.numOutstanding > 0
 }
 
+func (h *sentPacketHistory) HasOutstandingPathProbes() bool {
+	return len(h.pathProbePackets) > 0
+}
+
 // delete all nil entries at the beginning of the packets slice
 func (h *sentPacketHistory) cleanupStart() {
 	for i, p := range h.packets {
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
index c8e6cb33..1a86c675 100644
--- a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go
@@ -12,7 +12,6 @@ import (
 
 	"github.com/quic-go/quic-go/internal/protocol"
 	"github.com/quic-go/quic-go/internal/qerr"
-	"github.com/quic-go/quic-go/internal/qtls"
 	"github.com/quic-go/quic-go/internal/utils"
 	"github.com/quic-go/quic-go/internal/wire"
 	"github.com/quic-go/quic-go/logging"
@@ -89,12 +88,13 @@ func NewCryptoSetupClient(
 
 	tlsConf = tlsConf.Clone()
 	tlsConf.MinVersion = tls.VersionTLS13
-	quicConf := &tls.QUICConfig{TLSConfig: tlsConf}
-	qtls.SetupConfigForClient(quicConf, cs.marshalDataForSessionState, cs.handleDataFromSessionState)
 	cs.tlsConf = tlsConf
 	cs.allow0RTT = enable0RTT
 
-	cs.conn = tls.QUICClient(quicConf)
+	cs.conn = tls.QUICClient(&tls.QUICConfig{
+		TLSConfig:           tlsConf,
+		EnableSessionEvents: true,
+	})
 	cs.conn.SetTransportParameters(cs.ourParams.Marshal(protocol.PerspectiveClient))
 
 	return cs
@@ -123,9 +123,13 @@ func NewCryptoSetupServer(
 	)
 	cs.allow0RTT = allow0RTT
 
-	tlsConf = qtls.SetupConfigForServer(tlsConf, localAddr, remoteAddr, cs.getDataForSessionTicket, cs.handleSessionTicket)
+	tlsConf = setupConfigForServer(tlsConf, localAddr, remoteAddr)
+
 	cs.tlsConf = tlsConf
-	cs.conn = tls.QUICServer(&tls.QUICConfig{TLSConfig: tlsConf})
+	cs.conn = tls.QUICServer(&tls.QUICConfig{
+		TLSConfig:           tlsConf,
+		EnableSessionEvents: true,
+	})
 	return cs
 }
 
@@ -178,11 +182,10 @@ func (h *cryptoSetup) StartHandshake(ctx context.Context) error {
 	}
 	for {
 		ev := h.conn.NextEvent()
-		done, err := h.handleEvent(ev)
-		if err != nil {
+		if err := h.handleEvent(ev); err != nil {
 			return wrapError(err)
 		}
-		if done {
+		if ev.Kind == tls.QUICNoEvent {
 			break
 		}
 	}
@@ -213,53 +216,78 @@ func (h *cryptoSetup) HandleMessage(data []byte, encLevel protocol.EncryptionLev
 }
 
 func (h *cryptoSetup) handleMessage(data []byte, encLevel protocol.EncryptionLevel) error {
-	if err := h.conn.HandleData(qtls.ToTLSEncryptionLevel(encLevel), data); err != nil {
+	if err := h.conn.HandleData(encLevel.ToTLSEncryptionLevel(), data); err != nil {
 		return err
 	}
 	for {
 		ev := h.conn.NextEvent()
-		done, err := h.handleEvent(ev)
-		if err != nil {
+		if err := h.handleEvent(ev); err != nil {
 			return err
 		}
-		if done {
+		if ev.Kind == tls.QUICNoEvent {
 			return nil
 		}
 	}
 }
 
-func (h *cryptoSetup) handleEvent(ev tls.QUICEvent) (done bool, err error) {
-	//nolint:exhaustive
-	// Go 1.23 added new 0-RTT events, see https://github.com/quic-go/quic-go/issues/4272.
-	// We will start using these events when dropping support for Go 1.22.
+func (h *cryptoSetup) handleEvent(ev tls.QUICEvent) (err error) {
 	switch ev.Kind {
 	case tls.QUICNoEvent:
-		return true, nil
+		return nil
 	case tls.QUICSetReadSecret:
 		h.setReadKey(ev.Level, ev.Suite, ev.Data)
-		return false, nil
+		return nil
 	case tls.QUICSetWriteSecret:
 		h.setWriteKey(ev.Level, ev.Suite, ev.Data)
-		return false, nil
+		return nil
 	case tls.QUICTransportParameters:
-		return false, h.handleTransportParameters(ev.Data)
+		return h.handleTransportParameters(ev.Data)
 	case tls.QUICTransportParametersRequired:
 		h.conn.SetTransportParameters(h.ourParams.Marshal(h.perspective))
-		return false, nil
+		return nil
 	case tls.QUICRejectedEarlyData:
 		h.rejected0RTT()
-		return false, nil
+		return nil
 	case tls.QUICWriteData:
 		h.writeRecord(ev.Level, ev.Data)
-		return false, nil
+		return nil
 	case tls.QUICHandshakeDone:
 		h.handshakeComplete()
-		return false, nil
+		return nil
+	case tls.QUICStoreSession:
+		if h.perspective == protocol.PerspectiveServer {
+			panic("cryptoSetup BUG: unexpected QUICStoreSession event for the server")
+		}
+		ev.SessionState.Extra = append(
+			ev.SessionState.Extra,
+			addSessionStateExtraPrefix(h.marshalDataForSessionState(ev.SessionState.EarlyData)),
+		)
+		return h.conn.StoreSession(ev.SessionState)
+	case tls.QUICResumeSession:
+		var allowEarlyData bool
+		switch h.perspective {
+		case protocol.PerspectiveClient:
+			// for clients, this event occurs when a session ticket is selected
+			allowEarlyData = h.handleDataFromSessionState(
+				findSessionStateExtraData(ev.SessionState.Extra),
+				ev.SessionState.EarlyData,
+			)
+		case protocol.PerspectiveServer:
+			// for servers, this event occurs when receiving the client's session ticket
+			allowEarlyData = h.handleSessionTicket(
+				findSessionStateExtraData(ev.SessionState.Extra),
+				ev.SessionState.EarlyData,
+			)
+		}
+		if ev.SessionState.EarlyData {
+			ev.SessionState.EarlyData = allowEarlyData
+		}
+		return nil
 	default:
 		// Unknown events should be ignored.
 		// crypto/tls will ensure that this is safe to do.
 		// See the discussion following https://github.com/golang/go/issues/68124#issuecomment-2187042510 for details.
-		return false, nil
+		return nil
 	}
 }
 
@@ -350,7 +378,10 @@ func (h *cryptoSetup) getDataForSessionTicket() []byte {
 // Due to limitations in crypto/tls, it's only possible to generate a single session ticket per connection.
 // It is only valid for the server.
 func (h *cryptoSetup) GetSessionTicket() ([]byte, error) {
-	if err := h.conn.SendSessionTicket(tls.QUICSessionTicketOptions{EarlyData: h.allow0RTT}); err != nil {
+	if err := h.conn.SendSessionTicket(tls.QUICSessionTicketOptions{
+		EarlyData: h.allow0RTT,
+		Extra:     [][]byte{addSessionStateExtraPrefix(h.getDataForSessionTicket())},
+	}); err != nil {
 		// Session tickets might be disabled by tls.Config.SessionTicketsDisabled.
 		// We can't check h.tlsConfig here, since the actual config might have been obtained from
 		// the GetConfigForClient callback.
@@ -376,9 +407,9 @@ func (h *cryptoSetup) GetSessionTicket() ([]byte, error) {
 // It reads parameters from the session ticket and checks whether to accept 0-RTT if the session ticket enabled 0-RTT.
 // Note that the fact that the session ticket allows 0-RTT doesn't mean that the actual TLS handshake enables 0-RTT:
 // A client may use a 0-RTT enabled session to resume a TLS session without using 0-RTT.
-func (h *cryptoSetup) handleSessionTicket(sessionTicketData []byte, using0RTT bool) bool {
+func (h *cryptoSetup) handleSessionTicket(data []byte, using0RTT bool) (allowEarlyData bool) {
 	var t sessionTicket
-	if err := t.Unmarshal(sessionTicketData, using0RTT); err != nil {
+	if err := t.Unmarshal(data, using0RTT); err != nil {
 		h.logger.Debugf("Unmarshalling session ticket failed: %s", err.Error())
 		return false
 	}
@@ -446,7 +477,7 @@ func (h *cryptoSetup) setReadKey(el tls.QUICEncryptionLevel, suiteID uint16, tra
 	}
 	h.events = append(h.events, Event{Kind: EventReceivedReadKeys})
 	if h.tracer != nil && h.tracer.UpdatedKeyFromTLS != nil {
-		h.tracer.UpdatedKeyFromTLS(qtls.FromTLSEncryptionLevel(el), h.perspective.Opposite())
+		h.tracer.UpdatedKeyFromTLS(protocol.FromTLSEncryptionLevel(el), h.perspective.Opposite())
 	}
 }
 
@@ -497,7 +528,7 @@ func (h *cryptoSetup) setWriteKey(el tls.QUICEncryptionLevel, suiteID uint16, tr
 		panic("unexpected write encryption level")
 	}
 	if h.tracer != nil && h.tracer.UpdatedKeyFromTLS != nil {
-		h.tracer.UpdatedKeyFromTLS(qtls.FromTLSEncryptionLevel(el), h.perspective)
+		h.tracer.UpdatedKeyFromTLS(protocol.FromTLSEncryptionLevel(el), h.perspective)
 	}
 }
 
diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go b/vendor/github.com/quic-go/quic-go/internal/handshake/fake_conn.go
similarity index 97%
rename from vendor/github.com/quic-go/quic-go/internal/qtls/conn.go
rename to vendor/github.com/quic-go/quic-go/internal/handshake/fake_conn.go
index 6660ac66..54af823b 100644
--- a/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/fake_conn.go
@@ -1,4 +1,4 @@
-package qtls
+package handshake
 
 import (
 	"net"
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go b/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go
index 0caf1c8e..3da97cd8 100644
--- a/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go
@@ -8,8 +8,6 @@ import (
 )
 
 // hkdfExpandLabel HKDF expands a label as defined in RFC 8446, section 7.1.
-// Since this implementation avoids using a cryptobyte.Builder, it is about 15% faster than the
-// hkdfExpandLabel in the standard library.
 func hkdfExpandLabel(hash crypto.Hash, secret, context []byte, label string, length int) []byte {
 	b := make([]byte, 3, 3+6+len(label)+1+len(context))
 	binary.BigEndian.PutUint16(b, uint16(length))
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go
index b67f0101..4da517fc 100644
--- a/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go
@@ -1,6 +1,7 @@
 package handshake
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"time"
@@ -52,3 +53,20 @@ func (t *sessionTicket) Unmarshal(b []byte, using0RTT bool) error {
 	t.RTT = time.Duration(rtt) * time.Microsecond
 	return nil
 }
+
+const extraPrefix = "quic-go1"
+
+func addSessionStateExtraPrefix(b []byte) []byte {
+	return append([]byte(extraPrefix), b...)
+}
+
+func findSessionStateExtraData(extras [][]byte) []byte {
+	prefix := []byte(extraPrefix)
+	for _, extra := range extras {
+		if len(extra) < len(prefix) || !bytes.Equal(prefix, extra[:len(prefix)]) {
+			continue
+		}
+		return extra[len(prefix):]
+	}
+	return nil
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/tls_config.go b/vendor/github.com/quic-go/quic-go/internal/handshake/tls_config.go
new file mode 100644
index 00000000..c4c0d22d
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/internal/handshake/tls_config.go
@@ -0,0 +1,39 @@
+package handshake
+
+import (
+	"crypto/tls"
+	"net"
+)
+
+func setupConfigForServer(conf *tls.Config, localAddr, remoteAddr net.Addr) *tls.Config {
+	// Workaround for https://github.com/golang/go/issues/60506.
+	// This initializes the session tickets _before_ cloning the config.
+	_, _ = conf.DecryptTicket(nil, tls.ConnectionState{})
+
+	conf = conf.Clone()
+	conf.MinVersion = tls.VersionTLS13
+
+	// The tls.Config contains two callbacks that pass in a tls.ClientHelloInfo.
+	// Since crypto/tls doesn't do it, we need to make sure to set the Conn field with a fake net.Conn
+	// that allows the caller to get the local and the remote address.
+	if conf.GetConfigForClient != nil {
+		gcfc := conf.GetConfigForClient
+		conf.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
+			info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr}
+			c, err := gcfc(info)
+			if c != nil {
+				// we're returning a tls.Config here, so we need to apply this recursively
+				c = setupConfigForServer(c, localAddr, remoteAddr)
+			}
+			return c, err
+		}
+	}
+	if conf.GetCertificate != nil {
+		gc := conf.GetCertificate
+		conf.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
+			info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr}
+			return gc(info)
+		}
+	}
+	return conf
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go b/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go
index 32d38ab1..40aa331a 100644
--- a/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go
+++ b/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go
@@ -1,5 +1,10 @@
 package protocol
 
+import (
+	"crypto/tls"
+	"fmt"
+)
+
 // EncryptionLevel is the encryption level
 // Default value is Unencrypted
 type EncryptionLevel uint8
@@ -28,3 +33,33 @@ func (e EncryptionLevel) String() string {
 	}
 	return "unknown"
 }
+
+func (e EncryptionLevel) ToTLSEncryptionLevel() tls.QUICEncryptionLevel {
+	switch e {
+	case EncryptionInitial:
+		return tls.QUICEncryptionLevelInitial
+	case EncryptionHandshake:
+		return tls.QUICEncryptionLevelHandshake
+	case Encryption1RTT:
+		return tls.QUICEncryptionLevelApplication
+	case Encryption0RTT:
+		return tls.QUICEncryptionLevelEarly
+	default:
+		panic(fmt.Sprintf("unexpected encryption level: %s", e))
+	}
+}
+
+func FromTLSEncryptionLevel(e tls.QUICEncryptionLevel) EncryptionLevel {
+	switch e {
+	case tls.QUICEncryptionLevelInitial:
+		return EncryptionInitial
+	case tls.QUICEncryptionLevelHandshake:
+		return EncryptionHandshake
+	case tls.QUICEncryptionLevelApplication:
+		return Encryption1RTT
+	case tls.QUICEncryptionLevelEarly:
+		return Encryption0RTT
+	default:
+		panic(fmt.Sprintf("unexpect encryption level: %s", e))
+	}
+}
diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go b/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go
deleted file mode 100644
index 32a921cd..00000000
--- a/vendor/github.com/quic-go/quic-go/internal/qtls/cipher_suite.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package qtls
-
-import (
-	"crypto/tls"
-	"fmt"
-	"unsafe"
-)
-
-//go:linkname cipherSuitesTLS13 crypto/tls.cipherSuitesTLS13
-var cipherSuitesTLS13 []unsafe.Pointer
-
-//go:linkname defaultCipherSuitesTLS13 crypto/tls.defaultCipherSuitesTLS13
-var defaultCipherSuitesTLS13 []uint16
-
-//go:linkname defaultCipherSuitesTLS13NoAES crypto/tls.defaultCipherSuitesTLS13NoAES
-var defaultCipherSuitesTLS13NoAES []uint16
-
-var cipherSuitesModified bool
-
-// SetCipherSuite modifies the cipherSuiteTLS13 slice of cipher suites inside qtls
-// such that it only contains the cipher suite with the chosen id.
-// The reset function returned resets them back to the original value.
-func SetCipherSuite(id uint16) (reset func()) {
-	if cipherSuitesModified {
-		panic("cipher suites modified multiple times without resetting")
-	}
-	cipherSuitesModified = true
-
-	origCipherSuitesTLS13 := append([]unsafe.Pointer{}, cipherSuitesTLS13...)
-	origDefaultCipherSuitesTLS13 := append([]uint16{}, defaultCipherSuitesTLS13...)
-	origDefaultCipherSuitesTLS13NoAES := append([]uint16{}, defaultCipherSuitesTLS13NoAES...)
-	// The order is given by the order of the slice elements in cipherSuitesTLS13 in qtls.
-	switch id {
-	case tls.TLS_AES_128_GCM_SHA256:
-		cipherSuitesTLS13 = cipherSuitesTLS13[:1]
-	case tls.TLS_CHACHA20_POLY1305_SHA256:
-		cipherSuitesTLS13 = cipherSuitesTLS13[1:2]
-	case tls.TLS_AES_256_GCM_SHA384:
-		cipherSuitesTLS13 = cipherSuitesTLS13[2:]
-	default:
-		panic(fmt.Sprintf("unexpected cipher suite: %d", id))
-	}
-	defaultCipherSuitesTLS13 = []uint16{id}
-	defaultCipherSuitesTLS13NoAES = []uint16{id}
-
-	return func() {
-		cipherSuitesTLS13 = origCipherSuitesTLS13
-		defaultCipherSuitesTLS13 = origDefaultCipherSuitesTLS13
-		defaultCipherSuitesTLS13NoAES = origDefaultCipherSuitesTLS13NoAES
-		cipherSuitesModified = false
-	}
-}
diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go b/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go
deleted file mode 100644
index 4acac9e2..00000000
--- a/vendor/github.com/quic-go/quic-go/internal/qtls/client_session_cache.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package qtls
-
-import (
-	"crypto/tls"
-	"sync"
-)
-
-type clientSessionCache struct {
-	mx      sync.Mutex
-	getData func(earlyData bool) []byte
-	setData func(data []byte, earlyData bool) (allowEarlyData bool)
-	wrapped tls.ClientSessionCache
-}
-
-var _ tls.ClientSessionCache = &clientSessionCache{}
-
-func (c *clientSessionCache) Put(key string, cs *tls.ClientSessionState) {
-	c.mx.Lock()
-	defer c.mx.Unlock()
-
-	if cs == nil {
-		c.wrapped.Put(key, nil)
-		return
-	}
-	ticket, state, err := cs.ResumptionState()
-	if err != nil || state == nil {
-		c.wrapped.Put(key, cs)
-		return
-	}
-	state.Extra = append(state.Extra, addExtraPrefix(c.getData(state.EarlyData)))
-	newCS, err := tls.NewResumptionState(ticket, state)
-	if err != nil {
-		// It's not clear why this would error. Just save the original state.
-		c.wrapped.Put(key, cs)
-		return
-	}
-	c.wrapped.Put(key, newCS)
-}
-
-func (c *clientSessionCache) Get(key string) (*tls.ClientSessionState, bool) {
-	c.mx.Lock()
-	defer c.mx.Unlock()
-
-	cs, ok := c.wrapped.Get(key)
-	if !ok || cs == nil {
-		return cs, ok
-	}
-	ticket, state, err := cs.ResumptionState()
-	if err != nil {
-		// It's not clear why this would error.
-		// Remove the ticket from the session cache, so we don't run into this error over and over again
-		c.wrapped.Put(key, nil)
-		return nil, false
-	}
-	// restore QUIC transport parameters and RTT stored in state.Extra
-	if extra := findExtraData(state.Extra); extra != nil {
-		earlyData := c.setData(extra, state.EarlyData)
-		if state.EarlyData {
-			state.EarlyData = earlyData
-		}
-	}
-	session, err := tls.NewResumptionState(ticket, state)
-	if err != nil {
-		// It's not clear why this would error.
-		// Remove the ticket from the session cache, so we don't run into this error over and over again
-		c.wrapped.Put(key, nil)
-		return nil, false
-	}
-	return session, true
-}
diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go b/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go
deleted file mode 100644
index cdfe82a2..00000000
--- a/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package qtls
-
-import (
-	"bytes"
-	"crypto/tls"
-	"fmt"
-	"net"
-
-	"github.com/quic-go/quic-go/internal/protocol"
-)
-
-func SetupConfigForServer(
-	conf *tls.Config,
-	localAddr, remoteAddr net.Addr,
-	getData func() []byte,
-	handleSessionTicket func([]byte, bool) bool,
-) *tls.Config {
-	// Workaround for https://github.com/golang/go/issues/60506.
-	// This initializes the session tickets _before_ cloning the config.
-	_, _ = conf.DecryptTicket(nil, tls.ConnectionState{})
-
-	conf = conf.Clone()
-	conf.MinVersion = tls.VersionTLS13
-
-	// add callbacks to save transport parameters into the session ticket
-	origWrapSession := conf.WrapSession
-	conf.WrapSession = func(cs tls.ConnectionState, state *tls.SessionState) ([]byte, error) {
-		// Add QUIC session ticket
-		state.Extra = append(state.Extra, addExtraPrefix(getData()))
-
-		if origWrapSession != nil {
-			return origWrapSession(cs, state)
-		}
-		b, err := conf.EncryptTicket(cs, state)
-		return b, err
-	}
-	origUnwrapSession := conf.UnwrapSession
-	// UnwrapSession might be called multiple times, as the client can use multiple session tickets.
-	// However, using 0-RTT is only possible with the first session ticket.
-	// crypto/tls guarantees that this callback is called in the same order as the session ticket in the ClientHello.
-	var unwrapCount int
-	conf.UnwrapSession = func(identity []byte, connState tls.ConnectionState) (*tls.SessionState, error) {
-		unwrapCount++
-		var state *tls.SessionState
-		var err error
-		if origUnwrapSession != nil {
-			state, err = origUnwrapSession(identity, connState)
-		} else {
-			state, err = conf.DecryptTicket(identity, connState)
-		}
-		if err != nil || state == nil {
-			return nil, err
-		}
-
-		extra := findExtraData(state.Extra)
-		if extra != nil {
-			state.EarlyData = handleSessionTicket(extra, state.EarlyData && unwrapCount == 1)
-		} else {
-			state.EarlyData = false
-		}
-
-		return state, nil
-	}
-	// The tls.Config contains two callbacks that pass in a tls.ClientHelloInfo.
-	// Since crypto/tls doesn't do it, we need to make sure to set the Conn field with a fake net.Conn
-	// that allows the caller to get the local and the remote address.
-	if conf.GetConfigForClient != nil {
-		gcfc := conf.GetConfigForClient
-		conf.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
-			info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr}
-			c, err := gcfc(info)
-			if c != nil {
-				// We're returning a tls.Config here, so we need to apply this recursively.
-				c = SetupConfigForServer(c, localAddr, remoteAddr, getData, handleSessionTicket)
-			}
-			return c, err
-		}
-	}
-	if conf.GetCertificate != nil {
-		gc := conf.GetCertificate
-		conf.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) {
-			info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr}
-			return gc(info)
-		}
-	}
-	return conf
-}
-
-func SetupConfigForClient(
-	qconf *tls.QUICConfig,
-	getData func(earlyData bool) []byte,
-	setData func(data []byte, earlyData bool) (allowEarlyData bool),
-) {
-	conf := qconf.TLSConfig
-	if conf.ClientSessionCache != nil {
-		origCache := conf.ClientSessionCache
-		conf.ClientSessionCache = &clientSessionCache{
-			wrapped: origCache,
-			getData: getData,
-			setData: setData,
-		}
-	}
-}
-
-func ToTLSEncryptionLevel(e protocol.EncryptionLevel) tls.QUICEncryptionLevel {
-	switch e {
-	case protocol.EncryptionInitial:
-		return tls.QUICEncryptionLevelInitial
-	case protocol.EncryptionHandshake:
-		return tls.QUICEncryptionLevelHandshake
-	case protocol.Encryption1RTT:
-		return tls.QUICEncryptionLevelApplication
-	case protocol.Encryption0RTT:
-		return tls.QUICEncryptionLevelEarly
-	default:
-		panic(fmt.Sprintf("unexpected encryption level: %s", e))
-	}
-}
-
-func FromTLSEncryptionLevel(e tls.QUICEncryptionLevel) protocol.EncryptionLevel {
-	switch e {
-	case tls.QUICEncryptionLevelInitial:
-		return protocol.EncryptionInitial
-	case tls.QUICEncryptionLevelHandshake:
-		return protocol.EncryptionHandshake
-	case tls.QUICEncryptionLevelApplication:
-		return protocol.Encryption1RTT
-	case tls.QUICEncryptionLevelEarly:
-		return protocol.Encryption0RTT
-	default:
-		panic(fmt.Sprintf("unexpect encryption level: %s", e))
-	}
-}
-
-const extraPrefix = "quic-go1"
-
-func addExtraPrefix(b []byte) []byte {
-	return append([]byte(extraPrefix), b...)
-}
-
-func findExtraData(extras [][]byte) []byte {
-	prefix := []byte(extraPrefix)
-	for _, extra := range extras {
-		if len(extra) < len(prefix) || !bytes.Equal(prefix, extra[:len(prefix)]) {
-			continue
-		}
-		return extra[len(prefix):]
-	}
-	return nil
-}
diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go
index 92fec2e2..0efd8354 100644
--- a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go
+++ b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go
@@ -108,3 +108,12 @@ func (r *RTTStats) SetInitialRTT(t time.Duration) {
 	r.smoothedRTT = t
 	r.latestRTT = t
 }
+
+func (r *RTTStats) ResetForPathMigration() {
+	r.hasMeasurement = false
+	r.minRTT = 0
+	r.latestRTT = 0
+	r.smoothedRTT = 0
+	r.meanDeviation = 0
+	// max_ack_delay remains valid
+}
diff --git a/vendor/github.com/quic-go/quic-go/logging/types.go b/vendor/github.com/quic-go/quic-go/logging/types.go
index 0d79b0a9..65da3559 100644
--- a/vendor/github.com/quic-go/quic-go/logging/types.go
+++ b/vendor/github.com/quic-go/quic-go/logging/types.go
@@ -63,9 +63,11 @@ type TimerType uint8
 
 const (
 	// TimerTypeACK is the timer type for the early retransmit timer
-	TimerTypeACK TimerType = iota
+	TimerTypeACK TimerType = iota + 1
 	// TimerTypePTO is the timer type for the PTO retransmit timer
 	TimerTypePTO
+	// TimerTypePathProbe is the timer type for the path probe retransmit timer
+	TimerTypePathProbe
 )
 
 // TimeoutReason is the reason why a connection is closed
diff --git a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go
index ee636a6d..096eba14 100644
--- a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go
+++ b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go
@@ -17,6 +17,7 @@ type mtuDiscoverer interface {
 	ShouldSendProbe(now time.Time) bool
 	CurrentSize() protocol.ByteCount
 	GetPing(now time.Time) (ping ackhandler.Frame, datagramSize protocol.ByteCount)
+	Reset(now time.Time, start, max protocol.ByteCount)
 }
 
 const (
@@ -88,7 +89,6 @@ const (
 
 type mtuFinder struct {
 	lastProbeTime time.Time
-	mtuIncreased  func(protocol.ByteCount)
 
 	rttStats *utils.RTTStats
 
@@ -99,6 +99,11 @@ type mtuFinder struct {
 	lost             [maxLostMTUProbes]protocol.ByteCount
 	lastProbeWasLost bool
 
+	// The generation is used to ignore ACKs / losses for probe packets sent before a reset.
+	// Resets happen when the connection is migrated to a new path.
+	// We're therefore not concerned about overflows of this counter.
+	generation uint8
+
 	tracer *logging.ConnectionTracer
 }
 
@@ -107,16 +112,19 @@ var _ mtuDiscoverer = &mtuFinder{}
 func newMTUDiscoverer(
 	rttStats *utils.RTTStats,
 	start, max protocol.ByteCount,
-	mtuIncreased func(protocol.ByteCount),
 	tracer *logging.ConnectionTracer,
 ) *mtuFinder {
 	f := &mtuFinder{
-		inFlight:     protocol.InvalidByteCount,
-		min:          start,
-		rttStats:     rttStats,
-		mtuIncreased: mtuIncreased,
-		tracer:       tracer,
+		inFlight: protocol.InvalidByteCount,
+		rttStats: rttStats,
+		tracer:   tracer,
 	}
+	f.init(start, max)
+	return f
+}
+
+func (f *mtuFinder) init(start, max protocol.ByteCount) {
+	f.min = start
 	for i := range f.lost {
 		if i == 0 {
 			f.lost[i] = max
@@ -124,7 +132,6 @@ func newMTUDiscoverer(
 		}
 		f.lost[i] = protocol.InvalidByteCount
 	}
-	return f
 }
 
 func (f *mtuFinder) done() bool {
@@ -165,7 +172,7 @@ func (f *mtuFinder) GetPing(now time.Time) (ackhandler.Frame, protocol.ByteCount
 	f.inFlight = size
 	return ackhandler.Frame{
 		Frame:   &wire.PingFrame{},
-		Handler: &mtuFinderAckHandler{f},
+		Handler: &mtuFinderAckHandler{mtuFinder: f, generation: f.generation},
 	}, size
 }
 
@@ -173,13 +180,26 @@ func (f *mtuFinder) CurrentSize() protocol.ByteCount {
 	return f.min
 }
 
+func (f *mtuFinder) Reset(now time.Time, start, max protocol.ByteCount) {
+	f.generation++
+	f.lastProbeTime = now
+	f.lastProbeWasLost = false
+	f.inFlight = protocol.InvalidByteCount
+	f.init(start, max)
+}
+
 type mtuFinderAckHandler struct {
 	*mtuFinder
+	generation uint8
 }
 
 var _ ackhandler.FrameHandler = &mtuFinderAckHandler{}
 
 func (h *mtuFinderAckHandler) OnAcked(wire.Frame) {
+	if h.generation != h.mtuFinder.generation {
+		// ACK for probe sent before reset
+		return
+	}
 	size := h.inFlight
 	if size == protocol.InvalidByteCount {
 		panic("OnAcked callback called although there's no MTU probe packet in flight")
@@ -207,10 +227,13 @@ func (h *mtuFinderAckHandler) OnAcked(wire.Frame) {
 	if h.tracer != nil && h.tracer.UpdatedMTU != nil {
 		h.tracer.UpdatedMTU(size, h.done())
 	}
-	h.mtuIncreased(size)
 }
 
 func (h *mtuFinderAckHandler) OnLost(wire.Frame) {
+	if h.generation != h.mtuFinder.generation {
+		// probe sent before reset received
+		return
+	}
 	size := h.inFlight
 	if size == protocol.InvalidByteCount {
 		panic("OnLost callback called although there's no MTU probe packet in flight")
diff --git a/vendor/github.com/quic-go/quic-go/packet_packer.go b/vendor/github.com/quic-go/quic-go/packet_packer.go
index 7724b503..720f1958 100644
--- a/vendor/github.com/quic-go/quic-go/packet_packer.go
+++ b/vendor/github.com/quic-go/quic-go/packet_packer.go
@@ -22,9 +22,10 @@ type packer interface {
 	PackCoalescedPacket(onlyAck bool, maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (*coalescedPacket, error)
 	PackAckOnlyPacket(maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, *packetBuffer, error)
 	AppendPacket(buf *packetBuffer, maxPacketSize protocol.ByteCount, now time.Time, v protocol.Version) (shortHeaderPacket, error)
-	MaybePackProbePacket(protocol.EncryptionLevel, protocol.ByteCount, time.Time, protocol.Version) (*coalescedPacket, error)
+	MaybePackPTOProbePacket(protocol.EncryptionLevel, protocol.ByteCount, time.Time, protocol.Version) (*coalescedPacket, error)
 	PackConnectionClose(*qerr.TransportError, protocol.ByteCount, protocol.Version) (*coalescedPacket, error)
 	PackApplicationClose(*qerr.ApplicationError, protocol.ByteCount, protocol.Version) (*coalescedPacket, error)
+	PackPathProbePacket(protocol.ConnectionID, ackhandler.Frame, protocol.Version) (shortHeaderPacket, *packetBuffer, error)
 	PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, v protocol.Version) (shortHeaderPacket, *packetBuffer, error)
 
 	SetToken([]byte)
@@ -57,6 +58,7 @@ type shortHeaderPacket struct {
 	Ack                  *wire.AckFrame
 	Length               protocol.ByteCount
 	IsPathMTUProbePacket bool
+	IsPathProbePacket    bool
 
 	// used for logging
 	DestConnID      protocol.ConnectionID
@@ -269,17 +271,17 @@ func (p *packetPacker) packConnectionClose(
 		if sealers[i] == nil {
 			continue
 		}
-		var paddingLen protocol.ByteCount
-		if encLevel == protocol.EncryptionInitial {
-			paddingLen = p.initialPaddingLen(payloads[i].frames, size, maxPacketSize)
-		}
 		if encLevel == protocol.Encryption1RTT {
-			shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, keyPhase, payloads[i], paddingLen, maxPacketSize, sealers[i], false, v)
+			shp, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, keyPhase, payloads[i], 0, maxPacketSize, sealers[i], false, v)
 			if err != nil {
 				return nil, err
 			}
 			packet.shortHdrPacket = &shp
 		} else {
+			var paddingLen protocol.ByteCount
+			if encLevel == protocol.EncryptionInitial {
+				paddingLen = p.initialPaddingLen(payloads[i].frames, size, maxPacketSize)
+			}
 			longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], v)
 			if err != nil {
 				return nil, err
@@ -707,7 +709,7 @@ func (p *packetPacker) composeNextPacket(
 	return pl
 }
 
-func (p *packetPacker) MaybePackProbePacket(
+func (p *packetPacker) MaybePackPTOProbePacket(
 	encLevel protocol.EncryptionLevel,
 	maxPacketSize protocol.ByteCount,
 	now time.Time,
@@ -792,6 +794,26 @@ func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.B
 	return packet, buffer, err
 }
 
+func (p *packetPacker) PackPathProbePacket(connID protocol.ConnectionID, f ackhandler.Frame, v protocol.Version) (shortHeaderPacket, *packetBuffer, error) {
+	pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
+	buf := getPacketBuffer()
+	s, err := p.cryptoSetup.Get1RTTSealer()
+	if err != nil {
+		return shortHeaderPacket{}, nil, err
+	}
+	payload := payload{
+		frames: []ackhandler.Frame{f},
+		length: f.Frame.Length(v),
+	}
+	padding := protocol.MinInitialPacketSize - p.shortHeaderPacketLength(connID, pnLen, payload) - protocol.ByteCount(s.Overhead())
+	packet, err := p.appendShortHeaderPacket(buf, connID, pn, pnLen, s.KeyPhase(), payload, padding, protocol.MinInitialPacketSize, s, false, v)
+	if err != nil {
+		return shortHeaderPacket{}, nil, err
+	}
+	packet.IsPathProbePacket = true
+	return packet, buf, err
+}
+
 func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel, v protocol.Version) *wire.ExtendedHeader {
 	pn, pnLen := p.pnManager.PeekPacketNumber(encLevel)
 	hdr := &wire.ExtendedHeader{
diff --git a/vendor/github.com/quic-go/quic-go/path_manager.go b/vendor/github.com/quic-go/quic-go/path_manager.go
new file mode 100644
index 00000000..6d940921
--- /dev/null
+++ b/vendor/github.com/quic-go/quic-go/path_manager.go
@@ -0,0 +1,145 @@
+package quic
+
+import (
+	"crypto/rand"
+	"net"
+
+	"github.com/quic-go/quic-go/internal/ackhandler"
+	"github.com/quic-go/quic-go/internal/protocol"
+	"github.com/quic-go/quic-go/internal/utils"
+	"github.com/quic-go/quic-go/internal/wire"
+)
+
+type pathID int64
+
+const maxPaths = 3
+
+type path struct {
+	addr           net.Addr
+	pathChallenge  [8]byte
+	validated      bool
+	rcvdNonProbing bool
+}
+
+type pathManager struct {
+	nextPathID pathID
+	paths      map[pathID]*path
+
+	getConnID    func(pathID) (_ protocol.ConnectionID, ok bool)
+	retireConnID func(pathID)
+
+	logger utils.Logger
+}
+
+func newPathManager(
+	getConnID func(pathID) (_ protocol.ConnectionID, ok bool),
+	retireConnID func(pathID),
+	logger utils.Logger,
+) *pathManager {
+	return &pathManager{
+		paths:        make(map[pathID]*path),
+		getConnID:    getConnID,
+		retireConnID: retireConnID,
+		logger:       logger,
+	}
+}
+
+// Returns a path challenge frame if one should be sent.
+// May return nil.
+func (pm *pathManager) HandlePacket(p receivedPacket, isNonProbing bool) (_ protocol.ConnectionID, _ ackhandler.Frame, shouldSwitch bool) {
+	for _, path := range pm.paths {
+		if addrsEqual(path.addr, p.remoteAddr) {
+			// already sent a PATH_CHALLENGE for this path
+			if isNonProbing {
+				path.rcvdNonProbing = true
+			}
+			if pm.logger.Debug() {
+				pm.logger.Debugf("received packet for path %s that was already probed, validated: %t", p.remoteAddr, path.validated)
+			}
+			return protocol.ConnectionID{}, ackhandler.Frame{}, path.validated && path.rcvdNonProbing
+		}
+	}
+
+	if len(pm.paths) >= maxPaths {
+		if pm.logger.Debug() {
+			pm.logger.Debugf("received packet for previously unseen path %s, but already have %d paths", p.remoteAddr, len(pm.paths))
+		}
+		return protocol.ConnectionID{}, ackhandler.Frame{}, false
+	}
+
+	// previously unseen path, initiate path validation by sending a PATH_CHALLENGE
+	connID, ok := pm.getConnID(pm.nextPathID)
+	if !ok {
+		pm.logger.Debugf("skipping validation of new path %s since no connection ID is available", p.remoteAddr)
+		return protocol.ConnectionID{}, ackhandler.Frame{}, false
+	}
+	var b [8]byte
+	rand.Read(b[:])
+	pm.paths[pm.nextPathID] = &path{
+		addr:           p.remoteAddr,
+		pathChallenge:  b,
+		rcvdNonProbing: isNonProbing,
+	}
+	pm.nextPathID++
+	frame := ackhandler.Frame{
+		Frame:   &wire.PathChallengeFrame{Data: b},
+		Handler: (*pathManagerAckHandler)(pm),
+	}
+	pm.logger.Debugf("enqueueing PATH_CHALLENGE for new path %s", p.remoteAddr)
+	return connID, frame, false
+}
+
+func (pm *pathManager) HandlePathResponseFrame(f *wire.PathResponseFrame) {
+	for _, p := range pm.paths {
+		if f.Data == p.pathChallenge {
+			// path validated
+			p.validated = true
+			pm.logger.Debugf("path %s validated", p.addr)
+			break
+		}
+	}
+}
+
+// SwitchToPath is called when the connection switches to a new path
+func (pm *pathManager) SwitchToPath(addr net.Addr) {
+	// retire all other paths
+	for id := range pm.paths {
+		if addrsEqual(pm.paths[id].addr, addr) {
+			pm.logger.Debugf("switching to path %d (%s)", id, addr)
+			continue
+		}
+		pm.retireConnID(id)
+	}
+	clear(pm.paths)
+}
+
+type pathManagerAckHandler pathManager
+
+var _ ackhandler.FrameHandler = &pathManagerAckHandler{}
+
+// Acknowledging the frame doesn't validate the path, only receiving the PATH_RESPONSE does.
+func (pm *pathManagerAckHandler) OnAcked(f wire.Frame) {}
+
+func (pm *pathManagerAckHandler) OnLost(f wire.Frame) {
+	// TODO: retransmit the packet the first time it is lost
+	pc := f.(*wire.PathChallengeFrame)
+	for id, path := range pm.paths {
+		if path.pathChallenge == pc.Data {
+			delete(pm.paths, id)
+			pm.retireConnID(id)
+			break
+		}
+	}
+}
+
+func addrsEqual(addr1, addr2 net.Addr) bool {
+	if addr1 == nil || addr2 == nil {
+		return false
+	}
+	a1, ok1 := addr1.(*net.UDPAddr)
+	a2, ok2 := addr2.(*net.UDPAddr)
+	if ok1 && ok2 {
+		return a1.IP.Equal(a2.IP) && a1.Port == a2.Port
+	}
+	return addr1.String() == addr2.String()
+}
diff --git a/vendor/github.com/quic-go/quic-go/send_conn.go b/vendor/github.com/quic-go/quic-go/send_conn.go
index 498ed112..402520c6 100644
--- a/vendor/github.com/quic-go/quic-go/send_conn.go
+++ b/vendor/github.com/quic-go/quic-go/send_conn.go
@@ -2,6 +2,7 @@ package quic
 
 import (
 	"net"
+	"sync/atomic"
 
 	"github.com/quic-go/quic-go/internal/protocol"
 	"github.com/quic-go/quic-go/internal/utils"
@@ -10,22 +11,29 @@ import (
 // A sendConn allows sending using a simple Write() on a non-connected packet conn.
 type sendConn interface {
 	Write(b []byte, gsoSize uint16, ecn protocol.ECN) error
+	WriteTo([]byte, net.Addr) error
 	Close() error
 	LocalAddr() net.Addr
 	RemoteAddr() net.Addr
+	ChangeRemoteAddr(addr net.Addr, info packetInfo)
 
 	capabilities() connCapabilities
 }
 
+type remoteAddrInfo struct {
+	addr net.Addr
+	oob  []byte
+}
+
 type sconn struct {
 	rawConn
 
-	localAddr  net.Addr
-	remoteAddr net.Addr
+	localAddr net.Addr
+
+	remoteAddrInfo atomic.Pointer[remoteAddrInfo]
 
 	logger utils.Logger
 
-	packetInfoOOB []byte
 	// If GSO enabled, and we receive a GSO error for this remote address, GSO is disabled.
 	gotGSOError bool
 	// Used to catch the error sometimes returned by the first sendmsg call on Linux,
@@ -49,22 +57,26 @@ func newSendConn(c rawConn, remote net.Addr, info packetInfo, logger utils.Logge
 	// increase oob slice capacity, so we can add the UDP_SEGMENT and ECN control messages without allocating
 	l := len(oob)
 	oob = append(oob, make([]byte, 64)...)[:l]
-	return &sconn{
-		rawConn:       c,
-		localAddr:     localAddr,
-		remoteAddr:    remote,
-		packetInfoOOB: oob,
-		logger:        logger,
+	sc := &sconn{
+		rawConn:   c,
+		localAddr: localAddr,
+		logger:    logger,
 	}
+	sc.remoteAddrInfo.Store(&remoteAddrInfo{
+		addr: remote,
+		oob:  oob,
+	})
+	return sc
 }
 
 func (c *sconn) Write(p []byte, gsoSize uint16, ecn protocol.ECN) error {
-	err := c.writePacket(p, c.remoteAddr, c.packetInfoOOB, gsoSize, ecn)
+	ai := c.remoteAddrInfo.Load()
+	err := c.writePacket(p, ai.addr, ai.oob, gsoSize, ecn)
 	if err != nil && isGSOError(err) {
 		// disable GSO for future calls
 		c.gotGSOError = true
 		if c.logger.Debug() {
-			c.logger.Debugf("GSO failed when sending to %s", c.remoteAddr)
+			c.logger.Debugf("GSO failed when sending to %s", ai.addr)
 		}
 		// send out the packets one by one
 		for len(p) > 0 {
@@ -72,7 +84,7 @@ func (c *sconn) Write(p []byte, gsoSize uint16, ecn protocol.ECN) error {
 			if l > int(gsoSize) {
 				l = int(gsoSize)
 			}
-			if err := c.writePacket(p[:l], c.remoteAddr, c.packetInfoOOB, 0, ecn); err != nil {
+			if err := c.writePacket(p[:l], ai.addr, ai.oob, 0, ecn); err != nil {
 				return err
 			}
 			p = p[l:]
@@ -91,6 +103,11 @@ func (c *sconn) writePacket(p []byte, addr net.Addr, oob []byte, gsoSize uint16,
 	return err
 }
 
+func (c *sconn) WriteTo(b []byte, addr net.Addr) error {
+	_, err := c.WritePacket(b, addr, nil, 0, protocol.ECNUnsupported)
+	return err
+}
+
 func (c *sconn) capabilities() connCapabilities {
 	capabilities := c.rawConn.capabilities()
 	if capabilities.GSO {
@@ -99,5 +116,12 @@ func (c *sconn) capabilities() connCapabilities {
 	return capabilities
 }
 
-func (c *sconn) RemoteAddr() net.Addr { return c.remoteAddr }
+func (c *sconn) ChangeRemoteAddr(addr net.Addr, info packetInfo) {
+	c.remoteAddrInfo.Store(&remoteAddrInfo{
+		addr: addr,
+		oob:  info.OOB(),
+	})
+}
+
+func (c *sconn) RemoteAddr() net.Addr { return c.remoteAddrInfo.Load().addr }
 func (c *sconn) LocalAddr() net.Addr  { return c.localAddr }
diff --git a/vendor/github.com/quic-go/quic-go/send_queue.go b/vendor/github.com/quic-go/quic-go/send_queue.go
index bde02334..d19762be 100644
--- a/vendor/github.com/quic-go/quic-go/send_queue.go
+++ b/vendor/github.com/quic-go/quic-go/send_queue.go
@@ -1,9 +1,14 @@
 package quic
 
-import "github.com/quic-go/quic-go/internal/protocol"
+import (
+	"net"
+
+	"github.com/quic-go/quic-go/internal/protocol"
+)
 
 type sender interface {
 	Send(p *packetBuffer, gsoSize uint16, ecn protocol.ECN)
+	SendProbe(*packetBuffer, net.Addr)
 	Run() error
 	WouldBlock() bool
 	Available() <-chan struct{}
@@ -57,6 +62,10 @@ func (h *sendQueue) Send(p *packetBuffer, gsoSize uint16, ecn protocol.ECN) {
 	}
 }
 
+func (h *sendQueue) SendProbe(p *packetBuffer, addr net.Addr) {
+	h.conn.WriteTo(p.Data, addr)
+}
+
 func (h *sendQueue) WouldBlock() bool {
 	return len(h.queue) == sendQueueCapacity
 }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1337f186..db8a4148 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -107,8 +107,8 @@ github.com/powerman/deepequal
 # github.com/quic-go/qpack v0.5.1
 ## explicit; go 1.22
 github.com/quic-go/qpack
-# github.com/quic-go/quic-go v0.49.0
-## explicit; go 1.22
+# github.com/quic-go/quic-go v0.50.0
+## explicit; go 1.23
 github.com/quic-go/quic-go
 github.com/quic-go/quic-go/http3
 github.com/quic-go/quic-go/internal/ackhandler
@@ -117,7 +117,6 @@ github.com/quic-go/quic-go/internal/flowcontrol
 github.com/quic-go/quic-go/internal/handshake
 github.com/quic-go/quic-go/internal/protocol
 github.com/quic-go/quic-go/internal/qerr
-github.com/quic-go/quic-go/internal/qtls
 github.com/quic-go/quic-go/internal/utils
 github.com/quic-go/quic-go/internal/utils/linkedlist
 github.com/quic-go/quic-go/internal/utils/ringbuffer

From a4cbc66fdde23a9511455304217d7adea6db6140 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Feb 2025 18:12:42 +0100
Subject: [PATCH 26/48] http3: RoundTripper was renamed to Transport
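
The http3.RoundTripper type was renamed to http3.Transport in quic-go v0.50;
the renamed type still implements http.RoundTripper. For reference, a minimal
standalone sketch of the new API (hypothetical example, not part of this
change; the field values below are placeholders):

    package main

    import (
        "crypto/tls"
        "net/http"

        "github.com/quic-go/quic-go/http3"
    )

    func main() {
        // http3.Transport replaces the former http3.RoundTripper type.
        rt := &http3.Transport{
            DisableCompression: true,
            TLSClientConfig:    &tls.Config{},
        }
        // It still satisfies http.RoundTripper, so it can back an http.Client.
        client := &http.Client{Transport: rt}
        _ = client
    }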

---
 dnscrypt-proxy/xtransport.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index 0bd1b93c..7652566f 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -57,7 +57,7 @@ type AltSupport struct {
 
 type XTransport struct {
 	transport                *http.Transport
-	h3Transport              *http3.RoundTripper
+	h3Transport              *http3.Transport
 	keepAlive                time.Duration
 	timeout                  time.Duration
 	cachedIPs                CachedIPs
@@ -311,7 +311,7 @@ func (xTransport *XTransport) rebuildTransport() {
 			tlsCfg.ServerName = host
 			return quic.DialEarly(ctx, udpConn, udpAddr, tlsCfg, cfg)
 		}
-		h3Transport := &http3.RoundTripper{DisableCompression: true, TLSClientConfig: &tlsClientConfig, Dial: dial}
+		h3Transport := &http3.Transport{DisableCompression: true, TLSClientConfig: &tlsClientConfig, Dial: dial}
 		xTransport.h3Transport = h3Transport
 	}
 }

From 3f3cd1e67df8054fc18766c698687b6b32570b90 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Feb 2025 18:13:23 +0100
Subject: [PATCH 27/48] updating_until -> updatingUntil

---
 dnscrypt-proxy/xtransport.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index 7652566f..6acd3b9b 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -40,9 +40,9 @@ const (
 )
 
 type CachedIPItem struct {
-	ip             net.IP
-	expiration     *time.Time
-	updating_until *time.Time
+	ip            net.IP
+	expiration    *time.Time
+	updatingUntil *time.Time
 }
 
 type CachedIPs struct {
@@ -106,7 +106,7 @@ func ParseIP(ipStr string) net.IP {
 // If ttl < 0, never expire
 // Otherwise, ttl is set to max(ttl, MinResolverIPTTL)
 func (xTransport *XTransport) saveCachedIP(host string, ip net.IP, ttl time.Duration) {
-	item := &CachedIPItem{ip: ip, expiration: nil, updating_until: nil}
+	item := &CachedIPItem{ip: ip, expiration: nil, updatingUntil: nil}
 	if ttl >= 0 {
 		if ttl < MinResolverIPTTL {
 			ttl = MinResolverIPTTL
@@ -126,7 +126,7 @@ func (xTransport *XTransport) markUpdatingCachedIP(host string) {
 	if ok {
 		now := time.Now()
 		until := now.Add(xTransport.timeout)
-		item.updating_until = &until
+		item.updatingUntil = &until
 		xTransport.cachedIPs.cache[host] = item
 	}
 	xTransport.cachedIPs.Unlock()
@@ -144,7 +144,7 @@ func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool
 	expiration := item.expiration
 	if expiration != nil && time.Until(*expiration) < 0 {
 		expired = true
-		if item.updating_until != nil && time.Until(*item.updating_until) > 0 {
+		if item.updatingUntil != nil && time.Until(*item.updatingUntil) > 0 {
 			updating = true
 		}
 	}

From 45a4df8df57082972334dbce70590faf5a1b5210 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Feb 2025 18:16:33 +0100
Subject: [PATCH 28/48] Nits

---
 dnscrypt-proxy/coldstart.go      | 2 +-
 dnscrypt-proxy/plugin_forward.go | 8 +++-----
 dnscrypt-proxy/xtransport.go     | 2 +-
 3 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/dnscrypt-proxy/coldstart.go b/dnscrypt-proxy/coldstart.go
index 0f0b73ac..9bcb7bc7 100644
--- a/dnscrypt-proxy/coldstart.go
+++ b/dnscrypt-proxy/coldstart.go
@@ -170,7 +170,7 @@ func ColdStart(proxy *Proxy) (*CaptivePortalHandler, error) {
 		if err != nil {
 			continue
 		}
-		if strings.Index(ipsStr, "*") != -1 {
+		if strings.Contains(ipsStr, "*") {
 			return nil, fmt.Errorf(
 				"A captive portal rule must use an exact host name at line %d",
 				1+lineNo,
diff --git a/dnscrypt-proxy/plugin_forward.go b/dnscrypt-proxy/plugin_forward.go
index 12e52928..e3352055 100644
--- a/dnscrypt-proxy/plugin_forward.go
+++ b/dnscrypt-proxy/plugin_forward.go
@@ -60,10 +60,8 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
 			continue
 		}
 		domain, serversStr, ok := StringTwoFields(line)
-		if strings.HasPrefix(domain, "*.") {
-			domain = domain[2:]
-		}
-		if strings.Index(domain, "*") != -1 {
+		domain = strings.TrimPrefix(domain, "*.")
+		if strings.Contains(domain, "*") {
 			ok = false
 		}
 		if !ok {
@@ -205,7 +203,7 @@ func (plugin *PluginForward) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
 					dlog.Infof("No response from the DHCP server while resolving [%s]", qName)
 					continue
 				}
-				if dhcpDNS != nil && len(dhcpDNS) > 0 {
+				if len(dhcpDNS) > 0 {
 					server = net.JoinHostPort(dhcpDNS[rand.Intn(len(dhcpDNS))].String(), "53")
 					break
 				}
diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index 6acd3b9b..9a06d734 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -234,7 +234,7 @@ func (xTransport *XTransport) rebuildTransport() {
 		tlsClientConfig.Certificates = []tls.Certificate{cert}
 	}
 
-	overrideCipherSuite := xTransport.tlsCipherSuite != nil && len(xTransport.tlsCipherSuite) > 0
+	overrideCipherSuite := len(xTransport.tlsCipherSuite) > 0
 	if xTransport.tlsDisableSessionTickets || overrideCipherSuite {
 		tlsClientConfig.SessionTicketsDisabled = xTransport.tlsDisableSessionTickets
 		if !xTransport.tlsDisableSessionTickets {

From 8432827e5d38d8916c5def8b9d6df4c9f3582e0f Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Feb 2025 23:32:55 +0100
Subject: [PATCH 29/48] Reduce MinResolverIPTTL

---
 dnscrypt-proxy/xtransport.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index 9a06d734..a3e89615 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -35,7 +35,7 @@ const (
 	DefaultKeepAlive         = 5 * time.Second
 	DefaultTimeout           = 30 * time.Second
 	SystemResolverIPTTL      = 24 * time.Hour
-	MinResolverIPTTL         = 12 * time.Hour
+	MinResolverIPTTL         = 4 * time.Hour
 	ExpiredCachedIPGraceTTL  = 15 * time.Minute
 )
 

From f052e0ccdfe96825f577668dd72c9eed4396a6fb Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Feb 2025 23:34:14 +0100
Subject: [PATCH 30/48] Reduce SystemResolverIPTTL

---
 dnscrypt-proxy/xtransport.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index a3e89615..e2d960ba 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -34,7 +34,7 @@ const (
 	DefaultBootstrapResolver = "9.9.9.9:53"
 	DefaultKeepAlive         = 5 * time.Second
 	DefaultTimeout           = 30 * time.Second
-	SystemResolverIPTTL      = 24 * time.Hour
+	SystemResolverIPTTL      = 12 * time.Hour
 	MinResolverIPTTL         = 4 * time.Hour
 	ExpiredCachedIPGraceTTL  = 15 * time.Minute
 )

From 2d8cfebe8b948e4d26580b36a6ad99397629bb56 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Feb 2025 23:56:18 +0100
Subject: [PATCH 31/48] Add some debugging helpers

---
 dnscrypt-proxy/xtransport.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index e2d960ba..9e810c14 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -117,6 +117,7 @@ func (xTransport *XTransport) saveCachedIP(host string, ip net.IP, ttl time.Dura
 	xTransport.cachedIPs.Lock()
 	xTransport.cachedIPs.cache[host] = item
 	xTransport.cachedIPs.Unlock()
+	dlog.Debugf("[%s] IP address [%s] stored to the cache, valid for %v", host, ip, ttl)
 }
 
 // Mark an entry as being updated
@@ -128,6 +129,7 @@ func (xTransport *XTransport) markUpdatingCachedIP(host string) {
 		until := now.Add(xTransport.timeout)
 		item.updatingUntil = &until
 		xTransport.cachedIPs.cache[host] = item
+		dlog.Debugf("[%s] IP addresss marked as updating", host)
 	}
 	xTransport.cachedIPs.Unlock()
 }
@@ -138,6 +140,7 @@ func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool
 	item, ok := xTransport.cachedIPs.cache[host]
 	xTransport.cachedIPs.RUnlock()
 	if !ok {
+		dlog.Debugf("[%s] IP address not found in the cache", host)
 		return
 	}
 	ip = item.ip
@@ -146,6 +149,9 @@ func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool
 		expired = true
 		if item.updatingUntil != nil && time.Until(*item.updatingUntil) > 0 {
 			updating = true
+			dlog.Debugf("[%s] IP address is being updated", host)
+		} else {
+			dlog.Debugf("[%s] IP address expired, not being updated yet", host)
 		}
 	}
 	return

From 70f2f9b4244f02bd840411dcf8681c9fb597038b Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Sat, 22 Feb 2025 01:20:50 +0100
Subject: [PATCH 32/48] Avoid duplicate log

---
 dnscrypt-proxy/xtransport.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index 9e810c14..a362298b 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -498,7 +498,6 @@ func (xTransport *XTransport) resolveAndUpdateCache(host string) error {
 		}
 	}
 	xTransport.saveCachedIP(host, foundIP, ttl)
-	dlog.Debugf("[%s] IP address [%s] added to the cache, valid for %v", host, foundIP, ttl)
 	return nil
 }
 

From 7844cfca27b59bb4100e65e2ed89a929bd38dce9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Feb 2025 04:00:03 +0000
Subject: [PATCH 33/48] Bump softprops/action-gh-release

Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from a92196038060e0c673cf3ba5b98710dd716731df to daffc988f27cd136af07d2cdcbf7ceae2993875d.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](https://github.com/softprops/action-gh-release/compare/a92196038060e0c673cf3ba5b98710dd716731df...daffc988f27cd136af07d2cdcbf7ceae2993875d)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/releases.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/releases.yml b/.github/workflows/releases.yml
index 4be08d84..1f0bfea5 100644
--- a/.github/workflows/releases.yml
+++ b/.github/workflows/releases.yml
@@ -83,7 +83,7 @@ jobs:
           prerelease: false
 
       - name: Upload release assets
-        uses: softprops/action-gh-release@a92196038060e0c673cf3ba5b98710dd716731df
+        uses: softprops/action-gh-release@daffc988f27cd136af07d2cdcbf7ceae2993875d
         if: startsWith(github.ref, 'refs/tags/')
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

From 2df7497b1f924c7ee7c0a8b61c933c9dd5fe9922 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 26 Feb 2025 03:38:36 +0000
Subject: [PATCH 34/48] Bump softprops/action-gh-release

Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from daffc988f27cd136af07d2cdcbf7ceae2993875d to 4b4d743a9bb620a3869f98d3be3ccdbd313922d3.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](https://github.com/softprops/action-gh-release/compare/daffc988f27cd136af07d2cdcbf7ceae2993875d...4b4d743a9bb620a3869f98d3be3ccdbd313922d3)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/releases.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/releases.yml b/.github/workflows/releases.yml
index 1f0bfea5..b6bfb6a8 100644
--- a/.github/workflows/releases.yml
+++ b/.github/workflows/releases.yml
@@ -83,7 +83,7 @@ jobs:
           prerelease: false
 
       - name: Upload release assets
-        uses: softprops/action-gh-release@daffc988f27cd136af07d2cdcbf7ceae2993875d
+        uses: softprops/action-gh-release@4b4d743a9bb620a3869f98d3be3ccdbd313922d3
         if: startsWith(github.ref, 'refs/tags/')
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

From 990a0dc3e56a9093728f6a99e771eb0bd7a4100f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 5 Mar 2025 03:03:56 +0000
Subject: [PATCH 35/48] Bump softprops/action-gh-release

Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 4b4d743a9bb620a3869f98d3be3ccdbd313922d3 to 670eb2f21a607fad397653d7e45de8f17ade7cc4.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](https://github.com/softprops/action-gh-release/compare/4b4d743a9bb620a3869f98d3be3ccdbd313922d3...670eb2f21a607fad397653d7e45de8f17ade7cc4)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/releases.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/releases.yml b/.github/workflows/releases.yml
index b6bfb6a8..420f6013 100644
--- a/.github/workflows/releases.yml
+++ b/.github/workflows/releases.yml
@@ -83,7 +83,7 @@ jobs:
           prerelease: false
 
       - name: Upload release assets
-        uses: softprops/action-gh-release@4b4d743a9bb620a3869f98d3be3ccdbd313922d3
+        uses: softprops/action-gh-release@670eb2f21a607fad397653d7e45de8f17ade7cc4
         if: startsWith(github.ref, 'refs/tags/')
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

From f0bac6f0c7c0e56c8df556cfa1b8875c06b2006a Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Wed, 5 Mar 2025 23:00:52 +0100
Subject: [PATCH 36/48] Update deps

---
 go.mod                                        |   8 +-
 go.sum                                        |  16 +-
 .../github.com/jedisct1/go-clocksmith/LICENSE |   2 +-
 vendor/golang.org/x/net/http2/server.go       | 121 ++++---------
 vendor/golang.org/x/net/http2/transport.go    |  52 ++++--
 .../x/net/internal/httpcommon/headermap.go    |   6 +-
 .../x/net/internal/httpcommon/request.go      | 166 ++++++++++++++----
 vendor/golang.org/x/net/proxy/per_host.go     |   8 +-
 vendor/modules.txt                            |  14 +-
 9 files changed, 233 insertions(+), 160 deletions(-)

diff --git a/go.mod b/go.mod
index 483c532b..47164e9d 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
 	github.com/hashicorp/go-immutable-radix v1.3.1
 	github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb
 	github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405
-	github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e
+	github.com/jedisct1/go-clocksmith v0.0.0-20250224222044-e151f21a353a
 	github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774
 	github.com/jedisct1/go-hpke-compact v0.0.0-20241212093903-5caa4621366f
 	github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7
@@ -22,9 +22,9 @@ require (
 	github.com/opencoff/go-sieve v0.2.1
 	github.com/powerman/check v1.8.0
 	github.com/quic-go/quic-go v0.50.0
-	golang.org/x/crypto v0.33.0
-	golang.org/x/net v0.35.0
-	golang.org/x/sys v0.30.0
+	golang.org/x/crypto v0.35.0
+	golang.org/x/net v0.36.0
+	golang.org/x/sys v0.31.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )
 
diff --git a/go.sum b/go.sum
index 627bc5e0..2c13d283 100644
--- a/go.sum
+++ b/go.sum
@@ -39,8 +39,8 @@ github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLa
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405 h1:6j/0utSiy3KhZSpFJgobk+ME1BIwXeq9jepJaDLW3Yg=
 github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405/go.mod h1:OO1HpQNlMCMaPdHPuI00fhChZQZ8npbVTTjMvJUxUqQ=
-github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e h1:tzG4EjKgHIqKVkLIAC4pXTIapuM2BR05uXokEEysAXA=
-github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e/go.mod h1:SAINchklztk2jcLWJ4bpNF4KnwDUSUTX+cJbspWC2Rw=
+github.com/jedisct1/go-clocksmith v0.0.0-20250224222044-e151f21a353a h1:8z8OvuZGZYGuvTeT5RD80ii6B6LftADl0EQr2z5asCg=
+github.com/jedisct1/go-clocksmith v0.0.0-20250224222044-e151f21a353a/go.mod h1:SAINchklztk2jcLWJ4bpNF4KnwDUSUTX+cJbspWC2Rw=
 github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774 h1:DobL5d8UxrYzlD0PbU/EVBAGHuDiFyH46gr6povMw50=
 github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774/go.mod h1:mEGEFZsGe4sG5Mb3Xi89pmsy+TZ0946ArbYMGKAM5uA=
 github.com/jedisct1/go-hpke-compact v0.0.0-20241212093903-5caa4621366f h1:h5/HKrLaDfrb/Zi1y8eCsPGQpcOnKIslT/OpFc81i4c=
@@ -87,21 +87,21 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
 go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
-golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
-golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
+golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
+golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
 golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
 golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
-golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
 golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
 golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
 golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
diff --git a/vendor/github.com/jedisct1/go-clocksmith/LICENSE b/vendor/github.com/jedisct1/go-clocksmith/LICENSE
index 06c6cdbb..fe0d5158 100644
--- a/vendor/github.com/jedisct1/go-clocksmith/LICENSE
+++ b/vendor/github.com/jedisct1/go-clocksmith/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2018-2023 Frank Denis
+Copyright (c) 2018-2025 Frank Denis
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 7434b878..b640deb0 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -2233,25 +2233,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
 func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
 	sc.serveG.check()
 
-	rp := requestParam{
-		method:    f.PseudoValue("method"),
-		scheme:    f.PseudoValue("scheme"),
-		authority: f.PseudoValue("authority"),
-		path:      f.PseudoValue("path"),
-		protocol:  f.PseudoValue("protocol"),
+	rp := httpcommon.ServerRequestParam{
+		Method:    f.PseudoValue("method"),
+		Scheme:    f.PseudoValue("scheme"),
+		Authority: f.PseudoValue("authority"),
+		Path:      f.PseudoValue("path"),
+		Protocol:  f.PseudoValue("protocol"),
 	}
 
 	// extended connect is disabled, so we should not see :protocol
-	if disableExtendedConnectProtocol && rp.protocol != "" {
+	if disableExtendedConnectProtocol && rp.Protocol != "" {
 		return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
 	}
 
-	isConnect := rp.method == "CONNECT"
+	isConnect := rp.Method == "CONNECT"
 	if isConnect {
-		if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
+		if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
 			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
 		}
-	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
+	} else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
 		// See 8.1.2.6 Malformed Requests and Responses:
 		//
 		// Malformed requests or responses that are detected
@@ -2265,15 +2265,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
 	}
 
-	rp.header = make(http.Header)
+	header := make(http.Header)
+	rp.Header = header
 	for _, hf := range f.RegularFields() {
-		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+		header.Add(sc.canonicalHeader(hf.Name), hf.Value)
 	}
-	if rp.authority == "" {
-		rp.authority = rp.header.Get("Host")
+	if rp.Authority == "" {
+		rp.Authority = header.Get("Host")
 	}
-	if rp.protocol != "" {
-		rp.header.Set(":protocol", rp.protocol)
+	if rp.Protocol != "" {
+		header.Set(":protocol", rp.Protocol)
 	}
 
 	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
@@ -2282,7 +2283,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 	}
 	bodyOpen := !f.StreamEnded()
 	if bodyOpen {
-		if vv, ok := rp.header["Content-Length"]; ok {
+		if vv, ok := rp.Header["Content-Length"]; ok {
 			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
 				req.ContentLength = int64(cl)
 			} else {
@@ -2298,84 +2299,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
 	return rw, req, nil
 }
 
-type requestParam struct {
-	method                  string
-	scheme, authority, path string
-	protocol                string
-	header                  http.Header
-}
-
-func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
 	sc.serveG.check()
 
 	var tlsState *tls.ConnectionState // nil if not scheme https
-	if rp.scheme == "https" {
+	if rp.Scheme == "https" {
 		tlsState = sc.tlsState
 	}
 
-	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
-	if needsContinue {
-		rp.header.Del("Expect")
-	}
-	// Merge Cookie headers into one "; "-delimited value.
-	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
-		rp.header.Set("Cookie", strings.Join(cookies, "; "))
-	}
-
-	// Setup Trailers
-	var trailer http.Header
-	for _, v := range rp.header["Trailer"] {
-		for _, key := range strings.Split(v, ",") {
-			key = http.CanonicalHeaderKey(textproto.TrimString(key))
-			switch key {
-			case "Transfer-Encoding", "Trailer", "Content-Length":
-				// Bogus. (copy of http1 rules)
-				// Ignore.
-			default:
-				if trailer == nil {
-					trailer = make(http.Header)
-				}
-				trailer[key] = nil
-			}
-		}
-	}
-	delete(rp.header, "Trailer")
-
-	var url_ *url.URL
-	var requestURI string
-	if rp.method == "CONNECT" && rp.protocol == "" {
-		url_ = &url.URL{Host: rp.authority}
-		requestURI = rp.authority // mimic HTTP/1 server behavior
-	} else {
-		var err error
-		url_, err = url.ParseRequestURI(rp.path)
-		if err != nil {
-			return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
-		}
-		requestURI = rp.path
+	res := httpcommon.NewServerRequest(rp)
+	if res.InvalidReason != "" {
+		return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
 	}
 
 	body := &requestBody{
 		conn:          sc,
 		stream:        st,
-		needsContinue: needsContinue,
+		needsContinue: res.NeedsContinue,
 	}
-	req := &http.Request{
-		Method:     rp.method,
-		URL:        url_,
+	req := (&http.Request{
+		Method:     rp.Method,
+		URL:        res.URL,
 		RemoteAddr: sc.remoteAddrStr,
-		Header:     rp.header,
-		RequestURI: requestURI,
+		Header:     rp.Header,
+		RequestURI: res.RequestURI,
 		Proto:      "HTTP/2.0",
 		ProtoMajor: 2,
 		ProtoMinor: 0,
 		TLS:        tlsState,
-		Host:       rp.authority,
+		Host:       rp.Authority,
 		Body:       body,
-		Trailer:    trailer,
-	}
-	req = req.WithContext(st.ctx)
-
+		Trailer:    res.Trailer,
+	}).WithContext(st.ctx)
 	rw := sc.newResponseWriter(st, req)
 	return rw, req, nil
 }
@@ -3270,12 +3225,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
 		// we start in "half closed (remote)" for simplicity.
 		// See further comments at the definition of stateHalfClosedRemote.
 		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
-		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
-			method:    msg.method,
-			scheme:    msg.url.Scheme,
-			authority: msg.url.Host,
-			path:      msg.url.RequestURI(),
-			header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+		rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
+			Method:    msg.method,
+			Scheme:    msg.url.Scheme,
+			Authority: msg.url.Host,
+			Path:      msg.url.RequestURI(),
+			Header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
 		})
 		if err != nil {
 			// Should not happen, since we've already validated msg.url.
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f2c166b6..f26356b9 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -1286,6 +1286,19 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
 	return 0
 }
 
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func actualContentLength(req *http.Request) int64 {
+	if req.Body == nil || req.Body == http.NoBody {
+		return 0
+	}
+	if req.ContentLength != 0 {
+		return req.ContentLength
+	}
+	return -1
+}
+
 func (cc *ClientConn) decrStreamReservations() {
 	cc.mu.Lock()
 	defer cc.mu.Unlock()
@@ -1310,7 +1323,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
 		reqCancel:            req.Cancel,
 		isHead:               req.Method == "HEAD",
 		reqBody:              req.Body,
-		reqBodyContentLength: httpcommon.ActualContentLength(req),
+		reqBodyContentLength: actualContentLength(req),
 		trace:                httptrace.ContextClientTrace(ctx),
 		peerClosed:           make(chan struct{}),
 		abort:                make(chan struct{}),
@@ -1318,7 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
 		donec:                make(chan struct{}),
 	}
 
-	cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression())
+	cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression())
 
 	go cs.doRequest(req, streamf)
 
@@ -1349,7 +1362,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
 		}
 		res.Request = req
 		res.TLS = cc.tlsState
-		if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 {
+		if res.Body == noBody && actualContentLength(req) == 0 {
 			// If there isn't a request or response body still being
 			// written, then wait for the stream to be closed before
 			// RoundTrip returns.
@@ -1596,12 +1609,7 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
 	// sent by writeRequestBody below, along with any Trailers,
 	// again in form HEADERS{1}, CONTINUATION{0,})
 	cc.hbuf.Reset()
-	res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{
-		Request:               req,
-		AddGzipHeader:         cs.requestedGzip,
-		PeerMaxHeaderListSize: cc.peerMaxHeaderListSize,
-		DefaultUserAgent:      defaultUserAgent,
-	}, func(name, value string) {
+	res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) {
 		cc.writeHeader(name, value)
 	})
 	if err != nil {
@@ -1617,6 +1625,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
 	return err
 }
 
+func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) {
+	return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
+		Request: httpcommon.Request{
+			Header:              req.Header,
+			Trailer:             req.Trailer,
+			URL:                 req.URL,
+			Host:                req.Host,
+			Method:              req.Method,
+			ActualContentLength: actualContentLength(req),
+		},
+		AddGzipHeader:         addGzipHeader,
+		PeerMaxHeaderListSize: peerMaxHeaderListSize,
+		DefaultUserAgent:      defaultUserAgent,
+	}, headerf)
+}
+
 // cleanupWriteRequest performs post-request tasks.
 //
 // If err (the result of writeRequest) is non-nil and the stream is not closed,
@@ -2186,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() {
 	}
 	cc.cond.Broadcast()
 	cc.mu.Unlock()
+
+	if !cc.seenSettings {
+		// If we have a pending request that wants extended CONNECT,
+		// let it continue and fail with the connection error.
+		cc.extendedConnectAllowed = true
+		close(cc.seenSettingsChan)
+	}
 }
 
 // countReadFrameError calls Transport.CountError with a string
@@ -2278,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error {
 			if VerboseLogs {
 				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
 			}
-			if !cc.seenSettings {
-				close(cc.seenSettingsChan)
-			}
 			return err
 		}
 	}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
index ad3fbacd..92483d8e 100644
--- a/vendor/golang.org/x/net/internal/httpcommon/headermap.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
@@ -5,7 +5,7 @@
 package httpcommon
 
 import (
-	"net/http"
+	"net/textproto"
 	"sync"
 )
 
@@ -82,7 +82,7 @@ func buildCommonHeaderMaps() {
 	commonLowerHeader = make(map[string]string, len(common))
 	commonCanonHeader = make(map[string]string, len(common))
 	for _, v := range common {
-		chk := http.CanonicalHeaderKey(v)
+		chk := textproto.CanonicalMIMEHeaderKey(v)
 		commonLowerHeader[chk] = v
 		commonCanonHeader[v] = chk
 	}
@@ -104,7 +104,7 @@ func CanonicalHeader(v string) string {
 	if s, ok := commonCanonHeader[v]; ok {
 		return s
 	}
-	return http.CanonicalHeaderKey(v)
+	return textproto.CanonicalMIMEHeaderKey(v)
 }
 
 // CachedCanonicalHeader returns the canonical form of a well-known header name.
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
index 34391477..4b705531 100644
--- a/vendor/golang.org/x/net/internal/httpcommon/request.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -5,10 +5,12 @@
 package httpcommon
 
 import (
+	"context"
 	"errors"
 	"fmt"
-	"net/http"
 	"net/http/httptrace"
+	"net/textproto"
+	"net/url"
 	"sort"
 	"strconv"
 	"strings"
@@ -21,9 +23,21 @@ var (
 	ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
 )
 
+// Request is a subset of http.Request.
+// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http
+// without creating a dependency cycle.
+type Request struct {
+	URL                 *url.URL
+	Method              string
+	Host                string
+	Header              map[string][]string
+	Trailer             map[string][]string
+	ActualContentLength int64 // 0 means 0, -1 means unknown
+}
+
 // EncodeHeadersParam is parameters to EncodeHeaders.
 type EncodeHeadersParam struct {
-	Request *http.Request
+	Request Request
 
 	// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
 	// added to the request.
@@ -47,11 +61,11 @@ type EncodeHeadersResult struct {
 // It validates a request and calls headerf with each pseudo-header and header
 // for the request.
 // The headerf function is called with the validated, canonicalized header name.
-func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
+func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
 	req := param.Request
 
 	// Check for invalid connection-level headers.
-	if err := checkConnHeaders(req); err != nil {
+	if err := checkConnHeaders(req.Header); err != nil {
 		return res, err
 	}
 
@@ -73,7 +87,10 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
 
 	// isNormalConnect is true if this is a non-extended CONNECT request.
 	isNormalConnect := false
-	protocol := req.Header.Get(":protocol")
+	var protocol string
+	if vv := req.Header[":protocol"]; len(vv) > 0 {
+		protocol = vv[0]
+	}
 	if req.Method == "CONNECT" && protocol == "" {
 		isNormalConnect = true
 	} else if protocol != "" && req.Method != "CONNECT" {
@@ -107,9 +124,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
 		return res, fmt.Errorf("invalid HTTP trailer %s", err)
 	}
 
-	contentLength := ActualContentLength(req)
-
-	trailers, err := commaSeparatedTrailers(req)
+	trailers, err := commaSeparatedTrailers(req.Trailer)
 	if err != nil {
 		return res, err
 	}
@@ -123,7 +138,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
 		f(":authority", host)
 		m := req.Method
 		if m == "" {
-			m = http.MethodGet
+			m = "GET"
 		}
 		f(":method", m)
 		if !isNormalConnect {
@@ -198,8 +213,8 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
 				f(k, v)
 			}
 		}
-		if shouldSendReqContentLength(req.Method, contentLength) {
-			f("content-length", strconv.FormatInt(contentLength, 10))
+		if shouldSendReqContentLength(req.Method, req.ActualContentLength) {
+			f("content-length", strconv.FormatInt(req.ActualContentLength, 10))
 		}
 		if param.AddGzipHeader {
 			f("accept-encoding", "gzip")
@@ -225,7 +240,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
 		}
 	}
 
-	trace := httptrace.ContextClientTrace(req.Context())
+	trace := httptrace.ContextClientTrace(ctx)
 
 	// Header list size is ok. Write the headers.
 	enumerateHeaders(func(name, value string) {
@@ -243,19 +258,19 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (
 		}
 	})
 
-	res.HasBody = contentLength != 0
+	res.HasBody = req.ActualContentLength != 0
 	res.HasTrailers = trailers != ""
 	return res, nil
 }
 
 // IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
 // for a request.
-func IsRequestGzip(req *http.Request, disableCompression bool) bool {
+func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
 	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
 	if !disableCompression &&
-		req.Header.Get("Accept-Encoding") == "" &&
-		req.Header.Get("Range") == "" &&
-		req.Method != "HEAD" {
+		len(header["Accept-Encoding"]) == 0 &&
+		len(header["Range"]) == 0 &&
+		method != "HEAD" {
 		// Request gzip only, not deflate. Deflate is ambiguous and
 		// not as universally supported anyway.
 		// See: https://zlib.net/zlib_faq.html#faq39
@@ -280,22 +295,22 @@ func IsRequestGzip(req *http.Request, disableCompression bool) bool {
 //
 // Certain headers are special-cased as okay but not transmitted later.
 // For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
-func checkConnHeaders(req *http.Request) error {
-	if v := req.Header.Get("Upgrade"); v != "" {
-		return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"])
+func checkConnHeaders(h map[string][]string) error {
+	if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") {
+		return fmt.Errorf("invalid Upgrade request header: %q", vv)
 	}
-	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+	if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
 		return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
 	}
-	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
+	if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
 		return fmt.Errorf("invalid Connection request header: %q", vv)
 	}
 	return nil
 }
 
-func commaSeparatedTrailers(req *http.Request) (string, error) {
-	keys := make([]string, 0, len(req.Trailer))
-	for k := range req.Trailer {
+func commaSeparatedTrailers(trailer map[string][]string) (string, error) {
+	keys := make([]string, 0, len(trailer))
+	for k := range trailer {
 		k = CanonicalHeader(k)
 		switch k {
 		case "Transfer-Encoding", "Trailer", "Content-Length":
@@ -310,19 +325,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
 	return "", nil
 }
 
-// ActualContentLength returns a sanitized version of
-// req.ContentLength, where 0 actually means zero (not unknown) and -1
-// means unknown.
-func ActualContentLength(req *http.Request) int64 {
-	if req.Body == nil || req.Body == http.NoBody {
-		return 0
-	}
-	if req.ContentLength != 0 {
-		return req.ContentLength
-	}
-	return -1
-}
-
 // validPseudoPath reports whether v is a valid :path pseudo-header
 // value. It must be either:
 //
@@ -340,7 +342,7 @@ func validPseudoPath(v string) bool {
 	return (len(v) > 0 && v[0] == '/') || v == "*"
 }
 
-func validateHeaders(hdrs http.Header) string {
+func validateHeaders(hdrs map[string][]string) string {
 	for k, vv := range hdrs {
 		if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
 			return fmt.Sprintf("name %q", k)
@@ -377,3 +379,89 @@ func shouldSendReqContentLength(method string, contentLength int64) bool {
 		return false
 	}
 }
+
+// ServerRequestParam is parameters to NewServerRequest.
+type ServerRequestParam struct {
+	Method                  string
+	Scheme, Authority, Path string
+	Protocol                string
+	Header                  map[string][]string
+}
+
+// ServerRequestResult is the result of NewServerRequest.
+type ServerRequestResult struct {
+	// Various http.Request fields.
+	URL        *url.URL
+	RequestURI string
+	Trailer    map[string][]string
+
+	NeedsContinue bool // client provided an "Expect: 100-continue" header
+
+	// If the request should be rejected, this is a short string suitable for passing
+	// to the http2 package's CountError function.
+	// It might be a bit odd to return errors this way rather than returing an error,
+	// but this ensures we don't forget to include a CountError reason.
+	InvalidReason string
+}
+
+func NewServerRequest(rp ServerRequestParam) ServerRequestResult {
+	needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue")
+	if needsContinue {
+		delete(rp.Header, "Expect")
+	}
+	// Merge Cookie headers into one "; "-delimited value.
+	if cookies := rp.Header["Cookie"]; len(cookies) > 1 {
+		rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")}
+	}
+
+	// Setup Trailers
+	var trailer map[string][]string
+	for _, v := range rp.Header["Trailer"] {
+		for _, key := range strings.Split(v, ",") {
+			key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key))
+			switch key {
+			case "Transfer-Encoding", "Trailer", "Content-Length":
+				// Bogus. (copy of http1 rules)
+				// Ignore.
+			default:
+				if trailer == nil {
+					trailer = make(map[string][]string)
+				}
+				trailer[key] = nil
+			}
+		}
+	}
+	delete(rp.Header, "Trailer")
+
+	// "':authority' MUST NOT include the deprecated userinfo subcomponent
+	// for "http" or "https" schemed URIs."
+	// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8
+	if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") {
+		return ServerRequestResult{
+			InvalidReason: "userinfo_in_authority",
+		}
+	}
+
+	var url_ *url.URL
+	var requestURI string
+	if rp.Method == "CONNECT" && rp.Protocol == "" {
+		url_ = &url.URL{Host: rp.Authority}
+		requestURI = rp.Authority // mimic HTTP/1 server behavior
+	} else {
+		var err error
+		url_, err = url.ParseRequestURI(rp.Path)
+		if err != nil {
+			return ServerRequestResult{
+				InvalidReason: "bad_path",
+			}
+		}
+		requestURI = rp.Path
+	}
+
+	return ServerRequestResult{
+		URL:           url_,
+		NeedsContinue: needsContinue,
+		RequestURI:    requestURI,
+		Trailer:       trailer,
+	}
+}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
index d7d4b8b6..32bdf435 100644
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -7,6 +7,7 @@ package proxy
 import (
 	"context"
 	"net"
+	"net/netip"
 	"strings"
 )
 
@@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.
 }
 
 func (p *PerHost) dialerForRequest(host string) Dialer {
-	if ip := net.ParseIP(host); ip != nil {
+	if nip, err := netip.ParseAddr(host); err == nil {
+		ip := net.IP(nip.AsSlice())
 		for _, net := range p.bypassNetworks {
 			if net.Contains(ip) {
 				return p.bypass
@@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) {
 			}
 			continue
 		}
-		if ip := net.ParseIP(host); ip != nil {
-			p.AddIP(ip)
+		if nip, err := netip.ParseAddr(host); err == nil {
+			p.AddIP(net.IP(nip.AsSlice()))
 			continue
 		}
 		if strings.HasPrefix(host, "*.") {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index db8a4148..6a9501ac 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -44,7 +44,7 @@ github.com/hectane/go-acl/api
 # github.com/jedisct1/dlog v0.0.0-20241212093805-3c5fd791b405
 ## explicit; go 1.23
 github.com/jedisct1/dlog
-# github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e
+# github.com/jedisct1/go-clocksmith v0.0.0-20250224222044-e151f21a353a
 ## explicit
 github.com/jedisct1/go-clocksmith
 # github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774
@@ -131,8 +131,8 @@ github.com/smartystreets/goconvey/convey/reporting
 ## explicit; go 1.22
 go.uber.org/mock/mockgen
 go.uber.org/mock/mockgen/model
-# golang.org/x/crypto v0.33.0
-## explicit; go 1.20
+# golang.org/x/crypto v0.35.0
+## explicit; go 1.23.0
 golang.org/x/crypto/blake2b
 golang.org/x/crypto/chacha20
 golang.org/x/crypto/chacha20poly1305
@@ -154,8 +154,8 @@ golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.35.0
-## explicit; go 1.18
+# golang.org/x/net v0.36.0
+## explicit; go 1.23.0
 golang.org/x/net/bpf
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
@@ -171,8 +171,8 @@ golang.org/x/net/proxy
 # golang.org/x/sync v0.11.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.30.0
-## explicit; go 1.18
+# golang.org/x/sys v0.31.0
+## explicit; go 1.23.0
 golang.org/x/sys/cpu
 golang.org/x/sys/unix
 golang.org/x/sys/windows

From 0d89626420649f801e5bdb58b62b392449047fba Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Mon, 10 Mar 2025 12:01:55 +0100
Subject: [PATCH 37/48] Don't run the permissions checks on non-Unix platforms

This is way too annoying on Windows systems.
---
 dnscrypt-proxy/common.go           | 31 -------------------------
 dnscrypt-proxy/permcheck_others.go |  7 ++++++
 dnscrypt-proxy/permcheck_unix.go   | 36 ++++++++++++++++++++++++++++++
 3 files changed, 43 insertions(+), 31 deletions(-)
 create mode 100644 dnscrypt-proxy/permcheck_others.go
 create mode 100644 dnscrypt-proxy/permcheck_unix.go

diff --git a/dnscrypt-proxy/common.go b/dnscrypt-proxy/common.go
index 647ccfac..54cac060 100644
--- a/dnscrypt-proxy/common.go
+++ b/dnscrypt-proxy/common.go
@@ -6,12 +6,9 @@ import (
 	"errors"
 	"net"
 	"os"
-	"path"
 	"strconv"
 	"strings"
 	"unicode"
-
-	"github.com/jedisct1/dlog"
 )
 
 type CryptoConstruction uint16
@@ -167,31 +164,3 @@ func ReadTextFile(filename string) (string, error) {
 }
 
 func isDigit(b byte) bool { return b >= '0' && b <= '9' }
-
-func maybeWritableByOtherUsers(p string) (bool, string, error) {
-	p = path.Clean(p)
-	for p != "/" && p != "." {
-		st, err := os.Stat(p)
-		if err != nil {
-			return false, p, err
-		}
-		mode := st.Mode()
-		if mode.Perm()&2 != 0 && !(st.IsDir() && mode&os.ModeSticky == os.ModeSticky) {
-			return true, p, nil
-		}
-		p = path.Dir(p)
-	}
-	return false, "", nil
-}
-
-func WarnIfMaybeWritableByOtherUsers(p string) {
-	if ok, px, err := maybeWritableByOtherUsers(p); ok {
-		if px == p {
-			dlog.Criticalf("[%s] is writable by other system users - If this is not intentional, it is recommended to fix the access permissions", p)
-		} else {
-			dlog.Warnf("[%s] can be modified by other system users because [%s] is writable by other users - If this is not intentional, it is recommended to fix the access permissions", p, px)
-		}
-	} else if err != nil {
-		dlog.Warnf("Error while checking if [%s] is accessible: [%s] : [%s]", p, px, err)
-	}
-}
diff --git a/dnscrypt-proxy/permcheck_others.go b/dnscrypt-proxy/permcheck_others.go
new file mode 100644
index 00000000..98d71642
--- /dev/null
+++ b/dnscrypt-proxy/permcheck_others.go
@@ -0,0 +1,7 @@
+//go:build !unix
+
+package main
+
+func WarnIfMaybeWritableByOtherUsers(p string) {
+	// No-op
+}
diff --git a/dnscrypt-proxy/permcheck_unix.go b/dnscrypt-proxy/permcheck_unix.go
new file mode 100644
index 00000000..89305a23
--- /dev/null
+++ b/dnscrypt-proxy/permcheck_unix.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+	"os"
+	"path"
+
+	"github.com/jedisct1/dlog"
+)
+
+func maybeWritableByOtherUsers(p string) (bool, string, error) {
+	p = path.Clean(p)
+	for p != "/" && p != "." {
+		st, err := os.Stat(p)
+		if err != nil {
+			return false, p, err
+		}
+		mode := st.Mode()
+		if mode.Perm()&2 != 0 && !(st.IsDir() && mode&os.ModeSticky == os.ModeSticky) {
+			return true, p, nil
+		}
+		p = path.Dir(p)
+	}
+	return false, "", nil
+}
+
+func WarnIfMaybeWritableByOtherUsers(p string) {
+	if ok, px, err := maybeWritableByOtherUsers(p); ok {
+		if px == p {
+			dlog.Criticalf("[%s] is writable by other system users - If this is not intentional, it is recommended to fix the access permissions", p)
+		} else {
+			dlog.Warnf("[%s] can be modified by other system users because [%s] is writable by other users - If this is not intentional, it is recommended to fix the access permissions", p, px)
+		}
+	} else if err != nil {
+		dlog.Warnf("Error while checking if [%s] is accessible: [%s] : [%s]", p, px, err)
+	}
+}

From c1d8e5cc57986ac0b46e30a3d417ad4e710bc001 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Mon, 10 Mar 2025 12:08:55 +0100
Subject: [PATCH 38/48] The port number is no longer required for IPv6
 addresses in forwarding rules

---
 dnscrypt-proxy/example-forwarding-rules.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dnscrypt-proxy/example-forwarding-rules.txt b/dnscrypt-proxy/example-forwarding-rules.txt
index 4577b28b..33807cca 100644
--- a/dnscrypt-proxy/example-forwarding-rules.txt
+++ b/dnscrypt-proxy/example-forwarding-rules.txt
@@ -36,7 +36,7 @@
 # example.com      9.9.9.9,8.8.8.8
 
 ## Forward queries to a resolver using IPv6
-# ipv6.example.com [2001:DB8::42]:53
+# ipv6.example.com [2001:DB8::42]
 
 ## Forward queries for .onion names to a local Tor client
 ## Tor must be configured with the following in the torrc file:

From 9ab4c0b3394b4036e36f246df981dcac8c4eaf4b Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Mon, 10 Mar 2025 12:12:55 +0100
Subject: [PATCH 39/48] Restore the ability to forward to non-standard ports

Older versions of dnscrypt-proxy allowed an optional port number to be
specified in forwarding rules, but support for this was dropped in
version 2.1.6.

Restore that ability.

Fixes #2802
---
 dnscrypt-proxy/example-forwarding-rules.txt |  4 ++
 dnscrypt-proxy/plugin_forward.go            | 45 +++++++++++++++++----
 2 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/dnscrypt-proxy/example-forwarding-rules.txt b/dnscrypt-proxy/example-forwarding-rules.txt
index 33807cca..e6952ec8 100644
--- a/dnscrypt-proxy/example-forwarding-rules.txt
+++ b/dnscrypt-proxy/example-forwarding-rules.txt
@@ -38,6 +38,10 @@
 ## Forward queries to a resolver using IPv6
 # ipv6.example.com [2001:DB8::42]
 
+## Forward to a non-standard port number
+# x.example.com    192.168.0.1:1053
+# y.example.com    [2001:DB8::42]:1053
+
 ## Forward queries for .onion names to a local Tor client
 ## Tor must be configured with the following in the torrc file:
 ## DNSPort 9053
diff --git a/dnscrypt-proxy/plugin_forward.go b/dnscrypt-proxy/plugin_forward.go
index e3352055..ff144c1a 100644
--- a/dnscrypt-proxy/plugin_forward.go
+++ b/dnscrypt-proxy/plugin_forward.go
@@ -102,14 +102,9 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
 					dlog.Criticalf("Unknown keyword [%s] at line %d", server, 1+lineNo)
 					continue
 				}
-				server = strings.TrimPrefix(server, "[")
-				server = strings.TrimSuffix(server, "]")
-				if ip := net.ParseIP(server); ip != nil {
-					if ip.To4() != nil {
-						server = fmt.Sprintf("%s:%d", server, 53)
-					} else {
-						server = fmt.Sprintf("[%s]:%d", server, 53)
-					}
+				if server, err = normalizeIPAndOptionalPort(server, "53"); err != nil {
+					dlog.Criticalf("Syntax error for a forwarding rule at line %d: %s", 1+lineNo, err)
+					continue
 				}
 				idxServers := -1
 				for i, item := range sequence {
@@ -252,3 +247,37 @@ func (plugin *PluginForward) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
 	}
 	return err
 }
+
+func normalizeIPAndOptionalPort(addr string, defaultPort string) (string, error) {
+	var host, port string
+	var err error
+
+	if strings.HasPrefix(addr, "[") {
+		if !strings.Contains(addr, "]:") {
+			if addr[len(addr)-1] != ']' {
+				return "", fmt.Errorf("invalid IPv6 format: missing closing ']'")
+			}
+			host = addr[1 : len(addr)-1]
+			port = defaultPort
+		} else {
+			host, port, err = net.SplitHostPort(addr)
+			if err != nil {
+				return "", err
+			}
+		}
+	} else {
+		host, port, err = net.SplitHostPort(addr)
+		if err != nil {
+			host = addr
+			port = defaultPort
+		}
+	}
+	ip := net.ParseIP(host)
+	if ip == nil {
+		return "", fmt.Errorf("invalid IP address: [%s]", host)
+	}
+	if ip.To4() != nil {
+		return fmt.Sprintf("%s:%s", ip.String(), port), nil
+	}
+	return fmt.Sprintf("[%s]:%s", ip.String(), port), nil
+}

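For reference, here is a minimal table-driven sketch of how the new normalizeIPAndOptionalPort helper is expected to behave. It assumes the helper lives in package main exactly as added above; the expected outputs are inferred from the patch rather than taken from an existing test file.

package main

import "testing"

// Hypothetical test sketch, not part of the patch above.
func TestNormalizeIPAndOptionalPort(t *testing.T) {
	cases := []struct{ in, want string }{
		{"192.168.0.1", "192.168.0.1:53"},              // IPv4, default port appended
		{"192.168.0.1:1053", "192.168.0.1:1053"},       // IPv4 with an explicit port kept
		{"[2001:DB8::42]", "[2001:db8::42]:53"},        // IPv6, default port appended
		{"[2001:DB8::42]:1053", "[2001:db8::42]:1053"}, // IPv6 with an explicit port kept
	}
	for _, c := range cases {
		got, err := normalizeIPAndOptionalPort(c.in, "53")
		if err != nil {
			t.Fatalf("normalizeIPAndOptionalPort(%q): %v", c.in, err)
		}
		if got != c.want {
			t.Errorf("normalizeIPAndOptionalPort(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}
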
From 231f438beec7a9e50dadd4e9dd22d50638a4a9d0 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Mon, 10 Mar 2025 23:01:11 +0100
Subject: [PATCH 40/48] Update deps

---
 go.mod                                         | 10 +++++-----
 go.sum                                         | 16 ++++++++--------
 vendor/golang.org/x/sync/errgroup/errgroup.go  |  2 +-
 vendor/golang.org/x/sync/errgroup/go120.go     | 13 -------------
 vendor/golang.org/x/sync/errgroup/pre_go120.go | 14 --------------
 vendor/modules.txt                             | 12 ++++++------
 6 files changed, 20 insertions(+), 47 deletions(-)
 delete mode 100644 vendor/golang.org/x/sync/errgroup/go120.go
 delete mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go

diff --git a/go.mod b/go.mod
index 47164e9d..bf0daa62 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/dnscrypt/dnscrypt-proxy
 
-go 1.24.0
+go 1.24.1
 
 require (
 	github.com/BurntSushi/toml v1.4.0
@@ -22,8 +22,8 @@ require (
 	github.com/opencoff/go-sieve v0.2.1
 	github.com/powerman/check v1.8.0
 	github.com/quic-go/quic-go v0.50.0
-	golang.org/x/crypto v0.35.0
-	golang.org/x/net v0.36.0
+	golang.org/x/crypto v0.36.0
+	golang.org/x/net v0.37.0
 	golang.org/x/sys v0.31.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )
@@ -44,8 +44,8 @@ require (
 	go.uber.org/mock v0.5.0 // indirect
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
 	golang.org/x/mod v0.18.0 // indirect
-	golang.org/x/sync v0.11.0 // indirect
-	golang.org/x/text v0.22.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/text v0.23.0 // indirect
 	golang.org/x/tools v0.22.0 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.56.3 // indirect
diff --git a/go.sum b/go.sum
index 2c13d283..2df642bf 100644
--- a/go.sum
+++ b/go.sum
@@ -87,23 +87,23 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
 go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
-golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
-golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
 golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
 golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
-golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
 golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index b8322598..a4ea5d14 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -46,7 +46,7 @@ func (g *Group) done() {
 // returns a non-nil error or the first time Wait returns, whichever occurs
 // first.
 func WithContext(ctx context.Context) (*Group, context.Context) {
-	ctx, cancel := withCancelCause(ctx)
+	ctx, cancel := context.WithCancelCause(ctx)
 	return &Group{cancel: cancel}, ctx
 }
 
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go
deleted file mode 100644
index f93c740b..00000000
--- a/vendor/golang.org/x/sync/errgroup/go120.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-
-package errgroup
-
-import "context"
-
-func withCancelCause(parent context.Context) (context.Context, func(error)) {
-	return context.WithCancelCause(parent)
-}
diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go
deleted file mode 100644
index 88ce3343..00000000
--- a/vendor/golang.org/x/sync/errgroup/pre_go120.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.20
-
-package errgroup
-
-import "context"
-
-func withCancelCause(parent context.Context) (context.Context, func(error)) {
-	ctx, cancel := context.WithCancel(parent)
-	return ctx, func(error) { cancel() }
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6a9501ac..d44ea78a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -131,7 +131,7 @@ github.com/smartystreets/goconvey/convey/reporting
 ## explicit; go 1.22
 go.uber.org/mock/mockgen
 go.uber.org/mock/mockgen/model
-# golang.org/x/crypto v0.35.0
+# golang.org/x/crypto v0.36.0
 ## explicit; go 1.23.0
 golang.org/x/crypto/blake2b
 golang.org/x/crypto/chacha20
@@ -154,7 +154,7 @@ golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.36.0
+# golang.org/x/net v0.37.0
 ## explicit; go 1.23.0
 golang.org/x/net/bpf
 golang.org/x/net/http/httpguts
@@ -168,8 +168,8 @@ golang.org/x/net/internal/socks
 golang.org/x/net/ipv4
 golang.org/x/net/ipv6
 golang.org/x/net/proxy
-# golang.org/x/sync v0.11.0
-## explicit; go 1.18
+# golang.org/x/sync v0.12.0
+## explicit; go 1.23.0
 golang.org/x/sync/errgroup
 # golang.org/x/sys v0.31.0
 ## explicit; go 1.23.0
@@ -180,8 +180,8 @@ golang.org/x/sys/windows/registry
 golang.org/x/sys/windows/svc
 golang.org/x/sys/windows/svc/eventlog
 golang.org/x/sys/windows/svc/mgr
-# golang.org/x/text v0.22.0
-## explicit; go 1.18
+# golang.org/x/text v0.23.0
+## explicit; go 1.23.0
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi

From df9520e5975bb54e53d039b6d5892ccf4a8d00b2 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Thu, 13 Mar 2025 23:02:34 +0100
Subject: [PATCH 41/48] Missing go:build guard

---
 dnscrypt-proxy/permcheck_unix.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/dnscrypt-proxy/permcheck_unix.go b/dnscrypt-proxy/permcheck_unix.go
index 89305a23..9ca3b8b1 100644
--- a/dnscrypt-proxy/permcheck_unix.go
+++ b/dnscrypt-proxy/permcheck_unix.go
@@ -1,3 +1,5 @@
+//go:build unix
+
 package main
 
 import (

From 3771f06828a00b1b34ab14d1a61398bcaaf389cd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 14 Mar 2025 03:51:47 +0000
Subject: [PATCH 42/48] Bump softprops/action-gh-release

Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 670eb2f21a607fad397653d7e45de8f17ade7cc4 to ab50eebb6488051c6788d97fa95232267c6a4e23.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](https://github.com/softprops/action-gh-release/compare/670eb2f21a607fad397653d7e45de8f17ade7cc4...ab50eebb6488051c6788d97fa95232267c6a4e23)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] 
---
 .github/workflows/releases.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/releases.yml b/.github/workflows/releases.yml
index 420f6013..60bb9244 100644
--- a/.github/workflows/releases.yml
+++ b/.github/workflows/releases.yml
@@ -83,7 +83,7 @@ jobs:
           prerelease: false
 
       - name: Upload release assets
-        uses: softprops/action-gh-release@670eb2f21a607fad397653d7e45de8f17ade7cc4
+        uses: softprops/action-gh-release@ab50eebb6488051c6788d97fa95232267c6a4e23
         if: startsWith(github.ref, 'refs/tags/')
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

From 5d2519e2c195ac17d3c36ba531af502dfa7fc321 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 21 Mar 2025 12:04:37 +0100
Subject: [PATCH 43/48] Update deps, especially quic-go

---
 go.mod                                        |  10 +-
 go.sum                                        |  20 +-
 vendor/github.com/BurntSushi/toml/README.md   |   2 +-
 vendor/github.com/BurntSushi/toml/decode.go   |  33 +-
 vendor/github.com/BurntSushi/toml/encode.go   |  46 +-
 vendor/github.com/BurntSushi/toml/error.go    |  69 +--
 vendor/github.com/BurntSushi/toml/lex.go      |  33 +-
 vendor/github.com/BurntSushi/toml/meta.go     |   3 -
 vendor/github.com/BurntSushi/toml/parse.go    |  17 +-
 vendor/github.com/miekg/dns/README.md         |   5 +-
 vendor/github.com/miekg/dns/edns.go           |  37 +-
 vendor/github.com/miekg/dns/scan_rr.go        |  10 +
 vendor/github.com/miekg/dns/svcb.go           |  12 +-
 vendor/github.com/miekg/dns/types.go          |  10 +
 vendor/github.com/miekg/dns/version.go        |   2 +-
 vendor/github.com/miekg/dns/zduplicate.go     |  17 +
 vendor/github.com/miekg/dns/zmsg.go           |  19 +
 vendor/github.com/miekg/dns/ztypes.go         |  15 +
 .../ackhandler/sent_packet_handler.go         |   7 +-
 vendor/golang.org/x/mod/LICENSE               |   4 +-
 vendor/golang.org/x/mod/modfile/read.go       |  12 +-
 vendor/golang.org/x/mod/modfile/rule.go       |  80 ++-
 vendor/golang.org/x/mod/modfile/work.go       |   2 +-
 vendor/golang.org/x/mod/module/module.go      |   2 -
 vendor/golang.org/x/tools/LICENSE             |   4 +-
 .../x/tools/go/ast/astutil/enclosing.go       |  24 +-
 .../x/tools/go/ast/astutil/imports.go         |   5 +
 .../golang.org/x/tools/go/ast/astutil/util.go |  11 +-
 .../x/tools/go/ast/inspector/inspector.go     | 152 +++--
 .../x/tools/go/ast/inspector/iter.go          |  85 +++
 .../x/tools/go/ast/inspector/typeof.go        |   5 +-
 .../x/tools/go/ast/inspector/walk.go          | 341 +++++++++++
 .../x/tools/go/gcexportdata/gcexportdata.go   | 117 ++--
 .../tools/go/internal/packagesdriver/sizes.go |  54 --
 vendor/golang.org/x/tools/go/packages/doc.go  |  15 +-
 .../x/tools/go/packages/external.go           |  15 +-
 .../golang.org/x/tools/go/packages/golist.go  |  90 ++-
 .../x/tools/go/packages/loadmode_string.go    |  73 ++-
 .../x/tools/go/packages/packages.go           | 413 ++++++++------
 .../golang.org/x/tools/go/packages/visit.go   |   9 +
 .../x/tools/go/types/objectpath/objectpath.go | 186 ++++--
 .../x/tools/go/types/typeutil/callee.go       |  68 +++
 .../x/tools/go/types/typeutil/imports.go      |  30 +
 .../x/tools/go/types/typeutil/map.go          | 470 +++++++++++++++
 .../tools/go/types/typeutil/methodsetcache.go |  71 +++
 .../x/tools/go/types/typeutil/ui.go           |  53 ++
 .../x/tools/internal/aliases/aliases.go       |  10 +-
 .../x/tools/internal/aliases/aliases_go121.go |  31 -
 .../x/tools/internal/aliases/aliases_go122.go |  55 +-
 .../x/tools/internal/astutil/edge/edge.go     | 295 ++++++++++
 .../x/tools/internal/gcimporter/bimport.go    |  61 --
 .../x/tools/internal/gcimporter/exportdata.go | 448 +++++++++++++--
 .../x/tools/internal/gcimporter/gcimporter.go | 182 +-----
 .../x/tools/internal/gcimporter/iexport.go    | 284 +++++++++-
 .../x/tools/internal/gcimporter/iimport.go    |  55 +-
 .../internal/gcimporter/iimport_go122.go      |  53 ++
 .../internal/gcimporter/newInterface10.go     |  22 -
 .../internal/gcimporter/newInterface11.go     |  14 -
 .../tools/internal/gcimporter/predeclared.go  |  91 +++
 .../x/tools/internal/gcimporter/support.go    |  30 +
 .../internal/gcimporter/support_go118.go      |  34 --
 .../x/tools/internal/gcimporter/unified_no.go |  10 -
 .../tools/internal/gcimporter/unified_yes.go  |  10 -
 .../tools/internal/gcimporter/ureader_yes.go  |  59 +-
 .../x/tools/internal/gocommand/invoke.go      |  79 ++-
 .../internal/gocommand/invoke_notunix.go      |  13 +
 .../x/tools/internal/gocommand/invoke_unix.go |  13 +
 .../x/tools/internal/imports/fix.go           | 536 +++++++++---------
 .../x/tools/internal/imports/imports.go       |  33 +-
 .../x/tools/internal/imports/mod.go           |  17 +-
 .../x/tools/internal/imports/source.go        |  63 ++
 .../x/tools/internal/imports/source_env.go    | 129 +++++
 .../tools/internal/imports/source_modindex.go | 103 ++++
 .../x/tools/internal/modindex/directories.go  | 135 +++++
 .../x/tools/internal/modindex/index.go        | 266 +++++++++
 .../x/tools/internal/modindex/lookup.go       | 178 ++++++
 .../x/tools/internal/modindex/modindex.go     | 164 ++++++
 .../x/tools/internal/modindex/symbols.go      | 218 +++++++
 .../x/tools/internal/modindex/types.go        |  25 +
 .../internal/packagesinternal/packages.go     |   6 +-
 .../x/tools/internal/pkgbits/decoder.go       |  38 +-
 .../x/tools/internal/pkgbits/encoder.go       |  43 +-
 .../x/tools/internal/pkgbits/frames_go1.go    |  21 -
 .../x/tools/internal/pkgbits/frames_go17.go   |  28 -
 .../x/tools/internal/pkgbits/support.go       |   2 +-
 .../x/tools/internal/pkgbits/sync.go          |  23 +
 .../internal/pkgbits/syncmarker_string.go     |   7 +-
 .../x/tools/internal/pkgbits/version.go       |  85 +++
 .../x/tools/internal/stdlib/manifest.go       | 330 +++++++++++
 .../internal/tokeninternal/tokeninternal.go   | 137 -----
 .../x/tools/internal/typeparams/common.go     |  68 +++
 .../x/tools/internal/typeparams/coretype.go   | 155 +++++
 .../x/tools/internal/typeparams/free.go       | 131 +++++
 .../x/tools/internal/typeparams/normalize.go  | 218 +++++++
 .../x/tools/internal/typeparams/termlist.go   | 163 ++++++
 .../x/tools/internal/typeparams/typeterm.go   | 169 ++++++
 .../x/tools/internal/typesinternal/element.go | 133 +++++
 .../tools/internal/typesinternal/errorcode.go |  10 +-
 .../tools/internal/typesinternal/qualifier.go |  46 ++
 .../x/tools/internal/typesinternal/recv.go    |  11 +-
 .../x/tools/internal/typesinternal/types.go   |  77 +++
 .../x/tools/internal/typesinternal/varkind.go |  40 ++
 .../tools/internal/typesinternal/zerovalue.go | 392 +++++++++++++
 .../x/tools/internal/versions/toolchain.go    |  14 -
 .../internal/versions/toolchain_go119.go      |  14 -
 .../internal/versions/toolchain_go120.go      |  14 -
 .../internal/versions/toolchain_go121.go      |  14 -
 .../x/tools/internal/versions/types.go        |  28 +-
 .../x/tools/internal/versions/types_go121.go  |  30 -
 .../x/tools/internal/versions/types_go122.go  |  41 --
 vendor/modules.txt                            |  22 +-
 111 files changed, 7205 insertions(+), 1779 deletions(-)
 create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/iter.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/walk.go
 delete mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui.go
 delete mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
 create mode 100644 vendor/golang.org/x/tools/internal/astutil/edge/edge.go
 create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
 create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
 create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
 create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
 create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
 create mode 100644 vendor/golang.org/x/tools/internal/imports/source.go
 create mode 100644 vendor/golang.org/x/tools/internal/imports/source_env.go
 create mode 100644 vendor/golang.org/x/tools/internal/imports/source_modindex.go
 create mode 100644 vendor/golang.org/x/tools/internal/modindex/directories.go
 create mode 100644 vendor/golang.org/x/tools/internal/modindex/index.go
 create mode 100644 vendor/golang.org/x/tools/internal/modindex/lookup.go
 create mode 100644 vendor/golang.org/x/tools/internal/modindex/modindex.go
 create mode 100644 vendor/golang.org/x/tools/internal/modindex/symbols.go
 create mode 100644 vendor/golang.org/x/tools/internal/modindex/types.go
 delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
 delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
 create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/version.go
 delete mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/free.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go
 create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/element.go
 create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
 create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/varkind.go
 create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go

diff --git a/go.mod b/go.mod
index bf0daa62..54bb6326 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/dnscrypt/dnscrypt-proxy
 go 1.24.1
 
 require (
-	github.com/BurntSushi/toml v1.4.0
+	github.com/BurntSushi/toml v1.5.0
 	github.com/VividCortex/ewma v1.2.0
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
 	github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
@@ -18,10 +18,10 @@ require (
 	github.com/k-sone/critbitgo v1.4.0
 	github.com/kardianos/service v1.2.2
 	github.com/lifenjoiner/dhcpdns v0.0.7
-	github.com/miekg/dns v1.1.63
+	github.com/miekg/dns v1.1.64
 	github.com/opencoff/go-sieve v0.2.1
 	github.com/powerman/check v1.8.0
-	github.com/quic-go/quic-go v0.50.0
+	github.com/quic-go/quic-go v0.50.1
 	golang.org/x/crypto v0.36.0
 	golang.org/x/net v0.37.0
 	golang.org/x/sys v0.31.0
@@ -43,10 +43,10 @@ require (
 	github.com/smartystreets/goconvey v1.8.1 // indirect
 	go.uber.org/mock v0.5.0 // indirect
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
-	golang.org/x/mod v0.18.0 // indirect
+	golang.org/x/mod v0.23.0 // indirect
 	golang.org/x/sync v0.12.0 // indirect
 	golang.org/x/text v0.23.0 // indirect
-	golang.org/x/tools v0.22.0 // indirect
+	golang.org/x/tools v0.30.0 // indirect
 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	google.golang.org/grpc v1.56.3 // indirect
 	google.golang.org/protobuf v1.34.2 // indirect
diff --git a/go.sum b/go.sum
index 2df642bf..58dd76de 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
-github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -57,8 +57,8 @@ github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX
 github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
 github.com/lifenjoiner/dhcpdns v0.0.7 h1:VJM2aFWHU9V7M5v4UYYNaHhIHZkbdvSI6WGGpq6/TNQ=
 github.com/lifenjoiner/dhcpdns v0.0.7/go.mod h1:BixeaGeafYzDIuDCYIUbSOdi4m+TScpzI9cZGYgzgSk=
-github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
-github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
+github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ=
+github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
 github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
 github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
 github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
@@ -75,8 +75,8 @@ github.com/powerman/deepequal v0.1.0 h1:sVwtyTsBuYIvdbLR1O2wzRY63YgPqdGZmk/o80l+
 github.com/powerman/deepequal v0.1.0/go.mod h1:3k7aG/slufBhUANdN67o/UPg8i5YaiJ6FmibWX0cn04=
 github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
 github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
-github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo=
-github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
+github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
+github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
 github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
 github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
 github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
@@ -91,8 +91,8 @@ golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
 golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
-golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
-golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
+golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
 golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
@@ -106,8 +106,8 @@ golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
 golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
-golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
+golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 639e6c39..235496ee 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages.
 
 Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
 
-Documentation: https://godocs.io/github.com/BurntSushi/toml
+Documentation: https://pkg.go.dev/github.com/BurntSushi/toml
 
 See the [releases page](https://github.com/BurntSushi/toml/releases) for a
 changelog; this information is also in the git tag annotations (e.g. `git show
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index 7aaf462c..3fa516ca 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
 	return md.unify(primValue.undecoded, rvalue(v))
 }
 
+// markDecodedRecursive is a helper to mark any key under the given tmap as
+// decoded, recursing as needed
+func markDecodedRecursive(md *MetaData, tmap map[string]any) {
+	for key := range tmap {
+		md.decoded[md.context.add(key).String()] = struct{}{}
+		if tmap, ok := tmap[key].(map[string]any); ok {
+			md.context = append(md.context, key)
+			markDecodedRecursive(md, tmap)
+			md.context = md.context[0 : len(md.context)-1]
+		}
+	}
+}
+
 // unify performs a sort of type unification based on the structure of `rv`,
 // which is the client representation.
 //
@@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error {
 		if err != nil {
 			return md.parseErr(err)
 		}
+		// Assume the Unmarshaler decoded everything, so mark all keys under
+		// this table as decoded.
+		if tmap, ok := data.(map[string]any); ok {
+			markDecodedRecursive(md, tmap)
+		}
+		if aot, ok := data.([]map[string]any); ok {
+			for _, tmap := range aot {
+				markDecodedRecursive(md, tmap)
+			}
+		}
 		return nil
 	}
 	if v, ok := rvi.(encoding.TextUnmarshaler); ok {
@@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error {
 
 func (md *MetaData) parseErr(err error) error {
 	k := md.context.String()
+	d := string(md.data)
 	return ParseError{
-		LastKey:  k,
-		Position: md.keyInfo[k].pos,
-		Line:     md.keyInfo[k].pos.Line,
+		Message:  err.Error(),
 		err:      err,
-		input:    string(md.data),
+		LastKey:  k,
+		Position: md.keyInfo[k].pos.withCol(d),
+		Line:     md.keyInfo[k].pos.Line,
+		input:    d,
 	}
 }
 
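With markDecodedRecursive in place, keys consumed by a custom toml.Unmarshaler no longer show up in MetaData.Undecoded(). A minimal sketch of the behaviour under that assumption; the rawTable type and the [extra] table are purely illustrative:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    // rawTable swallows an entire TOML table via UnmarshalTOML.
    type rawTable map[string]any

    func (t *rawTable) UnmarshalTOML(v any) error {
    	*t = rawTable(v.(map[string]any))
    	return nil
    }

    func main() {
    	var cfg struct {
    		Extra rawTable `toml:"extra"`
    	}
    	md, err := toml.Decode("[extra]\nanswer = 42\n", &cfg)
    	if err != nil {
    		panic(err)
    	}
    	// With this change, "extra.answer" counts as decoded because the
    	// Unmarshaler consumed the whole table, so the slice below is empty.
    	fmt.Println(md.Undecoded())
    }
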
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index 73366c0d..ac196e7d 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
 
 	// Sort keys so that we have deterministic output. And write keys directly
 	// underneath this key first, before writing sub-structs or sub-maps.
-	var mapKeysDirect, mapKeysSub []string
+	var mapKeysDirect, mapKeysSub []reflect.Value
 	for _, mapKey := range rv.MapKeys() {
-		k := mapKey.String()
 		if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
-			mapKeysSub = append(mapKeysSub, k)
+			mapKeysSub = append(mapKeysSub, mapKey)
 		} else {
-			mapKeysDirect = append(mapKeysDirect, k)
+			mapKeysDirect = append(mapKeysDirect, mapKey)
 		}
 	}
 
-	var writeMapKeys = func(mapKeys []string, trailC bool) {
-		sort.Strings(mapKeys)
+	writeMapKeys := func(mapKeys []reflect.Value, trailC bool) {
+		sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() })
 		for i, mapKey := range mapKeys {
-			val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
+			val := eindirect(rv.MapIndex(mapKey))
 			if isNil(val) {
 				continue
 			}
 
 			if inline {
-				enc.writeKeyValue(Key{mapKey}, val, true)
+				enc.writeKeyValue(Key{mapKey.String()}, val, true)
 				if trailC || i != len(mapKeys)-1 {
 					enc.wf(", ")
 				}
 			} else {
-				enc.encode(key.add(mapKey), val)
+				enc.encode(key.add(mapKey.String()), val)
 			}
 		}
 	}
@@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
 	}
 }
 
-const is32Bit = (32 << (^uint(0) >> 63)) == 32
-
 func pointerTo(t reflect.Type) reflect.Type {
 	if t.Kind() == reflect.Ptr {
 		return pointerTo(t.Elem())
@@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
 
 			frv := eindirect(rv.Field(i))
 
-			if is32Bit {
-				// Copy so it works correct on 32bit archs; not clear why this
-				// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
-				// This also works fine on 64bit, but 32bit archs are somewhat
-				// rare and this is a wee bit faster.
-				copyStart := make([]int, len(start))
-				copy(copyStart, start)
-				start = copyStart
-			}
+			// Need to make a copy because ... ehm, I don't know why... I guess
+			// allocating a new array can cause it to fail(?)
+			//
+			// Done for: https://github.com/BurntSushi/toml/issues/430
+			// Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314
+			copyStart := make([]int, len(start))
+			copy(copyStart, start)
+			start = copyStart
 
 			// Treat anonymous struct fields with tag names as though they are
 			// not anonymous, like encoding/json does.
@@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
 	}
 	addFields(rt, rv, nil)
 
-	writeFields := func(fields [][]int) {
+	writeFields := func(fields [][]int, totalFields int) {
 		for _, fieldIndex := range fields {
 			fieldType := rt.FieldByIndex(fieldIndex)
 			fieldVal := rv.FieldByIndex(fieldIndex)
@@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
 
 			if inline {
 				enc.writeKeyValue(Key{keyName}, fieldVal, true)
-				if fieldIndex[0] != len(fields)-1 {
+				if fieldIndex[0] != totalFields-1 {
 					enc.wf(", ")
 				}
 			} else {
@@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
 	if inline {
 		enc.wf("{")
 	}
-	writeFields(fieldsDirect)
-	writeFields(fieldsSub)
+
+	l := len(fieldsDirect) + len(fieldsSub)
+	writeFields(fieldsDirect, l)
+	writeFields(fieldsSub, l)
 	if inline {
 		enc.wf("}")
 	}
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
index b45a3f45..b7077d3a 100644
--- a/vendor/github.com/BurntSushi/toml/error.go
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -67,21 +67,36 @@ type ParseError struct {
 // Position of an error.
 type Position struct {
 	Line  int // Line number, starting at 1.
+	Col   int // Error column, starting at 1.
 	Start int // Start of error, as byte offset starting at 0.
-	Len   int // Lenght in bytes.
+	Len   int // Length of the error in bytes.
+}
+
+func (p Position) withCol(tomlFile string) Position {
+	var (
+		pos   int
+		lines = strings.Split(tomlFile, "\n")
+	)
+	for i := range lines {
+		ll := len(lines[i]) + 1 // +1 for the removed newline
+		if pos+ll >= p.Start {
+			p.Col = p.Start - pos + 1
+			if p.Col < 1 { // Should never happen, but just in case.
+				p.Col = 1
+			}
+			break
+		}
+		pos += ll
+	}
+	return p
 }
 
 func (pe ParseError) Error() string {
-	msg := pe.Message
-	if msg == "" { // Error from errorf()
-		msg = pe.err.Error()
-	}
-
 	if pe.LastKey == "" {
-		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message)
 	}
 	return fmt.Sprintf("toml: line %d (last key %q): %s",
-		pe.Position.Line, pe.LastKey, msg)
+		pe.Position.Line, pe.LastKey, pe.Message)
 }
 
 // ErrorWithPosition returns the error with detailed location context.
@@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string {
 		return pe.Error()
 	}
 
-	var (
-		lines = strings.Split(pe.input, "\n")
-		col   = pe.column(lines)
-		b     = new(strings.Builder)
-	)
-
-	msg := pe.Message
-	if msg == "" {
-		msg = pe.err.Error()
-	}
-
 	// TODO: don't show control characters as literals? This may not show up
 	// well everywhere.
 
+	var (
+		lines = strings.Split(pe.input, "\n")
+		b     = new(strings.Builder)
+	)
 	if pe.Position.Len == 1 {
 		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
-			msg, pe.Position.Line, col+1)
+			pe.Message, pe.Position.Line, pe.Position.Col)
 	} else {
 		fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
-			msg, pe.Position.Line, col, col+pe.Position.Len)
+			pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1)
 	}
 	if pe.Position.Line > 2 {
 		fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
@@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string {
 	diff := len(expanded) - len(lines[pe.Position.Line-1])
 
 	fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
-	fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
+	fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len))
 	return b.String()
 }
 
@@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string {
 	return m
 }
 
-func (pe ParseError) column(lines []string) int {
-	var pos, col int
-	for i := range lines {
-		ll := len(lines[i]) + 1 // +1 for the removed newline
-		if pos+ll >= pe.Position.Start {
-			col = pe.Position.Start - pos
-			if col < 0 { // Should never happen, but just in case.
-				col = 0
-			}
-			break
-		}
-		pos += ll
-	}
-
-	return col
-}
-
 func expandTab(s string) string {
 	var (
 		b    strings.Builder
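Position now carries a 1-based Col computed by withCol, so callers can read the error column straight off a ParseError instead of recomputing it. A small sketch of what that might look like; the malformed input is arbitrary:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    func main() {
    	var cfg map[string]any
    	_, err := toml.Decode("bad = = 1\n", &cfg) // arbitrary malformed input
    	var pe toml.ParseError
    	if errors.As(err, &pe) {
    		// Col is new in this release and 1-based, like Line.
    		fmt.Println(pe.Position.Line, pe.Position.Col)
    		fmt.Println(pe.ErrorWithPosition())
    	}
    }
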
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index a1016d98..1c3b4770 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
 func (lx *lexer) errorf(format string, values ...any) stateFn {
 	if lx.atEOF {
 		pos := lx.getPos()
-		pos.Line--
+		if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' {
+			pos.Line--
+		}
 		pos.Len = 1
 		pos.Start = lx.pos - 1
 		lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
@@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn {
 		lx.emit(itemKeyEnd)
 		return lexSkip(lx, lexValue)
 	default:
+		if r == '\n' {
+			return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r))
+		}
 		return lx.errorf("expected '.' or '=', but got %q instead", r)
 	}
 }
@@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn {
 	if r == eof {
 		return lx.errorf("unexpected EOF; expected value")
 	}
+	if r == '\n' {
+		return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r))
+	}
 	return lx.errorf("expected value but found %q instead", r)
 }
 
@@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
 	case 'x':
 		r = lx.peek()
 		if !isHex(r) {
-			lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
+			lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
 		}
 		return lexHexInteger
 	}
@@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' }
 func isOctal(r rune) bool  { return r >= '0' && r <= '7' }
 func isHex(r rune) bool    { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
 func isBareKeyChar(r rune, tomlNext bool) bool {
-	if tomlNext {
-		return (r >= 'A' && r <= 'Z') ||
-			(r >= 'a' && r <= 'z') ||
-			(r >= '0' && r <= '9') ||
-			r == '_' || r == '-' ||
-			r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
-			(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
-			(r >= 0x037f && r <= 0x1fff) ||
-			(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
-			(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
-			(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
-			(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
-			(r >= 0x10000 && r <= 0xeffff)
-	}
-
-	return (r >= 'A' && r <= 'Z') ||
-		(r >= 'a' && r <= 'z') ||
-		(r >= '0' && r <= '9') ||
-		r == '_' || r == '-'
+	return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
+		(r >= '0' && r <= '9') || r == '_' || r == '-'
 }
diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go
index e6145373..0d337026 100644
--- a/vendor/github.com/BurntSushi/toml/meta.go
+++ b/vendor/github.com/BurntSushi/toml/meta.go
@@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string {
 
 // Like append(), but only increase the cap by 1.
 func (k Key) add(piece string) Key {
-	if cap(k) > len(k) {
-		return append(k, piece)
-	}
 	newKey := make(Key, len(k)+1)
 	copy(newKey, k)
 	newKey[len(k)] = piece
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index 11ac3108..e3ea8a9a 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) {
 	// it anyway.
 	if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
 		data = data[2:]
-		//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
 	} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
 		data = data[3:]
 	}
@@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) {
 	if i := strings.IndexRune(data[:ex], 0); i > -1 {
 		return nil, ParseError{
 			Message:  "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
-			Position: Position{Line: 1, Start: i, Len: 1},
+			Position: Position{Line: 1, Col: 1, Start: i, Len: 1},
 			Line:     1,
 			input:    data,
 		}
@@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) {
 
 func (p *parser) panicErr(it item, err error) {
 	panic(ParseError{
+		Message:  err.Error(),
 		err:      err,
-		Position: it.pos,
+		Position: it.pos.withCol(p.lx.input),
 		Line:     it.pos.Len,
 		LastKey:  p.current(),
 	})
@@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) {
 func (p *parser) panicItemf(it item, format string, v ...any) {
 	panic(ParseError{
 		Message:  fmt.Sprintf(format, v...),
-		Position: it.pos,
+		Position: it.pos.withCol(p.lx.input),
 		Line:     it.pos.Len,
 		LastKey:  p.current(),
 	})
@@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) {
 func (p *parser) panicf(format string, v ...any) {
 	panic(ParseError{
 		Message:  fmt.Sprintf(format, v...),
-		Position: p.pos,
+		Position: p.pos.withCol(p.lx.input),
 		Line:     p.pos.Line,
 		LastKey:  p.current(),
 	})
@@ -123,10 +123,11 @@ func (p *parser) next() item {
 	if it.typ == itemError {
 		if it.err != nil {
 			panic(ParseError{
-				Position: it.pos,
+				Message:  it.err.Error(),
+				err:      it.err,
+				Position: it.pos.withCol(p.lx.input),
 				Line:     it.pos.Line,
 				LastKey:  p.current(),
-				err:      it.err,
 			})
 		}
 
@@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool {
 			}
 		}
 
-		// isHexis a superset of all the permissable characters surrounding an
+		// isHex is a superset of all the permissible characters surrounding an
 		// underscore.
 		accept = isHex(r)
 	}
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index 9831c37b..0e42858a 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -86,7 +86,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:
 * https://linuxcontainers.org/incus/
 * https://ifconfig.es
 * https://github.com/zmap/zdns
-
+* https://framagit.org/bortzmeyer/check-soa
 
 Send pull request if you want to be listed here.
 
@@ -193,6 +193,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
 * 9460 - Service Binding and Parameter Specification via the DNS
 * 9461 - Service Binding Mapping for DNS Servers
 * 9462 - Discovery of Designated Resolvers
+* 9460 - SVCB and HTTPS Records
+* 9606 - DNS Resolver Information
+* Draft - Compact Denial of Existence in DNSSEC
 
 ## Loosely Based Upon
 
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index 0447fd82..91793b90 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -27,6 +27,7 @@ const (
 	EDNS0LOCALSTART   = 0xFDE9  // Beginning of range reserved for local/experimental use (See RFC 6891)
 	EDNS0LOCALEND     = 0xFFFE  // End of range reserved for local/experimental use (See RFC 6891)
 	_DO               = 1 << 15 // DNSSEC OK
+	_CO               = 1 << 14 // Compact Answers OK
 )
 
 // makeDataOpt is used to unpack the EDNS0 option(s) from a message.
@@ -75,7 +76,11 @@ type OPT struct {
 func (rr *OPT) String() string {
 	s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
 	if rr.Do() {
-		s += "flags: do; "
+		if rr.Co() {
+			s += "flags: do, co; "
+		} else {
+			s += "flags: do; "
+		}
 	} else {
 		s += "flags:; "
 	}
@@ -195,14 +200,34 @@ func (rr *OPT) SetDo(do ...bool) {
 	}
 }
 
-// Z returns the Z part of the OPT RR as a uint16 with only the 15 least significant bits used.
-func (rr *OPT) Z() uint16 {
-	return uint16(rr.Hdr.Ttl & 0x7FFF)
+// Co returns the value of the CO (Compact Answers OK) bit.
+func (rr *OPT) Co() bool {
+	return rr.Hdr.Ttl&_CO == _CO
 }
 
-// SetZ sets the Z part of the OPT RR, note only the 15 least significant bits of z are used.
+// SetCo sets the CO (Compact Answers OK) bit.
+// If we pass an argument, set the CO bit to that value.
+// It is possible to pass 2 or more arguments, but they will be ignored.
+func (rr *OPT) SetCo(co ...bool) {
+	if len(co) == 1 {
+		if co[0] {
+			rr.Hdr.Ttl |= _CO
+		} else {
+			rr.Hdr.Ttl &^= _CO
+		}
+	} else {
+		rr.Hdr.Ttl |= _CO
+	}
+}
+
+// Z returns the Z part of the OPT RR as a uint16 with only the 14 least significant bits used.
+func (rr *OPT) Z() uint16 {
+	return uint16(rr.Hdr.Ttl & 0x3FFF)
+}
+
+// SetZ sets the Z part of the OPT RR, note only the 14 least significant bits of z are used.
 func (rr *OPT) SetZ(z uint16) {
-	rr.Hdr.Ttl = rr.Hdr.Ttl&^0x7FFF | uint32(z&0x7FFF)
+	rr.Hdr.Ttl = rr.Hdr.Ttl&^0x3FFF | uint32(z&0x3FFF)
 }
 
 // EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
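A hedged sketch of setting the new CO (Compact Answers OK) flag alongside the existing DO flag on an OPT pseudo-record; note that Z() now reports only the remaining 14 bits:

    package main

    import (
    	"fmt"

    	"github.com/miekg/dns"
    )

    func main() {
    	opt := new(dns.OPT)
    	opt.Hdr.Name = "."
    	opt.Hdr.Rrtype = dns.TypeOPT
    	opt.SetUDPSize(1232)

    	opt.SetDo() // DNSSEC OK, bit 15 of the TTL field
    	opt.SetCo() // Compact Answers OK, bit 14, new in this release

    	// Z() now masks with 0x3FFF, covering only the remaining 14 bits.
    	fmt.Println(opt.Do(), opt.Co(), opt.Z())
    }
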
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
index c1a76995..ac885f66 100644
--- a/vendor/github.com/miekg/dns/scan_rr.go
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -1620,6 +1620,16 @@ func (rr *NINFO) parse(c *zlexer, o string) *ParseError {
 	return nil
 }
 
+// Uses the same format as TXT
+func (rr *RESINFO) parse(c *zlexer, o string) *ParseError {
+	s, e := endingToTxtSlice(c, "bad RESINFO Resinfo")
+	if e != nil {
+		return e
+	}
+	rr.Txt = s
+	return nil
+}
+
 func (rr *URI) parse(c *zlexer, o string) *ParseError {
 	l, _ := c.Next()
 	i, e := strconv.ParseUint(l.token, 10, 16)
diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go
index 310c7d11..d1baeea9 100644
--- a/vendor/github.com/miekg/dns/svcb.go
+++ b/vendor/github.com/miekg/dns/svcb.go
@@ -214,11 +214,7 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue {
 	}
 }
 
-// SVCB RR. See RFC xxxx (https://tools.ietf.org/html/draft-ietf-dnsop-svcb-https-08).
-//
-// NOTE: The HTTPS/SVCB RFCs are in the draft stage.
-// The API, including constants and types related to SVCBKeyValues, may
-// change in future versions in accordance with the latest drafts.
+// SVCB RR. See RFC 9460.
 type SVCB struct {
 	Hdr      RR_Header
 	Priority uint16         // If zero, Value must be empty or discarded by the user of this library
@@ -226,12 +222,8 @@ type SVCB struct {
 	Value    []SVCBKeyValue `dns:"pairs"`
 }
 
-// HTTPS RR. Everything valid for SVCB applies to HTTPS as well.
+// HTTPS RR. See RFC 9460. Everything valid for SVCB applies to HTTPS as well.
 // Except that the HTTPS record is intended for use with the HTTP and HTTPS protocols.
-//
-// NOTE: The HTTPS/SVCB RFCs are in the draft stage.
-// The API, including constants and types related to SVCBKeyValues, may
-// change in future versions in accordance with the latest drafts.
 type HTTPS struct {
 	SVCB
 }
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
index 7a34c14c..7e945b51 100644
--- a/vendor/github.com/miekg/dns/types.go
+++ b/vendor/github.com/miekg/dns/types.go
@@ -101,6 +101,7 @@ const (
 	TypeCAA        uint16 = 257
 	TypeAVC        uint16 = 258
 	TypeAMTRELAY   uint16 = 260
+	TypeRESINFO    uint16 = 261
 
 	TypeTKEY uint16 = 249
 	TypeTSIG uint16 = 250
@@ -1508,6 +1509,15 @@ func (rr *ZONEMD) String() string {
 		" " + rr.Digest
 }
 
+// RESINFO RR. See RFC 9606.
+
+type RESINFO struct {
+	Hdr RR_Header
+	Txt []string `dns:"txt"`
+}
+
+func (rr *RESINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
 // APL RR. See RFC 3123.
 type APL struct {
 	Hdr      RR_Header
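The new RESINFO type (RFC 9606) is packed and parsed like TXT. A possible query sketch; the resolver name and address below are placeholders:

    package main

    import (
    	"fmt"

    	"github.com/miekg/dns"
    )

    func main() {
    	m := new(dns.Msg)
    	// Per RFC 9606 the query name is the resolver's own domain name;
    	// "resolver.example." and "192.0.2.1:53" are placeholders.
    	m.SetQuestion("resolver.example.", dns.TypeRESINFO)

    	c := new(dns.Client)
    	in, _, err := c.Exchange(m, "192.0.2.1:53")
    	if err != nil {
    		panic(err)
    	}
    	for _, rr := range in.Answer {
    		if ri, ok := rr.(*dns.RESINFO); ok {
    			fmt.Println(ri.Txt) // TXT-like key=value strings
    		}
    	}
    }
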
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index e290e3df..384c3eb1 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
 import "fmt"
 
 // Version is current version of this library.
-var Version = v{1, 1, 63}
+var Version = v{1, 1, 64}
 
 // v holds the version of this library.
 type v struct {
diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go
index 330c0539..ebd9e029 100644
--- a/vendor/github.com/miekg/dns/zduplicate.go
+++ b/vendor/github.com/miekg/dns/zduplicate.go
@@ -957,6 +957,23 @@ func (r1 *PX) isDuplicate(_r2 RR) bool {
 	return true
 }
 
+func (r1 *RESINFO) isDuplicate(_r2 RR) bool {
+	r2, ok := _r2.(*RESINFO)
+	if !ok {
+		return false
+	}
+	_ = r2
+	if len(r1.Txt) != len(r2.Txt) {
+		return false
+	}
+	for i := 0; i < len(r1.Txt); i++ {
+		if r1.Txt[i] != r2.Txt[i] {
+			return false
+		}
+	}
+	return true
+}
+
 func (r1 *RFC3597) isDuplicate(_r2 RR) bool {
 	r2, ok := _r2.(*RFC3597)
 	if !ok {
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
index 5a6cf4c6..cc09810f 100644
--- a/vendor/github.com/miekg/dns/zmsg.go
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -762,6 +762,14 @@ func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress boo
 	return off, nil
 }
 
+func (rr *RESINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+	off, err = packStringTxt(rr.Txt, msg, off)
+	if err != nil {
+		return off, err
+	}
+	return off, nil
+}
+
 func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
 	off, err = packStringHex(rr.Rdata, msg, off)
 	if err != nil {
@@ -2353,6 +2361,17 @@ func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) {
 	return off, nil
 }
 
+func (rr *RESINFO) unpack(msg []byte, off int) (off1 int, err error) {
+	rdStart := off
+	_ = rdStart
+
+	rr.Txt, off, err = unpackStringTxt(msg, off)
+	if err != nil {
+		return off, err
+	}
+	return off, nil
+}
+
 func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) {
 	rdStart := off
 	_ = rdStart
diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
index 11f13ecf..cea79ae7 100644
--- a/vendor/github.com/miekg/dns/ztypes.go
+++ b/vendor/github.com/miekg/dns/ztypes.go
@@ -66,6 +66,7 @@ var TypeToRR = map[uint16]func() RR{
 	TypeOPT:        func() RR { return new(OPT) },
 	TypePTR:        func() RR { return new(PTR) },
 	TypePX:         func() RR { return new(PX) },
+	TypeRESINFO:    func() RR { return new(RESINFO) },
 	TypeRKEY:       func() RR { return new(RKEY) },
 	TypeRP:         func() RR { return new(RP) },
 	TypeRRSIG:      func() RR { return new(RRSIG) },
@@ -154,6 +155,7 @@ var TypeToString = map[uint16]string{
 	TypeOPT:        "OPT",
 	TypePTR:        "PTR",
 	TypePX:         "PX",
+	TypeRESINFO:    "RESINFO",
 	TypeRKEY:       "RKEY",
 	TypeRP:         "RP",
 	TypeRRSIG:      "RRSIG",
@@ -238,6 +240,7 @@ func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
 func (rr *OPT) Header() *RR_Header        { return &rr.Hdr }
 func (rr *PTR) Header() *RR_Header        { return &rr.Hdr }
 func (rr *PX) Header() *RR_Header         { return &rr.Hdr }
+func (rr *RESINFO) Header() *RR_Header    { return &rr.Hdr }
 func (rr *RFC3597) Header() *RR_Header    { return &rr.Hdr }
 func (rr *RKEY) Header() *RR_Header       { return &rr.Hdr }
 func (rr *RP) Header() *RR_Header         { return &rr.Hdr }
@@ -622,6 +625,14 @@ func (rr *PX) len(off int, compression map[string]struct{}) int {
 	return l
 }
 
+func (rr *RESINFO) len(off int, compression map[string]struct{}) int {
+	l := rr.Hdr.len(off, compression)
+	for _, x := range rr.Txt {
+		l += len(x) + 1
+	}
+	return l
+}
+
 func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
 	l := rr.Hdr.len(off, compression)
 	l += len(rr.Rdata) / 2
@@ -1148,6 +1159,10 @@ func (rr *PX) copy() RR {
 	}
 }
 
+func (rr *RESINFO) copy() RR {
+	return &RESINFO{rr.Hdr, cloneSlice(rr.Txt)}
+}
+
 func (rr *RFC3597) copy() RR {
 	return &RFC3597{rr.Hdr, rr.Rdata}
 }
diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
index 7c3cf892..83d2736a 100644
--- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
+++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go
@@ -460,10 +460,10 @@ func (h *sentPacketHandler) detectAndRemoveAckedPackets(ack *wire.AckFrame, encL
 		}
 		if p.isPathProbePacket {
 			probePacket := pnSpace.history.RemovePathProbe(p.PacketNumber)
-			if probePacket == nil {
-				panic(fmt.Sprintf("path probe doesn't exist: %d", p.PacketNumber))
+			// the probe packet might already have been declared lost
+			if probePacket != nil {
+				h.ackedPackets = append(h.ackedPackets, probePacket)
 			}
-			h.ackedPackets = append(h.ackedPackets, probePacket)
 			continue
 		}
 		h.ackedPackets = append(h.ackedPackets, p)
@@ -658,7 +658,6 @@ func (h *sentPacketHandler) detectLostPathProbes(now time.Time) {
 		for _, f := range p.Frames {
 			f.Handler.OnLost(f.Frame)
 		}
-		h.appDataPackets.history.Remove(p.PacketNumber)
 		h.appDataPackets.history.RemovePathProbe(p.PacketNumber)
 	}
 }
diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/mod/LICENSE
+++ b/vendor/golang.org/x/mod/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
 
diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go
index 22056825..2d748680 100644
--- a/vendor/golang.org/x/mod/modfile/read.go
+++ b/vendor/golang.org/x/mod/modfile/read.go
@@ -226,8 +226,9 @@ func (x *FileSyntax) Cleanup() {
 				continue
 			}
 			if ww == 1 && len(stmt.RParen.Comments.Before) == 0 {
-				// Collapse block into single line.
-				line := &Line{
+				// Collapse block into single line but keep the Line reference used by the
+				// parsed File structure.
+				*stmt.Line[0] = Line{
 					Comments: Comments{
 						Before: commentsAdd(stmt.Before, stmt.Line[0].Before),
 						Suffix: commentsAdd(stmt.Line[0].Suffix, stmt.Suffix),
@@ -235,7 +236,7 @@ func (x *FileSyntax) Cleanup() {
 					},
 					Token: stringsAdd(stmt.Token, stmt.Line[0].Token),
 				}
-				x.Stmt[w] = line
+				x.Stmt[w] = stmt.Line[0]
 				w++
 				continue
 			}
@@ -876,6 +877,11 @@ func (in *input) parseLineBlock(start Position, token []string, lparen token) *L
 			in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune))
 		case ')':
 			rparen := in.lex()
+			// Don't preserve blank lines (denoted by a single empty comment, added above)
+			// at the end of the block.
+			if len(comments) == 1 && comments[0] == (Comment{}) {
+				comments = nil
+			}
 			x.RParen.Before = comments
 			x.RParen.Pos = rparen.pos
 			if !in.peek().isEOL() {
diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go
index 66dcaf98..3e4a1d0a 100644
--- a/vendor/golang.org/x/mod/modfile/rule.go
+++ b/vendor/golang.org/x/mod/modfile/rule.go
@@ -43,6 +43,7 @@ type File struct {
 	Exclude   []*Exclude
 	Replace   []*Replace
 	Retract   []*Retract
+	Tool      []*Tool
 
 	Syntax *FileSyntax
 }
@@ -93,6 +94,12 @@ type Retract struct {
 	Syntax    *Line
 }
 
+// A Tool is a single tool statement.
+type Tool struct {
+	Path   string
+	Syntax *Line
+}
+
 // A VersionInterval represents a range of versions with upper and lower bounds.
 // Intervals are closed: both bounds are included. When Low is equal to High,
 // the interval may refer to a single version ('v1.2.3') or an interval
@@ -297,7 +304,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse
 					})
 				}
 				continue
-			case "module", "godebug", "require", "exclude", "replace", "retract":
+			case "module", "godebug", "require", "exclude", "replace", "retract", "tool":
 				for _, l := range x.Line {
 					f.add(&errs, x, l, x.Token[0], l.Token, fix, strict)
 				}
@@ -509,6 +516,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
 			Syntax:          line,
 		}
 		f.Retract = append(f.Retract, retract)
+
+	case "tool":
+		if len(args) != 1 {
+			errorf("tool directive expects exactly one argument")
+			return
+		}
+		s, err := parseString(&args[0])
+		if err != nil {
+			errorf("invalid quoted string: %v", err)
+			return
+		}
+		f.Tool = append(f.Tool, &Tool{
+			Path:   s,
+			Syntax: line,
+		})
 	}
 }
 
@@ -1567,6 +1589,36 @@ func (f *File) DropRetract(vi VersionInterval) error {
 	return nil
 }
 
+// AddTool adds a new tool directive with the given path.
+// It does nothing if the tool line already exists.
+func (f *File) AddTool(path string) error {
+	for _, t := range f.Tool {
+		if t.Path == path {
+			return nil
+		}
+	}
+
+	f.Tool = append(f.Tool, &Tool{
+		Path:   path,
+		Syntax: f.Syntax.addLine(nil, "tool", path),
+	})
+
+	f.SortBlocks()
+	return nil
+}
+
+// DropTool removes a tool directive with the given path.
+// It does nothing if no such tool directive exists.
+func (f *File) DropTool(path string) error {
+	for _, t := range f.Tool {
+		if t.Path == path {
+			t.Syntax.markRemoved()
+			*t = Tool{}
+		}
+	}
+	return nil
+}
+
 func (f *File) SortBlocks() {
 	f.removeDups() // otherwise sorting is unsafe
 
@@ -1593,9 +1645,9 @@ func (f *File) SortBlocks() {
 	}
 }
 
-// removeDups removes duplicate exclude and replace directives.
+// removeDups removes duplicate exclude, replace and tool directives.
 //
-// Earlier exclude directives take priority.
+// Earlier exclude and tool directives take priority.
 //
 // Later replace directives take priority.
 //
@@ -1605,10 +1657,10 @@ func (f *File) SortBlocks() {
 // retract directives are not de-duplicated since comments are
 // meaningful, and versions may be retracted multiple times.
 func (f *File) removeDups() {
-	removeDups(f.Syntax, &f.Exclude, &f.Replace)
+	removeDups(f.Syntax, &f.Exclude, &f.Replace, &f.Tool)
 }
 
-func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
+func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace, tool *[]*Tool) {
 	kill := make(map[*Line]bool)
 
 	// Remove duplicate excludes.
@@ -1649,6 +1701,24 @@ func removeDups(syntax *FileSyntax, exclude *[]*Exclude, replace *[]*Replace) {
 	}
 	*replace = repl
 
+	if tool != nil {
+		haveTool := make(map[string]bool)
+		for _, t := range *tool {
+			if haveTool[t.Path] {
+				kill[t.Syntax] = true
+				continue
+			}
+			haveTool[t.Path] = true
+		}
+		var newTool []*Tool
+		for _, t := range *tool {
+			if !kill[t.Syntax] {
+				newTool = append(newTool, t)
+			}
+		}
+		*tool = newTool
+	}
+
 	// Duplicate require and retract directives are not removed.
 
 	// Drop killed statements from the syntax tree.
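A short sketch of the new tool directive API in modfile; the tool path is just an example. AddTool skips duplicates and re-sorts blocks, and removeDups now also de-duplicates tool lines:

    package main

    import (
    	"fmt"

    	"golang.org/x/mod/modfile"
    )

    func main() {
    	src := []byte("module example.com/m\n\ngo 1.24\n")
    	f, err := modfile.Parse("go.mod", src, nil)
    	if err != nil {
    		panic(err)
    	}

    	// AddTool is a no-op if an identical tool line already exists.
    	if err := f.AddTool("golang.org/x/tools/cmd/stringer"); err != nil {
    		panic(err)
    	}

    	out, err := f.Format()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(out))
    }
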
diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go
index 8f54897c..5387d0c2 100644
--- a/vendor/golang.org/x/mod/modfile/work.go
+++ b/vendor/golang.org/x/mod/modfile/work.go
@@ -331,5 +331,5 @@ func (f *WorkFile) SortBlocks() {
 // retract directives are not de-duplicated since comments are
 // meaningful, and versions may be retracted multiple times.
 func (f *WorkFile) removeDups() {
-	removeDups(f.Syntax, nil, &f.Replace)
+	removeDups(f.Syntax, nil, &f.Replace, nil)
 }
diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go
index cac1a899..2a364b22 100644
--- a/vendor/golang.org/x/mod/module/module.go
+++ b/vendor/golang.org/x/mod/module/module.go
@@ -506,7 +506,6 @@ var badWindowsNames = []string{
 	"PRN",
 	"AUX",
 	"NUL",
-	"COM0",
 	"COM1",
 	"COM2",
 	"COM3",
@@ -516,7 +515,6 @@ var badWindowsNames = []string{
 	"COM7",
 	"COM8",
 	"COM9",
-	"LPT0",
 	"LPT1",
 	"LPT2",
 	"LPT3",
diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/tools/LICENSE
+++ b/vendor/golang.org/x/tools/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google Inc. nor the names of its
+   * Neither the name of Google LLC nor the names of its
 contributors may be used to endorse or promote products derived from
 this software without specific prior written permission.
 
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
index 2c4c4e23..6e34df46 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
 
 			// Does augmented child strictly contain [start, end)?
 			if augPos <= start && end <= augEnd {
-				_, isToken := child.(tokenNode)
-				return isToken || visit(child)
+				if is[tokenNode](child) {
+					return true
+				}
+
+				// childrenOf elides the FuncType node beneath FuncDecl.
+				// Add it back here for TypeParams, Params, Results,
+				// all FieldLists). But we don't add it back for the "func" token
+				// even though it is in the tree at FuncDecl.Type.Func.
+				if decl, ok := node.(*ast.FuncDecl); ok {
+					if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv {
+						path = append(path, decl.Type)
+					}
+				}
+
+				return visit(child)
 			}
 
 			// Does [start, end) overlap multiple children?
@@ -313,6 +326,8 @@ func childrenOf(n ast.Node) []ast.Node {
 		//
 		// As a workaround, we inline the case for FuncType
 		// here and order things correctly.
+		// We also need to insert the elided FuncType just
+		// before the 'visit' recursion.
 		//
 		children = nil // discard ast.Walk(FuncDecl) info subtrees
 		children = append(children, tok(n.Type.Func, len("func")))
@@ -632,3 +647,8 @@ func NodeDescription(n ast.Node) string {
 	}
 	panic(fmt.Sprintf("unexpected node type: %T", n))
 }
+
+func is[T any](x any) bool {
+	_, ok := x.(T)
+	return ok
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
index 18d1adb0..a6b5ed0a 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
@@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r
 }
 
 // UsesImport reports whether a given import is used.
+// The provided File must have been parsed with syntactic object resolution
+// (not using go/parser.SkipObjectResolution).
 func UsesImport(f *ast.File, path string) (used bool) {
+	if f.Scope == nil {
+		panic("file f was not parsed with syntactic object resolution")
+	}
 	spec := importSpec(f, path)
 	if spec == nil {
 		return
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go
index 919d5305..ca71e3e1 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/util.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go
@@ -7,12 +7,5 @@ package astutil
 import "go/ast"
 
 // Unparen returns e with any enclosing parentheses stripped.
-func Unparen(e ast.Expr) ast.Expr {
-	for {
-		p, ok := e.(*ast.ParenExpr)
-		if !ok {
-			return e
-		}
-		e = p.X
-	}
-}
+// Deprecated: use [ast.Unparen].
+func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) }
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index 1fc1de0b..0d5050fe 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -36,6 +36,9 @@ package inspector
 
 import (
 	"go/ast"
+	_ "unsafe"
+
+	"golang.org/x/tools/internal/astutil/edge"
 )
 
 // An Inspector provides methods for inspecting
@@ -44,6 +47,24 @@ type Inspector struct {
 	events []event
 }
 
+//go:linkname events
+func events(in *Inspector) []event { return in.events }
+
+func packEdgeKindAndIndex(ek edge.Kind, index int) int32 {
+	return int32(uint32(index+1)<<7 | uint32(ek))
+}
+
+// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within
+// an []ast.Node slice) from the parent field of a pop event.
+//
+//go:linkname unpackEdgeKindAndIndex
+func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) {
+	// The "parent" field of a pop node holds the
+	// edge Kind in the lower 7 bits and the index+1
+	// in the upper 25.
+	return edge.Kind(x & 0x7f), int(x>>7) - 1
+}
+
 // New returns an Inspector for the specified syntax trees.
 func New(files []*ast.File) *Inspector {
 	return &Inspector{traverse(files)}
@@ -52,9 +73,10 @@ func New(files []*ast.File) *Inspector {
 // An event represents a push or a pop
 // of an ast.Node during a traversal.
 type event struct {
-	node  ast.Node
-	typ   uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
-	index int    // index of corresponding push or pop event
+	node   ast.Node
+	typ    uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
+	index  int32  // index of corresponding push or pop event
+	parent int32  // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only)
 }
 
 // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
@@ -73,8 +95,17 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 	// check, Preorder is almost twice as fast as Nodes. The two
 	// features seem to contribute similar slowdowns (~1.4x each).
 
+	// This function is equivalent to the PreorderSeq call below,
+	// but to avoid the additional dynamic call (which adds 13-35%
+	// to the benchmarks), we expand it out.
+	//
+	// in.PreorderSeq(types...)(func(n ast.Node) bool {
+	// 	f(n)
+	// 	return true
+	// })
+
 	mask := maskOf(types)
-	for i := 0; i < len(in.events); {
+	for i := int32(0); i < int32(len(in.events)); {
 		ev := in.events[i]
 		if ev.index > i {
 			// push
@@ -104,7 +135,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 // matches an element of the types slice.
 func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
 	mask := maskOf(types)
-	for i := 0; i < len(in.events); {
+	for i := int32(0); i < int32(len(in.events)); {
 		ev := in.events[i]
 		if ev.index > i {
 			// push
@@ -138,7 +169,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc
 func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
 	mask := maskOf(types)
 	var stack []ast.Node
-	for i := 0; i < len(in.events); {
+	for i := int32(0); i < int32(len(in.events)); {
 		ev := in.events[i]
 		if ev.index > i {
 			// push
@@ -171,50 +202,83 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s
 // traverse builds the table of events representing a traversal.
 func traverse(files []*ast.File) []event {
 	// Preallocate approximate number of events
-	// based on source file extent.
+	// based on source file extent of the declarations.
+	// (We use End-Pos not FileStart-FileEnd to neglect
+	// the effect of long doc comments.)
 	// This makes traverse faster by 4x (!).
 	var extent int
 	for _, f := range files {
 		extent += int(f.End() - f.Pos())
 	}
 	// This estimate is based on the net/http package.
-	capacity := extent * 33 / 100
-	if capacity > 1e6 {
-		capacity = 1e6 // impose some reasonable maximum
+	capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M)
+
+	v := &visitor{
+		events: make([]event, 0, capacity),
+		stack:  []item{{index: -1}}, // include an extra event so file nodes have a parent
 	}
-	events := make([]event, 0, capacity)
-
-	var stack []event
-	stack = append(stack, event{}) // include an extra event so file nodes have a parent
-	for _, f := range files {
-		ast.Inspect(f, func(n ast.Node) bool {
-			if n != nil {
-				// push
-				ev := event{
-					node:  n,
-					typ:   0,           // temporarily used to accumulate type bits of subtree
-					index: len(events), // push event temporarily holds own index
-				}
-				stack = append(stack, ev)
-				events = append(events, ev)
-			} else {
-				// pop
-				top := len(stack) - 1
-				ev := stack[top]
-				typ := typeOf(ev.node)
-				push := ev.index
-				parent := top - 1
-
-				events[push].typ = typ            // set type of push
-				stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs.
-				events[push].index = len(events)  // make push refer to pop
-
-				stack = stack[:top]
-				events = append(events, ev)
-			}
-			return true
-		})
+	for _, file := range files {
+		walk(v, edge.Invalid, -1, file)
 	}
-
-	return events
+	return v.events
+}
+
+type visitor struct {
+	events []event
+	stack  []item
+}
+
+type item struct {
+	index            int32  // index of current node's push event
+	parentIndex      int32  // index of parent node's push event
+	typAccum         uint64 // accumulated type bits of current node's descendents
+	edgeKindAndIndex int32  // edge.Kind and index, bit packed
+}
+
+func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) {
+	var (
+		index       = int32(len(v.events))
+		parentIndex = v.stack[len(v.stack)-1].index
+	)
+	v.events = append(v.events, event{
+		node:   node,
+		parent: parentIndex,
+		typ:    typeOf(node),
+		index:  0, // (pop index is set later by visitor.pop)
+	})
+	v.stack = append(v.stack, item{
+		index:            index,
+		parentIndex:      parentIndex,
+		edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex),
+	})
+
+	// 2B nodes ought to be enough for anyone!
+	if int32(len(v.events)) < 0 {
+		panic("event index exceeded int32")
+	}
+
+	// 32M elements in an []ast.Node ought to be enough for anyone!
+	if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex {
+		panic("Node slice index exceeded uint25")
+	}
+}
+
+func (v *visitor) pop(node ast.Node) {
+	top := len(v.stack) - 1
+	current := v.stack[top]
+
+	push := &v.events[current.index]
+	parent := &v.stack[top-1]
+
+	push.index = int32(len(v.events))              // make push event refer to pop
+	parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent
+
+	v.stack = v.stack[:top]
+
+	v.events = append(v.events, event{
+		node:   node,
+		typ:    current.typAccum,
+		index:  current.index,
+		parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex]
+	})
 }
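To illustrate the packing used for the pop event's parent field (edge kind in the low 7 bits, slice index+1 in the high 25 bits of an int32), a standalone round trip with made-up values:

    package main

    import "fmt"

    func main() {
    	// Mirror of the inspector's packing: edge kind in the low 7 bits,
    	// slice index+1 in the high 25 bits of an int32.
    	kind, index := int32(5), int32(3) // made-up edge kind and slice index
    	packed := (index+1)<<7 | kind     // (3+1)<<7 | 5 == 517

    	fmt.Println(packed&0x7f, (packed>>7)-1) // prints: 5 3
    }
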
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go
new file mode 100644
index 00000000..c576dc70
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.23
+
+package inspector
+
+import (
+	"go/ast"
+	"iter"
+)
+
+// PreorderSeq returns an iterator that visits all the
+// nodes of the files supplied to New in depth-first order.
+// It visits each node n before n's children.
+// The complete traversal sequence is determined by ast.Inspect.
+//
+// The types argument, if non-empty, enables type-based
+// filtering of events: only nodes whose type matches an
+// element of the types slice are included in the sequence.
+func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] {
+
+	// This implementation is identical to Preorder,
+	// except that it supports breaking out of the loop.
+
+	return func(yield func(ast.Node) bool) {
+		mask := maskOf(types)
+		for i := int32(0); i < int32(len(in.events)); {
+			ev := in.events[i]
+			if ev.index > i {
+				// push
+				if ev.typ&mask != 0 {
+					if !yield(ev.node) {
+						break
+					}
+				}
+				pop := ev.index
+				if in.events[pop].typ&mask == 0 {
+					// Subtrees do not contain types: skip them and pop.
+					i = pop + 1
+					continue
+				}
+			}
+			i++
+		}
+	}
+}
+
+// All[N] returns an iterator over all the nodes of type N.
+// N must be a pointer-to-struct type that implements ast.Node.
+//
+// Example:
+//
+//	for call := range All[*ast.CallExpr](in) { ... }
+func All[N interface {
+	*S
+	ast.Node
+}, S any](in *Inspector) iter.Seq[N] {
+
+	// To avoid additional dynamic call overheads,
+	// we duplicate rather than call the logic of PreorderSeq.
+
+	mask := typeOf((N)(nil))
+	return func(yield func(N) bool) {
+		for i := int32(0); i < int32(len(in.events)); {
+			ev := in.events[i]
+			if ev.index > i {
+				// push
+				if ev.typ&mask != 0 {
+					if !yield(ev.node.(N)) {
+						break
+					}
+				}
+				pop := ev.index
+				if in.events[pop].typ&mask == 0 {
+					// Subtrees do not contain types: skip them and pop.
+					i = pop + 1
+					continue
+				}
+			}
+			i++
+		}
+	}
+}
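
A short usage sketch of the iterator API added above (the source snippet and file name are made up; requires go1.23 for range-over-func):

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"

    	"golang.org/x/tools/go/ast/inspector"
    )

    func main() {
    	fset := token.NewFileSet()
    	src := `package p; func f() { g(1); h(2, 3) }`
    	f, err := parser.ParseFile(fset, "example.go", src, 0)
    	if err != nil {
    		panic(err)
    	}
    	in := inspector.New([]*ast.File{f})

    	// Visit every call expression via the type-filtered iterator.
    	for call := range inspector.All[*ast.CallExpr](in) {
    		fmt.Println(fset.Position(call.Pos()), "call with", len(call.Args), "arg(s)")
    	}
    }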
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index 2a872f89..97784484 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -12,6 +12,8 @@ package inspector
 import (
 	"go/ast"
 	"math"
+
+	_ "unsafe"
 )
 
 const (
@@ -215,8 +217,9 @@ func typeOf(n ast.Node) uint64 {
 	return 0
 }
 
+//go:linkname maskOf
 func maskOf(nodes []ast.Node) uint64 {
-	if nodes == nil {
+	if len(nodes) == 0 {
 		return math.MaxUint64 // match all node types
 	}
 	var mask uint64
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go
new file mode 100644
index 00000000..5a42174a
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go
@@ -0,0 +1,341 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+// This file is a fork of ast.Inspect to reduce unnecessary dynamic
+// calls and to gather edge information.
+//
+// Consistency with the original is ensured by TestInspectAllNodes.
+
+import (
+	"fmt"
+	"go/ast"
+
+	"golang.org/x/tools/internal/astutil/edge"
+)
+
+func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) {
+	for i, node := range list {
+		walk(v, ek, i, node)
+	}
+}
+
+func walk(v *visitor, ek edge.Kind, index int, node ast.Node) {
+	v.push(ek, index, node)
+
+	// walk children
+	// (the order of the cases matches the order
+	// of the corresponding node types in ast.go)
+	switch n := node.(type) {
+	// Comments and fields
+	case *ast.Comment:
+		// nothing to do
+
+	case *ast.CommentGroup:
+		walkList(v, edge.CommentGroup_List, n.List)
+
+	case *ast.Field:
+		if n.Doc != nil {
+			walk(v, edge.Field_Doc, -1, n.Doc)
+		}
+		walkList(v, edge.Field_Names, n.Names)
+		if n.Type != nil {
+			walk(v, edge.Field_Type, -1, n.Type)
+		}
+		if n.Tag != nil {
+			walk(v, edge.Field_Tag, -1, n.Tag)
+		}
+		if n.Comment != nil {
+			walk(v, edge.Field_Comment, -1, n.Comment)
+		}
+
+	case *ast.FieldList:
+		walkList(v, edge.FieldList_List, n.List)
+
+	// Expressions
+	case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+		// nothing to do
+
+	case *ast.Ellipsis:
+		if n.Elt != nil {
+			walk(v, edge.Ellipsis_Elt, -1, n.Elt)
+		}
+
+	case *ast.FuncLit:
+		walk(v, edge.FuncLit_Type, -1, n.Type)
+		walk(v, edge.FuncLit_Body, -1, n.Body)
+
+	case *ast.CompositeLit:
+		if n.Type != nil {
+			walk(v, edge.CompositeLit_Type, -1, n.Type)
+		}
+		walkList(v, edge.CompositeLit_Elts, n.Elts)
+
+	case *ast.ParenExpr:
+		walk(v, edge.ParenExpr_X, -1, n.X)
+
+	case *ast.SelectorExpr:
+		walk(v, edge.SelectorExpr_X, -1, n.X)
+		walk(v, edge.SelectorExpr_Sel, -1, n.Sel)
+
+	case *ast.IndexExpr:
+		walk(v, edge.IndexExpr_X, -1, n.X)
+		walk(v, edge.IndexExpr_Index, -1, n.Index)
+
+	case *ast.IndexListExpr:
+		walk(v, edge.IndexListExpr_X, -1, n.X)
+		walkList(v, edge.IndexListExpr_Indices, n.Indices)
+
+	case *ast.SliceExpr:
+		walk(v, edge.SliceExpr_X, -1, n.X)
+		if n.Low != nil {
+			walk(v, edge.SliceExpr_Low, -1, n.Low)
+		}
+		if n.High != nil {
+			walk(v, edge.SliceExpr_High, -1, n.High)
+		}
+		if n.Max != nil {
+			walk(v, edge.SliceExpr_Max, -1, n.Max)
+		}
+
+	case *ast.TypeAssertExpr:
+		walk(v, edge.TypeAssertExpr_X, -1, n.X)
+		if n.Type != nil {
+			walk(v, edge.TypeAssertExpr_Type, -1, n.Type)
+		}
+
+	case *ast.CallExpr:
+		walk(v, edge.CallExpr_Fun, -1, n.Fun)
+		walkList(v, edge.CallExpr_Args, n.Args)
+
+	case *ast.StarExpr:
+		walk(v, edge.StarExpr_X, -1, n.X)
+
+	case *ast.UnaryExpr:
+		walk(v, edge.UnaryExpr_X, -1, n.X)
+
+	case *ast.BinaryExpr:
+		walk(v, edge.BinaryExpr_X, -1, n.X)
+		walk(v, edge.BinaryExpr_Y, -1, n.Y)
+
+	case *ast.KeyValueExpr:
+		walk(v, edge.KeyValueExpr_Key, -1, n.Key)
+		walk(v, edge.KeyValueExpr_Value, -1, n.Value)
+
+	// Types
+	case *ast.ArrayType:
+		if n.Len != nil {
+			walk(v, edge.ArrayType_Len, -1, n.Len)
+		}
+		walk(v, edge.ArrayType_Elt, -1, n.Elt)
+
+	case *ast.StructType:
+		walk(v, edge.StructType_Fields, -1, n.Fields)
+
+	case *ast.FuncType:
+		if n.TypeParams != nil {
+			walk(v, edge.FuncType_TypeParams, -1, n.TypeParams)
+		}
+		if n.Params != nil {
+			walk(v, edge.FuncType_Params, -1, n.Params)
+		}
+		if n.Results != nil {
+			walk(v, edge.FuncType_Results, -1, n.Results)
+		}
+
+	case *ast.InterfaceType:
+		walk(v, edge.InterfaceType_Methods, -1, n.Methods)
+
+	case *ast.MapType:
+		walk(v, edge.MapType_Key, -1, n.Key)
+		walk(v, edge.MapType_Value, -1, n.Value)
+
+	case *ast.ChanType:
+		walk(v, edge.ChanType_Value, -1, n.Value)
+
+	// Statements
+	case *ast.BadStmt:
+		// nothing to do
+
+	case *ast.DeclStmt:
+		walk(v, edge.DeclStmt_Decl, -1, n.Decl)
+
+	case *ast.EmptyStmt:
+		// nothing to do
+
+	case *ast.LabeledStmt:
+		walk(v, edge.LabeledStmt_Label, -1, n.Label)
+		walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt)
+
+	case *ast.ExprStmt:
+		walk(v, edge.ExprStmt_X, -1, n.X)
+
+	case *ast.SendStmt:
+		walk(v, edge.SendStmt_Chan, -1, n.Chan)
+		walk(v, edge.SendStmt_Value, -1, n.Value)
+
+	case *ast.IncDecStmt:
+		walk(v, edge.IncDecStmt_X, -1, n.X)
+
+	case *ast.AssignStmt:
+		walkList(v, edge.AssignStmt_Lhs, n.Lhs)
+		walkList(v, edge.AssignStmt_Rhs, n.Rhs)
+
+	case *ast.GoStmt:
+		walk(v, edge.GoStmt_Call, -1, n.Call)
+
+	case *ast.DeferStmt:
+		walk(v, edge.DeferStmt_Call, -1, n.Call)
+
+	case *ast.ReturnStmt:
+		walkList(v, edge.ReturnStmt_Results, n.Results)
+
+	case *ast.BranchStmt:
+		if n.Label != nil {
+			walk(v, edge.BranchStmt_Label, -1, n.Label)
+		}
+
+	case *ast.BlockStmt:
+		walkList(v, edge.BlockStmt_List, n.List)
+
+	case *ast.IfStmt:
+		if n.Init != nil {
+			walk(v, edge.IfStmt_Init, -1, n.Init)
+		}
+		walk(v, edge.IfStmt_Cond, -1, n.Cond)
+		walk(v, edge.IfStmt_Body, -1, n.Body)
+		if n.Else != nil {
+			walk(v, edge.IfStmt_Else, -1, n.Else)
+		}
+
+	case *ast.CaseClause:
+		walkList(v, edge.CaseClause_List, n.List)
+		walkList(v, edge.CaseClause_Body, n.Body)
+
+	case *ast.SwitchStmt:
+		if n.Init != nil {
+			walk(v, edge.SwitchStmt_Init, -1, n.Init)
+		}
+		if n.Tag != nil {
+			walk(v, edge.SwitchStmt_Tag, -1, n.Tag)
+		}
+		walk(v, edge.SwitchStmt_Body, -1, n.Body)
+
+	case *ast.TypeSwitchStmt:
+		if n.Init != nil {
+			walk(v, edge.TypeSwitchStmt_Init, -1, n.Init)
+		}
+		walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign)
+		walk(v, edge.TypeSwitchStmt_Body, -1, n.Body)
+
+	case *ast.CommClause:
+		if n.Comm != nil {
+			walk(v, edge.CommClause_Comm, -1, n.Comm)
+		}
+		walkList(v, edge.CommClause_Body, n.Body)
+
+	case *ast.SelectStmt:
+		walk(v, edge.SelectStmt_Body, -1, n.Body)
+
+	case *ast.ForStmt:
+		if n.Init != nil {
+			walk(v, edge.ForStmt_Init, -1, n.Init)
+		}
+		if n.Cond != nil {
+			walk(v, edge.ForStmt_Cond, -1, n.Cond)
+		}
+		if n.Post != nil {
+			walk(v, edge.ForStmt_Post, -1, n.Post)
+		}
+		walk(v, edge.ForStmt_Body, -1, n.Body)
+
+	case *ast.RangeStmt:
+		if n.Key != nil {
+			walk(v, edge.RangeStmt_Key, -1, n.Key)
+		}
+		if n.Value != nil {
+			walk(v, edge.RangeStmt_Value, -1, n.Value)
+		}
+		walk(v, edge.RangeStmt_X, -1, n.X)
+		walk(v, edge.RangeStmt_Body, -1, n.Body)
+
+	// Declarations
+	case *ast.ImportSpec:
+		if n.Doc != nil {
+			walk(v, edge.ImportSpec_Doc, -1, n.Doc)
+		}
+		if n.Name != nil {
+			walk(v, edge.ImportSpec_Name, -1, n.Name)
+		}
+		walk(v, edge.ImportSpec_Path, -1, n.Path)
+		if n.Comment != nil {
+			walk(v, edge.ImportSpec_Comment, -1, n.Comment)
+		}
+
+	case *ast.ValueSpec:
+		if n.Doc != nil {
+			walk(v, edge.ValueSpec_Doc, -1, n.Doc)
+		}
+		walkList(v, edge.ValueSpec_Names, n.Names)
+		if n.Type != nil {
+			walk(v, edge.ValueSpec_Type, -1, n.Type)
+		}
+		walkList(v, edge.ValueSpec_Values, n.Values)
+		if n.Comment != nil {
+			walk(v, edge.ValueSpec_Comment, -1, n.Comment)
+		}
+
+	case *ast.TypeSpec:
+		if n.Doc != nil {
+			walk(v, edge.TypeSpec_Doc, -1, n.Doc)
+		}
+		walk(v, edge.TypeSpec_Name, -1, n.Name)
+		if n.TypeParams != nil {
+			walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams)
+		}
+		walk(v, edge.TypeSpec_Type, -1, n.Type)
+		if n.Comment != nil {
+			walk(v, edge.TypeSpec_Comment, -1, n.Comment)
+		}
+
+	case *ast.BadDecl:
+		// nothing to do
+
+	case *ast.GenDecl:
+		if n.Doc != nil {
+			walk(v, edge.GenDecl_Doc, -1, n.Doc)
+		}
+		walkList(v, edge.GenDecl_Specs, n.Specs)
+
+	case *ast.FuncDecl:
+		if n.Doc != nil {
+			walk(v, edge.FuncDecl_Doc, -1, n.Doc)
+		}
+		if n.Recv != nil {
+			walk(v, edge.FuncDecl_Recv, -1, n.Recv)
+		}
+		walk(v, edge.FuncDecl_Name, -1, n.Name)
+		walk(v, edge.FuncDecl_Type, -1, n.Type)
+		if n.Body != nil {
+			walk(v, edge.FuncDecl_Body, -1, n.Body)
+		}
+
+	case *ast.File:
+		if n.Doc != nil {
+			walk(v, edge.File_Doc, -1, n.Doc)
+		}
+		walk(v, edge.File_Name, -1, n.Name)
+		walkList(v, edge.File_Decls, n.Decls)
+		// don't walk n.Comments - they have been
+		// visited already through the individual
+		// nodes
+
+	default:
+		// (includes *ast.Package)
+		panic(fmt.Sprintf("Walk: unexpected node type %T", n))
+	}
+
+	v.pop(node)
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index 137cc8df..65fe2628 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -2,22 +2,64 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package gcexportdata provides functions for locating, reading, and
-// writing export data files containing type information produced by the
-// gc compiler.  This package supports go1.7 export data format and all
-// later versions.
+// Package gcexportdata provides functions for reading and writing
+// export data, which is a serialized description of the API of a Go
+// package including the names, kinds, types, and locations of all
+// exported declarations.
 //
-// Although it might seem convenient for this package to live alongside
-// go/types in the standard library, this would cause version skew
-// problems for developer tools that use it, since they must be able to
-// consume the outputs of the gc compiler both before and after a Go
-// update such as from Go 1.7 to Go 1.8.  Because this package lives in
-// golang.org/x/tools, sites can update their version of this repo some
-// time before the Go 1.8 release and rebuild and redeploy their
-// developer tools, which will then be able to consume both Go 1.7 and
-// Go 1.8 export data files, so they will work before and after the
-// Go update. (See discussion at https://golang.org/issue/15651.)
-package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
+// The standard Go compiler (cmd/compile) writes an export data file
+// for each package it compiles, which it later reads when compiling
+// packages that import the earlier one. The compiler must thus
+// contain logic to both write and read export data.
+// (See the "Export" section in the cmd/compile/README file.)
+//
+// The [Read] function in this package can read files produced by the
+// compiler, producing [go/types] data structures. As a matter of
+// policy, Read supports export data files produced by only the last
+// two Go releases plus tip; see https://go.dev/issue/68898. The
+// export data files produced by the compiler contain additional
+// details related to generics, inlining, and other optimizations that
+// cannot be decoded by the [Read] function.
+//
+// In files written by the compiler, the export data is not at the
+// start of the file. Before calling Read, use [NewReader] to locate
+// the desired portion of the file.
+//
+// The [Write] function in this package encodes the exported API of a
+// Go package ([types.Package]) as a file. Such files can be later
+// decoded by Read, but cannot be consumed by the compiler.
+//
+// # Future changes
+//
+// Although Read supports the formats written by both Write and the
+// compiler, the two are quite different, and there is an open
+// proposal (https://go.dev/issue/69491) to separate these APIs.
+//
+// Under that proposal, this package would ultimately provide only the
+// Read operation for compiler export data, which must be defined in
+// this module (golang.org/x/tools), not in the standard library, to
+// avoid version skew for developer tools that need to read compiler
+// export data both before and after a Go release, such as from Go
+// 1.23 to Go 1.24. Because this package lives in the tools module,
+// clients can update their version of the module some time before the
+// Go 1.24 release and rebuild and redeploy their tools, which will
+// then be able to consume both Go 1.23 and Go 1.24 export data files,
+// so they will work before and after the Go update. (See discussion
+// at https://go.dev/issue/15651.)
+//
+// The operations to import and export [go/types] data structures
+// would be defined in the go/types package as Import and Export.
+// [Write] would (eventually) delegate to Export,
+// and [Read], when it detects a file produced by Export,
+// would delegate to Import.
+//
+// # Deprecations
+//
+// The [NewImporter] and [Find] functions are deprecated and should
+// not be used in new code. The [WriteBundle] and [ReadBundle]
+// functions are experimental, and there is an open proposal to
+// deprecate them (https://go.dev/issue/69573).
+package gcexportdata
 
 import (
 	"bufio"
@@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) {
 // additional trailing data beyond the end of the export data.
 func NewReader(r io.Reader) (io.Reader, error) {
 	buf := bufio.NewReader(r)
-	_, size, err := gcimporter.FindExportData(buf)
+	size, err := gcimporter.FindExportData(buf)
 	if err != nil {
 		return nil, err
 	}
 
-	if size >= 0 {
-		// We were given an archive and found the __.PKGDEF in it.
-		// This tells us the size of the export data, and we don't
-		// need to return the entire file.
-		return &io.LimitedReader{
-			R: buf,
-			N: size,
-		}, nil
-	} else {
-		// We were given an object file. As such, we don't know how large
-		// the export data is and must return the entire file.
-		return buf, nil
-	}
+	// We were given an archive and found the __.PKGDEF in it.
+	// This tells us the size of the export data, and we don't
+	// need to return the entire file.
+	return &io.LimitedReader{
+		R: buf,
+		N: size,
+	}, nil
 }
 
 // readAll works the same way as io.ReadAll, but avoids allocations and copies
@@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) {
 // Read reads export data from in, decodes it, and returns type
 // information for the package.
 //
+// Read is capable of reading export data produced by [Write] at the
+// same source code version, or by the last two Go releases (plus tip)
+// of the standard Go compiler. Reading files from older compilers may
+// produce an error.
+//
 // The package path (effectively its linker symbol prefix) is
 // specified by path, since unlike the package name, this information
 // may not be recorded in the export data.
@@ -128,14 +169,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
 	// (from "version"). Select appropriate importer.
 	if len(data) > 0 {
 		switch data[0] {
-		case 'v', 'c', 'd': // binary, till go1.10
+		case 'v', 'c', 'd':
+			// binary, produced by cmd/compile till go1.10
 			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
 
-		case 'i': // indexed, till go1.19
+		case 'i':
+			// indexed, produced by cmd/compile till go1.19,
+			// and also by [Write].
+			//
+			// If proposal #69491 is accepted, go/types
+			// serialization will be implemented by
+			// types.Export, to which Write would eventually
+			// delegate (explicitly dropping any pretence at
+			// inter-version Write-Read compatibility).
+			// This [Read] function would delegate to types.Import
+			// when it detects that the file was produced by Export.
 			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
 			return pkg, err
 
-		case 'u': // unified, from go1.20
+		case 'u':
+			// unified, produced by cmd/compile since go1.20
 			_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
 			return pkg, err
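
A hedged sketch of the NewReader/Read workflow described in the rewritten package documentation above (the archive path and package path are hypothetical):

    package main

    import (
    	"fmt"
    	"go/token"
    	"go/types"
    	"os"

    	"golang.org/x/tools/go/gcexportdata"
    )

    func main() {
    	// An export data file written by the gc compiler (hypothetical path).
    	f, err := os.Open("/tmp/fmt.a")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	// The export data is not at the start of the file:
    	// locate it first, then decode it.
    	r, err := gcexportdata.NewReader(f)
    	if err != nil {
    		panic(err)
    	}
    	fset := token.NewFileSet()
    	imports := make(map[string]*types.Package)
    	pkg, err := gcexportdata.Read(r, fset, imports, "fmt")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(pkg.Path(), "has", len(pkg.Scope().Names()), "exported names")
    }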
 
diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
deleted file mode 100644
index c6e7c0d4..00000000
--- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package packagesdriver fetches type sizes for go/packages and go/analysis.
-package packagesdriver
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"golang.org/x/tools/internal/gocommand"
-)
-
-// TODO(adonovan): move back into go/packages.
-func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
-	inv.Verb = "list"
-	inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
-	stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
-	var goarch, compiler string
-	if rawErr != nil {
-		rawErrMsg := rawErr.Error()
-		if strings.Contains(rawErrMsg, "cannot find main module") ||
-			strings.Contains(rawErrMsg, "go.mod file not found") {
-			// User's running outside of a module.
-			// All bets are off. Get GOARCH and guess compiler is gc.
-			// TODO(matloob): Is this a problem in practice?
-			inv.Verb = "env"
-			inv.Args = []string{"GOARCH"}
-			envout, enverr := gocmdRunner.Run(ctx, inv)
-			if enverr != nil {
-				return "", "", enverr
-			}
-			goarch = strings.TrimSpace(envout.String())
-			compiler = "gc"
-		} else if friendlyErr != nil {
-			return "", "", friendlyErr
-		} else {
-			// This should be unreachable, but be defensive
-			// in case RunRaw's error results are inconsistent.
-			return "", "", rawErr
-		}
-	} else {
-		fields := strings.Fields(stdout.String())
-		if len(fields) < 2 {
-			return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
-				stdout.String(), stderr.String())
-		}
-		goarch = fields[0]
-		compiler = fields[1]
-	}
-	return compiler, goarch, nil
-}
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
index 3531ac8f..f1931d10 100644
--- a/vendor/golang.org/x/tools/go/packages/doc.go
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -64,7 +64,7 @@ graph using the Imports fields.
 
 The Load function can be configured by passing a pointer to a Config as
 the first argument. A nil Config is equivalent to the zero Config, which
-causes Load to run in LoadFiles mode, collecting minimal information.
+causes Load to run in [LoadFiles] mode, collecting minimal information.
 See the documentation for type Config for details.
 
 As noted earlier, the Config.Mode controls the amount of detail
@@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode
 for details.
 
 Most tools should pass their command-line arguments (after any flags)
-uninterpreted to [Load], so that it can interpret them
+uninterpreted to Load, so that it can interpret them
 according to the conventions of the underlying build system.
 
 See the Example function for typical usage.
 
 # The driver protocol
 
-[Load] may be used to load Go packages even in Go projects that use
+Load may be used to load Go packages even in Go projects that use
 alternative build systems, by installing an appropriate "driver"
 program for the build system and specifying its location in the
 GOPACKAGESDRIVER environment variable.
@@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information
 is written to the driver's standard input. The driver must write a
 JSON-encoded [DriverResponse] message to its standard output. (This
 message differs from the JSON schema produced by 'go list'.)
+
+The value of the PWD environment variable seen by the driver process
+is the preferred name of its working directory. (The working directory
+may have other aliases due to symbolic links; see the comment on the
+Dir field of [exec.Cmd] for related information.)
+When the driver process emits in its response the name of a file
+that is a descendant of this directory, it must use an absolute path
+that has the value of PWD as a prefix, to ensure that the returned
+filenames satisfy the original query.
 */
 package packages // import "golang.org/x/tools/go/packages"
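
A minimal sketch of the driver protocol described above, assuming only what the comment states: the driver receives the query patterns as arguments and a JSON DriverRequest on stdin, and must reply with a JSON DriverResponse on stdout. This toy driver simply declines, so the caller falls back to 'go list'.

    package main

    import (
    	"encoding/json"
    	"io"
    	"os"
    )

    func main() {
    	// Read and discard the DriverRequest; a real driver would decode it
    	// and honour fields such as Mode, Env, BuildFlags and Overlay.
    	io.Copy(io.Discard, os.Stdin)

    	// Declining makes go/packages fall back to the go list driver.
    	json.NewEncoder(os.Stdout).Encode(struct {
    		NotHandled bool
    	}{NotHandled: true})
    }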
 
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
index c2b4b711..91bd62e8 100644
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -13,6 +13,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"slices"
 	"strings"
 )
 
@@ -79,10 +80,10 @@ type DriverResponse struct {
 
 // driver is the type for functions that query the build system for the
 // packages named by the patterns.
-type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
+type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
 
 // findExternalDriver returns the file path of a tool that supplies
-// the build system package structure, or "" if not found."
+// the build system package structure, or "" if not found.
 // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
 // value, otherwise it searches for a binary named gopackagesdriver on the PATH.
 func findExternalDriver(cfg *Config) driver {
@@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver {
 			return nil
 		}
 	}
-	return func(cfg *Config, words ...string) (*DriverResponse, error) {
+	return func(cfg *Config, patterns []string) (*DriverResponse, error) {
 		req, err := json.Marshal(DriverRequest{
 			Mode:       cfg.Mode,
 			Env:        cfg.Env,
@@ -117,7 +118,7 @@ func findExternalDriver(cfg *Config) driver {
 
 		buf := new(bytes.Buffer)
 		stderr := new(bytes.Buffer)
-		cmd := exec.CommandContext(cfg.Context, tool, words...)
+		cmd := exec.CommandContext(cfg.Context, tool, patterns...)
 		cmd.Dir = cfg.Dir
 		// The cwd gets resolved to the real path. On Darwin, where
 		// /tmp is a symlink, this breaks anything that expects the
@@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver {
 		// command.
 		//
 		// (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
-		cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir)
+		cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
 		cmd.Stdin = bytes.NewReader(req)
 		cmd.Stdout = buf
 		cmd.Stderr = stderr
@@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver {
 		return &response, nil
 	}
 }
-
-// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)].
-// TODO(adonovan): use go1.21 slices.Clip.
-func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] }
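
A standalone sketch (not part of the patch) of the PWD convention used in the code above: the child's working directory is set via cmd.Dir, and PWD is appended to the environment so the driver sees the caller's logical, possibly symlinked, spelling of that directory rather than the resolved physical path.

    package example

    import (
    	"os"
    	"os/exec"
    	"slices"
    )

    // runDriverIn runs tool with the given patterns in dir, advertising dir
    // as the preferred working-directory name via PWD.
    func runDriverIn(dir, tool string, patterns ...string) error {
    	cmd := exec.Command(tool, patterns...)
    	cmd.Dir = dir
    	// slices.Clip keeps the append from clobbering a shared backing array.
    	cmd.Env = append(slices.Clip(os.Environ()), "PWD="+dir)
    	cmd.Stdout = os.Stdout
    	cmd.Stderr = os.Stderr
    	return cmd.Run()
    }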
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index d9be410a..0458b4f9 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -21,7 +21,6 @@ import (
 	"sync"
 	"unicode"
 
-	"golang.org/x/tools/go/internal/packagesdriver"
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/packagesinternal"
 )
@@ -81,6 +80,12 @@ type golistState struct {
 	cfg *Config
 	ctx context.Context
 
+	runner *gocommand.Runner
+
+	// overlay is the JSON file that encodes the Config.Overlay
+	// mapping, used by 'go list -overlay=...'.
+	overlay string
+
 	envOnce    sync.Once
 	goEnvError error
 	goEnv      map[string]string
@@ -128,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string {
 // goListDriver uses the go list command to interpret the patterns and produce
 // the build system package structure.
 // See driver for more details.
-func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
+//
+// overlay is the JSON file that encodes the cfg.Overlay
+// mapping, used by 'go list -overlay=...'
+func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
 	// Make sure that any asynchronous go commands are killed when we return.
 	parentCtx := cfg.Context
 	if parentCtx == nil {
@@ -143,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error
 		cfg:        cfg,
 		ctx:        ctx,
 		vendorDirs: map[string]bool{},
+		overlay:    overlay,
+		runner:     runner,
 	}
 
 	// Fill in response.Sizes asynchronously if necessary.
-	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		errCh := make(chan error)
 		go func() {
-			compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
+			compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
 			response.dr.Compiler = compiler
 			response.dr.Arch = arch
 			errCh <- err
@@ -312,6 +322,7 @@ type jsonPackage struct {
 	ImportPath        string
 	Dir               string
 	Name              string
+	Target            string
 	Export            string
 	GoFiles           []string
 	CompiledGoFiles   []string
@@ -495,13 +506,15 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
 		pkg := &Package{
 			Name:            p.Name,
 			ID:              p.ImportPath,
+			Dir:             p.Dir,
+			Target:          p.Target,
 			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
 			CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
 			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
 			EmbedFiles:      absJoin(p.Dir, p.EmbedFiles),
 			EmbedPatterns:   absJoin(p.Dir, p.EmbedPatterns),
 			IgnoredFiles:    absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
-			forTest:         p.ForTest,
+			ForTest:         p.ForTest,
 			depsErrors:      p.DepsErrors,
 			Module:          p.Module,
 		}
@@ -682,7 +695,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
 // getGoVersion returns the effective minor version of the go command.
 func (state *golistState) getGoVersion() (int, error) {
 	state.goVersionOnce.Do(func() {
-		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
+		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
 	})
 	return state.goVersion, state.goVersionError
 }
@@ -752,7 +765,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
 		}
 	}
 	addFields("Name", "ImportPath", "Error") // These fields are always needed
-	if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
 			"CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
 			"SwigFiles", "SwigCXXFiles", "SysoFiles")
@@ -760,7 +773,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
 			addFields("TestGoFiles", "XTestGoFiles")
 		}
 	}
-	if cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		// CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
 		// even when -compiled isn't passed in.
 		// TODO(#52435): Should we make the test ask for -compiled, or automatically
@@ -785,7 +798,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
 		// Request Dir in the unlikely case Export is not absolute.
 		addFields("Dir", "Export")
 	}
-	if cfg.Mode&needInternalForTest != 0 {
+	if cfg.Mode&NeedForTest != 0 {
 		addFields("ForTest")
 	}
 	if cfg.Mode&needInternalDepsErrors != 0 {
@@ -800,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string {
 	if cfg.Mode&NeedEmbedPatterns != 0 {
 		addFields("EmbedPatterns")
 	}
+	if cfg.Mode&NeedTarget != 0 {
+		addFields("Target")
+	}
 	return "-json=" + strings.Join(fields, ",")
 }
 
@@ -841,7 +857,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation {
 		Env:        cfg.Env,
 		Logf:       cfg.Logf,
 		WorkingDir: cfg.Dir,
-		Overlay:    cfg.goListOverlayFile,
+		Overlay:    state.overlay,
 	}
 }
 
@@ -852,11 +868,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
 	inv := state.cfgInvocation()
 	inv.Verb = verb
 	inv.Args = args
-	gocmdRunner := cfg.gocmdRunner
-	if gocmdRunner == nil {
-		gocmdRunner = &gocommand.Runner{}
-	}
-	stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv)
+
+	stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
 	if err != nil {
 		// Check for 'go' executable not being found.
 		if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
@@ -880,6 +893,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
 			return nil, friendlyErr
 		}
 
+		// Return an error if 'go list' failed due to missing tools in
+		// $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
+			return nil, friendlyErr
+		}
+
 		// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
 		// and should be suppressed by go list -e.
 		//
@@ -1024,3 +1043,44 @@ func cmdDebugStr(cmd *exec.Cmd) string {
 	}
 	return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
 }
+
+// getSizesForArgs queries 'go list' for the appropriate
+// Compiler and GOARCH arguments to pass to [types.SizesFor].
+func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
+	inv.Verb = "list"
+	inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
+	stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
+	var goarch, compiler string
+	if rawErr != nil {
+		rawErrMsg := rawErr.Error()
+		if strings.Contains(rawErrMsg, "cannot find main module") ||
+			strings.Contains(rawErrMsg, "go.mod file not found") {
+			// User's running outside of a module.
+			// All bets are off. Get GOARCH and guess compiler is gc.
+			// TODO(matloob): Is this a problem in practice?
+			inv.Verb = "env"
+			inv.Args = []string{"GOARCH"}
+			envout, enverr := gocmdRunner.Run(ctx, inv)
+			if enverr != nil {
+				return "", "", enverr
+			}
+			goarch = strings.TrimSpace(envout.String())
+			compiler = "gc"
+		} else if friendlyErr != nil {
+			return "", "", friendlyErr
+		} else {
+			// This should be unreachable, but be defensive
+			// in case RunRaw's error results are inconsistent.
+			return "", "", rawErr
+		}
+	} else {
+		fields := strings.Fields(stdout.String())
+		if len(fields) < 2 {
+			return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
+				stdout.String(), stderr.String())
+		}
+		goarch = fields[0]
+		compiler = fields[1]
+	}
+	return compiler, goarch, nil
+}
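
For context (illustrative, not part of the patch): the compiler/GOARCH pair returned by getSizesForArgs is what callers feed to types.SizesFor, and the invocation above is equivalent to running

    go list -f '{{context.GOARCH}} {{context.Compiler}}' -- unsafe

    package main

    import (
    	"fmt"
    	"go/types"
    )

    func main() {
    	// Values as 'go list' would report them for a typical linux/amd64 gc toolchain.
    	compiler, goarch := "gc", "amd64"
    	sizes := types.SizesFor(compiler, goarch)
    	fmt.Println(sizes.Sizeof(types.Typ[types.Int])) // 8 on amd64
    }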
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
index 5c080d21..69eec9f4 100644
--- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go
+++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -9,49 +9,48 @@ import (
 	"strings"
 )
 
-var allModes = []LoadMode{
-	NeedName,
-	NeedFiles,
-	NeedCompiledGoFiles,
-	NeedImports,
-	NeedDeps,
-	NeedExportFile,
-	NeedTypes,
-	NeedSyntax,
-	NeedTypesInfo,
-	NeedTypesSizes,
+var modes = [...]struct {
+	mode LoadMode
+	name string
+}{
+	{NeedName, "NeedName"},
+	{NeedFiles, "NeedFiles"},
+	{NeedCompiledGoFiles, "NeedCompiledGoFiles"},
+	{NeedImports, "NeedImports"},
+	{NeedDeps, "NeedDeps"},
+	{NeedExportFile, "NeedExportFile"},
+	{NeedTypes, "NeedTypes"},
+	{NeedSyntax, "NeedSyntax"},
+	{NeedTypesInfo, "NeedTypesInfo"},
+	{NeedTypesSizes, "NeedTypesSizes"},
+	{NeedForTest, "NeedForTest"},
+	{NeedModule, "NeedModule"},
+	{NeedEmbedFiles, "NeedEmbedFiles"},
+	{NeedEmbedPatterns, "NeedEmbedPatterns"},
+	{NeedTarget, "NeedTarget"},
 }
 
-var modeStrings = []string{
-	"NeedName",
-	"NeedFiles",
-	"NeedCompiledGoFiles",
-	"NeedImports",
-	"NeedDeps",
-	"NeedExportFile",
-	"NeedTypes",
-	"NeedSyntax",
-	"NeedTypesInfo",
-	"NeedTypesSizes",
-}
-
-func (mod LoadMode) String() string {
-	m := mod
-	if m == 0 {
+func (mode LoadMode) String() string {
+	if mode == 0 {
 		return "LoadMode(0)"
 	}
 	var out []string
-	for i, x := range allModes {
-		if x > m {
-			break
-		}
-		if (m & x) != 0 {
-			out = append(out, modeStrings[i])
-			m = m ^ x
+	// named bits
+	for _, item := range modes {
+		if (mode & item.mode) != 0 {
+			mode ^= item.mode
+			out = append(out, item.name)
 		}
 	}
-	if m != 0 {
-		out = append(out, "Unknown")
+	// unnamed residue
+	if mode != 0 {
+		if out == nil {
+			return fmt.Sprintf("LoadMode(%#x)", int(mode))
+		}
+		out = append(out, fmt.Sprintf("%#x", int(mode)))
 	}
-	return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
+	if len(out) == 1 {
+		return out[0]
+	}
+	return "(" + strings.Join(out, "|") + ")"
 }
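
Expected behaviour of the new String method, shown as a small illustrative program (the outputs in the comments follow from the code above):

    package main

    import (
    	"fmt"

    	"golang.org/x/tools/go/packages"
    )

    func main() {
    	fmt.Println(packages.NeedName)                      // NeedName
    	fmt.Println(packages.NeedName | packages.NeedFiles) // (NeedName|NeedFiles)
    	fmt.Println(packages.LoadMode(0))                   // LoadMode(0)
    }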
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index 34306ddd..c3a59b8e 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -16,13 +16,13 @@ import (
 	"go/scanner"
 	"go/token"
 	"go/types"
-	"io"
 	"log"
 	"os"
 	"path/filepath"
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"golang.org/x/sync/errgroup"
@@ -31,7 +31,6 @@ import (
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/packagesinternal"
 	"golang.org/x/tools/internal/typesinternal"
-	"golang.org/x/tools/internal/versions"
 )
 
 // A LoadMode controls the amount of detail to return when loading.
@@ -44,20 +43,33 @@ import (
 // ID and Errors (if present) will always be filled.
 // [Load] may return more information than requested.
 //
+// The Mode flag is a union of several bits named NeedName,
+// NeedFiles, and so on, each of which determines whether
+// a given field of Package (Name, Files, etc) should be
+// populated.
+//
+// For convenience, we provide named constants for the most
+// common combinations of Need flags:
+//
+//	[LoadFiles]     lists of files in each package
+//	[LoadImports]   ... plus imports
+//	[LoadTypes]     ... plus type information
+//	[LoadSyntax]    ... plus type-annotated syntax
+//	[LoadAllSyntax] ... for all dependencies
+//
 // Unfortunately there are a number of open bugs related to
 // interactions among the LoadMode bits:
-// - https://github.com/golang/go/issues/48226
-// - https://github.com/golang/go/issues/56633
-// - https://github.com/golang/go/issues/56677
-// - https://github.com/golang/go/issues/58726
-// - https://github.com/golang/go/issues/63517
+//   - https://go.dev/issue/56633
+//   - https://go.dev/issue/56677
+//   - https://go.dev/issue/58726
+//   - https://go.dev/issue/63517
 type LoadMode int
 
 const (
 	// NeedName adds Name and PkgPath.
 	NeedName LoadMode = 1 << iota
 
-	// NeedFiles adds GoFiles and OtherFiles.
+	// NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles
 	NeedFiles
 
 	// NeedCompiledGoFiles adds CompiledGoFiles.
@@ -76,10 +88,10 @@ const (
 	// NeedTypes adds Types, Fset, and IllTyped.
 	NeedTypes
 
-	// NeedSyntax adds Syntax.
+	// NeedSyntax adds Syntax and Fset.
 	NeedSyntax
 
-	// NeedTypesInfo adds TypesInfo.
+	// NeedTypesInfo adds TypesInfo and Fset.
 	NeedTypesInfo
 
 	// NeedTypesSizes adds TypesSizes.
@@ -88,9 +100,10 @@ const (
 	// needInternalDepsErrors adds the internal deps errors field for use by gopls.
 	needInternalDepsErrors
 
-	// needInternalForTest adds the internal forTest field.
+	// NeedForTest adds ForTest.
+	//
 	// Tests must also be set on the context for this field to be populated.
-	needInternalForTest
+	NeedForTest
 
 	// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
 	// Modifies CompiledGoFiles and Types, and has no effect on its own.
@@ -104,27 +117,27 @@ const (
 
 	// NeedEmbedPatterns adds EmbedPatterns.
 	NeedEmbedPatterns
+
+	// NeedTarget adds Target.
+	NeedTarget
+
+	// Be sure to update loadmode_string.go when adding new items!
 )
 
 const (
-	// Deprecated: LoadFiles exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
+	// LoadFiles loads the name and file names for the initial packages.
 	LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
 
-	// Deprecated: LoadImports exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
+	// LoadImports loads the name, file names, and import mapping for the initial packages.
 	LoadImports = LoadFiles | NeedImports
 
-	// Deprecated: LoadTypes exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
+	// LoadTypes loads exported type information for the initial packages.
 	LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
 
-	// Deprecated: LoadSyntax exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
+	// LoadSyntax loads typed syntax for the initial packages.
 	LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
 
-	// Deprecated: LoadAllSyntax exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
+	// LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
 	LoadAllSyntax = LoadSyntax | NeedDeps
 
 	// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
@@ -134,13 +147,7 @@ const (
 // A Config specifies details about how packages should be loaded.
 // The zero value is a valid configuration.
 //
-// Calls to Load do not modify this struct.
-//
-// TODO(adonovan): #67702: this is currently false: in fact,
-// calls to [Load] do not modify the public fields of this struct, but
-// may modify hidden fields, so concurrent calls to [Load] must not
-// use the same Config. But perhaps we should reestablish the
-// documented invariant.
+// Calls to [Load] do not modify this struct.
 type Config struct {
 	// Mode controls the level of information returned for each package.
 	Mode LoadMode
@@ -171,19 +178,10 @@ type Config struct {
 	//
 	Env []string
 
-	// gocmdRunner guards go command calls from concurrency errors.
-	gocmdRunner *gocommand.Runner
-
 	// BuildFlags is a list of command-line flags to be passed through to
 	// the build system's query tool.
 	BuildFlags []string
 
-	// modFile will be used for -modfile in go command invocations.
-	modFile string
-
-	// modFlag will be used for -modfile in go command invocations.
-	modFlag string
-
 	// Fset provides source position information for syntax trees and types.
 	// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
 	Fset *token.FileSet
@@ -230,21 +228,24 @@ type Config struct {
 	// drivers may vary in their level of support for overlays.
 	Overlay map[string][]byte
 
-	// goListOverlayFile is the JSON file that encodes the Overlay
-	// mapping, used by 'go list -overlay=...'
-	goListOverlayFile string
+	// -- Hidden configuration fields only for use in x/tools --
+
+	// modFile will be used for -modfile in go command invocations.
+	modFile string
+
+	// modFlag will be used for -modfile in go command invocations.
+	modFlag string
 }
 
 // Load loads and returns the Go packages named by the given patterns.
 //
-// Config specifies loading options;
-// nil behaves the same as an empty Config.
+// The cfg parameter specifies loading options; nil behaves the same as an empty [Config].
 //
 // The [Config.Mode] field is a set of bits that determine what kinds
 // of information should be computed and returned. Modes that require
 // more information tend to be slower. See [LoadMode] for details
 // and important caveats. Its zero value is equivalent to
-// NeedName | NeedFiles | NeedCompiledGoFiles.
+// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles].
 //
 // Each call to Load returns a new set of [Package] instances.
 // The Packages and their Imports form a directed acyclic graph.
@@ -261,7 +262,7 @@ type Config struct {
 // Errors associated with a particular package are recorded in the
 // corresponding Package's Errors list, and do not cause Load to
 // return an error. Clients may need to handle such errors before
-// proceeding with further analysis. The PrintErrors function is
+// proceeding with further analysis. The [PrintErrors] function is
 // provided for convenient display of all errors.
 func Load(cfg *Config, patterns ...string) ([]*Package, error) {
 	ld := newLoader(cfg)
@@ -324,21 +325,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro
 		} else if !response.NotHandled {
 			return response, true, nil
 		}
-		// (fall through)
+		// not handled: fall through
 	}
 
 	// go list fallback
-	//
+
 	// Write overlays once, as there are many calls
 	// to 'go list' (one per chunk plus others too).
-	overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
+	overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
 	if err != nil {
 		return nil, false, err
 	}
 	defer cleanupOverlay()
-	cfg.goListOverlayFile = overlay
 
-	response, err := callDriverOnChunks(goListDriver, cfg, chunks)
+	var runner gocommand.Runner // (shared across many 'go list' calls)
+	driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
+		return goListDriver(cfg, &runner, overlayFile, patterns)
+	}
+	response, err := callDriverOnChunks(driver, cfg, chunks)
 	if err != nil {
 		return nil, false, err
 	}
@@ -376,16 +380,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) {
 
 func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) {
 	if len(chunks) == 0 {
-		return driver(cfg)
+		return driver(cfg, nil)
 	}
 	responses := make([]*DriverResponse, len(chunks))
 	errNotHandled := errors.New("driver returned NotHandled")
 	var g errgroup.Group
 	for i, chunk := range chunks {
-		i := i
-		chunk := chunk
 		g.Go(func() (err error) {
-			responses[i], err = driver(cfg, chunk...)
+			responses[i], err = driver(cfg, chunk)
 			if responses[i] != nil && responses[i].NotHandled {
 				err = errNotHandled
 			}
@@ -435,6 +437,12 @@ type Package struct {
 	// PkgPath is the package path as used by the go/types package.
 	PkgPath string
 
+	// Dir is the directory associated with the package, if it exists.
+	//
+	// For packages listed by the go command, this is the directory containing
+	// the package files.
+	Dir string
+
 	// Errors contains any errors encountered querying the metadata
 	// of the package, or while parsing or type-checking its files.
 	Errors []Error
@@ -474,6 +482,10 @@ type Package struct {
 	// information for the package as provided by the build system.
 	ExportFile string
 
+	// Target is the absolute install path of the .a file, for libraries,
+	// and of the executable file, for binaries.
+	Target string
+
 	// Imports maps import paths appearing in the package's Go source files
 	// to corresponding loaded Packages.
 	Imports map[string]*Package
@@ -522,8 +534,8 @@ type Package struct {
 
 	// -- internal --
 
-	// forTest is the package under test, if any.
-	forTest string
+	// ForTest is the package under test, if any.
+	ForTest string
 
 	// depsErrors is the DepsErrors field from the go list response, if any.
 	depsErrors []*packagesinternal.PackageError
@@ -552,9 +564,6 @@ type ModuleError struct {
 }
 
 func init() {
-	packagesinternal.GetForTest = func(p interface{}) string {
-		return p.(*Package).forTest
-	}
 	packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
 		return p.(*Package).depsErrors
 	}
@@ -566,7 +575,6 @@ func init() {
 	}
 	packagesinternal.TypecheckCgo = int(typecheckCgo)
 	packagesinternal.DepsErrors = int(needInternalDepsErrors)
-	packagesinternal.ForTest = int(needInternalForTest)
 }
 
 // An Error describes a problem with a package's metadata, syntax, or types.
@@ -682,18 +690,19 @@ func (p *Package) String() string { return p.ID }
 // loaderPackage augments Package with state used during the loading phase
 type loaderPackage struct {
 	*Package
-	importErrors map[string]error // maps each bad import to its error
-	loadOnce     sync.Once
-	color        uint8 // for cycle detection
-	needsrc      bool  // load from source (Mode >= LoadTypes)
-	needtypes    bool  // type information is either requested or depended on
-	initial      bool  // package was matched by a pattern
-	goVersion    int   // minor version number of go command on PATH
+	importErrors    map[string]error // maps each bad import to its error
+	preds           []*loaderPackage // packages that import this one
+	unfinishedSuccs atomic.Int32     // number of direct imports not yet loaded
+	color           uint8            // for cycle detection
+	needsrc         bool             // load from source (Mode >= LoadTypes)
+	needtypes       bool             // type information is either requested or depended on
+	initial         bool             // package was matched by a pattern
+	goVersion       int              // minor version number of go command on PATH
 }
 
 // loader holds the working state of a single call to load.
 type loader struct {
-	pkgs map[string]*loaderPackage
+	pkgs map[string]*loaderPackage // keyed by Package.ID
 	Config
 	sizes        types.Sizes // non-nil if needed by mode
 	parseCache   map[string]*parseValue
@@ -739,9 +748,6 @@ func newLoader(cfg *Config) *loader {
 	if ld.Config.Env == nil {
 		ld.Config.Env = os.Environ()
 	}
-	if ld.Config.gocmdRunner == nil {
-		ld.Config.gocmdRunner = &gocommand.Runner{}
-	}
 	if ld.Context == nil {
 		ld.Context = context.Background()
 	}
@@ -755,7 +761,7 @@ func newLoader(cfg *Config) *loader {
 	ld.requestedMode = ld.Mode
 	ld.Mode = impliedLoadMode(ld.Mode)
 
-	if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
+	if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
 		if ld.Fset == nil {
 			ld.Fset = token.NewFileSet()
 		}
@@ -764,6 +770,7 @@ func newLoader(cfg *Config) *loader {
 		// because we load source if export data is missing.
 		if ld.ParseFile == nil {
 			ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+				// We implicitly promise to keep doing ast.Object resolution. :(
 				const mode = parser.AllErrors | parser.ParseComments
 				return parser.ParseFile(fset, filename, src, mode)
 			}
@@ -795,7 +802,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 		exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
 		// This package needs type information if the caller requested types and the package is
 		// either a root, or it's a non-root and the user requested dependencies ...
-		needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+		needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
 		// This package needs source if the call requested source (or types info, which implies source)
 		// and the package is either a root, or it's a non-root and the user requested dependencies...
 		needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
@@ -820,9 +827,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 		}
 	}
 
-	if ld.Mode&NeedImports != 0 {
-		// Materialize the import graph.
-
+	// Materialize the import graph if it is needed (NeedImports),
+	// or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
+	var leaves []*loaderPackage // packages with no unfinished successors
+	if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
 		const (
 			white = 0 // new
 			grey  = 1 // in progress
@@ -841,63 +849,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 		// dependency on a package that does. These are the only packages
 		// for which we load source code.
 		var stack []*loaderPackage
-		var visit func(lpkg *loaderPackage) bool
-		visit = func(lpkg *loaderPackage) bool {
-			switch lpkg.color {
-			case black:
-				return lpkg.needsrc
-			case grey:
+		var visit func(from, lpkg *loaderPackage) bool
+		visit = func(from, lpkg *loaderPackage) bool {
+			if lpkg.color == grey {
 				panic("internal error: grey node")
 			}
-			lpkg.color = grey
-			stack = append(stack, lpkg) // push
-			stubs := lpkg.Imports       // the structure form has only stubs with the ID in the Imports
-			lpkg.Imports = make(map[string]*Package, len(stubs))
-			for importPath, ipkg := range stubs {
-				var importErr error
-				imp := ld.pkgs[ipkg.ID]
-				if imp == nil {
-					// (includes package "C" when DisableCgo)
-					importErr = fmt.Errorf("missing package: %q", ipkg.ID)
-				} else if imp.color == grey {
-					importErr = fmt.Errorf("import cycle: %s", stack)
-				}
-				if importErr != nil {
-					if lpkg.importErrors == nil {
-						lpkg.importErrors = make(map[string]error)
+			if lpkg.color == white {
+				lpkg.color = grey
+				stack = append(stack, lpkg) // push
+				stubs := lpkg.Imports       // the structure form has only stubs with the ID in the Imports
+				lpkg.Imports = make(map[string]*Package, len(stubs))
+				for importPath, ipkg := range stubs {
+					var importErr error
+					imp := ld.pkgs[ipkg.ID]
+					if imp == nil {
+						// (includes package "C" when DisableCgo)
+						importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+					} else if imp.color == grey {
+						importErr = fmt.Errorf("import cycle: %s", stack)
 					}
-					lpkg.importErrors[importPath] = importErr
-					continue
+					if importErr != nil {
+						if lpkg.importErrors == nil {
+							lpkg.importErrors = make(map[string]error)
+						}
+						lpkg.importErrors[importPath] = importErr
+						continue
+					}
+
+					if visit(lpkg, imp) {
+						lpkg.needsrc = true
+					}
+					lpkg.Imports[importPath] = imp.Package
 				}
 
-				if visit(imp) {
-					lpkg.needsrc = true
+				// -- postorder --
+
+				// Complete type information is required for the
+				// immediate dependencies of each source package.
+				if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
+					for _, ipkg := range lpkg.Imports {
+						ld.pkgs[ipkg.ID].needtypes = true
+					}
 				}
-				lpkg.Imports[importPath] = imp.Package
+
+				// NeedTypeSizes causes TypeSizes to be set even
+				// on packages for which types aren't needed.
+				if ld.Mode&NeedTypesSizes != 0 {
+					lpkg.TypesSizes = ld.sizes
+				}
+
+				// Add packages with no imports directly to the queue of leaves.
+				if len(lpkg.Imports) == 0 {
+					leaves = append(leaves, lpkg)
+				}
+
+				stack = stack[:len(stack)-1] // pop
+				lpkg.color = black
 			}
 
-			// Complete type information is required for the
-			// immediate dependencies of each source package.
-			if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
-				for _, ipkg := range lpkg.Imports {
-					ld.pkgs[ipkg.ID].needtypes = true
-				}
+			// Add edge from predecessor.
+			if from != nil {
+				from.unfinishedSuccs.Add(+1) // incref
+				lpkg.preds = append(lpkg.preds, from)
 			}
 
-			// NeedTypeSizes causes TypeSizes to be set even
-			// on packages for which types aren't needed.
-			if ld.Mode&NeedTypesSizes != 0 {
-				lpkg.TypesSizes = ld.sizes
-			}
-			stack = stack[:len(stack)-1] // pop
-			lpkg.color = black
-
 			return lpkg.needsrc
 		}
 
 		// For each initial package, create its import DAG.
 		for _, lpkg := range initial {
-			visit(lpkg)
+			visit(nil, lpkg)
 		}
 
 	} else {
@@ -910,16 +931,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 
 	// Load type data and syntax if needed, starting at
 	// the initial packages (roots of the import DAG).
-	if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
-		var wg sync.WaitGroup
-		for _, lpkg := range initial {
-			wg.Add(1)
-			go func(lpkg *loaderPackage) {
-				ld.loadRecursive(lpkg)
-				wg.Done()
-			}(lpkg)
+	if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+
+		// We avoid using g.SetLimit to limit concurrency as
+		// it makes g.Go stop accepting work, which prevents
+		// workers from enqueuing, and thus finishing, and thus
+		// allowing the group to make progress: deadlock.
+		//
+		// Instead we use the ioLimit and cpuLimit semaphores.
+		g, _ := errgroup.WithContext(ld.Context)
+
+		// enqueue adds a package to the type-checking queue.
+		// It must have no unfinished successors.
+		var enqueue func(*loaderPackage)
+		enqueue = func(lpkg *loaderPackage) {
+			g.Go(func() error {
+				// Parse and type-check.
+				ld.loadPackage(lpkg)
+
+				// Notify each waiting predecessor,
+				// and enqueue it when it becomes a leaf.
+				for _, pred := range lpkg.preds {
+					if pred.unfinishedSuccs.Add(-1) == 0 { // decref
+						enqueue(pred)
+					}
+				}
+
+				return nil
+			})
+		}
+
+		// Load leaves first, adding new packages
+		// to the queue as they become leaves.
+		for _, leaf := range leaves {
+			enqueue(leaf)
+		}
+
+		if err := g.Wait(); err != nil {
+			return nil, err // cancelled
 		}
-		wg.Wait()
 	}
 
 	// If the context is done, return its error and
@@ -961,12 +1011,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 		}
 		if ld.requestedMode&NeedTypes == 0 {
 			ld.pkgs[i].Types = nil
-			ld.pkgs[i].Fset = nil
 			ld.pkgs[i].IllTyped = false
 		}
 		if ld.requestedMode&NeedSyntax == 0 {
 			ld.pkgs[i].Syntax = nil
 		}
+		if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 {
+			ld.pkgs[i].Fset = nil
+		}
 		if ld.requestedMode&NeedTypesInfo == 0 {
 			ld.pkgs[i].TypesInfo = nil
 		}
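
The new loader code above replaces per-package recursion with a leaf-driven scheduler: each package counts its unfinished successors, becomes a leaf when that count reaches zero, and on completion decrements its predecessors. A generic, hedged sketch of the same idea (all names here are illustrative):

    package main

    import (
    	"fmt"
    	"sync/atomic"

    	"golang.org/x/sync/errgroup"
    )

    type node struct {
    	name            string
    	preds           []*node      // nodes that depend on this one
    	unfinishedSuccs atomic.Int32 // dependencies not yet processed
    }

    func process(all []*node, work func(*node)) error {
    	var g errgroup.Group
    	var enqueue func(*node)
    	enqueue = func(n *node) {
    		g.Go(func() error {
    			work(n)
    			// Notify predecessors; enqueue any that just became leaves.
    			for _, p := range n.preds {
    				if p.unfinishedSuccs.Add(-1) == 0 {
    					enqueue(p)
    				}
    			}
    			return nil
    		})
    	}
    	for _, n := range all {
    		if n.unfinishedSuccs.Load() == 0 {
    			enqueue(n) // start with the leaves
    		}
    	}
    	return g.Wait()
    }

    func main() {
    	// a depends on b, b depends on c: processed in the order c, b, a.
    	a, b, c := &node{name: "a"}, &node{name: "b"}, &node{name: "c"}
    	b.preds, c.preds = []*node{a}, []*node{b}
    	a.unfinishedSuccs.Store(1)
    	b.unfinishedSuccs.Store(1)
    	_ = process([]*node{a, b, c}, func(n *node) { fmt.Println("processed", n.name) })
    }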
@@ -981,31 +1033,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
 	return result, nil
 }
 
-// loadRecursive loads the specified package and its dependencies,
-// recursively, in parallel, in topological order.
-// It is atomic and idempotent.
-// Precondition: ld.Mode&NeedTypes.
-func (ld *loader) loadRecursive(lpkg *loaderPackage) {
-	lpkg.loadOnce.Do(func() {
-		// Load the direct dependencies, in parallel.
-		var wg sync.WaitGroup
-		for _, ipkg := range lpkg.Imports {
-			imp := ld.pkgs[ipkg.ID]
-			wg.Add(1)
-			go func(imp *loaderPackage) {
-				ld.loadRecursive(imp)
-				wg.Done()
-			}(imp)
-		}
-		wg.Wait()
-		ld.loadPackage(lpkg)
-	})
-}
-
-// loadPackage loads the specified package.
+// loadPackage loads/parses/typechecks the specified package.
 // It must be called only once per Package,
 // after immediate dependencies are loaded.
-// Precondition: ld.Mode & NeedTypes.
+// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
 func (ld *loader) loadPackage(lpkg *loaderPackage) {
 	if lpkg.PkgPath == "unsafe" {
 		// Fill in the blanks to avoid surprises.
@@ -1041,6 +1072,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 	if !lpkg.needtypes && !lpkg.needsrc {
 		return
 	}
+
+	// TODO(adonovan): this condition looks wrong:
+	// I think it should be lpkg.needtypes && !lpkg.needsrc,
+	// so that NeedSyntax without NeedTypes can be satisfied by export data.
 	if !lpkg.needsrc {
 		if err := ld.loadFromExportData(lpkg); err != nil {
 			lpkg.Errors = append(lpkg.Errors, Error{
@@ -1146,7 +1181,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 	}
 
 	lpkg.Syntax = files
-	if ld.Config.Mode&NeedTypes == 0 {
+	if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 {
 		return
 	}
 
@@ -1157,16 +1192,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 		return
 	}
 
-	lpkg.TypesInfo = &types.Info{
-		Types:      make(map[ast.Expr]types.TypeAndValue),
-		Defs:       make(map[*ast.Ident]types.Object),
-		Uses:       make(map[*ast.Ident]types.Object),
-		Implicits:  make(map[ast.Node]types.Object),
-		Instances:  make(map[*ast.Ident]types.Instance),
-		Scopes:     make(map[ast.Node]*types.Scope),
-		Selections: make(map[*ast.SelectorExpr]*types.Selection),
+	// Populate TypesInfo only if needed, as it
+	// causes the type checker to work much harder.
+	if ld.Config.Mode&NeedTypesInfo != 0 {
+		lpkg.TypesInfo = &types.Info{
+			Types:        make(map[ast.Expr]types.TypeAndValue),
+			Defs:         make(map[*ast.Ident]types.Object),
+			Uses:         make(map[*ast.Ident]types.Object),
+			Implicits:    make(map[ast.Node]types.Object),
+			Instances:    make(map[*ast.Ident]types.Instance),
+			Scopes:       make(map[ast.Node]*types.Scope),
+			Selections:   make(map[*ast.SelectorExpr]*types.Selection),
+			FileVersions: make(map[*ast.File]string),
+		}
 	}
-	versions.InitFileVersions(lpkg.TypesInfo)
 	lpkg.TypesSizes = ld.sizes
 
 	importer := importerFunc(func(path string) (*types.Package, error) {
@@ -1219,6 +1258,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 		}
 	}
 
+	// Type-checking is CPU intensive.
+	cpuLimit <- unit{}            // acquire a token
+	defer func() { <-cpuLimit }() // release a token
+
 	typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
 	lpkg.importErrors = nil // no longer needed
 
@@ -1283,8 +1326,11 @@ type importerFunc func(path string) (*types.Package, error)
 func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
 
 // We use a counting semaphore to limit
-// the number of parallel I/O calls per process.
-var ioLimit = make(chan bool, 20)
+// the number of parallel I/O calls or CPU threads per process.
+var (
+	ioLimit  = make(chan unit, 20)
+	cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))
+)
 
 func (ld *loader) parseFile(filename string) (*ast.File, error) {
 	ld.parseCacheMu.Lock()
@@ -1301,20 +1347,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
 
 		var src []byte
 		for f, contents := range ld.Config.Overlay {
+			// TODO(adonovan): Inefficient for large overlays.
+			// Do an exact name-based map lookup
+			// (for nonexistent files) followed by a
+			// FileID-based map lookup (for existing ones).
 			if sameFile(f, filename) {
 				src = contents
+				break
 			}
 		}
 		var err error
 		if src == nil {
-			ioLimit <- true // wait
+			ioLimit <- unit{} // acquire a token
 			src, err = os.ReadFile(filename)
-			<-ioLimit // signal
+			<-ioLimit // release a token
 		}
 		if err != nil {
 			v.err = err
 		} else {
+			// Parsing is CPU intensive.
+			cpuLimit <- unit{} // acquire a token
 			v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+			<-cpuLimit // release a token
 		}
 
 		close(v.ready)
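
The ioLimit and cpuLimit channels used above are counting semaphores: sending a token acquires capacity, receiving one releases it. A minimal, self-contained sketch of that pattern (the worker body is hypothetical):

    package main

    import (
    	"fmt"
    	"runtime"
    	"sync"
    )

    type unit struct{}

    // At most GOMAXPROCS CPU-bound workers run at once.
    var cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))

    func main() {
    	var wg sync.WaitGroup
    	for i := 0; i < 16; i++ {
    		wg.Add(1)
    		go func(i int) {
    			defer wg.Done()
    			cpuLimit <- unit{}            // acquire a token
    			defer func() { <-cpuLimit }() // release a token
    			fmt.Println("CPU-bound work", i)
    		}(i)
    	}
    	wg.Wait()
    }
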
@@ -1329,18 +1383,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
 // Because files are scanned in parallel, the token.Pos
 // positions of the resulting ast.Files are not ordered.
 func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
-	var wg sync.WaitGroup
-	n := len(filenames)
-	parsed := make([]*ast.File, n)
-	errors := make([]error, n)
-	for i, file := range filenames {
-		wg.Add(1)
-		go func(i int, filename string) {
+	var (
+		n      = len(filenames)
+		parsed = make([]*ast.File, n)
+		errors = make([]error, n)
+	)
+	var g errgroup.Group
+	for i, filename := range filenames {
+		// This creates goroutines unnecessarily in the
+		// cache-hit case, but that case is uncommon.
+		g.Go(func() error {
 			parsed[i], errors[i] = ld.parseFile(filename)
-			wg.Done()
-		}(i, file)
+			return nil
+		})
 	}
-	wg.Wait()
+	g.Wait()
 
 	// Eliminate nils, preserving order.
 	var o int
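
The rewritten parseFiles hands each file to an errgroup and writes results into pre-sized slices indexed by position, so no mutex is needed and input order is preserved. A standalone sketch of the same pattern (file names are hypothetical):

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"

    	"golang.org/x/sync/errgroup"
    )

    func parseAll(fset *token.FileSet, filenames []string) ([]*ast.File, []error) {
    	parsed := make([]*ast.File, len(filenames))
    	errs := make([]error, len(filenames))
    	var g errgroup.Group
    	for i, name := range filenames {
    		i, name := i, name // safe capture on all Go versions
    		g.Go(func() error {
    			// Each goroutine writes only its own slot; no locking needed.
    			parsed[i], errs[i] = parser.ParseFile(fset, name, nil, parser.SkipObjectResolution)
    			return nil // per-file errors are reported via errs, not the group
    		})
    	}
    	_ = g.Wait()
    	return parsed, errs
    }

    func main() {
    	files, errs := parseAll(token.NewFileSet(), []string{"a.go", "b.go"})
    	fmt.Println(len(files), errs)
    }
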
@@ -1499,6 +1556,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
 		// All these things require knowing the import graph.
 		loadMode |= NeedImports
 	}
+	if loadMode&NeedTypes != 0 {
+		// Types require the GoVersion from Module.
+		loadMode |= NeedModule
+	}
 
 	return loadMode
 }
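
With this hunk, asking for NeedTypes implicitly turns on NeedModule, since type-checking needs the module's Go version. Existing callers keep working; for illustration, a minimal (hypothetical) use of the packages API:

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/tools/go/packages"
    )

    func main() {
    	cfg := &packages.Config{
    		// NeedModule is now implied by NeedTypes, but asking explicitly is harmless.
    		Mode: packages.NeedName | packages.NeedTypes | packages.NeedSyntax | packages.NeedTypesInfo,
    	}
    	pkgs, err := packages.Load(cfg, "fmt")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, p := range pkgs {
    		fmt.Println(p.PkgPath, "types loaded:", p.Types != nil)
    	}
    }
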
@@ -1507,4 +1568,4 @@ func usesExportData(cfg *Config) bool {
 	return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
 }
 
-var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
+type unit struct{}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
index a1dcc40b..df14ffd9 100644
--- a/vendor/golang.org/x/tools/go/packages/visit.go
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
 // PrintErrors returns the number of errors printed.
 func PrintErrors(pkgs []*Package) int {
 	var n int
+	errModules := make(map[*Module]bool)
 	Visit(pkgs, nil, func(pkg *Package) {
 		for _, err := range pkg.Errors {
 			fmt.Fprintln(os.Stderr, err)
 			n++
 		}
+
+		// Print pkg.Module.Error once if present.
+		mod := pkg.Module
+		if mod != nil && mod.Error != nil && !errModules[mod] {
+			errModules[mod] = true
+			fmt.Fprintln(os.Stderr, mod.Error.Err)
+			n++
+		}
 	})
 	return n
 }
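
PrintErrors now also reports each module error exactly once. A sketch of typical usage (the pattern argument is hypothetical):

    package main

    import (
    	"log"

    	"golang.org/x/tools/go/packages"
    )

    func main() {
    	cfg := &packages.Config{Mode: packages.NeedSyntax | packages.NeedTypes | packages.NeedModule}
    	pkgs, err := packages.Load(cfg, "./...")
    	if err != nil {
    		log.Fatal(err) // the query itself failed
    	}
    	// Package errors, and now module errors, are printed to stderr.
    	if n := packages.PrintErrors(pkgs); n > 0 {
    		log.Fatalf("%d errors while loading packages", n)
    	}
    }
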
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index a2386c34..16ed3c17 100644
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -51,7 +51,7 @@ type Path string
 //
 //	PO package->object	Package.Scope.Lookup
 //	OT  object->type 	Object.Type
-//	TT    type->type 	Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
+//	TT    type->type 	Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa]
 //	TO   type->object	Type.{At,Field,Method,Obj} [AFMO]
 //
 // All valid paths start with a package and end at an object
@@ -63,8 +63,8 @@ type Path string
 //   - The only PO operator is Package.Scope.Lookup, which requires an identifier.
 //   - The only OT operator is Object.Type,
 //     which we encode as '.' because dot cannot appear in an identifier.
-//   - The TT operators are encoded as [EKPRUTC];
-//     one of these (TypeParam) requires an integer operand,
+//   - The TT operators are encoded as [EKPRUTrCa];
+//     two of these ({,Recv}TypeParams) require an integer operand,
 //     which is encoded as a string of decimal digits.
 //   - The TO operators are encoded as [AFMO];
 //     three of these (At,Field,Method) require an integer operand,
@@ -98,19 +98,21 @@ const (
 	opType = '.' // .Type()		  (Object)
 
 	// type->type operators
-	opElem       = 'E' // .Elem()		        (Pointer, Slice, Array, Chan, Map)
-	opKey        = 'K' // .Key()		        (Map)
-	opParams     = 'P' // .Params()		      (Signature)
-	opResults    = 'R' // .Results()	      (Signature)
-	opUnderlying = 'U' // .Underlying()	    (Named)
-	opTypeParam  = 'T' // .TypeParams.At(i) (Named, Signature)
-	opConstraint = 'C' // .Constraint()     (TypeParam)
+	opElem          = 'E' // .Elem()		(Pointer, Slice, Array, Chan, Map)
+	opKey           = 'K' // .Key()			(Map)
+	opParams        = 'P' // .Params()		(Signature)
+	opResults       = 'R' // .Results()		(Signature)
+	opUnderlying    = 'U' // .Underlying()		(Named)
+	opTypeParam     = 'T' // .TypeParams.At(i)	(Named, Signature)
+	opRecvTypeParam = 'r' // .RecvTypeParams.At(i)	(Signature)
+	opConstraint    = 'C' // .Constraint()		(TypeParam)
+	opRhs           = 'a' // .Rhs()			(Alias)
 
 	// type->object operators
-	opAt     = 'A' // .At(i)		 (Tuple)
-	opField  = 'F' // .Field(i)	 (Struct)
-	opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
-	opObj    = 'O' // .Obj()		 (Named, TypeParam)
+	opAt     = 'A' // .At(i)	(Tuple)
+	opField  = 'F' // .Field(i)	(Struct)
+	opMethod = 'M' // .Method(i)	(Named or Interface; not Struct: "promoted" names are ignored)
+	opObj    = 'O' // .Obj()	(Named, TypeParam)
 )
 
 // For is equivalent to new(Encoder).For(obj).
@@ -226,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
 	//    Reject obviously non-viable cases.
 	switch obj := obj.(type) {
 	case *types.TypeName:
-		if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok {
+		if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok {
 			// With the exception of type parameters, only package-level type names
 			// have a path.
 			return "", fmt.Errorf("no path for %v", obj)
@@ -278,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
 		path = append(path, opType)
 
 		T := o.Type()
-
-		if tname.IsAlias() {
-			// type alias
-			if r := find(obj, T, path, nil); r != nil {
+		if alias, ok := T.(*types.Alias); ok {
+			if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
 				return Path(r), nil
 			}
-		} else {
-			if named, _ := T.(*types.Named); named != nil {
-				if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil {
-					// generic named type
-					return Path(r), nil
-				}
+			if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
+				return Path(r), nil
 			}
+
+		} else if tname.IsAlias() {
+			// legacy alias
+			if r := find(obj, T, path); r != nil {
+				return Path(r), nil
+			}
+
+		} else if named, ok := T.(*types.Named); ok {
 			// defined (named) type
-			if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
+			if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
+				return Path(r), nil
+			}
+			if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
 				return Path(r), nil
 			}
 		}
@@ -305,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
 		if _, ok := o.(*types.TypeName); !ok {
 			if o.Exported() {
 				// exported non-type (const, var, func)
-				if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
+				if r := find(obj, o.Type(), append(path, opType)); r != nil {
 					return Path(r), nil
 				}
 			}
@@ -313,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
 		}
 
 		// Inspect declared methods of defined types.
-		if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok {
+		if T, ok := types.Unalias(o.Type()).(*types.Named); ok {
 			path = append(path, opType)
 			// The method index here is always with respect
 			// to the underlying go/types data structures,
@@ -325,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
 				if m == obj {
 					return Path(path2), nil // found declared method
 				}
-				if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
+				if r := find(obj, m.Type(), append(path2, opType)); r != nil {
 					return Path(r), nil
 				}
 			}
@@ -440,43 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
 //
 // The seen map is used to short circuit cycles through type parameters. If
 // nil, it will be allocated as necessary.
-func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
+//
+// The seenMethods map is used internally to short circuit cycles through
+// interface methods, such as occur in the following example:
+//
+//	type I interface { f() interface{I} }
+//
+// See golang/go#68046 for details.
+func find(obj types.Object, T types.Type, path []byte) []byte {
+	return (&finder{obj: obj}).find(T, path)
+}
+
+// finder closes over search state for a call to find.
+type finder struct {
+	obj             types.Object             // the sought object
+	seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
+	seenMethods     map[*types.Func]bool     // for cycle breaking through recursive interfaces
+}
+
+func (f *finder) find(T types.Type, path []byte) []byte {
 	switch T := T.(type) {
-	case *aliases.Alias:
-		return find(obj, aliases.Unalias(T), path, seen)
+	case *types.Alias:
+		return f.find(types.Unalias(T), path)
 	case *types.Basic, *types.Named:
 		// Named types belonging to pkg were handled already,
 		// so T must belong to another package. No path.
 		return nil
 	case *types.Pointer:
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Slice:
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Array:
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Chan:
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Map:
-		if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
+		if r := f.find(T.Key(), append(path, opKey)); r != nil {
 			return r
 		}
-		return find(obj, T.Elem(), append(path, opElem), seen)
+		return f.find(T.Elem(), append(path, opElem))
 	case *types.Signature:
-		if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil {
+		if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
 			return r
 		}
-		if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
+		if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
 			return r
 		}
-		return find(obj, T.Results(), append(path, opResults), seen)
+		if r := f.find(T.Params(), append(path, opParams)); r != nil {
+			return r
+		}
+		return f.find(T.Results(), append(path, opResults))
 	case *types.Struct:
 		for i := 0; i < T.NumFields(); i++ {
 			fld := T.Field(i)
 			path2 := appendOpArg(path, opField, i)
-			if fld == obj {
+			if fld == f.obj {
 				return path2 // found field var
 			}
-			if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
+			if r := f.find(fld.Type(), append(path2, opType)); r != nil {
 				return r
 			}
 		}
@@ -485,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
 		for i := 0; i < T.Len(); i++ {
 			v := T.At(i)
 			path2 := appendOpArg(path, opAt, i)
-			if v == obj {
+			if v == f.obj {
 				return path2 // found param/result var
 			}
-			if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
+			if r := f.find(v.Type(), append(path2, opType)); r != nil {
 				return r
 			}
 		}
@@ -496,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
 	case *types.Interface:
 		for i := 0; i < T.NumMethods(); i++ {
 			m := T.Method(i)
+			if f.seenMethods[m] {
+				return nil
+			}
 			path2 := appendOpArg(path, opMethod, i)
-			if m == obj {
+			if m == f.obj {
 				return path2 // found interface method
 			}
-			if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
+			if f.seenMethods == nil {
+				f.seenMethods = make(map[*types.Func]bool)
+			}
+			f.seenMethods[m] = true
+			if r := f.find(m.Type(), append(path2, opType)); r != nil {
 				return r
 			}
 		}
 		return nil
 	case *types.TypeParam:
 		name := T.Obj()
-		if name == obj {
-			return append(path, opObj)
-		}
-		if seen[name] {
+		if f.seenTParamNames[name] {
 			return nil
 		}
-		if seen == nil {
-			seen = make(map[*types.TypeName]bool)
+		if name == f.obj {
+			return append(path, opObj)
 		}
-		seen[name] = true
-		if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
+		if f.seenTParamNames == nil {
+			f.seenTParamNames = make(map[*types.TypeName]bool)
+		}
+		f.seenTParamNames[name] = true
+		if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
 			return r
 		}
 		return nil
@@ -525,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
 	panic(T)
 }
 
-func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
+func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
+	return (&finder{obj: obj}).findTypeParam(list, path, op)
+}
+
+func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
 	for i := 0; i < list.Len(); i++ {
 		tparam := list.At(i)
-		path2 := appendOpArg(path, opTypeParam, i)
-		if r := find(obj, tparam, path2, seen); r != nil {
+		path2 := appendOpArg(path, op, i)
+		if r := f.find(tparam, path2); r != nil {
 			return r
 		}
 	}
@@ -580,10 +619,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
 		code := suffix[0]
 		suffix = suffix[1:]
 
-		// Codes [AFM] have an integer operand.
+		// Codes [AFMTr] have an integer operand.
 		var index int
 		switch code {
-		case opAt, opField, opMethod, opTypeParam:
+		case opAt, opField, opMethod, opTypeParam, opRecvTypeParam:
 			rest := strings.TrimLeft(suffix, "0123456789")
 			numerals := suffix[:len(suffix)-len(rest)]
 			suffix = rest
@@ -616,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
 
 		// Inv: t != nil, obj == nil
 
-		t = aliases.Unalias(t)
+		t = types.Unalias(t)
 		switch code {
 		case opElem:
 			hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
@@ -653,6 +692,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
 			}
 			t = named.Underlying()
 
+		case opRhs:
+			if alias, ok := t.(*types.Alias); ok {
+				t = aliases.Rhs(alias)
+			} else if false && aliases.Enabled() {
+				// The Enabled check is too expensive, so for now we
+				// simply assume that aliases are not enabled.
+				// TODO(adonovan): replace with "if true {" when go1.24 is assured.
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
+			}
+
 		case opTypeParam:
 			hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
 			if !ok {
@@ -664,6 +713,17 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
 			}
 			t = tparams.At(index)
 
+		case opRecvTypeParam:
+			sig, ok := t.(*types.Signature) // Signature
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+			}
+			rtparams := sig.RecvTypeParams()
+			if n := rtparams.Len(); index >= n {
+				return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+			}
+			t = rtparams.At(index)
+
 		case opConstraint:
 			tparam, ok := t.(*types.TypeParam)
 			if !ok {
@@ -725,6 +785,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
 		}
 	}
 
+	if obj == nil {
+		panic(p) // path does not end in an object-valued operator
+	}
+
 	if obj.Pkg() != pkg {
 		return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
 	}
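
The changes above extend the path grammar with 'r' (receiver type parameters) and 'a' (alias right-hand sides), but the encode/decode API is unchanged. A self-contained sketch that round-trips a path for a method of a hypothetical type T:

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"
    	"go/types"
    	"log"

    	"golang.org/x/tools/go/types/objectpath"
    )

    const src = `package p

    type T struct{ X int }

    func (T) M() {}
    `

    func main() {
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "p.go", src, 0)
    	if err != nil {
    		log.Fatal(err)
    	}
    	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
    	if err != nil {
    		log.Fatal(err)
    	}

    	meth := pkg.Scope().Lookup("T").Type().(*types.Named).Method(0) // M

    	path, err := objectpath.For(meth) // encode
    	if err != nil {
    		log.Fatal(err)
    	}
    	obj, err := objectpath.Object(pkg, path) // decode
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(path, obj == meth) // the path round-trips to the same object
    }
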
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
new file mode 100644
index 00000000..75438035
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import (
+	"go/ast"
+	"go/types"
+
+	"golang.org/x/tools/internal/typeparams"
+)
+
+// Callee returns the named target of a function call, if any:
+// a function, method, builtin, or variable.
+//
+// Functions and methods may potentially have type parameters.
+func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+	fun := ast.Unparen(call.Fun)
+
+	// Look through type instantiation if necessary.
+	isInstance := false
+	switch fun.(type) {
+	case *ast.IndexExpr, *ast.IndexListExpr:
+		// When extracting the callee from an *IndexExpr, we need to check that
+		// it is a *types.Func and not a *types.Var.
+		// Example: Don't match a slice m within the expression `m[0]()`.
+		isInstance = true
+		fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
+	}
+
+	var obj types.Object
+	switch fun := fun.(type) {
+	case *ast.Ident:
+		obj = info.Uses[fun] // type, var, builtin, or declared func
+	case *ast.SelectorExpr:
+		if sel, ok := info.Selections[fun]; ok {
+			obj = sel.Obj() // method or field
+		} else {
+			obj = info.Uses[fun.Sel] // qualified identifier?
+		}
+	}
+	if _, ok := obj.(*types.TypeName); ok {
+		return nil // T(x) is a conversion, not a call
+	}
+	// A Func is required to match instantiations.
+	if _, ok := obj.(*types.Func); isInstance && !ok {
+		return nil // Was not a Func.
+	}
+	return obj
+}
+
+// StaticCallee returns the target (function or method) of a static function
+// call, if any. It returns nil for calls to builtins.
+//
+// Note: for calls of instantiated functions and methods, StaticCallee returns
+// the corresponding generic function or method on the generic type.
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
+	if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
+		return f
+	}
+	return nil
+}
+
+func interfaceMethod(f *types.Func) bool {
+	recv := f.Type().(*types.Signature).Recv()
+	return recv != nil && types.IsInterface(recv.Type())
+}
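
A short, self-contained sketch of StaticCallee against a hypothetical source snippet; Callee works the same way but also reports methods, builtins, and variables:

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"
    	"go/types"
    	"log"

    	"golang.org/x/tools/go/types/typeutil"
    )

    const src = `package p

    func f() {}

    func g() { f() }
    `

    func main() {
    	fset := token.NewFileSet()
    	file, err := parser.ParseFile(fset, "p.go", src, 0)
    	if err != nil {
    		log.Fatal(err)
    	}
    	info := &types.Info{
    		Uses:       make(map[*ast.Ident]types.Object),
    		Selections: make(map[*ast.SelectorExpr]*types.Selection),
    	}
    	if _, err := new(types.Config).Check("p", fset, []*ast.File{file}, info); err != nil {
    		log.Fatal(err)
    	}

    	ast.Inspect(file, func(n ast.Node) bool {
    		if call, ok := n.(*ast.CallExpr); ok {
    			if fn := typeutil.StaticCallee(info, call); fn != nil {
    				fmt.Println("static call to", fn.Name()) // "f"
    			}
    		}
    		return true
    	})
    }
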
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
new file mode 100644
index 00000000..b81ce0c3
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
@@ -0,0 +1,30 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import "go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+	var result []*types.Package
+	seen := make(map[*types.Package]bool)
+	var visit func(pkgs []*types.Package)
+	visit = func(pkgs []*types.Package) {
+		for _, p := range pkgs {
+			if !seen[p] {
+				seen[p] = true
+				visit(p.Imports())
+				result = append(result, p)
+			}
+		}
+	}
+	visit(pkgs)
+	return result
+}
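
A minimal sketch of Dependencies with hand-built packages (the import paths are hypothetical):

    package main

    import (
    	"fmt"
    	"go/types"

    	"golang.org/x/tools/go/types/typeutil"
    )

    func main() {
    	a := types.NewPackage("example.com/a", "a")
    	b := types.NewPackage("example.com/b", "b")
    	c := types.NewPackage("example.com/c", "c")
    	a.SetImports([]*types.Package{b})
    	b.SetImports([]*types.Package{c})

    	for _, p := range typeutil.Dependencies(a) {
    		fmt.Println(p.Path()) // c, then b, then a: imports come before importers
    	}
    }
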
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
new file mode 100644
index 00000000..43261147
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -0,0 +1,470 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as [Map],
+// a hash table that maps [types.Type] to any value.
+package typeutil
+
+import (
+	"bytes"
+	"fmt"
+	"go/types"
+	"hash/maphash"
+	"unsafe"
+
+	"golang.org/x/tools/internal/typeparams"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary values.  The concrete types that implement
+// the Type interface are pointers.  Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Read-only map operations ([Map.At], [Map.Len], and so on) may
+// safely be called concurrently.
+//
+// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
+// and 69559, if the latter proposals for a generic hash-map type and
+// a types.Hash function are accepted.
+type Map struct {
+	table  map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+	length int                // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+	key   types.Type
+	value any
+}
+
+// SetHasher has no effect.
+//
+// It is a relic of an optimization that is no longer profitable. Do
+// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
+func (m *Map) SetHasher(Hasher) {}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+func (m *Map) Delete(key types.Type) bool {
+	if m != nil && m.table != nil {
+		hash := hash(key)
+		bucket := m.table[hash]
+		for i, e := range bucket {
+			if e.key != nil && types.Identical(key, e.key) {
+				// We can't compact the bucket as it
+				// would disturb iterators.
+				bucket[i] = entry{}
+				m.length--
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+func (m *Map) At(key types.Type) any {
+	if m != nil && m.table != nil {
+		for _, e := range m.table[hash(key)] {
+			if e.key != nil && types.Identical(key, e.key) {
+				return e.value
+			}
+		}
+	}
+	return nil
+}
+
+// Set sets the map entry for key to val,
+// and returns the previous entry, if any.
+func (m *Map) Set(key types.Type, value any) (prev any) {
+	if m.table != nil {
+		hash := hash(key)
+		bucket := m.table[hash]
+		var hole *entry
+		for i, e := range bucket {
+			if e.key == nil {
+				hole = &bucket[i]
+			} else if types.Identical(key, e.key) {
+				prev = e.value
+				bucket[i].value = value
+				return
+			}
+		}
+
+		if hole != nil {
+			*hole = entry{key, value} // overwrite deleted entry
+		} else {
+			m.table[hash] = append(bucket, entry{key, value})
+		}
+	} else {
+		hash := hash(key)
+		m.table = map[uint32][]entry{hash: {entry{key, value}}}
+	}
+
+	m.length++
+	return
+}
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+	if m != nil {
+		return m.length
+	}
+	return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+func (m *Map) Iterate(f func(key types.Type, value any)) {
+	if m != nil {
+		for _, bucket := range m.table {
+			for _, e := range bucket {
+				if e.key != nil {
+					f(e.key, e.value)
+				}
+			}
+		}
+	}
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+	keys := make([]types.Type, 0, m.Len())
+	m.Iterate(func(key types.Type, _ any) {
+		keys = append(keys, key)
+	})
+	return keys
+}
+
+func (m *Map) toString(values bool) string {
+	if m == nil {
+		return "{}"
+	}
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "{")
+	sep := ""
+	m.Iterate(func(key types.Type, value any) {
+		fmt.Fprint(&buf, sep)
+		sep = ", "
+		fmt.Fprint(&buf, key)
+		if values {
+			fmt.Fprintf(&buf, ": %q", value)
+		}
+	})
+	fmt.Fprint(&buf, "}")
+	return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+func (m *Map) String() string {
+	return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+func (m *Map) KeysString() string {
+	return m.toString(false)
+}
+
+// -- Hasher --
+
+// hash returns the hash of type t.
+// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
+func hash(t types.Type) uint32 {
+	return theHasher.Hash(t)
+}
+
+// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
+// Hashers are stateless, and all are equivalent.
+type Hasher struct{}
+
+var theHasher Hasher
+
+// MakeHasher returns Hasher{}.
+// Hashers are stateless; all are equivalent.
+func MakeHasher() Hasher { return theHasher }
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+	return hasher{inGenericSig: false}.hash(t)
+}
+
+// hasher holds the state of a single Hash traversal: whether we are
+// inside the signature of a generic function; this is used to
+// optimize [hasher.hashTypeParam].
+type hasher struct{ inGenericSig bool }
+
+// hashString computes the Fowler–Noll–Vo hash of s.
+func hashString(s string) uint32 {
+	var h uint32
+	for i := 0; i < len(s); i++ {
+		h ^= uint32(s[i])
+		h *= 16777619
+	}
+	return h
+}
+
+// hash computes the hash of t.
+func (h hasher) hash(t types.Type) uint32 {
+	// See Identical for rationale.
+	switch t := t.(type) {
+	case *types.Basic:
+		return uint32(t.Kind())
+
+	case *types.Alias:
+		return h.hash(types.Unalias(t))
+
+	case *types.Array:
+		return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())
+
+	case *types.Slice:
+		return 9049 + 2*h.hash(t.Elem())
+
+	case *types.Struct:
+		var hash uint32 = 9059
+		for i, n := 0, t.NumFields(); i < n; i++ {
+			f := t.Field(i)
+			if f.Anonymous() {
+				hash += 8861
+			}
+			hash += hashString(t.Tag(i))
+			hash += hashString(f.Name()) // (ignore f.Pkg)
+			hash += h.hash(f.Type())
+		}
+		return hash
+
+	case *types.Pointer:
+		return 9067 + 2*h.hash(t.Elem())
+
+	case *types.Signature:
+		var hash uint32 = 9091
+		if t.Variadic() {
+			hash *= 8863
+		}
+
+		tparams := t.TypeParams()
+		if n := tparams.Len(); n > 0 {
+			h.inGenericSig = true // affects constraints, params, and results
+
+			for i := range n {
+				tparam := tparams.At(i)
+				hash += 7 * h.hash(tparam.Constraint())
+			}
+		}
+
+		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+	case *types.Union:
+		return h.hashUnion(t)
+
+	case *types.Interface:
+		// Interfaces are identical if they have the same set of methods, with
+		// identical names and types, and they have the same set of type
+		// restrictions. See go/types.identical for more details.
+		var hash uint32 = 9103
+
+		// Hash methods.
+		for i, n := 0, t.NumMethods(); i < n; i++ {
+			// Method order is not significant.
+			// Ignore m.Pkg().
+			m := t.Method(i)
+			// Use shallow hash on method signature to
+			// avoid anonymous interface cycles.
+			hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
+		}
+
+		// Hash type restrictions.
+		terms, err := typeparams.InterfaceTermSet(t)
+		// if err != nil t has invalid type restrictions.
+		if err == nil {
+			hash += h.hashTermSet(terms)
+		}
+
+		return hash
+
+	case *types.Map:
+		return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())
+
+	case *types.Chan:
+		return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())
+
+	case *types.Named:
+		hash := h.hashTypeName(t.Obj())
+		targs := t.TypeArgs()
+		for i := 0; i < targs.Len(); i++ {
+			targ := targs.At(i)
+			hash += 2 * h.hash(targ)
+		}
+		return hash
+
+	case *types.TypeParam:
+		return h.hashTypeParam(t)
+
+	case *types.Tuple:
+		return h.hashTuple(t)
+	}
+
+	panic(fmt.Sprintf("%T: %v", t, t))
+}
+
+func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
+	// See go/types.identicalTypes for rationale.
+	n := tuple.Len()
+	hash := 9137 + 2*uint32(n)
+	for i := range n {
+		hash += 3 * h.hash(tuple.At(i).Type())
+	}
+	return hash
+}
+
+func (h hasher) hashUnion(t *types.Union) uint32 {
+	// Hash type restrictions.
+	terms, err := typeparams.UnionTermSet(t)
+	// if err != nil t has invalid type restrictions. Fall back on a non-zero
+	// hash.
+	if err != nil {
+		return 9151
+	}
+	return h.hashTermSet(terms)
+}
+
+func (h hasher) hashTermSet(terms []*types.Term) uint32 {
+	hash := 9157 + 2*uint32(len(terms))
+	for _, term := range terms {
+		// term order is not significant.
+		termHash := h.hash(term.Type())
+		if term.Tilde() {
+			termHash *= 9161
+		}
+		hash += 3 * termHash
+	}
+	return hash
+}
+
+// hashTypeParam returns the hash of a type parameter.
+func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
+	// Within the signature of a generic function, TypeParams are
+	// identical if they have the same index and constraint, so we
+	// hash them based on index.
+	//
+	// When we are outside a generic function, free TypeParams are
+	// identical iff they are the same object, so we can use a
+	// more discriminating hash consistent with object identity.
+	// This optimization saves [Map] about 4% when hashing all the
+	// types.Info.Types in the forward closure of net/http.
+	if !h.inGenericSig {
+		// Optimization: outside a generic function signature,
+		// use a more discriminating hash consistent with object identity.
+		return h.hashTypeName(t.Obj())
+	}
+	return 9173 + 3*uint32(t.Index())
+}
+
+var theSeed = maphash.MakeSeed()
+
+// hashTypeName hashes the pointer of tname.
+func (hasher) hashTypeName(tname *types.TypeName) uint32 {
+	// Since types.Identical uses == to compare TypeNames,
+	// the Hash function uses maphash.Comparable.
+	// TODO(adonovan): or will, when it becomes available in go1.24.
+	// In the meantime we use the pointer's numeric value.
+	//
+	//   hash := maphash.Comparable(theSeed, tname)
+	//
+	// (Another approach would be to hash the name and package
+	// path, and whether or not it is a package-level typename. It
+	// is rare for a package to define multiple local types with
+	// the same name.)
+	hash := uintptr(unsafe.Pointer(tname))
+	return uint32(hash ^ (hash >> 32))
+}
+
+// shallowHash computes a hash of t without looking at any of its
+// element Types, to avoid potential anonymous cycles in the types of
+// interface methods.
+//
+// When an unnamed non-empty interface type appears anywhere among the
+// arguments or results of an interface method, there is a potential
+// for endless recursion. Consider:
+//
+//	type X interface { m() []*interface { X } }
+//
+// The problem is that the Methods of the interface in m's result type
+// include m itself; there is no mention of the named type X that
+// might help us break the cycle.
+// (See comment in go/types.identical, case *Interface, for more.)
+func (h hasher) shallowHash(t types.Type) uint32 {
+	// t is the type of an interface method (Signature),
+	// its params or results (Tuples), or their immediate
+	// elements (mostly Slice, Pointer, Basic, Named),
+	// so there's no need to optimize anything else.
+	switch t := t.(type) {
+	case *types.Alias:
+		return h.shallowHash(types.Unalias(t))
+
+	case *types.Signature:
+		var hash uint32 = 604171
+		if t.Variadic() {
+			hash *= 971767
+		}
+		// The Signature/Tuple recursion is always finite
+		// and invariably shallow.
+		return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
+
+	case *types.Tuple:
+		n := t.Len()
+		hash := 9137 + 2*uint32(n)
+		for i := range n {
+			hash += 53471161 * h.shallowHash(t.At(i).Type())
+		}
+		return hash
+
+	case *types.Basic:
+		return 45212177 * uint32(t.Kind())
+
+	case *types.Array:
+		return 1524181 + 2*uint32(t.Len())
+
+	case *types.Slice:
+		return 2690201
+
+	case *types.Struct:
+		return 3326489
+
+	case *types.Pointer:
+		return 4393139
+
+	case *types.Union:
+		return 562448657
+
+	case *types.Interface:
+		return 2124679 // no recursion here
+
+	case *types.Map:
+		return 9109
+
+	case *types.Chan:
+		return 9127
+
+	case *types.Named:
+		return h.hashTypeName(t.Obj())
+
+	case *types.TypeParam:
+		return h.hashTypeParam(t)
+	}
+	panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
+}
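
Because keys are compared with types.Identical rather than ==, two structurally identical but distinct Type values map to the same entry. A short sketch:

    package main

    import (
    	"fmt"
    	"go/types"

    	"golang.org/x/tools/go/types/typeutil"
    )

    func main() {
    	var m typeutil.Map // the zero value is an empty map

    	t1 := types.NewSlice(types.Typ[types.Int]) // []int
    	t2 := types.NewSlice(types.Typ[types.Int]) // a distinct but identical []int

    	m.Set(t1, "element type is int")

    	fmt.Println(t1 == t2)          // false: different pointers
    	fmt.Println(m.At(t2), m.Len()) // "element type is int" 1: identical types share an entry
    }
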
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
new file mode 100644
index 00000000..f7666028
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package typeutil
+
+import (
+	"go/types"
+	"sync"
+)
+
+// A MethodSetCache records the method set of each type T for which
+// MethodSet(T) is called so that repeat queries are fast.
+// The zero value is a ready-to-use cache instance.
+type MethodSetCache struct {
+	mu     sync.Mutex
+	named  map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
+	others map[types.Type]*types.MethodSet                            // all other types
+}
+
+// MethodSet returns the method set of type T.  It is thread-safe.
+//
+// If cache is nil, this function is equivalent to types.NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
+	if cache == nil {
+		return types.NewMethodSet(T)
+	}
+	cache.mu.Lock()
+	defer cache.mu.Unlock()
+
+	switch T := types.Unalias(T).(type) {
+	case *types.Named:
+		return cache.lookupNamed(T).value
+
+	case *types.Pointer:
+		if N, ok := types.Unalias(T.Elem()).(*types.Named); ok {
+			return cache.lookupNamed(N).pointer
+		}
+	}
+
+	// all other types
+	// (The map uses pointer equivalence, not type identity.)
+	mset := cache.others[T]
+	if mset == nil {
+		mset = types.NewMethodSet(T)
+		if cache.others == nil {
+			cache.others = make(map[types.Type]*types.MethodSet)
+		}
+		cache.others[T] = mset
+	}
+	return mset
+}
+
+func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
+	if cache.named == nil {
+		cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
+	}
+	// Avoid recomputing mset(*T) for each distinct Pointer
+	// instance whose underlying type is a named type.
+	msets, ok := cache.named[named]
+	if !ok {
+		msets.value = types.NewMethodSet(named)
+		msets.pointer = types.NewMethodSet(types.NewPointer(named))
+		cache.named[named] = msets
+	}
+	return msets
+}
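
A small sketch of the cache: the zero value is ready to use, and repeated queries for the same type return the same *types.MethodSet:

    package main

    import (
    	"fmt"
    	"go/types"

    	"golang.org/x/tools/go/types/typeutil"
    )

    func main() {
    	// The universe "error" type is a convenient named interface to query.
    	T := types.Universe.Lookup("error").Type()

    	var cache typeutil.MethodSetCache
    	first := cache.MethodSet(T)  // computed via types.NewMethodSet
    	second := cache.MethodSet(T) // served from the cache
    	fmt.Println(first == second, first.Len()) // true 1 (the Error method)
    }
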
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
new file mode 100644
index 00000000..9dda6a25
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
@@ -0,0 +1,53 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import (
+	"go/types"
+)
+
+// IntuitiveMethodSet returns the intuitive method set of a type T,
+// which is the set of methods you can call on an addressable value of
+// that type.
+//
+// The result always contains MethodSet(T), and is exactly MethodSet(T)
+// for interface types and for pointer-to-concrete types.
+// For all other concrete types T, the result additionally
+// contains each method belonging to *T if there is no identically
+// named method on T itself.
+//
+// This corresponds to user intuition about method sets;
+// this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
+	isPointerToConcrete := func(T types.Type) bool {
+		ptr, ok := types.Unalias(T).(*types.Pointer)
+		return ok && !types.IsInterface(ptr.Elem())
+	}
+
+	var result []*types.Selection
+	mset := msets.MethodSet(T)
+	if types.IsInterface(T) || isPointerToConcrete(T) {
+		for i, n := 0, mset.Len(); i < n; i++ {
+			result = append(result, mset.At(i))
+		}
+	} else {
+		// T is some other concrete type.
+		// Report methods of T and *T, preferring those of T.
+		pmset := msets.MethodSet(types.NewPointer(T))
+		for i, n := 0, pmset.Len(); i < n; i++ {
+			meth := pmset.At(i)
+			if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+				meth = m
+			}
+			result = append(result, meth)
+		}
+
+	}
+	return result
+}
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go
index c24c2eee..b9425f5a 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go
@@ -22,11 +22,17 @@ import (
 // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
 // function is expensive and should be called once per task (e.g.
 // package import), not once per call to NewAlias.
-func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
+//
+// Precondition: enabled || len(tparams)==0.
+// If materialized aliases are disabled, there must not be any type parameters.
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
 	if enabled {
 		tname := types.NewTypeName(pos, pkg, name, nil)
-		newAlias(tname, rhs)
+		SetTypeParams(types.NewAlias(tname, rhs), tparams)
 		return tname
 	}
+	if len(tparams) > 0 {
+		panic("cannot create an alias with type parameters when gotypesalias is not enabled")
+	}
 	return types.NewTypeName(pos, pkg, name, rhs)
 }
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
deleted file mode 100644
index c027b9f3..00000000
--- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package aliases
-
-import (
-	"go/types"
-)
-
-// Alias is a placeholder for a go/types.Alias for <=1.21.
-// It will never be created by go/types.
-type Alias struct{}
-
-func (*Alias) String() string         { panic("unreachable") }
-func (*Alias) Underlying() types.Type { panic("unreachable") }
-func (*Alias) Obj() *types.TypeName   { panic("unreachable") }
-func Rhs(alias *Alias) types.Type     { panic("unreachable") }
-
-// Unalias returns the type t for go <=1.21.
-func Unalias(t types.Type) types.Type { return t }
-
-func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
-
-// Enabled reports whether [NewAlias] should create [types.Alias] types.
-//
-// Before go1.22, this function always returns false.
-func Enabled() bool { return false }
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
index b3299548..7716a333 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
@@ -2,9 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build go1.22
-// +build go1.22
-
 package aliases
 
 import (
@@ -14,31 +11,51 @@ import (
 	"go/types"
 )
 
-// Alias is an alias of types.Alias.
-type Alias = types.Alias
-
 // Rhs returns the type on the right-hand side of the alias declaration.
-func Rhs(alias *Alias) types.Type {
+func Rhs(alias *types.Alias) types.Type {
 	if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
 		return alias.Rhs() // go1.23+
 	}
 
 	// go1.22's Alias didn't have the Rhs method,
 	// so Unalias is the best we can do.
-	return Unalias(alias)
+	return types.Unalias(alias)
 }
 
-// Unalias is a wrapper of types.Unalias.
-func Unalias(t types.Type) types.Type { return types.Unalias(t) }
+// TypeParams returns the type parameter list of the alias.
+func TypeParams(alias *types.Alias) *types.TypeParamList {
+	if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
+		return alias.TypeParams() // go1.23+
+	}
+	return nil
+}
 
-// newAlias is an internal alias around types.NewAlias.
-// Direct usage is discouraged as the moment.
-// Try to use NewAlias instead.
-func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
-	a := types.NewAlias(tname, rhs)
-	// TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
-	Unalias(a)
-	return a
+// SetTypeParams sets the type parameters of the alias type.
+func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
+	if alias, ok := any(alias).(interface {
+		SetTypeParams(tparams []*types.TypeParam)
+	}); ok {
+		alias.SetTypeParams(tparams) // go1.23+
+	} else if len(tparams) > 0 {
+		panic("cannot set type parameters of an Alias type in go1.22")
+	}
+}
+
+// TypeArgs returns the type arguments used to instantiate the Alias type.
+func TypeArgs(alias *types.Alias) *types.TypeList {
+	if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
+		return alias.TypeArgs() // go1.23+
+	}
+	return nil // empty (go1.22)
+}
+
+// Origin returns the generic Alias type of which alias is an instance.
+// If alias is not an instance of a generic alias, Origin returns alias.
+func Origin(alias *types.Alias) *types.Alias {
+	if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
+		return alias.Origin() // go1.23+
+	}
+	return alias // not an instance of a generic alias (go1.22)
 }
 
 // Enabled reports whether [NewAlias] should create [types.Alias] types.
@@ -56,7 +73,7 @@ func Enabled() bool {
 	//     many tests. Therefore any attempt to cache the result
 	//     is just incorrect.
 	fset := token.NewFileSet()
-	f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
+	f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
 	pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
 	_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
 	return enabled
diff --git a/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go
new file mode 100644
index 00000000..4f6ccfd6
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go
@@ -0,0 +1,295 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edge defines identifiers for each field of an ast.Node
+// struct type that refers to another Node.
+package edge
+
+import (
+	"fmt"
+	"go/ast"
+	"reflect"
+)
+
+// A Kind describes a field of an ast.Node struct.
+type Kind uint8
+
+// String returns a description of the edge kind.
+func (k Kind) String() string {
+	if k == Invalid {
+		return ""
+	}
+	info := fieldInfos[k]
+	return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name)
+}
+
+// NodeType returns the pointer-to-struct type of the ast.Node implementation.
+func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType }
+
+// FieldName returns the name of the field.
+func (k Kind) FieldName() string { return fieldInfos[k].name }
+
+// FieldType returns the declared type of the field.
+func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType }
+
+// Get returns the direct child of n identified by (k, idx).
+// n's type must match k.NodeType().
+// idx must be a valid slice index, or -1 for a non-slice.
+func (k Kind) Get(n ast.Node, idx int) ast.Node {
+	if k.NodeType() != reflect.TypeOf(n) {
+		panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n))
+	}
+	v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index)
+	if idx != -1 {
+		v = v.Index(idx) // asserts valid index
+	} else {
+		// (The type assertion below asserts that v is not a slice.)
+	}
+	return v.Interface().(ast.Node) // may be nil
+}
+
+const (
+	Invalid Kind = iota // for nodes at the root of the traversal
+
+	// Kinds are sorted alphabetically.
+	// Numbering is not stable.
+	// Each is named Type_Field, where Type is the
+	// ast.Node struct type and Field is the name of the field
+
+	ArrayType_Elt
+	ArrayType_Len
+	AssignStmt_Lhs
+	AssignStmt_Rhs
+	BinaryExpr_X
+	BinaryExpr_Y
+	BlockStmt_List
+	BranchStmt_Label
+	CallExpr_Args
+	CallExpr_Fun
+	CaseClause_Body
+	CaseClause_List
+	ChanType_Value
+	CommClause_Body
+	CommClause_Comm
+	CommentGroup_List
+	CompositeLit_Elts
+	CompositeLit_Type
+	DeclStmt_Decl
+	DeferStmt_Call
+	Ellipsis_Elt
+	ExprStmt_X
+	FieldList_List
+	Field_Comment
+	Field_Doc
+	Field_Names
+	Field_Tag
+	Field_Type
+	File_Decls
+	File_Doc
+	File_Name
+	ForStmt_Body
+	ForStmt_Cond
+	ForStmt_Init
+	ForStmt_Post
+	FuncDecl_Body
+	FuncDecl_Doc
+	FuncDecl_Name
+	FuncDecl_Recv
+	FuncDecl_Type
+	FuncLit_Body
+	FuncLit_Type
+	FuncType_Params
+	FuncType_Results
+	FuncType_TypeParams
+	GenDecl_Doc
+	GenDecl_Specs
+	GoStmt_Call
+	IfStmt_Body
+	IfStmt_Cond
+	IfStmt_Else
+	IfStmt_Init
+	ImportSpec_Comment
+	ImportSpec_Doc
+	ImportSpec_Name
+	ImportSpec_Path
+	IncDecStmt_X
+	IndexExpr_Index
+	IndexExpr_X
+	IndexListExpr_Indices
+	IndexListExpr_X
+	InterfaceType_Methods
+	KeyValueExpr_Key
+	KeyValueExpr_Value
+	LabeledStmt_Label
+	LabeledStmt_Stmt
+	MapType_Key
+	MapType_Value
+	ParenExpr_X
+	RangeStmt_Body
+	RangeStmt_Key
+	RangeStmt_Value
+	RangeStmt_X
+	ReturnStmt_Results
+	SelectStmt_Body
+	SelectorExpr_Sel
+	SelectorExpr_X
+	SendStmt_Chan
+	SendStmt_Value
+	SliceExpr_High
+	SliceExpr_Low
+	SliceExpr_Max
+	SliceExpr_X
+	StarExpr_X
+	StructType_Fields
+	SwitchStmt_Body
+	SwitchStmt_Init
+	SwitchStmt_Tag
+	TypeAssertExpr_Type
+	TypeAssertExpr_X
+	TypeSpec_Comment
+	TypeSpec_Doc
+	TypeSpec_Name
+	TypeSpec_Type
+	TypeSpec_TypeParams
+	TypeSwitchStmt_Assign
+	TypeSwitchStmt_Body
+	TypeSwitchStmt_Init
+	UnaryExpr_X
+	ValueSpec_Comment
+	ValueSpec_Doc
+	ValueSpec_Names
+	ValueSpec_Type
+	ValueSpec_Values
+
+	maxKind
+)
+
+// Assert that the encoding fits in 7 bits,
+// as the inspector relies on this.
+// (We are currently at 104.)
+var _ = [1 << 7]struct{}{}[maxKind]
+
+type fieldInfo struct {
+	nodeType  reflect.Type // pointer-to-struct type of ast.Node implementation
+	name      string
+	index     int
+	fieldType reflect.Type
+}
+
+func info[N ast.Node](fieldName string) fieldInfo {
+	nodePtrType := reflect.TypeFor[N]()
+	f, ok := nodePtrType.Elem().FieldByName(fieldName)
+	if !ok {
+		panic(fieldName)
+	}
+	return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type}
+}
+
+var fieldInfos = [...]fieldInfo{
+	Invalid:               {},
+	ArrayType_Elt:         info[*ast.ArrayType]("Elt"),
+	ArrayType_Len:         info[*ast.ArrayType]("Len"),
+	AssignStmt_Lhs:        info[*ast.AssignStmt]("Lhs"),
+	AssignStmt_Rhs:        info[*ast.AssignStmt]("Rhs"),
+	BinaryExpr_X:          info[*ast.BinaryExpr]("X"),
+	BinaryExpr_Y:          info[*ast.BinaryExpr]("Y"),
+	BlockStmt_List:        info[*ast.BlockStmt]("List"),
+	BranchStmt_Label:      info[*ast.BranchStmt]("Label"),
+	CallExpr_Args:         info[*ast.CallExpr]("Args"),
+	CallExpr_Fun:          info[*ast.CallExpr]("Fun"),
+	CaseClause_Body:       info[*ast.CaseClause]("Body"),
+	CaseClause_List:       info[*ast.CaseClause]("List"),
+	ChanType_Value:        info[*ast.ChanType]("Value"),
+	CommClause_Body:       info[*ast.CommClause]("Body"),
+	CommClause_Comm:       info[*ast.CommClause]("Comm"),
+	CommentGroup_List:     info[*ast.CommentGroup]("List"),
+	CompositeLit_Elts:     info[*ast.CompositeLit]("Elts"),
+	CompositeLit_Type:     info[*ast.CompositeLit]("Type"),
+	DeclStmt_Decl:         info[*ast.DeclStmt]("Decl"),
+	DeferStmt_Call:        info[*ast.DeferStmt]("Call"),
+	Ellipsis_Elt:          info[*ast.Ellipsis]("Elt"),
+	ExprStmt_X:            info[*ast.ExprStmt]("X"),
+	FieldList_List:        info[*ast.FieldList]("List"),
+	Field_Comment:         info[*ast.Field]("Comment"),
+	Field_Doc:             info[*ast.Field]("Doc"),
+	Field_Names:           info[*ast.Field]("Names"),
+	Field_Tag:             info[*ast.Field]("Tag"),
+	Field_Type:            info[*ast.Field]("Type"),
+	File_Decls:            info[*ast.File]("Decls"),
+	File_Doc:              info[*ast.File]("Doc"),
+	File_Name:             info[*ast.File]("Name"),
+	ForStmt_Body:          info[*ast.ForStmt]("Body"),
+	ForStmt_Cond:          info[*ast.ForStmt]("Cond"),
+	ForStmt_Init:          info[*ast.ForStmt]("Init"),
+	ForStmt_Post:          info[*ast.ForStmt]("Post"),
+	FuncDecl_Body:         info[*ast.FuncDecl]("Body"),
+	FuncDecl_Doc:          info[*ast.FuncDecl]("Doc"),
+	FuncDecl_Name:         info[*ast.FuncDecl]("Name"),
+	FuncDecl_Recv:         info[*ast.FuncDecl]("Recv"),
+	FuncDecl_Type:         info[*ast.FuncDecl]("Type"),
+	FuncLit_Body:          info[*ast.FuncLit]("Body"),
+	FuncLit_Type:          info[*ast.FuncLit]("Type"),
+	FuncType_Params:       info[*ast.FuncType]("Params"),
+	FuncType_Results:      info[*ast.FuncType]("Results"),
+	FuncType_TypeParams:   info[*ast.FuncType]("TypeParams"),
+	GenDecl_Doc:           info[*ast.GenDecl]("Doc"),
+	GenDecl_Specs:         info[*ast.GenDecl]("Specs"),
+	GoStmt_Call:           info[*ast.GoStmt]("Call"),
+	IfStmt_Body:           info[*ast.IfStmt]("Body"),
+	IfStmt_Cond:           info[*ast.IfStmt]("Cond"),
+	IfStmt_Else:           info[*ast.IfStmt]("Else"),
+	IfStmt_Init:           info[*ast.IfStmt]("Init"),
+	ImportSpec_Comment:    info[*ast.ImportSpec]("Comment"),
+	ImportSpec_Doc:        info[*ast.ImportSpec]("Doc"),
+	ImportSpec_Name:       info[*ast.ImportSpec]("Name"),
+	ImportSpec_Path:       info[*ast.ImportSpec]("Path"),
+	IncDecStmt_X:          info[*ast.IncDecStmt]("X"),
+	IndexExpr_Index:       info[*ast.IndexExpr]("Index"),
+	IndexExpr_X:           info[*ast.IndexExpr]("X"),
+	IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"),
+	IndexListExpr_X:       info[*ast.IndexListExpr]("X"),
+	InterfaceType_Methods: info[*ast.InterfaceType]("Methods"),
+	KeyValueExpr_Key:      info[*ast.KeyValueExpr]("Key"),
+	KeyValueExpr_Value:    info[*ast.KeyValueExpr]("Value"),
+	LabeledStmt_Label:     info[*ast.LabeledStmt]("Label"),
+	LabeledStmt_Stmt:      info[*ast.LabeledStmt]("Stmt"),
+	MapType_Key:           info[*ast.MapType]("Key"),
+	MapType_Value:         info[*ast.MapType]("Value"),
+	ParenExpr_X:           info[*ast.ParenExpr]("X"),
+	RangeStmt_Body:        info[*ast.RangeStmt]("Body"),
+	RangeStmt_Key:         info[*ast.RangeStmt]("Key"),
+	RangeStmt_Value:       info[*ast.RangeStmt]("Value"),
+	RangeStmt_X:           info[*ast.RangeStmt]("X"),
+	ReturnStmt_Results:    info[*ast.ReturnStmt]("Results"),
+	SelectStmt_Body:       info[*ast.SelectStmt]("Body"),
+	SelectorExpr_Sel:      info[*ast.SelectorExpr]("Sel"),
+	SelectorExpr_X:        info[*ast.SelectorExpr]("X"),
+	SendStmt_Chan:         info[*ast.SendStmt]("Chan"),
+	SendStmt_Value:        info[*ast.SendStmt]("Value"),
+	SliceExpr_High:        info[*ast.SliceExpr]("High"),
+	SliceExpr_Low:         info[*ast.SliceExpr]("Low"),
+	SliceExpr_Max:         info[*ast.SliceExpr]("Max"),
+	SliceExpr_X:           info[*ast.SliceExpr]("X"),
+	StarExpr_X:            info[*ast.StarExpr]("X"),
+	StructType_Fields:     info[*ast.StructType]("Fields"),
+	SwitchStmt_Body:       info[*ast.SwitchStmt]("Body"),
+	SwitchStmt_Init:       info[*ast.SwitchStmt]("Init"),
+	SwitchStmt_Tag:        info[*ast.SwitchStmt]("Tag"),
+	TypeAssertExpr_Type:   info[*ast.TypeAssertExpr]("Type"),
+	TypeAssertExpr_X:      info[*ast.TypeAssertExpr]("X"),
+	TypeSpec_Comment:      info[*ast.TypeSpec]("Comment"),
+	TypeSpec_Doc:          info[*ast.TypeSpec]("Doc"),
+	TypeSpec_Name:         info[*ast.TypeSpec]("Name"),
+	TypeSpec_Type:         info[*ast.TypeSpec]("Type"),
+	TypeSpec_TypeParams:   info[*ast.TypeSpec]("TypeParams"),
+	TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"),
+	TypeSwitchStmt_Body:   info[*ast.TypeSwitchStmt]("Body"),
+	TypeSwitchStmt_Init:   info[*ast.TypeSwitchStmt]("Init"),
+	UnaryExpr_X:           info[*ast.UnaryExpr]("X"),
+	ValueSpec_Comment:     info[*ast.ValueSpec]("Comment"),
+	ValueSpec_Doc:         info[*ast.ValueSpec]("Doc"),
+	ValueSpec_Names:       info[*ast.ValueSpec]("Names"),
+	ValueSpec_Type:        info[*ast.ValueSpec]("Type"),
+	ValueSpec_Values:      info[*ast.ValueSpec]("Values"),
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
index d98b0db2..d79a605e 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir {
 		return 0
 	}
 }
-
-var predeclOnce sync.Once
-var predecl []types.Type // initialized lazily
-
-func predeclared() []types.Type {
-	predeclOnce.Do(func() {
-		// initialize lazily to be sure that all
-		// elements have been initialized before
-		predecl = []types.Type{ // basic types
-			types.Typ[types.Bool],
-			types.Typ[types.Int],
-			types.Typ[types.Int8],
-			types.Typ[types.Int16],
-			types.Typ[types.Int32],
-			types.Typ[types.Int64],
-			types.Typ[types.Uint],
-			types.Typ[types.Uint8],
-			types.Typ[types.Uint16],
-			types.Typ[types.Uint32],
-			types.Typ[types.Uint64],
-			types.Typ[types.Uintptr],
-			types.Typ[types.Float32],
-			types.Typ[types.Float64],
-			types.Typ[types.Complex64],
-			types.Typ[types.Complex128],
-			types.Typ[types.String],
-
-			// basic type aliases
-			types.Universe.Lookup("byte").Type(),
-			types.Universe.Lookup("rune").Type(),
-
-			// error
-			types.Universe.Lookup("error").Type(),
-
-			// untyped types
-			types.Typ[types.UntypedBool],
-			types.Typ[types.UntypedInt],
-			types.Typ[types.UntypedRune],
-			types.Typ[types.UntypedFloat],
-			types.Typ[types.UntypedComplex],
-			types.Typ[types.UntypedString],
-			types.Typ[types.UntypedNil],
-
-			// package unsafe
-			types.Typ[types.UnsafePointer],
-
-			// invalid type
-			types.Typ[types.Invalid], // only appears in packages with errors
-
-			// used internally by gc; never used by this package or in .a files
-			anyType{},
-		}
-		predecl = append(predecl, additionalPredeclared()...)
-	})
-	return predecl
-}
-
-type anyType struct{}
-
-func (t anyType) Underlying() types.Type { return t }
-func (t anyType) String() string         { return "any" }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
index f6437feb..5662a311 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -2,49 +2,183 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
-
-// This file implements FindExportData.
+// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
+// This file also additionally implements FindExportData for gcexportdata.NewReader.
 
 package gcimporter
 
 import (
 	"bufio"
+	"bytes"
+	"errors"
 	"fmt"
+	"go/build"
 	"io"
-	"strconv"
+	"os"
+	"os/exec"
+	"path/filepath"
 	"strings"
+	"sync"
 )
 
-func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
-	// See $GOROOT/include/ar.h.
-	hdr := make([]byte, 16+12+6+6+8+10+2)
-	_, err = io.ReadFull(r, hdr)
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying cmd/compile created archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function.
+// This returns the length of the export data in bytes.
+//
+// This function is needed by [gcexportdata.Read], which must
+// accept inputs produced by the last two releases of cmd/compile,
+// plus tip.
+func FindExportData(r *bufio.Reader) (size int64, err error) {
+	arsize, err := FindPackageDefinition(r)
 	if err != nil {
 		return
 	}
-	// leave for debugging
-	if false {
-		fmt.Printf("header: %s", hdr)
-	}
-	s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
-	length, err := strconv.Atoi(s)
-	size = int64(length)
-	if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
-		err = fmt.Errorf("invalid archive header")
+	size = int64(arsize)
+
+	objapi, headers, err := ReadObjectHeaders(r)
+	if err != nil {
 		return
 	}
-	name = strings.TrimSpace(string(hdr[:16]))
+	size -= int64(len(objapi))
+	for _, h := range headers {
+		size -= int64(len(h))
+	}
+
+	// Check for the binary export data section header "$$B\n".
+	// TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		return
+	}
+	hdr := string(line)
+	if hdr != "$$B\n" {
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+		return
+	}
+	size -= int64(len(hdr))
+
+	// For files with a binary export data header "$$B\n",
+	// these are always terminated by an end-of-section marker "\n$$\n".
+	// So the last bytes must always be this constant.
+	//
+	// The end-of-section marker is not a part of the export data itself.
+	// Do not include these in size.
+	//
+	// It would be nice to have a sanity check that the final bytes after
+	// the export data are indeed the end-of-section marker. The split
+	// of gcexportdata.NewReader and gcexportdata.Read makes checking this
+	// ugly so gcimporter gives up enforcing this. The compiler and go/types
+	// importer do enforce this, which seems good enough.
+	const endofsection = "\n$$\n"
+	size -= int64(len(endofsection))
+
+	if size < 0 {
+		err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
+		return
+	}
+
 	return
 }
 
-// FindExportData positions the reader r at the beginning of the
-// export data section of an underlying GC-created object/archive
-// file by reading from it. The reader must be positioned at the
-// start of the file before calling this function. The hdr result
-// is the string before the export data, either "$$" or "$$B".
-// The size result is the length of the export data in bytes, or -1 if not known.
-func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
+// ReadUnified reads the contents of the unified export data from a reader r
+// that contains the contents of a GC-created archive file.
+//
+// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
+//
+// Supported GC-created archive files have 4 layers of nesting:
+//   - An archive file containing a package definition file.
+//   - The package definition file contains headers followed by a data section.
+//     Headers are lines (≤ 4kb) that do not start with "$$".
+//   - The data section starts with "$$B\n" followed by export data followed
+//     by an end of section marker "\n$$\n". (The section start "$$\n" is no
+//     longer supported.)
+//   - The export data starts with a format byte ('u') followed by the <data> in
+//     the given format. (See ReadExportDataHeader for older formats.)
+//
+// Putting this together, the bytes in a GC-created archive files are expected
+// to look like the following.
+// See cmd/internal/archive for more details on ar file headers.
+//
+// | <!arch>\n             | ar file signature
+// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
+// | go object <...>\n     | objabi header
+// | <headers>\n           | other headers such as build id
+// | $$B\n                 | binary format marker
+// | u<data>\n             | unified export <data>
+// | $$\n                  | end-of-section marker
+// | [optional padding]    | padding byte (0x0A) if size is odd
+// | [ar file header]      | other ar files
+// | [ar file data]        |
+func ReadUnified(r *bufio.Reader) (data []byte, err error) {
+	// We historically guaranteed headers at the default buffer size (4096) work.
+	// This ensures we can use ReadSlice throughout.
+	const minBufferSize = 4096
+	r = bufio.NewReaderSize(r, minBufferSize)
+
+	size, err := FindPackageDefinition(r)
+	if err != nil {
+		return
+	}
+	n := size
+
+	objapi, headers, err := ReadObjectHeaders(r)
+	if err != nil {
+		return
+	}
+	n -= len(objapi)
+	for _, h := range headers {
+		n -= len(h)
+	}
+
+	hdrlen, err := ReadExportDataHeader(r)
+	if err != nil {
+		return
+	}
+	n -= hdrlen
+
+	// size also includes the end of section marker. Remove that many bytes from the end.
+	const marker = "\n$$\n"
+	n -= len(marker)
+
+	if n < 0 {
+		err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
+		return
+	}
+
+	// Read n bytes from buf.
+	data = make([]byte, n)
+	_, err = io.ReadFull(r, data)
+	if err != nil {
+		return
+	}
+
+	// Check for marker at the end.
+	var suffix [len(marker)]byte
+	_, err = io.ReadFull(r, suffix[:])
+	if err != nil {
+		return
+	}
+	if s := string(suffix[:]); s != marker {
+		err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
+		return
+	}
+
+	return
+}
+
+// FindPackageDefinition positions the reader r at the beginning of a package
+// definition file ("__.PKGDEF") within a GC-created archive by reading
+// from it, and returns the size of the package definition file in the archive.
+//
+// The reader must be positioned at the start of the archive file before calling
+// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
+//
+// See cmd/internal/archive for details on the archive format.
+func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
+	// Uses ReadSlice to limit risk of malformed inputs.
+
 	// Read first line to make sure this is an object file.
 	line, err := r.ReadSlice('\n')
 	if err != nil {
@@ -52,48 +186,236 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
 		return
 	}
 
-	if string(line) == "!\n" {
-		// Archive file. Scan to __.PKGDEF.
-		var name string
-		if name, size, err = readGopackHeader(r); err != nil {
-			return
-		}
-
-		// First entry should be __.PKGDEF.
-		if name != "__.PKGDEF" {
-			err = fmt.Errorf("go archive is missing __.PKGDEF")
-			return
-		}
-
-		// Read first line of __.PKGDEF data, so that line
-		// is once again the first line of the input.
-		if line, err = r.ReadSlice('\n'); err != nil {
-			err = fmt.Errorf("can't find export data (%v)", err)
-			return
-		}
-		size -= int64(len(line))
-	}
-
-	// Now at __.PKGDEF in archive or still at beginning of file.
-	// Either way, line should begin with "go object ".
-	if !strings.HasPrefix(string(line), "go object ") {
-		err = fmt.Errorf("not a Go object file")
+	// Is the first line an archive file signature?
+	if string(line) != "!\n" {
+		err = fmt.Errorf("not the start of an archive file (%q)", line)
 		return
 	}
 
-	// Skip over object header to export data.
-	// Begins after first line starting with $$.
-	for line[0] != '$' {
-		if line, err = r.ReadSlice('\n'); err != nil {
-			err = fmt.Errorf("can't find export data (%v)", err)
-			return
-		}
-		size -= int64(len(line))
-	}
-	hdr = string(line)
-	if size < 0 {
-		size = -1
+	// package export block should be first
+	size = readArchiveHeader(r, "__.PKGDEF")
+	if size <= 0 {
+		err = fmt.Errorf("not a package file")
+		return
 	}
 
 	return
 }
+
+// ReadObjectHeaders reads object headers from the reader. Object headers are
+// lines that do not start with an end-of-section marker "$$". The first header
+// is the objabi header. On success, the reader will be positioned at the beginning
+// of the end-of-section marker.
+//
+// It returns an error if any header does not fit in r.Size() bytes.
+func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
+	// line is a temporary buffer for headers.
+	// Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
+	var line []byte
+
+	// objapi header should be the first line
+	if line, err = r.ReadSlice('\n'); err != nil {
+		err = fmt.Errorf("can't find export data (%v)", err)
+		return
+	}
+	objapi = string(line)
+
+	// objapi header begins with "go object ".
+	if !strings.HasPrefix(objapi, "go object ") {
+		err = fmt.Errorf("not a go object file: %s", objapi)
+		return
+	}
+
+	// process remaining object header lines
+	for {
+		// check for an end of section marker "$$"
+		line, err = r.Peek(2)
+		if err != nil {
+			return
+		}
+		if string(line) == "$$" {
+			return // stop
+		}
+
+		// read next header
+		line, err = r.ReadSlice('\n')
+		if err != nil {
+			return
+		}
+		headers = append(headers, string(line))
+	}
+}
+
+// ReadExportDataHeader reads the export data header and format from r.
+// It returns the number of bytes read, or an error if the format is no longer
+// supported or it failed to read.
+//
+// The only currently supported format is binary export data in the
+// unified export format.
+func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
+	// Read export data header.
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		return
+	}
+
+	hdr := string(line)
+	switch hdr {
+	case "$$\n":
+		err = fmt.Errorf("old textual export format no longer supported (recompile package)")
+		return
+
+	case "$$B\n":
+		var format byte
+		format, err = r.ReadByte()
+		if err != nil {
+			return
+		}
+		// The unified export format starts with a 'u'.
+		switch format {
+		case 'u':
+		default:
+			// Older no longer supported export formats include:
+			// indexed export format which started with an 'i'; and
+			// the older binary export format which started with a 'c',
+			// 'd', or 'v' (from "version").
+			err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
+			return
+		}
+
+	default:
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+		return
+	}
+
+	n = len(hdr) + 1 // + 1 is for 'u'
+	return
+}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+//
+// FindPkg is only used in tests within x/tools.
+func FindPkg(path, srcDir string) (filename, id string, err error) {
+	// TODO(taking): Move internal/exportdata.FindPkg into its own file,
+	// and then this copy into a _test package.
+	if path == "" {
+		return "", "", errors.New("path is empty")
+	}
+
+	var noext string
+	switch {
+	default:
+		// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+		// Don't require the source files to be present.
+		if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+			srcDir = abs
+		}
+		var bp *build.Package
+		bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+		if bp.PkgObj == "" {
+			if bp.Goroot && bp.Dir != "" {
+				filename, err = lookupGorootExport(bp.Dir)
+				if err == nil {
+					_, err = os.Stat(filename)
+				}
+				if err == nil {
+					return filename, bp.ImportPath, nil
+				}
+			}
+			goto notfound
+		} else {
+			noext = strings.TrimSuffix(bp.PkgObj, ".a")
+		}
+		id = bp.ImportPath
+
+	case build.IsLocalImport(path):
+		// "./x" -> "/this/directory/x.ext", "/this/directory/x"
+		noext = filepath.Join(srcDir, path)
+		id = noext
+
+	case filepath.IsAbs(path):
+		// for completeness only - go/build.Import
+		// does not support absolute imports
+		// "/x" -> "/x.ext", "/x"
+		noext = path
+		id = path
+	}
+
+	if false { // for debugging
+		if path != id {
+			fmt.Printf("%s -> %s\n", path, id)
+		}
+	}
+
+	// try extensions
+	for _, ext := range pkgExts {
+		filename = noext + ext
+		f, statErr := os.Stat(filename)
+		if statErr == nil && !f.IsDir() {
+			return filename, id, nil
+		}
+		if err == nil {
+			err = statErr
+		}
+	}
+
+notfound:
+	if err == nil {
+		return "", path, fmt.Errorf("can't find import: %q", path)
+	}
+	return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
+}
+
+var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
+
+var exportMap sync.Map // package dir → func() (string, error)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+//
+// lookupGorootExport is only used in tests within x/tools.
+func lookupGorootExport(pkgDir string) (string, error) {
+	f, ok := exportMap.Load(pkgDir)
+	if !ok {
+		var (
+			listOnce   sync.Once
+			exportPath string
+			err        error
+		)
+		f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
+			listOnce.Do(func() {
+				cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
+				cmd.Dir = build.Default.GOROOT
+				cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
+				var output []byte
+				output, err = cmd.Output()
+				if err != nil {
+					if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+						err = errors.New(string(ee.Stderr))
+					}
+					return
+				}
+
+				exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+				if len(exports) != 1 {
+					err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
+					return
+				}
+
+				exportPath = exports[0]
+			})
+
+			return exportPath, err
+		})
+	}
+
+	return f.(func() (string, error))()
+}
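
The lookupGorootExport helper above shells out to "go list -export -f {{.Export}}" to locate a package's export data in the build cache. As a point of reference only, a minimal standalone sketch of the same lookup (the package path "fmt" is an arbitrary example, not something this patch uses):

package main

import (
	"bytes"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Ask the go command where the export data for a package lives,
	// mirroring the lookup performed by lookupGorootExport above.
	out, err := exec.Command("go", "list", "-export", "-f", "{{.Export}}", "fmt").Output()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(bytes.TrimSpace(out))) // path of the cached export (.a) file
}
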
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index 39df9112..3dbd21d1 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter"
 
 import (
 	"bufio"
-	"bytes"
 	"fmt"
-	"go/build"
 	"go/token"
 	"go/types"
 	"io"
 	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"sync"
 )
 
 const (
@@ -45,125 +39,14 @@ const (
 	trace = false
 )
 
-var exportMap sync.Map // package dir → func() (string, bool)
-
-// lookupGorootExport returns the location of the export data
-// (normally found in the build cache, but located in GOROOT/pkg
-// in prior Go releases) for the package located in pkgDir.
-//
-// (We use the package's directory instead of its import path
-// mainly to simplify handling of the packages in src/vendor
-// and cmd/vendor.)
-func lookupGorootExport(pkgDir string) (string, bool) {
-	f, ok := exportMap.Load(pkgDir)
-	if !ok {
-		var (
-			listOnce   sync.Once
-			exportPath string
-		)
-		f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
-			listOnce.Do(func() {
-				cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
-				cmd.Dir = build.Default.GOROOT
-				var output []byte
-				output, err := cmd.Output()
-				if err != nil {
-					return
-				}
-
-				exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
-				if len(exports) != 1 {
-					return
-				}
-
-				exportPath = exports[0]
-			})
-
-			return exportPath, exportPath != ""
-		})
-	}
-
-	return f.(func() (string, bool))()
-}
-
-var pkgExts = [...]string{".a", ".o"}
-
-// FindPkg returns the filename and unique package id for an import
-// path based on package information provided by build.Import (using
-// the build.Default build.Context). A relative srcDir is interpreted
-// relative to the current working directory.
-// If no file was found, an empty filename is returned.
-func FindPkg(path, srcDir string) (filename, id string) {
-	if path == "" {
-		return
-	}
-
-	var noext string
-	switch {
-	default:
-		// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
-		// Don't require the source files to be present.
-		if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
-			srcDir = abs
-		}
-		bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
-		if bp.PkgObj == "" {
-			var ok bool
-			if bp.Goroot && bp.Dir != "" {
-				filename, ok = lookupGorootExport(bp.Dir)
-			}
-			if !ok {
-				id = path // make sure we have an id to print in error message
-				return
-			}
-		} else {
-			noext = strings.TrimSuffix(bp.PkgObj, ".a")
-			id = bp.ImportPath
-		}
-
-	case build.IsLocalImport(path):
-		// "./x" -> "/this/directory/x.ext", "/this/directory/x"
-		noext = filepath.Join(srcDir, path)
-		id = noext
-
-	case filepath.IsAbs(path):
-		// for completeness only - go/build.Import
-		// does not support absolute imports
-		// "/x" -> "/x.ext", "/x"
-		noext = path
-		id = path
-	}
-
-	if false { // for debugging
-		if path != id {
-			fmt.Printf("%s -> %s\n", path, id)
-		}
-	}
-
-	if filename != "" {
-		if f, err := os.Stat(filename); err == nil && !f.IsDir() {
-			return
-		}
-	}
-
-	// try extensions
-	for _, ext := range pkgExts {
-		filename = noext + ext
-		if f, err := os.Stat(filename); err == nil && !f.IsDir() {
-			return
-		}
-	}
-
-	filename = "" // not found
-	return
-}
-
 // Import imports a gc-generated package given its import path and srcDir, adds
 // the corresponding package object to the packages map, and returns the object.
 // The packages map must contain all packages already imported.
-func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+//
+// Import is only used in tests.
+func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
 	var rc io.ReadCloser
-	var filename, id string
+	var id string
 	if lookup != nil {
 		// With custom lookup specified, assume that caller has
 		// converted path to a canonical import path for use in the map.
@@ -182,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
 		}
 		rc = f
 	} else {
-		filename, id = FindPkg(path, srcDir)
+		var filename string
+		filename, id, err = FindPkg(path, srcDir)
 		if filename == "" {
 			if path == "unsafe" {
 				return types.Unsafe, nil
 			}
-			return nil, fmt.Errorf("can't find import: %q", id)
+			return nil, err
 		}
 
 		// no need to re-import if the package was imported completely before
@@ -210,57 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
 	}
 	defer rc.Close()
 
-	var hdr string
-	var size int64
 	buf := bufio.NewReader(rc)
-	if hdr, size, err = FindExportData(buf); err != nil {
+	data, err := ReadUnified(buf)
+	if err != nil {
+		err = fmt.Errorf("import %q: %v", path, err)
 		return
 	}
 
-	switch hdr {
-	case "$$B\n":
-		var data []byte
-		data, err = io.ReadAll(buf)
-		if err != nil {
-			break
-		}
-
-		// TODO(gri): allow clients of go/importer to provide a FileSet.
-		// Or, define a new standard go/types/gcexportdata package.
-		fset := token.NewFileSet()
-
-		// Select appropriate importer.
-		if len(data) > 0 {
-			switch data[0] {
-			case 'v', 'c', 'd': // binary, till go1.10
-				return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
-
-			case 'i': // indexed, till go1.19
-				_, pkg, err := IImportData(fset, packages, data[1:], id)
-				return pkg, err
-
-			case 'u': // unified, from go1.20
-				_, pkg, err := UImportData(fset, packages, data[1:size], id)
-				return pkg, err
-
-			default:
-				l := len(data)
-				if l > 10 {
-					l = 10
-				}
-				return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
-			}
-		}
-
-	default:
-		err = fmt.Errorf("unknown export data header: %q", hdr)
-	}
+	// unified: emitted by cmd/compile since go1.20.
+	_, pkg, err = UImportData(fset, packages, data, id)
 
 	return
 }
-
-type byPath []*types.Package
-
-func (a byPath) Len() int           { return len(a) }
-func (a byPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
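
Since Import now takes the caller's *token.FileSet (and, per the new doc comment, is only used in tests), a call site inside an x/tools test would look roughly like the sketch below; the import path "fmt" and srcDir "." are placeholders, and go/token, go/types and testing are assumed to be imported:

	fset := token.NewFileSet()
	pkgs := make(map[string]*types.Package)
	pkg, err := Import(fset, pkgs, "fmt", ".", nil) // nil lookup falls back to FindPkg
	if err != nil {
		t.Fatal(err) // assumes a *testing.T in scope
	}
	_ = pkg.Scope().Lookup("Println") // the imported package scope is populated
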
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index deeb67f3..7dfc31a3 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -2,9 +2,227 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Indexed binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
-// see that file for specification of the format.
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+//     Header struct {
+//         Tag        byte   // 'i'
+//         Version    uvarint
+//         StringSize uvarint
+//         DataSize   uvarint
+//     }
+//
+//     Strings [StringSize]byte
+//     Data    [DataSize]byte
+//
+//     MainIndex []struct{
+//         PkgPath   stringOff
+//         PkgName   stringOff
+//         PkgHeight uvarint
+//
+//         Decls []struct{
+//             Name   stringOff
+//             Offset declOff
+//         }
+//     }
+//
+//     Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+//     Len   uvarint
+//     Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+//     type Var struct {
+//         Tag  byte // 'V'
+//         Pos  Pos
+//         Type typeOff
+//     }
+//
+//     type Func struct {
+//         Tag       byte // 'F' or 'G'
+//         Pos       Pos
+//         TypeParams []typeOff  // only present if Tag == 'G'
+//         Signature Signature
+//     }
+//
+//     type Const struct {
+//         Tag   byte // 'C'
+//         Pos   Pos
+//         Value Value
+//     }
+//
+//     type Type struct {
+//         Tag        byte // 'T' or 'U'
+//         Pos        Pos
+//         TypeParams []typeOff  // only present if Tag == 'U'
+//         Underlying typeOff
+//
+//         Methods []struct{  // omitted if Underlying is an interface type
+//             Pos       Pos
+//             Name      stringOff
+//             Recv      Param
+//             Signature Signature
+//         }
+//     }
+//
+//     type Alias struct {
+//         Tag  byte // 'A' or 'B'
+//         Pos  Pos
+//         TypeParams []typeOff  // only present if Tag == 'B'
+//         Type typeOff
+//     }
+//
+//     // "Automatic" declaration of each typeparam
+//     type TypeParam struct {
+//         Tag        byte // 'P'
+//         Pos        Pos
+//         Implicit   bool
+//         Constraint typeOff
+//     }
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+//     type DefinedType struct {
+//         Tag     itag // definedType
+//         Name    stringOff
+//         PkgPath stringOff
+//     }
+//
+//     type PointerType struct {
+//         Tag  itag // pointerType
+//         Elem typeOff
+//     }
+//
+//     type SliceType struct {
+//         Tag  itag // sliceType
+//         Elem typeOff
+//     }
+//
+//     type ArrayType struct {
+//         Tag  itag // arrayType
+//         Len  uint64
+//         Elem typeOff
+//     }
+//
+//     type ChanType struct {
+//         Tag  itag   // chanType
+//         Dir  uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+//         Elem typeOff
+//     }
+//
+//     type MapType struct {
+//         Tag  itag // mapType
+//         Key  typeOff
+//         Elem typeOff
+//     }
+//
+//     type FuncType struct {
+//         Tag       itag // signatureType
+//         PkgPath   stringOff
+//         Signature Signature
+//     }
+//
+//     type StructType struct {
+//         Tag     itag // structType
+//         PkgPath stringOff
+//         Fields []struct {
+//             Pos      Pos
+//             Name     stringOff
+//             Type     typeOff
+//             Embedded bool
+//             Note     stringOff
+//         }
+//     }
+//
+//     type InterfaceType struct {
+//         Tag     itag // interfaceType
+//         PkgPath stringOff
+//         Embeddeds []struct {
+//             Pos  Pos
+//             Type typeOff
+//         }
+//         Methods []struct {
+//             Pos       Pos
+//             Name      stringOff
+//             Signature Signature
+//         }
+//     }
+//
+//     // Reference to a type param declaration
+//     type TypeParamType struct {
+//         Tag     itag // typeParamType
+//         Name    stringOff
+//         PkgPath stringOff
+//     }
+//
+//     // Instantiation of a generic type (like List[T2] or List[int])
+//     type InstanceType struct {
+//         Tag     itag // instanceType
+//         Pos     pos
+//         TypeArgs []typeOff
+//         BaseType typeOff
+//     }
+//
+//     type UnionType struct {
+//         Tag     itag // interfaceType
+//         Terms   []struct {
+//             tilde bool
+//             Type  typeOff
+//         }
+//     }
+//
+//
+//
+//     type Signature struct {
+//         Params   []Param
+//         Results  []Param
+//         Variadic bool  // omitted if Results is empty
+//     }
+//
+//     type Param struct {
+//         Pos  Pos
+//         Name stringOff
+//         Type typOff
+//     }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
 
 package gcimporter
 
@@ -24,11 +242,30 @@ import (
 
 	"golang.org/x/tools/go/types/objectpath"
 	"golang.org/x/tools/internal/aliases"
-	"golang.org/x/tools/internal/tokeninternal"
 )
 
 // IExportShallow encodes "shallow" export data for the specified package.
 //
+// For types, we use "shallow" export data. Historically, the Go
+// compiler always produced a summary of the types for a given package
+// that included types from other packages that it indirectly
+// referenced: "deep" export data. This had the advantage that the
+// compiler (and analogous tools such as gopls) need only load one
+// file per direct import.  However, it meant that the files tended to
+// get larger based on the level of the package in the import
+// graph. For example, higher-level packages in the kubernetes module
+// have over 1MB of "deep" export data, even when they have almost no
+// content of their own, merely because they mention a major type that
+// references many others. In pathological cases the export data was
+// 300x larger than the source for a package due to this quadratic
+// growth.
+//
+// "Shallow" export data means that the serialized types describe only
+// a single package. If those types mention types from other packages,
+// the type checker may need to request additional packages beyond
+// just the direct imports. Type information for the entire transitive
+// closure of imports is provided (lazily) by the DAG.
+//
 // No promises are made about the encoding other than that it can be decoded by
 // the same version of IExportShallow. If you plan to save export data in the
 // file system, be sure to include a cryptographic digest of the executable in
@@ -51,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc)
 }
 
 // IImportShallow decodes "shallow" types.Package data encoded by
-// IExportShallow in the same executable. This function cannot import data from
-// cmd/compile or gcexportdata.Write.
+// [IExportShallow] in the same executable. This function cannot import data
+// from cmd/compile or gcexportdata.Write.
 //
 // The importer calls getPackages to obtain package symbols for all
 // packages mentioned in the export data, including the one being
@@ -223,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64)
 	// Sort the set of needed offsets. Duplicates are harmless.
 	sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
 
-	lines := tokeninternal.GetLines(file) // byte offset of each line start
+	lines := file.Lines() // byte offset of each line start
 	w.uint64(uint64(len(lines)))
 
 	// Rather than record the entire array of line start offsets,
@@ -507,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) {
 	case *types.TypeName:
 		t := obj.Type()
 
-		if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok {
+		if tparam, ok := types.Unalias(t).(*types.TypeParam); ok {
 			w.tag(typeParamTag)
 			w.pos(obj.Pos())
 			constraint := tparam.Constraint()
 			if p.version >= iexportVersionGo1_18 {
 				implicit := false
-				if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil {
+				if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil {
 					implicit = iface.IsImplicit()
 				}
 				w.bool(implicit)
@@ -523,9 +760,22 @@ func (p *iexporter) doDecl(obj types.Object) {
 		}
 
 		if obj.IsAlias() {
-			w.tag(aliasTag)
+			alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled
+
+			var tparams *types.TypeParamList
+			if materialized {
+				tparams = aliases.TypeParams(alias)
+			}
+			if tparams.Len() == 0 {
+				w.tag(aliasTag)
+			} else {
+				w.tag(genericAliasTag)
+			}
 			w.pos(obj.Pos())
-			if alias, ok := t.(*aliases.Alias); ok {
+			if tparams.Len() > 0 {
+				w.tparamList(obj.Name(), tparams, obj.Pkg())
+			}
+			if materialized {
 				// Preserve materialized aliases,
 				// even of non-exported types.
 				t = aliases.Rhs(alias)
@@ -744,8 +994,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
 		}()
 	}
 	switch t := t.(type) {
-	case *aliases.Alias:
-		// TODO(adonovan): support parameterized aliases, following *types.Named.
+	case *types.Alias:
+		if targs := aliases.TypeArgs(t); targs.Len() > 0 {
+			w.startType(instanceType)
+			w.pos(t.Obj().Pos())
+			w.typeList(targs, pkg)
+			w.typ(aliases.Origin(t), pkg)
+			return
+		}
 		w.startType(aliasType)
 		w.qualifiedType(t.Obj())
 
@@ -854,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
 		for i := 0; i < n; i++ {
 			ft := t.EmbeddedType(i)
 			tPkg := pkg
-			if named, _ := aliases.Unalias(ft).(*types.Named); named != nil {
+			if named, _ := types.Unalias(ft).(*types.Named); named != nil {
 				w.pos(named.Obj().Pos())
 			} else {
 				w.pos(token.NoPos)
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 136aa036..12943927 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -3,9 +3,7 @@
 // license that can be found in the LICENSE file.
 
 // Indexed package import.
-// See cmd/compile/internal/gc/iexport.go for the export data format.
-
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
+// See iexport.go for the export data format.
 
 package gcimporter
 
@@ -53,6 +51,7 @@ const (
 	iexportVersionPosCol   = 1
 	iexportVersionGo1_18   = 2
 	iexportVersionGenerics = 2
+	iexportVersion         = iexportVersionGenerics
 
 	iexportVersionCurrent = 2
 )
@@ -540,7 +539,7 @@ func canReuse(def *types.Named, rhs types.Type) bool {
 	if def == nil {
 		return true
 	}
-	iface, _ := aliases.Unalias(rhs).(*types.Interface)
+	iface, _ := types.Unalias(rhs).(*types.Interface)
 	if iface == nil {
 		return true
 	}
@@ -557,19 +556,28 @@ type importReader struct {
 	prevColumn int64
 }
 
+// markBlack is redefined in iimport_go123.go, to work around golang/go#69912.
+//
+// If TypeNames are not marked black (in the sense of go/types cycle
+// detection), they may be mutated when dot-imported. Fix this by punching a
+// hole through the type, when compiling with Go 1.23. (The bug has been fixed
+// for 1.24, but the fix was not worth back-porting).
+var markBlack = func(name *types.TypeName) {}
+
 func (r *importReader) obj(name string) {
 	tag := r.byte()
 	pos := r.pos()
 
 	switch tag {
-	case aliasTag:
+	case aliasTag, genericAliasTag:
+		var tparams []*types.TypeParam
+		if tag == genericAliasTag {
+			tparams = r.tparamList()
+		}
 		typ := r.typ()
-		// TODO(adonovan): support generic aliases:
-		// if tag == genericAliasTag {
-		// 	tparams := r.tparamList()
-		// 	alias.SetTypeParams(tparams)
-		// }
-		r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ))
+		obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
+		markBlack(obj) // workaround for golang/go#69912
+		r.declare(obj)
 
 	case constTag:
 		typ, val := r.value()
@@ -589,6 +597,9 @@ func (r *importReader) obj(name string) {
 		// declaration before recursing.
 		obj := types.NewTypeName(pos, r.currPkg, name, nil)
 		named := types.NewNamed(obj, nil, nil)
+
+		markBlack(obj) // workaround for golang/go#69912
+
 		// Declare obj before calling r.tparamList, so the new type name is recognized
 		// if used in the constraint of one of its own typeparams (see #48280).
 		r.declare(obj)
@@ -615,7 +626,7 @@ func (r *importReader) obj(name string) {
 				if targs.Len() > 0 {
 					rparams = make([]*types.TypeParam, targs.Len())
 					for i := range rparams {
-						rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam)
+						rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam)
 					}
 				}
 				msig := r.signature(recv, rparams, nil)
@@ -645,7 +656,7 @@ func (r *importReader) obj(name string) {
 		}
 		constraint := r.typ()
 		if implicit {
-			iface, _ := aliases.Unalias(constraint).(*types.Interface)
+			iface, _ := types.Unalias(constraint).(*types.Interface)
 			if iface == nil {
 				errorf("non-interface constraint marked implicit")
 			}
@@ -660,7 +671,9 @@ func (r *importReader) obj(name string) {
 	case varTag:
 		typ := r.typ()
 
-		r.declare(types.NewVar(pos, r.currPkg, name, typ))
+		v := types.NewVar(pos, r.currPkg, name, typ)
+		typesinternal.SetVarKind(v, typesinternal.PackageVar)
+		r.declare(v)
 
 	default:
 		errorf("unexpected tag: %v", tag)
@@ -852,7 +865,7 @@ func (r *importReader) typ() types.Type {
 }
 
 func isInterface(t types.Type) bool {
-	_, ok := aliases.Unalias(t).(*types.Interface)
+	_, ok := types.Unalias(t).(*types.Interface)
 	return ok
 }
 
@@ -862,7 +875,7 @@ func (r *importReader) string() string      { return r.p.stringAt(r.uint64()) }
 func (r *importReader) doType(base *types.Named) (res types.Type) {
 	k := r.kind()
 	if debug {
-		r.p.trace("importing type %d (base: %s)", k, base)
+		r.p.trace("importing type %d (base: %v)", k, base)
 		r.p.indent++
 		defer func() {
 			r.p.indent--
@@ -959,7 +972,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {
 			methods[i] = method
 		}
 
-		typ := newInterface(methods, embeddeds)
+		typ := types.NewInterfaceType(methods, embeddeds)
 		r.p.interfaceList = append(r.p.interfaceList, typ)
 		return typ
 
@@ -1051,7 +1064,7 @@ func (r *importReader) tparamList() []*types.TypeParam {
 	for i := range xs {
 		// Note: the standard library importer is tolerant of nil types here,
 		// though would panic in SetTypeParams.
-		xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam)
+		xs[i] = types.Unalias(r.typ()).(*types.TypeParam)
 	}
 	return xs
 }
@@ -1098,3 +1111,9 @@ func (r *importReader) byte() byte {
 	}
 	return x
 }
+
+type byPath []*types.Package
+
+func (a byPath) Len() int           { return len(a) }
+func (a byPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
new file mode 100644
index 00000000..7586bfac
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
@@ -0,0 +1,53 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22 && !go1.24
+
+package gcimporter
+
+import (
+	"go/token"
+	"go/types"
+	"unsafe"
+)
+
+// TODO(rfindley): delete this workaround once go1.24 is assured.
+
+func init() {
+	// Update markBlack so that it correctly sets the color
+	// of imported TypeNames.
+	//
+	// See the doc comment for markBlack for details.
+
+	type color uint32
+	const (
+		white color = iota
+		black
+		grey
+	)
+	type object struct {
+		_      *types.Scope
+		_      token.Pos
+		_      *types.Package
+		_      string
+		_      types.Type
+		_      uint32
+		color_ color
+		_      token.Pos
+	}
+	type typeName struct {
+		object
+	}
+
+	// If the size of types.TypeName changes, this will fail to compile.
+	const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{}))
+	var _ [-delta * delta]int
+
+	markBlack = func(obj *types.TypeName) {
+		type uP = unsafe.Pointer
+		var ptr *typeName
+		*(*uP)(uP(&ptr)) = uP(obj)
+		ptr.color_ = black
+	}
+}
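
The "var _ [-delta * delta]int" line above is a compile-time size guard: the array length is non-negative only when the hand-written mirror struct has exactly the size of types.TypeName, so any layout drift breaks the build rather than corrupting memory through the unsafe write. The same pattern in isolation, with hypothetical stand-in types:

package sizeguard

import "unsafe"

type actual struct{ a, b int64 } // the real type being mirrored
type mirror struct{ a, b int64 } // hand-written copy whose layout must match

// delta is zero only when the sizes agree; otherwise the array length below
// is negative and the package fails to compile.
const delta = int64(unsafe.Sizeof(mirror{})) - int64(unsafe.Sizeof(actual{}))

var _ [-delta * delta]int
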
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
deleted file mode 100644
index 8b163e3d..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.11
-// +build !go1.11
-
-package gcimporter
-
-import "go/types"
-
-func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
-	named := make([]*types.Named, len(embeddeds))
-	for i, e := range embeddeds {
-		var ok bool
-		named[i], ok = e.(*types.Named)
-		if !ok {
-			panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
-		}
-	}
-	return types.NewInterface(methods, named)
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
deleted file mode 100644
index 49984f40..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.11
-// +build go1.11
-
-package gcimporter
-
-import "go/types"
-
-func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
-	return types.NewInterfaceType(methods, embeddeds)
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
new file mode 100644
index 00000000..907c8557
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
@@ -0,0 +1,91 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+	"go/types"
+	"sync"
+)
+
+// predecl is a cache for the predeclared types in types.Universe.
+//
+// Cache a distinct result based on the runtime value of any.
+// The pointer value of the any type varies based on GODEBUG settings.
+var predeclMu sync.Mutex
+var predecl map[types.Type][]types.Type
+
+func predeclared() []types.Type {
+	anyt := types.Universe.Lookup("any").Type()
+
+	predeclMu.Lock()
+	defer predeclMu.Unlock()
+
+	if pre, ok := predecl[anyt]; ok {
+		return pre
+	}
+
+	if predecl == nil {
+		predecl = make(map[types.Type][]types.Type)
+	}
+
+	decls := []types.Type{ // basic types
+		types.Typ[types.Bool],
+		types.Typ[types.Int],
+		types.Typ[types.Int8],
+		types.Typ[types.Int16],
+		types.Typ[types.Int32],
+		types.Typ[types.Int64],
+		types.Typ[types.Uint],
+		types.Typ[types.Uint8],
+		types.Typ[types.Uint16],
+		types.Typ[types.Uint32],
+		types.Typ[types.Uint64],
+		types.Typ[types.Uintptr],
+		types.Typ[types.Float32],
+		types.Typ[types.Float64],
+		types.Typ[types.Complex64],
+		types.Typ[types.Complex128],
+		types.Typ[types.String],
+
+		// basic type aliases
+		types.Universe.Lookup("byte").Type(),
+		types.Universe.Lookup("rune").Type(),
+
+		// error
+		types.Universe.Lookup("error").Type(),
+
+		// untyped types
+		types.Typ[types.UntypedBool],
+		types.Typ[types.UntypedInt],
+		types.Typ[types.UntypedRune],
+		types.Typ[types.UntypedFloat],
+		types.Typ[types.UntypedComplex],
+		types.Typ[types.UntypedString],
+		types.Typ[types.UntypedNil],
+
+		// package unsafe
+		types.Typ[types.UnsafePointer],
+
+		// invalid type
+		types.Typ[types.Invalid], // only appears in packages with errors
+
+		// used internally by gc; never used by this package or in .a files
+		anyType{},
+
+		// comparable
+		types.Universe.Lookup("comparable").Type(),
+
+		// any
+		anyt,
+	}
+
+	predecl[anyt] = decls
+	return decls
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string         { return "any" }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go
new file mode 100644
index 00000000..4af810dc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+	"bufio"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader.
+func readArchiveHeader(b *bufio.Reader, name string) int {
+	// architecture-independent object file output
+	const HeaderSize = 60
+
+	var buf [HeaderSize]byte
+	if _, err := io.ReadFull(b, buf[:]); err != nil {
+		return -1
+	}
+	aname := strings.Trim(string(buf[0:16]), " ")
+	if !strings.HasPrefix(aname, name) {
+		return -1
+	}
+	asize := strings.Trim(string(buf[48:58]), " ")
+	i, _ := strconv.Atoi(asize)
+	return i
+}
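
readArchiveHeader above relies on the fixed ar(5) entry layout: a 60-byte header with the entry name in bytes [0:16] and its decimal size in bytes [48:58]. A small sketch of the same parsing, for reference (hdr is assumed to hold one raw header; strings and strconv are assumed imports):

func parseArEntry(hdr [60]byte) (name string, size int, err error) {
	name = strings.TrimRight(string(hdr[0:16]), " ")
	size, err = strconv.Atoi(strings.TrimSpace(string(hdr[48:58])))
	return
}
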
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
deleted file mode 100644
index 0cd3b91b..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gcimporter
-
-import "go/types"
-
-const iexportVersion = iexportVersionGenerics
-
-// additionalPredeclared returns additional predeclared types in go.1.18.
-func additionalPredeclared() []types.Type {
-	return []types.Type{
-		// comparable
-		types.Universe.Lookup("comparable").Type(),
-
-		// any
-		types.Universe.Lookup("any").Type(),
-	}
-}
-
-// See cmd/compile/internal/types.SplitVargenSuffix.
-func splitVargenSuffix(name string) (base, suffix string) {
-	i := len(name)
-	for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
-		i--
-	}
-	const dot = "·"
-	if i >= len(dot) && name[i-len(dot):i] == dot {
-		i -= len(dot)
-		return name[:i], name[i:]
-	}
-	return name, ""
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
deleted file mode 100644
index 38b624ca..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.unified
-// +build !goexperiment.unified
-
-package gcimporter
-
-const unifiedIR = false
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
deleted file mode 100644
index b5118d0b..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.unified
-// +build goexperiment.unified
-
-package gcimporter
-
-const unifiedIR = true
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 2c077068..522287d1 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -11,10 +11,10 @@ import (
 	"go/token"
 	"go/types"
 	"sort"
-	"strings"
 
 	"golang.org/x/tools/internal/aliases"
 	"golang.org/x/tools/internal/pkgbits"
+	"golang.org/x/tools/internal/typesinternal"
 )
 
 // A pkgReader holds the shared state for reading a unified IR package
@@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) {
 
 // See cmd/compile/internal/noder.derivedInfo.
 type derivedInfo struct {
-	idx    pkgbits.Index
-	needed bool
+	idx pkgbits.Index
 }
 
 // See cmd/compile/internal/noder.typeInfo.
@@ -72,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []
 	}
 
 	s := string(data)
-	s = s[:strings.LastIndex(s, "\n$$\n")]
 	input := pkgbits.NewPkgDecoder(path, s)
 	pkg = readUnifiedPackage(fset, nil, imports, input)
 	return
@@ -110,13 +108,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
 
 	r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
 	pkg := r.pkg()
-	r.Bool() // has init
+	if r.Version().Has(pkgbits.HasInit) {
+		r.Bool()
+	}
 
 	for i, n := 0, r.Len(); i < n; i++ {
 		// As if r.obj(), but avoiding the Scope.Lookup call,
 		// to avoid eager loading of imports.
 		r.Sync(pkgbits.SyncObject)
-		assert(!r.Bool())
+		if r.Version().Has(pkgbits.DerivedFuncInstance) {
+			assert(!r.Bool())
+		}
 		r.p.objIdx(r.Reloc(pkgbits.RelocObj))
 		assert(r.Len() == 0)
 	}
@@ -165,7 +167,7 @@ type readerDict struct {
 	// tparams is a slice of the constructed TypeParams for the element.
 	tparams []*types.TypeParam
 
-	// devived is a slice of types derived from tparams, which may be
+	// derived is a slice of types derived from tparams, which may be
 	// instantiated while reading the current element.
 	derived      []derivedInfo
 	derivedTypes []types.Type // lazily instantiated from derived
@@ -263,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
 func (r *reader) doPkg() *types.Package {
 	path := r.String()
 	switch path {
-	case "":
+	// cmd/compile emits path="main" for main packages because
+	// that's the linker symbol prefix it used; but we need
+	// the package's path as it would be reported by go list,
+	// hence "main" below.
+	// See test at go/packages.TestMainPackagePathInModeTypes.
+	case "", "main":
 		path = r.p.PkgPath()
 	case "builtin":
 		return nil // universe
@@ -471,7 +478,9 @@ func (r *reader) param() *types.Var {
 func (r *reader) obj() (types.Object, []types.Type) {
 	r.Sync(pkgbits.SyncObject)
 
-	assert(!r.Bool())
+	if r.Version().Has(pkgbits.DerivedFuncInstance) {
+		assert(!r.Bool())
+	}
 
 	pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
 	obj := pkgScope(pkg).Lookup(name)
@@ -525,8 +534,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
 
 		case pkgbits.ObjAlias:
 			pos := r.pos()
+			var tparams []*types.TypeParam
+			if r.Version().Has(pkgbits.AliasTypeParamNames) {
+				tparams = r.typeParamNames()
+			}
 			typ := r.typ()
-			declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ))
+			declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams))
 
 		case pkgbits.ObjConst:
 			pos := r.pos()
@@ -553,13 +566,14 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
 				// If the underlying type is an interface, we need to
 				// duplicate its methods so we can replace the receiver
 				// parameter's type (#49906).
-				if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+				if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
 					methods := make([]*types.Func, iface.NumExplicitMethods())
 					for i := range methods {
 						fn := iface.ExplicitMethod(i)
 						sig := fn.Type().(*types.Signature)
 
 						recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+						typesinternal.SetVarKind(recv, typesinternal.RecvVar)
 						methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
 					}
 
@@ -607,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
 		case pkgbits.ObjVar:
 			pos := r.pos()
 			typ := r.typ()
-			declare(types.NewVar(pos, objPkg, objName, typ))
+			v := types.NewVar(pos, objPkg, objName, typ)
+			typesinternal.SetVarKind(v, typesinternal.PackageVar)
+			declare(v)
 		}
 	}
 
@@ -632,7 +648,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
 		dict.derived = make([]derivedInfo, r.Len())
 		dict.derivedTypes = make([]types.Type, len(dict.derived))
 		for i := range dict.derived {
-			dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+			dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
+			if r.Version().Has(pkgbits.DerivedInfoNeeded) {
+				assert(!r.Bool())
+			}
 		}
 
 		pr.retireReader(r)
@@ -726,3 +745,17 @@ func pkgScope(pkg *types.Package) *types.Scope {
 	}
 	return types.Universe
 }
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+	i := len(name)
+	for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+		i--
+	}
+	const dot = "·"
+	if i >= len(dot) && name[i-len(dot):i] == dot {
+		i -= len(dot)
+		return name[:i], name[i:]
+	}
+	return name, ""
+}
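
For reference, the splitVargenSuffix helper moved into this file splits off the compiler's "·N" vargen suffix; a quick illustrative call from within this package (the name is arbitrary, fmt assumed imported):

	base, suffix := splitVargenSuffix("Response·3")
	fmt.Println(base, suffix) // prints: Response ·3
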
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index af0ee6c6..7ea90134 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -16,7 +16,6 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-	"reflect"
 	"regexp"
 	"runtime"
 	"strconv"
@@ -29,7 +28,7 @@ import (
 	"golang.org/x/tools/internal/event/label"
 )
 
-// An Runner will run go command invocations and serialize
+// A Runner will run go command invocations and serialize
 // them if it sees a concurrency error.
 type Runner struct {
 	// once guards the runner initialization.
@@ -180,7 +179,7 @@ type Invocation struct {
 	CleanEnv   bool
 	Env        []string
 	WorkingDir string
-	Logf       func(format string, args ...interface{})
+	Logf       func(format string, args ...any)
 }
 
 // Postcondition: both error results have same nilness.
@@ -200,12 +199,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io
 	return
 }
 
-func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
-	log := i.Logf
-	if log == nil {
-		log = func(string, ...interface{}) {}
+// logf logs if i.Logf is non-nil.
+func (i *Invocation) logf(format string, args ...any) {
+	if i.Logf != nil {
+		i.Logf(format, args...)
 	}
+}
 
+func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 	goArgs := []string{i.Verb}
 
 	appendModFile := func() {
@@ -248,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 	cmd.Stdout = stdout
 	cmd.Stderr = stderr
 
-	// cmd.WaitDelay was added only in go1.20 (see #50436).
-	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
-		// https://go.dev/issue/59541: don't wait forever copying stderr
-		// after the command has exited.
-		// After CL 484741 we copy stdout manually, so we we'll stop reading that as
-		// soon as ctx is done. However, we also don't want to wait around forever
-		// for stderr. Give a much-longer-than-reasonable delay and then assume that
-		// something has wedged in the kernel or runtime.
-		waitDelay.Set(reflect.ValueOf(30 * time.Second))
-	}
+	// https://go.dev/issue/59541: don't wait forever copying stderr
+	// after the command has exited.
+	// After CL 484741 we copy stdout manually, so we'll stop reading that as
+	// soon as ctx is done. However, we also don't want to wait around forever
+	// for stderr. Give a much-longer-than-reasonable delay and then assume that
+	// something has wedged in the kernel or runtime.
+	cmd.WaitDelay = 30 * time.Second
 
 	// The cwd gets resolved to the real path. On Darwin, where
 	// /tmp is a symlink, this breaks anything that expects the
@@ -277,7 +275,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 		cmd.Dir = i.WorkingDir
 	}
 
-	defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+	debugStr := cmdDebugStr(cmd)
+	i.logf("starting %v", debugStr)
+	start := time.Now()
+	defer func() {
+		i.logf("%s for %v", time.Since(start), debugStr)
+	}()
 
 	return runCmdContext(ctx, cmd)
 }
@@ -385,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
 		case err := <-resChan:
 			return err
 		case <-timer.C:
-			HandleHangingGoCommand(startTime, cmd)
+			// HandleHangingGoCommand terminates this process.
+			// Pass off resChan in case we can collect the command error.
+			handleHangingGoCommand(startTime, cmd, resChan)
 		case <-ctx.Done():
 		}
 	} else {
@@ -410,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
 	}
 
 	// Didn't shut down in response to interrupt. Kill it hard.
-	// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
-	// on certain platforms, such as unix.
 	if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
 		log.Printf("error killing the Go command: %v", err)
 	}
@@ -419,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
 	return <-resChan
 }
 
-func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) {
+// handleHangingGoCommand outputs debugging information to help diagnose the
+// cause of a hanging Go command, and then exits with log.Fatalf.
+func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) {
 	switch runtime.GOOS {
-	case "linux", "darwin", "freebsd", "netbsd":
+	case "linux", "darwin", "freebsd", "netbsd", "openbsd":
 		fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
 
-The gopls test runner has detected a hanging go command. In order to debug
-this, the output of ps and lsof/fstat is printed below.
+			The gopls test runner has detected a hanging go command. In order to debug
+			this, the output of ps and lsof/fstat is printed below.
 
-See golang/go#54461 for more details.`)
+			See golang/go#54461 for more details.`)
 
 		fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
 		fmt.Fprintln(os.Stderr, "-------------------------")
@@ -435,7 +440,7 @@ See golang/go#54461 for more details.`)
 		psCmd.Stdout = os.Stderr
 		psCmd.Stderr = os.Stderr
 		if err := psCmd.Run(); err != nil {
-			panic(fmt.Sprintf("running ps: %v", err))
+			log.Printf("Handling hanging Go command: running ps: %v", err)
 		}
 
 		listFiles := "lsof"
@@ -449,10 +454,24 @@ See golang/go#54461 for more details.`)
 		listFilesCmd.Stdout = os.Stderr
 		listFilesCmd.Stderr = os.Stderr
 		if err := listFilesCmd.Run(); err != nil {
-			panic(fmt.Sprintf("running %s: %v", listFiles, err))
+			log.Printf("Handling hanging Go command: running %s: %v", listFiles, err)
+		}
+		// Try to extract information about the slow go process by issuing a SIGQUIT.
+		if err := cmd.Process.Signal(sigStuckProcess); err == nil {
+			select {
+			case err := <-resChan:
+				stderr := "not a bytes.Buffer"
+				if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil {
+					stderr = buf.String()
+				}
+				log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr)
+			case <-time.After(5 * time.Second):
+			}
+		} else {
+			log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err)
 		}
 	}
-	panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid))
+	log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)
 }
 
 func cmdDebugStr(cmd *exec.Cmd) string {
@@ -514,7 +533,7 @@ func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(),
 	for k, v := range overlay {
 		// Use a unique basename for each file (001-foo.go),
 		// to avoid creating nested directories.
-		base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k))
+		base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k))
 		filename := filepath.Join(dir, base)
 		err := os.WriteFile(filename, v, 0666)
 		if err != nil {
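The hunk above drops the reflection workaround in favour of assigning exec.Cmd.WaitDelay directly. A self-contained sketch of that pattern, assuming Go 1.20 or newer (the command and timeout values are illustrative only):

package main

import (
	"context"
	"os/exec"
	"time"
)

// runWithWaitDelay runs a go command but bounds how long Wait keeps copying
// stdout/stderr after the process exits or the context is cancelled.
func runWithWaitDelay(ctx context.Context) error {
	cmd := exec.CommandContext(ctx, "go", "env", "GOROOT")
	cmd.WaitDelay = 30 * time.Second // don't wait forever for output pipes to drain
	return cmd.Run()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	_ = runWithWaitDelay(ctx)
}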
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
new file mode 100644
index 00000000..469c648e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package gocommand
+
+import "os"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var sigStuckProcess = os.Kill
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
new file mode 100644
index 00000000..169d37c8
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package gocommand
+
+import "syscall"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var sigStuckProcess = syscall.SIGQUIT
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 4569313a..bf6b0aad 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -90,18 +90,6 @@ type ImportFix struct {
 	Relevance float64 // see pkg
 }
 
-// An ImportInfo represents a single import statement.
-type ImportInfo struct {
-	ImportPath string // import path, e.g. "crypto/rand".
-	Name       string // import name, e.g. "crand", or "" if none.
-}
-
-// A packageInfo represents what's known about a package.
-type packageInfo struct {
-	name    string          // real package name, if known.
-	exports map[string]bool // known exports.
-}
-
 // parseOtherFiles parses all the Go files in srcDir except filename, including
 // test files if filename looks like a test.
 //
@@ -130,7 +118,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename
 			continue
 		}
 
-		f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0)
+		f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution)
 		if err != nil {
 			continue
 		}
@@ -161,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) {
 
 // collectReferences builds a map of selector expressions, from
 // left hand side (X) to a set of right hand sides (Sel).
-func collectReferences(f *ast.File) references {
-	refs := references{}
+func collectReferences(f *ast.File) References {
+	refs := References{}
 
 	var visitor visitFn
 	visitor = func(node ast.Node) ast.Visitor {
@@ -232,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
 
 		allFound := true
 		for right := range syms {
-			if !pkgInfo.exports[right] {
+			if !pkgInfo.Exports[right] {
 				allFound = false
 				break
 			}
@@ -245,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
 	return nil
 }
 
-// references is set of references found in a Go file. The first map key is the
-// left hand side of a selector expression, the second key is the right hand
-// side, and the value should always be true.
-type references map[string]map[string]bool
-
 // A pass contains all the inputs and state necessary to fix a file's imports.
 // It can be modified in some ways during use; see comments below.
 type pass struct {
@@ -257,27 +240,29 @@ type pass struct {
 	fset                 *token.FileSet // fset used to parse f and its siblings.
 	f                    *ast.File      // the file being fixed.
 	srcDir               string         // the directory containing f.
-	env                  *ProcessEnv    // the environment to use for go commands, etc.
-	loadRealPackageNames bool           // if true, load package names from disk rather than guessing them.
-	otherFiles           []*ast.File    // sibling files.
+	logf                 func(string, ...any)
+	source               Source      // the environment to use for go commands, etc.
+	loadRealPackageNames bool        // if true, load package names from disk rather than guessing them.
+	otherFiles           []*ast.File // sibling files.
+	goroot               string
 
 	// Intermediate state, generated by load.
 	existingImports map[string][]*ImportInfo
-	allRefs         references
-	missingRefs     references
+	allRefs         References
+	missingRefs     References
 
 	// Inputs to fix. These can be augmented between successive fix calls.
 	lastTry       bool                    // indicates that this is the last call and fix should clean up as best it can.
 	candidates    []*ImportInfo           // candidate imports in priority order.
-	knownPackages map[string]*packageInfo // information about all known packages.
+	knownPackages map[string]*PackageInfo // information about all known packages.
 }
 
 // loadPackageNames saves the package names for everything referenced by imports.
-func (p *pass) loadPackageNames(imports []*ImportInfo) error {
-	if p.env.Logf != nil {
-		p.env.Logf("loading package names for %v packages", len(imports))
+func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error {
+	if p.logf != nil {
+		p.logf("loading package names for %v packages", len(imports))
 		defer func() {
-			p.env.Logf("done loading package names for %v packages", len(imports))
+			p.logf("done loading package names for %v packages", len(imports))
 		}()
 	}
 	var unknown []string
@@ -288,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
 		unknown = append(unknown, imp.ImportPath)
 	}
 
-	resolver, err := p.env.GetResolver()
-	if err != nil {
-		return err
-	}
-
-	names, err := resolver.loadPackageNames(unknown, p.srcDir)
+	names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown)
 	if err != nil {
 		return err
 	}
 
+	// TODO(rfindley): revisit this. Why do we need to store known packages with
+	// no exports? The inconsistent data is confusing.
 	for path, name := range names {
-		p.knownPackages[path] = &packageInfo{
-			name:    name,
-			exports: map[string]bool{},
+		p.knownPackages[path] = &PackageInfo{
+			Name:    name,
+			Exports: map[string]bool{},
 		}
 	}
 	return nil
@@ -329,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
 		return imp.Name
 	}
 	known := p.knownPackages[imp.ImportPath]
-	if known != nil && known.name != "" {
-		return withoutVersion(known.name)
+	if known != nil && known.Name != "" {
+		return withoutVersion(known.Name)
 	}
 	return ImportPathToAssumedName(imp.ImportPath)
 }
@@ -338,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
 // load reads in everything necessary to run a pass, and reports whether the
 // file already has all the imports it needs. It fills in p.missingRefs with the
 // file's missing symbols, if any, or removes unused imports if not.
-func (p *pass) load() ([]*ImportFix, bool) {
-	p.knownPackages = map[string]*packageInfo{}
-	p.missingRefs = references{}
+func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) {
+	p.knownPackages = map[string]*PackageInfo{}
+	p.missingRefs = References{}
 	p.existingImports = map[string][]*ImportInfo{}
 
 	// Load basic information about the file in question.
@@ -363,10 +345,10 @@ func (p *pass) load() ([]*ImportFix, bool) {
 	// f's imports by the identifier they introduce.
 	imports := collectImports(p.f)
 	if p.loadRealPackageNames {
-		err := p.loadPackageNames(append(imports, p.candidates...))
+		err := p.loadPackageNames(ctx, append(imports, p.candidates...))
 		if err != nil {
-			if p.env.Logf != nil {
-				p.env.Logf("loading package names: %v", err)
+			if p.logf != nil {
+				p.logf("loading package names: %v", err)
 			}
 			return nil, false
 		}
@@ -536,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() {
 					// We have the stdlib in memory; no need to guess.
 					rights = symbolNameSet(m)
 				}
-				p.addCandidate(imp, &packageInfo{
+				// TODO(rfindley): we should set package name here, for consistency.
+				p.addCandidate(imp, &PackageInfo{
 					// no name; we already know it.
-					exports: rights,
+					Exports: rights,
 				})
 			}
 		}
@@ -547,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() {
 
 // addCandidate adds a candidate import to p, and merges in the information
 // in pkg.
-func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
+func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) {
 	p.candidates = append(p.candidates, imp)
 	if existing, ok := p.knownPackages[imp.ImportPath]; ok {
-		if existing.name == "" {
-			existing.name = pkg.name
+		if existing.Name == "" {
+			existing.Name = pkg.Name
 		}
-		for export := range pkg.exports {
-			existing.exports[export] = true
+		for export := range pkg.Exports {
+			existing.Exports[export] = true
 		}
 	} else {
 		p.knownPackages[imp.ImportPath] = pkg
@@ -563,7 +546,14 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
 
 // fixImports adds and removes imports from f so that all its references are
 // satisfied and there are no unused imports.
-func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
+//
+// This is declared as a variable rather than a function so goimports can
+// easily be extended by adding a file with an init function.
+//
+// DO NOT REMOVE: used internally at Google.
+var fixImports = fixImportsDefault
+
+func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
 	fixes, err := getFixes(context.Background(), fset, f, filename, env)
 	if err != nil {
 		return err
@@ -575,21 +565,42 @@ func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessE
 // getFixes gets the import fixes that need to be made to f in order to fix the imports.
 // It does not modify the ast.
 func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
+	source, err := NewProcessEnvSource(env, filename, f.Name.Name)
+	if err != nil {
+		return nil, err
+	}
+	goEnv, err := env.goEnv()
+	if err != nil {
+		return nil, err
+	}
+	return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source)
+}
+
+func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) {
+	// This logic is defensively duplicated from getFixes.
 	abs, err := filepath.Abs(filename)
 	if err != nil {
 		return nil, err
 	}
 	srcDir := filepath.Dir(abs)
-	if env.Logf != nil {
-		env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
+
+	if logf != nil {
+		logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir)
 	}
 
 	// First pass: looking only at f, and using the naive algorithm to
 	// derive package names from import paths, see if the file is already
 	// complete. We can't add any imports yet, because we don't know
 	// if missing references are actually package vars.
-	p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
-	if fixes, done := p.load(); done {
+	p := &pass{
+		fset:   fset,
+		f:      f,
+		srcDir: srcDir,
+		logf:   logf,
+		goroot: goroot,
+		source: source,
+	}
+	if fixes, done := p.load(ctx); done {
 		return fixes, nil
 	}
 
@@ -601,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
 	// Second pass: add information from other files in the same package,
 	// like their package vars and imports.
 	p.otherFiles = otherFiles
-	if fixes, done := p.load(); done {
+	if fixes, done := p.load(ctx); done {
 		return fixes, nil
 	}
 
@@ -614,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
 
 	// Third pass: get real package names where we had previously used
 	// the naive algorithm.
-	p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
+	p = &pass{
+		fset:   fset,
+		f:      f,
+		srcDir: srcDir,
+		logf:   logf,
+		goroot: goroot,
+		source: p.source, // safe to reuse, as it's just a wrapper around env
+	}
 	p.loadRealPackageNames = true
 	p.otherFiles = otherFiles
-	if fixes, done := p.load(); done {
+	if fixes, done := p.load(ctx); done {
 		return fixes, nil
 	}
 
@@ -762,7 +780,7 @@ func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix
 			return true
 		},
 		dirFound: func(pkg *pkg) bool {
-			if !canUse(filename, pkg.dir) {
+			if !CanUse(filename, pkg.dir) {
 				return false
 			}
 			// Try the assumed package name first, then a simpler path match
@@ -797,7 +815,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix,
 			return true
 		},
 		dirFound: func(pkg *pkg) bool {
-			if !canUse(filename, pkg.dir) {
+			if !CanUse(filename, pkg.dir) {
 				return false
 			}
 			return strings.HasPrefix(pkg.importPathShort, searchPrefix)
@@ -831,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
 			return true
 		},
 		dirFound: func(pkg *pkg) bool {
-			return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
+			return pkgIsCandidate(filename, References{searchPkg: nil}, pkg)
 		},
 		packageNameLoaded: func(pkg *pkg) bool {
 			return pkg.packageName == searchPkg
@@ -909,7 +927,7 @@ type ProcessEnv struct {
 	WorkingDir string
 
 	// If Logf is non-nil, debug logging is enabled through this function.
-	Logf func(format string, args ...interface{})
+	Logf func(format string, args ...any)
 
 	// If set, ModCache holds a shared cache of directory info to use across
 	// multiple ProcessEnvs.
@@ -1014,16 +1032,26 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
 		// already know the view type.
 		if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
 			e.resolver = newGopathResolver(e)
+			e.logf("created gopath resolver")
 		} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
 			e.resolverErr = err
+			e.logf("failed to create module resolver: %v", err)
 		} else {
 			e.resolver = Resolver(r)
+			e.logf("created module resolver")
 		}
 	}
 
 	return e.resolver, e.resolverErr
 }
 
+// logf logs if e.Logf is non-nil.
+func (e *ProcessEnv) logf(format string, args ...any) {
+	if e.Logf != nil {
+		e.Logf(format, args...)
+	}
+}
+
 // buildContext returns the build.Context to use for matching files.
 //
 // TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
@@ -1072,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
 	return e.GocmdRunner.Run(ctx, inv)
 }
 
-func addStdlibCandidates(pass *pass, refs references) error {
-	goenv, err := pass.env.goEnv()
-	if err != nil {
-		return err
-	}
+func addStdlibCandidates(pass *pass, refs References) error {
 	localbase := func(nm string) string {
 		ans := path.Base(nm)
 		if ans[0] == 'v' {
@@ -1091,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error {
 	}
 	add := func(pkg string) {
 		// Prevent self-imports.
-		if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
+		if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir {
 			return
 		}
 		exports := symbolNameSet(stdlib.PackageSymbols[pkg])
 		pass.addCandidate(
 			&ImportInfo{ImportPath: pkg},
-			&packageInfo{name: localbase(pkg), exports: exports})
+			&PackageInfo{Name: localbase(pkg), Exports: exports})
 	}
 	for left := range refs {
 		if left == "rand" {
@@ -1108,6 +1132,9 @@ func addStdlibCandidates(pass *pass, refs references) error {
 			// but we have no way of figuring out what the user is using
 			// TODO: investigate using the toolchain version to disambiguate in the stdlib
 			add("math/rand/v2")
+			// math/rand has an overlapping API
+			// TestIssue66407 fails without this
+			add("math/rand")
 			continue
 		}
 		for importPath := range stdlib.PackageSymbols {
@@ -1127,8 +1154,8 @@ type Resolver interface {
 	// scan works with callback to search for packages. See scanCallback for details.
 	scan(ctx context.Context, callback *scanCallback) error
 
-	// loadExports returns the set of exported symbols in the package at dir.
-	// loadExports may be called concurrently.
+	// loadExports returns the package name and set of exported symbols in the
+	// package at dir. loadExports may be called concurrently.
 	loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
 
 	// scoreImportPath returns the relevance for an import path.
@@ -1161,101 +1188,22 @@ type scanCallback struct {
 	exportsLoaded func(pkg *pkg, exports []stdlib.Symbol)
 }
 
-func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
+func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error {
 	ctx, done := event.Start(ctx, "imports.addExternalCandidates")
 	defer done()
 
-	var mu sync.Mutex
-	found := make(map[string][]pkgDistance)
-	callback := &scanCallback{
-		rootFound: func(gopathwalk.Root) bool {
-			return true // We want everything.
-		},
-		dirFound: func(pkg *pkg) bool {
-			return pkgIsCandidate(filename, refs, pkg)
-		},
-		packageNameLoaded: func(pkg *pkg) bool {
-			if _, want := refs[pkg.packageName]; !want {
-				return false
-			}
-			if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
-				// The candidate is in the same directory and has the
-				// same package name. Don't try to import ourselves.
-				return false
-			}
-			if !canUse(filename, pkg.dir) {
-				return false
-			}
-			mu.Lock()
-			defer mu.Unlock()
-			found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
-			return false // We'll do our own loading after we sort.
-		},
-	}
-	resolver, err := pass.env.GetResolver()
+	results, err := pass.source.ResolveReferences(ctx, filename, refs)
 	if err != nil {
 		return err
 	}
-	if err = resolver.scan(ctx, callback); err != nil {
-		return err
-	}
 
-	// Search for imports matching potential package references.
-	type result struct {
-		imp *ImportInfo
-		pkg *packageInfo
-	}
-	results := make(chan result, len(refs))
-
-	ctx, cancel := context.WithCancel(ctx)
-	var wg sync.WaitGroup
-	defer func() {
-		cancel()
-		wg.Wait()
-	}()
-	var (
-		firstErr     error
-		firstErrOnce sync.Once
-	)
-	for pkgName, symbols := range refs {
-		wg.Add(1)
-		go func(pkgName string, symbols map[string]bool) {
-			defer wg.Done()
-
-			found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)
-
-			if err != nil {
-				firstErrOnce.Do(func() {
-					firstErr = err
-					cancel()
-				})
-				return
-			}
-
-			if found == nil {
-				return // No matching package.
-			}
-
-			imp := &ImportInfo{
-				ImportPath: found.importPathShort,
-			}
-
-			pkg := &packageInfo{
-				name:    pkgName,
-				exports: symbols,
-			}
-			results <- result{imp, pkg}
-		}(pkgName, symbols)
-	}
-	go func() {
-		wg.Wait()
-		close(results)
-	}()
-
-	for result := range results {
+	for _, result := range results {
+		if result == nil {
+			continue
+		}
 		// Don't offer completions that would shadow predeclared
 		// names, such as github.com/coreos/etcd/error.
-		if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
+		if types.Universe.Lookup(result.Package.Name) != nil { // predeclared
 			// Ideally we would skip this candidate only
 			// if the predeclared name is actually
 			// referenced by the file, but that's a lot
@@ -1264,9 +1212,9 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
 			// user before long.
 			continue
 		}
-		pass.addCandidate(result.imp, result.pkg)
+		pass.addCandidate(result.Import, result.Package)
 	}
-	return firstErr
+	return nil
 }
 
 // notIdentifier reports whether ch is an invalid identifier character.
@@ -1608,11 +1556,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
 		}
 
 		fullFile := filepath.Join(dir, fi.Name())
+		// Legacy ast.Object resolution is needed here.
 		f, err := parser.ParseFile(fset, fullFile, nil, 0)
 		if err != nil {
-			if env.Logf != nil {
-				env.Logf("error parsing %v: %v", fullFile, err)
-			}
+			env.logf("error parsing %v: %v", fullFile, err)
 			continue
 		}
 		if f.Name.Name == "documentation" {
@@ -1648,9 +1595,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
 	}
 	sortSymbols(exports)
 
-	if env.Logf != nil {
-		env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
-	}
+	env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
 	return pkgName, exports, nil
 }
 
@@ -1660,25 +1605,39 @@ func sortSymbols(syms []stdlib.Symbol) {
 	})
 }
 
-// findImport searches for a package with the given symbols.
-// If no package is found, findImport returns ("", false, nil)
-func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
+// A symbolSearcher searches for a package with a set of symbols, among a set
+// of candidates. See [symbolSearcher.search].
+//
+// The search occurs within the scope of a single file, with context captured
+// in srcDir and xtest.
+type symbolSearcher struct {
+	logf        func(string, ...any)
+	srcDir      string // directory containing the file
+	xtest       bool   // if set, the file being fixed is an x_test file
+	loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
+}
+
+// search searches the provided candidates for a package containing all
+// exported symbols.
+//
+// If successful, returns the resulting package.
+func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
 	// Sort the candidates by their import package length,
 	// assuming that shorter package names are better than long
 	// ones.  Note that this sorts by the de-vendored name, so
 	// there's no "penalty" for vendoring.
 	sort.Sort(byDistanceOrImportPathShortLength(candidates))
-	if pass.env.Logf != nil {
+	if s.logf != nil {
 		for i, c := range candidates {
-			pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
+			s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
 		}
 	}
-	resolver, err := pass.env.GetResolver()
-	if err != nil {
-		return nil, err
-	}
 
-	// Collect exports for packages with matching names.
+	// Arrange rescv so that we can await results in order of relevance
+	// and exit as soon as we find the first match.
+	//
+	// Search with bounded concurrency, returning as soon as the first result
+	// among rescv is non-nil.
 	rescv := make([]chan *pkg, len(candidates))
 	for i := range candidates {
 		rescv[i] = make(chan *pkg, 1)
@@ -1686,6 +1645,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
 	const maxConcurrentPackageImport = 4
 	loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)
 
+	// Ensure that all work is completed at exit.
 	ctx, cancel := context.WithCancel(ctx)
 	var wg sync.WaitGroup
 	defer func() {
@@ -1693,6 +1653,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
 		wg.Wait()
 	}()
 
+	// Start the search.
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
@@ -1703,55 +1664,67 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
 				return
 			}
 
+			i := i
+			c := c
 			wg.Add(1)
-			go func(c pkgDistance, resc chan<- *pkg) {
+			go func() {
 				defer func() {
 					<-loadExportsSem
 					wg.Done()
 				}()
-
-				if pass.env.Logf != nil {
-					pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
+				if s.logf != nil {
+					s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
 				}
-				// If we're an x_test, load the package under test's test variant.
-				includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
-				_, exports, err := resolver.loadExports(ctx, c.pkg, includeTest)
+				pkg, err := s.searchOne(ctx, c, symbols)
 				if err != nil {
-					if pass.env.Logf != nil {
-						pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
+					if s.logf != nil && ctx.Err() == nil {
+						s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
 					}
-					resc <- nil
-					return
+					pkg = nil
 				}
-
-				exportsMap := make(map[string]bool, len(exports))
-				for _, sym := range exports {
-					exportsMap[sym.Name] = true
-				}
-
-				// If it doesn't have the right
-				// symbols, send nil to mean no match.
-				for symbol := range symbols {
-					if !exportsMap[symbol] {
-						resc <- nil
-						return
-					}
-				}
-				resc <- c.pkg
-			}(c, rescv[i])
+				rescv[i] <- pkg // may be nil
+			}()
 		}
 	}()
 
+	// Await the first (best) result.
 	for _, resc := range rescv {
-		pkg := <-resc
-		if pkg == nil {
-			continue
+		select {
+		case r := <-resc:
+			if r != nil {
+				return r, nil
+			}
+		case <-ctx.Done():
+			return nil, ctx.Err()
 		}
-		return pkg, nil
 	}
 	return nil, nil
 }
 
+func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) {
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+	// If we're considering the package under test from an x_test, load the
+	// test variant.
+	includeTest := s.xtest && c.pkg.dir == s.srcDir
+	_, exports, err := s.loadExports(ctx, c.pkg, includeTest)
+	if err != nil {
+		return nil, err
+	}
+
+	exportsMap := make(map[string]bool, len(exports))
+	for _, sym := range exports {
+		exportsMap[sym.Name] = true
+	}
+	for symbol := range symbols {
+		if !exportsMap[symbol] {
+			return nil, nil // no match
+		}
+	}
+	return c.pkg, nil
+}
+
 // pkgIsCandidate reports whether pkg is a candidate for the package
 // that pkgIdent in the file named by filename is trying to refer to.
@@ -1764,68 +1737,34 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
 // filename is the file being formatted.
 // pkgIdent is the package being searched for, like "client" (if
 // searching for "client.New")
-func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
+func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
 	// Check "internal" and "vendor" visibility:
-	if !canUse(filename, pkg.dir) {
+	if !CanUse(filename, pkg.dir) {
 		return false
 	}
 
 	// Speed optimization to minimize disk I/O:
-	// the last two components on disk must contain the
-	// package name somewhere.
 	//
-	// This permits mismatch naming like directory
-	// "go-foo" being package "foo", or "pkg.v3" being "pkg",
-	// or directory "google.golang.org/api/cloudbilling/v1"
-	// being package "cloudbilling", but doesn't
-	// permit a directory "foo" to be package
-	// "bar", which is strongly discouraged
-	// anyway. There's no reason goimports needs
-	// to be slow just to accommodate that.
+	// Use the matchesPath heuristic to filter to package paths that could
+	// reasonably match a dangling reference.
+	//
+	// This permits mismatch naming like directory "go-foo" being package "foo",
+	// or "pkg.v3" being "pkg", or directory
+	// "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but
+	// doesn't permit a directory "foo" to be package "bar", which is strongly
+	// discouraged anyway. There's no reason goimports needs to be slow just to
+	// accommodate that.
 	for pkgIdent := range refs {
-		lastTwo := lastTwoComponents(pkg.importPathShort)
-		if strings.Contains(lastTwo, pkgIdent) {
-			return true
-		}
-		if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
-			lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
-			if strings.Contains(lastTwo, pkgIdent) {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-func hasHyphenOrUpperASCII(s string) bool {
-	for i := 0; i < len(s); i++ {
-		b := s[i]
-		if b == '-' || ('A' <= b && b <= 'Z') {
+		if matchesPath(pkgIdent, pkg.importPathShort) {
 			return true
 		}
 	}
 	return false
 }
 
-func lowerASCIIAndRemoveHyphen(s string) (ret string) {
-	buf := make([]byte, 0, len(s))
-	for i := 0; i < len(s); i++ {
-		b := s[i]
-		switch {
-		case b == '-':
-			continue
-		case 'A' <= b && b <= 'Z':
-			buf = append(buf, b+('a'-'A'))
-		default:
-			buf = append(buf, b)
-		}
-	}
-	return string(buf)
-}
-
-// canUse reports whether the package in dir is usable from filename,
+// CanUse reports whether the package in dir is usable from filename,
 // respecting the Go "internal" and "vendor" visibility rules.
-func canUse(filename, dir string) bool {
+func CanUse(filename, dir string) bool {
 	// Fast path check, before any allocations. If it doesn't contain vendor
 	// or internal, it's not tricky:
 	// Note that this can false-negative on directories like "notinternal",
@@ -1863,19 +1802,84 @@ func canUse(filename, dir string) bool {
 	return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal")
 }
 
-// lastTwoComponents returns at most the last two path components
-// of v, using either / or \ as the path separator.
-func lastTwoComponents(v string) string {
+// matchesPath reports whether ident may match a potential package name
+// referred to by path, using heuristics to filter out unidiomatic package
+// names.
+//
+// Specifically, it checks whether either of the last two '/'- or '\'-delimited
+// path segments matches the identifier. The segment-matching heuristic must
+// allow for various conventions around segment naming, including go-foo,
+// foo-go, and foo.v3. To handle all of these, matching considers both (1) the
+// entire segment, ignoring '-' and '.', as well as (2) the last subsegment
+// separated by '-' or '.'. So the segment foo-go matches all of the following
+// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII
+// identifiers).
+//
+// See the docstring for [pkgIsCandidate] for an explanation of how this
+// heuristic filters potential candidate packages.
+func matchesPath(ident, path string) bool {
+	// Ignore case, for ASCII.
+	lowerIfASCII := func(b byte) byte {
+		if 'A' <= b && b <= 'Z' {
+			return b + ('a' - 'A')
+		}
+		return b
+	}
+
+	// match reports whether path[start:end] matches ident, ignoring [.-].
+	match := func(start, end int) bool {
+		ii := len(ident) - 1 // current byte in ident
+		pi := end - 1        // current byte in path
+		for ; pi >= start && ii >= 0; pi-- {
+			pb := path[pi]
+			if pb == '-' || pb == '.' {
+				continue
+			}
+			pb = lowerIfASCII(pb)
+			ib := lowerIfASCII(ident[ii])
+			if pb != ib {
+				return false
+			}
+			ii--
+		}
+		return ii < 0 && pi < start // all bytes matched
+	}
+
+	// segmentEnd and subsegmentEnd hold the end points of the current segment
+	// and subsegment intervals.
+	segmentEnd := len(path)
+	subsegmentEnd := len(path)
+
+	// Count slashes; we only care about the last two segments.
 	nslash := 0
-	for i := len(v) - 1; i >= 0; i-- {
-		if v[i] == '/' || v[i] == '\\' {
+
+	for i := len(path) - 1; i >= 0; i-- {
+		switch b := path[i]; b {
+		// TODO(rfindley): we handle backslashes here only because the previous
+		// heuristic handled backslashes. This is perhaps overly defensive, but is
+		// the result of many lessons regarding Chesterton's fence and the
+		// goimports codebase.
+		//
+		// However, this function is only ever called with something called an
+		// 'importPath'. Is it possible that this is a real import path, and
+		// therefore we need only consider forward slashes?
+		case '/', '\\':
+			if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) {
+				return true
+			}
 			nslash++
 			if nslash == 2 {
-				return v[i:]
+				return false // did not match above
 			}
+			segmentEnd, subsegmentEnd = i, i // reset
+		case '-', '.':
+			if match(i+1, subsegmentEnd) {
+				return true
+			}
+			subsegmentEnd = i
 		}
 	}
-	return v
+	return match(0, segmentEnd) || match(0, subsegmentEnd)
 }
 
 type visitFn func(node ast.Node) ast.Visitor
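To make the matchesPath heuristic above concrete, here is a hypothetical in-package table test; it assumes it compiles next to the unexported matchesPath, and the cases simply restate the behaviour described in its doc comment:

package imports

import "testing"

func TestMatchesPathSketch(t *testing.T) {
	cases := []struct {
		ident, path string
		want        bool
	}{
		{"foo", "github.com/user/go-foo", true},                         // '-' subsegment match
		{"pkg", "example.com/pkg.v3", true},                             // '.' subsegment match
		{"cloudbilling", "google.golang.org/api/cloudbilling/v1", true}, // one of the last two segments
		{"bar", "example.com/foo", false},                               // nothing resembling "bar"
	}
	for _, c := range cases {
		if got := matchesPath(c.ident, c.path); got != c.want {
			t.Errorf("matchesPath(%q, %q) = %v, want %v", c.ident, c.path, got, c.want)
		}
	}
}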
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index f8346552..2215a128 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -47,7 +47,14 @@ type Options struct {
 // Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
 func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
 	fileSet := token.NewFileSet()
-	file, adjust, err := parse(fileSet, filename, src, opt)
+	var parserMode parser.Mode
+	if opt.Comments {
+		parserMode |= parser.ParseComments
+	}
+	if opt.AllErrors {
+		parserMode |= parser.AllErrors
+	}
+	file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment)
 	if err != nil {
 		return nil, err
 	}
@@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
 //
 // Note that filename's directory influences which imports can be chosen,
 // so it is important that filename be accurate.
-func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
+func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) {
 	ctx, done := event.Start(ctx, "imports.FixImports")
 	defer done()
 
 	fileSet := token.NewFileSet()
-	file, _, err := parse(fileSet, filename, src, opt)
+	// TODO(rfindley): these default values for ParseComments and AllErrors were
+	// extracted from gopls, but are they even needed?
+	file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true)
 	if err != nil {
 		return nil, err
 	}
 
-	return getFixes(ctx, fileSet, file, filename, opt.Env)
+	return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source)
 }
 
 // ApplyFixes applies all of the fixes to the file and formats it. extraMode
@@ -86,7 +95,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
 	// Don't use parse() -- we don't care about fragments or statement lists
 	// here, and we need to work with unparseable files.
 	fileSet := token.NewFileSet()
-	parserMode := parser.Mode(0)
+	parserMode := parser.SkipObjectResolution
 	if opt.Comments {
 		parserMode |= parser.ParseComments
 	}
@@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
 // formatted file, and returns the postprocessed result.
 func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
 	mergeImports(file)
-	sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
+	sortImports(opt.LocalPrefix, fset.File(file.FileStart), file)
 	var spacesBefore []string // import paths we need spaces before
 	for _, impSection := range astutil.Imports(fset, file) {
 		// Within each block of contiguous imports, see if any
@@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori
 
 // parse parses src, which was read from filename,
 // as a Go source file or statement list.
-func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
-	parserMode := parser.Mode(0)
-	if opt.Comments {
-		parserMode |= parser.ParseComments
-	}
-	if opt.AllErrors {
-		parserMode |= parser.AllErrors
+func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) {
+	if parserMode&parser.SkipObjectResolution != 0 {
+		panic("legacy ast.Object resolution is required")
 	}
 
 	// Try as whole source file.
@@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
 	// If the error is that the source file didn't begin with a
 	// package line and we accept fragmented input, fall through to
 	// try as a source fragment.  Stop and return on any other error.
-	if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
+	if !fragment || !strings.Contains(err.Error(), "expected 'package'") {
 		return nil, nil, err
 	}
 
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 82fe644a..8555e3f8 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe
 //  2. Use this to separate module cache scanning from other scanning.
 func gomodcacheForEnv(goenv map[string]string) string {
 	if gmc := goenv["GOMODCACHE"]; gmc != "" {
-		return gmc
+		// golang/go#67156: ensure that the module cache is clean, since it is
+		// assumed as a prefix to directories scanned by gopathwalk, which are
+		// themselves clean.
+		return filepath.Clean(gmc)
 	}
 	gopaths := filepath.SplitList(goenv["GOPATH"])
 	if len(gopaths) == 0 {
@@ -265,9 +268,7 @@ func (r *ModuleResolver) initAllMods() error {
 			return err
 		}
 		if mod.Dir == "" {
-			if r.env.Logf != nil {
-				r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
-			}
+			r.env.logf("module %v has not been downloaded and will be ignored", mod.Path)
 			// Can't do anything with a module that's not downloaded.
 			continue
 		}
@@ -742,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest
 
 func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
 	subdir := ""
-	if dir != root.Path {
-		subdir = dir[len(root.Path)+len("/"):]
+	if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) {
+		subdir = dir[len(prefix):]
 	}
 	importPath := filepath.ToSlash(subdir)
 	if strings.HasPrefix(importPath, "vendor/") {
@@ -766,9 +767,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
 		}
 		modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
 		if err != nil {
-			if r.env.Logf != nil {
-				r.env.Logf("decoding module cache path %q: %v", subdir, err)
-			}
+			r.env.logf("decoding module cache path %q: %v", subdir, err)
 			return directoryPackageInfo{
 				status: directoryScanned,
 				err:    fmt.Errorf("decoding module cache path %q: %v", subdir, err),
diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go
new file mode 100644
index 00000000..cbe4f3c5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/imports/source.go
@@ -0,0 +1,63 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "context"
+
+// These types document the APIs below.
+//
+// TODO(rfindley): consider making these defined types rather than aliases.
+type (
+	ImportPath  = string
+	PackageName = string
+	Symbol      = string
+
+	// References is a set of references found in a Go file. The first map key is the
+	// left hand side of a selector expression, the second key is the right hand
+	// side, and the value should always be true.
+	References = map[PackageName]map[Symbol]bool
+)
+
+// A Result satisfies a missing import.
+//
+// The Import field describes the missing import spec, and the Package field
+// summarizes the package exports.
+type Result struct {
+	Import  *ImportInfo
+	Package *PackageInfo
+}
+
+// An ImportInfo represents a single import statement.
+type ImportInfo struct {
+	ImportPath string // import path, e.g. "crypto/rand".
+	Name       string // import name, e.g. "crand", or "" if none.
+}
+
+// A PackageInfo represents what's known about a package.
+type PackageInfo struct {
+	Name    string          // package name in the package declaration, if known
+	Exports map[string]bool // set of names of known package-level symbols
+}
+
+// A Source provides imports to satisfy unresolved references in the file being
+// fixed.
+type Source interface {
+	// LoadPackageNames queries PackageName information for the requested import
+	// paths, when operating from the provided srcDir.
+	//
+	// TODO(rfindley): try to refactor to remove this operation.
+	LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error)
+
+	// ResolveReferences asks the Source for the best package name to satisfy
+	// each of the missing references, in the context of fixing the given
+	// filename.
+	//
+	// Returns a map from package name to a [Result] for that package name that
+	// provides the required symbols. Keys may be omitted in the map if no
+	// candidates satisfy all missing references for that package name. It is up
+	// to each data source to select the best result for each entry in the
+	// missing map.
+	ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error)
+}
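As a rough sketch of what the new Source interface asks of an implementation, here is a hypothetical static resolver; it assumes it compiles inside the internal imports package, and the lookup table it serves is made up for illustration:

package imports

import "context"

// staticSource resolves missing references from a fixed table keyed by package name.
type staticSource struct {
	byName map[PackageName]*Result
}

func (s *staticSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
	names := make(map[ImportPath]PackageName, len(paths))
	for _, p := range paths {
		for name, r := range s.byName {
			if r.Import.ImportPath == p {
				names[p] = name
			}
		}
	}
	return names, nil
}

func (s *staticSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
	var results []*Result
	for name := range missing {
		if r, ok := s.byName[name]; ok {
			results = append(results, r)
		}
	}
	return results, nil
}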
diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go
new file mode 100644
index 00000000..ec996c3c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/imports/source_env.go
@@ -0,0 +1,129 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+	"context"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"golang.org/x/sync/errgroup"
+	"golang.org/x/tools/internal/gopathwalk"
+)
+
+// ProcessEnvSource implements the [Source] interface using the legacy
+// [ProcessEnv] abstraction.
+type ProcessEnvSource struct {
+	env      *ProcessEnv
+	srcDir   string
+	filename string
+	pkgName  string
+}
+
+// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given
+// env, to be used for fixing imports in the file with name filename in package
+// named pkgName.
+func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) {
+	abs, err := filepath.Abs(filename)
+	if err != nil {
+		return nil, err
+	}
+	srcDir := filepath.Dir(abs)
+	return &ProcessEnvSource{
+		env:      env,
+		srcDir:   srcDir,
+		filename: filename,
+		pkgName:  pkgName,
+	}, nil
+}
+
+func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) {
+	r, err := s.env.GetResolver()
+	if err != nil {
+		return nil, err
+	}
+	return r.loadPackageNames(unknown, srcDir)
+}
+
+func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) {
+	var mu sync.Mutex
+	found := make(map[string][]pkgDistance)
+	callback := &scanCallback{
+		rootFound: func(gopathwalk.Root) bool {
+			return true // We want everything.
+		},
+		dirFound: func(pkg *pkg) bool {
+			return pkgIsCandidate(filename, refs, pkg)
+		},
+		packageNameLoaded: func(pkg *pkg) bool {
+			if _, want := refs[pkg.packageName]; !want {
+				return false
+			}
+			if pkg.dir == s.srcDir && s.pkgName == pkg.packageName {
+				// The candidate is in the same directory and has the
+				// same package name. Don't try to import ourselves.
+				return false
+			}
+			if !CanUse(filename, pkg.dir) {
+				return false
+			}
+			mu.Lock()
+			defer mu.Unlock()
+			found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)})
+			return false // We'll do our own loading after we sort.
+		},
+	}
+	resolver, err := s.env.GetResolver()
+	if err != nil {
+		return nil, err
+	}
+	if err := resolver.scan(ctx, callback); err != nil {
+		return nil, err
+	}
+
+	g, ctx := errgroup.WithContext(ctx)
+
+	searcher := symbolSearcher{
+		logf:        s.env.logf,
+		srcDir:      s.srcDir,
+		xtest:       strings.HasSuffix(s.pkgName, "_test"),
+		loadExports: resolver.loadExports,
+	}
+
+	var resultMu sync.Mutex
+	results := make(map[string]*Result, len(refs))
+	for pkgName, symbols := range refs {
+		g.Go(func() error {
+			found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
+			if err != nil {
+				return err
+			}
+			if found == nil {
+				return nil // No matching package.
+			}
+
+			imp := &ImportInfo{
+				ImportPath: found.importPathShort,
+			}
+			pkg := &PackageInfo{
+				Name:    pkgName,
+				Exports: symbols,
+			}
+			resultMu.Lock()
+			results[pkgName] = &Result{Import: imp, Package: pkg}
+			resultMu.Unlock()
+			return nil
+		})
+	}
+	if err := g.Wait(); err != nil {
+		return nil, err
+	}
+	var ans []*Result
+	for _, x := range results {
+		ans = append(ans, x)
+	}
+	return ans, nil
+}
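ResolveReferences above fans out one search per missing package name through errgroup and merges results under a mutex. A stripped-down, self-contained sketch of that pattern; the lookup function and package names are placeholders, not part of the patch:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func resolveAll(ctx context.Context, names []string, lookup func(context.Context, string) (string, error)) (map[string]string, error) {
	g, ctx := errgroup.WithContext(ctx) // the first error cancels the remaining lookups
	var mu sync.Mutex
	out := make(map[string]string, len(names))
	for _, name := range names {
		name := name // capture for Go versions before 1.22
		g.Go(func() error {
			path, err := lookup(ctx, name)
			if err != nil {
				return err
			}
			mu.Lock()
			out[name] = path
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	res, err := resolveAll(context.Background(), []string{"errgroup", "semver"}, func(_ context.Context, n string) (string, error) {
		return "example.com/" + n, nil // stand-in lookup
	})
	fmt.Println(res, err)
}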
diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go
new file mode 100644
index 00000000..05229f06
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go
@@ -0,0 +1,103 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"golang.org/x/tools/internal/modindex"
+)
+
+// This code is here rather than in the modindex package
+// to avoid import loops
+
+// implements Source using modindex, so only for module cache.
+//
+// this is perhaps over-engineered. A new Index is read at first use.
+// And then Update is called after every 15 minutes, and a new Index
+// is read if the index changed. It is not clear the Mutex is needed.
+type IndexSource struct {
+	modcachedir string
+	mutex       sync.Mutex
+	ix          *modindex.Index
+	expires     time.Time
+}
+
+// create a new Source. Called from NewView in cache/session.go.
+func NewIndexSource(cachedir string) *IndexSource {
+	return &IndexSource{modcachedir: cachedir}
+}
+
+func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
+	// This is used by goimports to resolve the package names of imports of the
+	// current package, which is irrelevant for the module cache.
+	return nil, nil
+}
+
+func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
+	if err := s.maybeReadIndex(); err != nil {
+		return nil, err
+	}
+	var cs []modindex.Candidate
+	for pkg, nms := range missing {
+		for nm := range nms {
+			x := s.ix.Lookup(pkg, nm, false)
+			cs = append(cs, x...)
+		}
+	}
+	found := make(map[string]*Result)
+	for _, c := range cs {
+		var x *Result
+		if x = found[c.ImportPath]; x == nil {
+			x = &Result{
+				Import: &ImportInfo{
+					ImportPath: c.ImportPath,
+					Name:       "",
+				},
+				Package: &PackageInfo{
+					Name:    c.PkgName,
+					Exports: make(map[string]bool),
+				},
+			}
+			found[c.ImportPath] = x
+		}
+		x.Package.Exports[c.Name] = true
+	}
+	var ans []*Result
+	for _, x := range found {
+		ans = append(ans, x)
+	}
+	return ans, nil
+}
+
+func (s *IndexSource) maybeReadIndex() error {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
+	var readIndex bool
+	if time.Now().After(s.expires) {
+		ok, err := modindex.Update(s.modcachedir)
+		if err != nil {
+			return err
+		}
+		if ok {
+			readIndex = true
+		}
+	}
+
+	if readIndex || s.ix == nil {
+		ix, err := modindex.ReadIndex(s.modcachedir)
+		if err != nil {
+			return err
+		}
+		s.ix = ix
+		// for now refresh every 15 minutes
+		s.expires = time.Now().Add(time.Minute * 15)
+	}
+
+	return nil
+}
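A hedged usage sketch for the new IndexSource; the caller, module-cache directory, file name, and missing reference below are illustrative assumptions, and the code presumes it lives where the internal imports package is importable:

package imports

import (
	"context"
	"fmt"
)

// lookupViaIndexSketch asks the module-cache index which import could satisfy
// a dangling reference such as errgroup.WithContext.
func lookupViaIndexSketch(ctx context.Context, modCacheDir string) error {
	src := NewIndexSource(modCacheDir) // e.g. the value of `go env GOMODCACHE`
	missing := References{"errgroup": {"WithContext": true}}
	results, err := src.ResolveReferences(ctx, "example.go", missing)
	if err != nil {
		return err
	}
	for _, r := range results {
		fmt.Printf("package %s is provided by %q\n", r.Package.Name, r.Import.ImportPath)
	}
	return nil
}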
diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go
new file mode 100644
index 00000000..1e1a02f2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/directories.go
@@ -0,0 +1,135 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"regexp"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/mod/semver"
+	"golang.org/x/tools/internal/gopathwalk"
+)
+
+type directory struct {
+	path       Relpath
+	importPath string
+	version    string // semantic version
+	syms       []symbol
+}
+
+// byImportPath groups the directories by import path,
+// sorting the ones with the same import path by semantic version,
+// most recent first.
+func byImportPath(dirs []Relpath) (map[string][]*directory, error) {
+	ans := make(map[string][]*directory) // key is import path
+	for _, d := range dirs {
+		ip, sv, err := DirToImportPathVersion(d)
+		if err != nil {
+			return nil, err
+		}
+		ans[ip] = append(ans[ip], &directory{
+			path:       d,
+			importPath: ip,
+			version:    sv,
+		})
+	}
+	for k, v := range ans {
+		semanticSort(v)
+		ans[k] = v
+	}
+	return ans, nil
+}
+
+// sort the directories by semantic version, latest first
+func semanticSort(v []*directory) {
+	slices.SortFunc(v, func(l, r *directory) int {
+		if n := semver.Compare(l.version, r.version); n != 0 {
+			return -n // latest first
+		}
+		return strings.Compare(string(l.path), string(r.path))
+	})
+}
+
+// modCacheRegexp splits a relpath into module, module version, and package.
+var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
+
+// DirToImportPathVersion computes import path and semantic version
+func DirToImportPathVersion(dir Relpath) (string, string, error) {
+	m := modCacheRegexp.FindStringSubmatch(string(dir))
+	// m[1] is the module path
+	// m[2] is the version major.minor.patch(-
 1 && flds[1][1] == 'D',
+			}
+			if px.Type == Func {
+				n, err := strconv.Atoi(flds[2])
+				if err != nil {
+					continue // should never happen
+				}
+				px.Results = int16(n)
+				if len(flds) >= 4 {
+					sig := strings.Split(flds[3], " ")
+					for i := 0; i < len(sig); i++ {
+						// $ cannot otherwise occur. removing the spaces
+						// almost works, but for chan struct{}, e.g.
+						sig[i] = strings.Replace(sig[i], "$", " ", -1)
+					}
+					px.Sig = toFields(sig)
+				}
+			}
+			ans = append(ans, px)
+		}
+	}
+	return ans
+}
+
+func toFields(sig []string) []Field {
+	ans := make([]Field, len(sig)/2)
+	for i := 0; i < len(ans); i++ {
+		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
+	}
+	return ans
+}
+
+// benchmarks show this is measurably better than strings.Split
+// split into first 4 fields separated by single space
+func fastSplit(x string) []string {
+	ans := make([]string, 0, 4)
+	nxt := 0
+	start := 0
+	for i := 0; i < len(x); i++ {
+		if x[i] != ' ' {
+			continue
+		}
+		ans = append(ans, x[start:i])
+		nxt++
+		start = i + 1
+		if nxt >= 3 {
+			break
+		}
+	}
+	ans = append(ans, x[start:])
+	return ans
+}
+
+func asLexType(c byte) LexType {
+	switch c {
+	case 'C':
+		return Const
+	case 'V':
+		return Var
+	case 'T':
+		return Type
+	case 'F':
+		return Func
+	}
+	return -1
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
new file mode 100644
index 00000000..355a53e7
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/modindex.go
@@ -0,0 +1,164 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modindex contains code for building and searching an index to
+// the Go module cache. The directory containing the index, returned by
+// IndexDir(), contains a file index-name-<ver> that contains the name
+// of the current index. We believe writing that short file is atomic.
+// ReadIndex reads that file to get the file name of the index.
+// WriteIndex writes an index with a unique name and then
+// writes that name into a new version of index-name-<ver>.
+// (<ver> stands for the CurrentVersion of the index format.)
+package modindex
+
+import (
+	"path/filepath"
+	"slices"
+	"strings"
+	"time"
+
+	"golang.org/x/mod/semver"
+)
+
+// Create always creates a new index for the go module cache that is in cachedir.
+func Create(cachedir string) error {
+	_, err := indexModCache(cachedir, true)
+	return err
+}
+
+// Update the index for the go module cache that is in cachedir.
+// If there is no existing index it will build one.
+// If there are changed directories since the last index, it will
+// write a new one and return true. Otherwise it returns false.
+func Update(cachedir string) (bool, error) {
+	return indexModCache(cachedir, false)
+}
+
+// indexModCache writes an index current as of when it is called.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and the updates to the cache. It returns true if it wrote an index,
+// false otherwise.
+func indexModCache(cachedir string, clear bool) (bool, error) {
+	cachedir, err := filepath.Abs(cachedir)
+	if err != nil {
+		return false, err
+	}
+	cd := Abspath(cachedir)
+	future := time.Now().Add(24 * time.Hour) // safely in the future
+	ok, err := modindexTimed(future, cd, clear)
+	if err != nil {
+		return false, err
+	}
+	return ok, nil
+}
+
+// modindexTimed writes an index current as of onlyBefore.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and all the updates to the cache before onlyBefore.
+// It returns true if it wrote a new index, false if it wrote nothing.
+func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
+	var curIndex *Index
+	if !clear {
+		var err error
+		curIndex, err = ReadIndex(string(cachedir))
+		if clear && err != nil {
+			return false, err
+		}
+		// TODO(pjw): check that most of those directories still exist
+	}
+	cfg := &work{
+		onlyBefore: onlyBefore,
+		oldIndex:   curIndex,
+		cacheDir:   cachedir,
+	}
+	if curIndex != nil {
+		cfg.onlyAfter = curIndex.Changed
+	}
+	if err := cfg.buildIndex(); err != nil {
+		return false, err
+	}
+	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
+		// no changes from existing curIndex, don't write a new index
+		return false, nil
+	}
+	if err := cfg.writeIndex(); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+type work struct {
+	onlyBefore time.Time // do not use directories later than this
+	onlyAfter  time.Time // only interested in directories after this
+	// directories from before onlyAfter come from oldIndex
+	oldIndex *Index
+	newIndex *Index
+	cacheDir Abspath
+}
+
+func (w *work) buildIndex() error {
+	// The effective date of the new index should be at least
+	// slightly earlier than when the directories are scanned
+	// so set it now.
+	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
+	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
+	if len(dirs) == 0 {
+		return nil
+	}
+	newdirs, err := byImportPath(dirs)
+	if err != nil {
+		return err
+	}
+	// Each import path might occur only in newdirs, only in w.oldIndex,
+	// or in both. If it occurs in both, use the semantically later version.
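+	// For example, if the old index has foo@v1.1.0 and the cache scan finds
+	// foo@v1.2.0, the scanned entry wins; if the scan only finds foo@v1.0.5,
+	// the old v1.1.0 entry is kept and the scanned directory is dropped.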
+	if w.oldIndex != nil {
+		for _, e := range w.oldIndex.Entries {
+			found, ok := newdirs[e.ImportPath]
+			if !ok {
+				w.newIndex.Entries = append(w.newIndex.Entries, e)
+				continue // use this one, there is no new one
+			}
+			if semver.Compare(found[0].version, e.Version) > 0 {
+				// use the new one
+			} else {
+				// use the old one, forget the new one
+				w.newIndex.Entries = append(w.newIndex.Entries, e)
+				delete(newdirs, e.ImportPath)
+			}
+		}
+	}
+	// get symbol information for all the new directories
+	getSymbols(w.cacheDir, newdirs)
+	// assemble the new index entries
+	for k, v := range newdirs {
+		d := v[0]
+		pkg, names := processSyms(d.syms)
+		if pkg == "" {
+			continue // PJW: does this ever happen?
+		}
+		entry := Entry{
+			PkgName:    pkg,
+			Dir:        d.path,
+			ImportPath: k,
+			Version:    d.version,
+			Names:      names,
+		}
+		w.newIndex.Entries = append(w.newIndex.Entries, entry)
+	}
+	// sort the entries in the new index
+	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
+		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+			return n
+		}
+		return strings.Compare(l.ImportPath, r.ImportPath)
+	})
+	return nil
+}
+
+func (w *work) writeIndex() error {
+	return writeIndex(w.cacheDir, w.newIndex)
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
new file mode 100644
index 00000000..b918529d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -0,0 +1,218 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"os"
+	"path/filepath"
+	"runtime"
+	"slices"
+	"strings"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// The name of a symbol contains information about the symbol:
+//  T for types, TD if the type is deprecated
+//  C for consts, CD if the const is deprecated
+//  V for vars, VD if the var is deprecated
+// and for funcs: F <num of return values> (<arg-name> <arg-type>)*
+// any spaces in <arg-type> are replaced by $s so that the fields
+// of the name are space separated. F is replaced by FD if the func
+// is deprecated.
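+// For example, the declaration "func Open(name string) (*File, error)" in
+// package os yields the encoded name "Open F 2 name string":
+// kind F, two results, then one name/type pair per parameter.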
+type symbol struct {
+	pkg  string // name of the symbol's package
+	name string // declared name
+	kind string // T, C, V, or F, followed by D if deprecated
+	sig  string // signature information, for F
+}
+
+// find the symbols for the best directories
+func getSymbols(cd Abspath, dirs map[string][]*directory) {
+	var g errgroup.Group
+	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
+	for _, vv := range dirs {
+		// throttling some day?
+		d := vv[0]
+		g.Go(func() error {
+			thedir := filepath.Join(string(cd), string(d.path))
+			mode := parser.SkipObjectResolution | parser.ParseComments
+
+			fi, err := os.ReadDir(thedir)
+			if err != nil {
+				return nil // log this someday?
+			}
+			for _, fx := range fi {
+				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+					continue
+				}
+				fname := filepath.Join(thedir, fx.Name())
+				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
+				if err != nil {
+					continue // ignore errors, someday log them?
+				}
+				d.syms = append(d.syms, getFileExports(tr)...)
+			}
+			return nil
+		})
+	}
+	g.Wait()
+}
+
+func getFileExports(f *ast.File) []symbol {
+	pkg := f.Name.Name
+	if pkg == "main" {
+		return nil
+	}
+	var ans []symbol
+	// should we look for //go:build ignore?
+	for _, decl := range f.Decls {
+		switch decl := decl.(type) {
+		case *ast.FuncDecl:
+			if decl.Recv != nil {
+				// ignore methods, as we are completing package selections
+				continue
+			}
+			name := decl.Name.Name
+			dtype := decl.Type
+			// not looking at dtype.TypeParams. That is, treating
+			// generic functions just like non-generic ones.
+			sig := dtype.Params
+			kind := "F"
+			if isDeprecated(decl.Doc) {
+				kind += "D"
+			}
+			result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
+			for _, x := range sig.List {
+				// This code creates a string representing the type.
+				// TODO(pjw): it may be fragile:
+				// 1. x.Type could be nil, perhaps in ill-formed code
+				// 2. ExprString might someday change incompatibly to
+				//    include struct tags, which can be arbitrary strings
+				if x.Type == nil {
+					// Can this happen without a parse error? (Files with parse
+					// errors are ignored in getSymbols)
+					continue // maybe report this someday
+				}
+				tp := types.ExprString(x.Type)
+				if len(tp) == 0 {
+					// Can this happen?
+					continue // maybe report this someday
+				}
+				// This is only safe if ExprString never returns anything with a $
+				// The only place a $ can occur seems to be in a struct tag, which
+				// can be an arbitrary string literal, and ExprString does not presently
+				// print struct tags. So for this to happen the type of a formal parameter
+				// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
+				// would have to show the struct tag. Even testing for this case seems
+				// a waste of effort, but let's remember the possibility
+				if strings.Contains(tp, "$") {
+					continue
+				}
+				tp = strings.Replace(tp, " ", "$", -1)
+				if len(x.Names) == 0 {
+					result = append(result, "_")
+					result = append(result, tp)
+				} else {
+					for _, y := range x.Names {
+						result = append(result, y.Name)
+						result = append(result, tp)
+					}
+				}
+			}
+			sigs := strings.Join(result, " ")
+			if s := newsym(pkg, name, kind, sigs); s != nil {
+				ans = append(ans, *s)
+			}
+		case *ast.GenDecl:
+			depr := isDeprecated(decl.Doc)
+			switch decl.Tok {
+			case token.CONST, token.VAR:
+				tp := "V"
+				if decl.Tok == token.CONST {
+					tp = "C"
+				}
+				if depr {
+					tp += "D"
+				}
+				for _, sp := range decl.Specs {
+					for _, x := range sp.(*ast.ValueSpec).Names {
+						if s := newsym(pkg, x.Name, tp, ""); s != nil {
+							ans = append(ans, *s)
+						}
+					}
+				}
+			case token.TYPE:
+				tp := "T"
+				if depr {
+					tp += "D"
+				}
+				for _, sp := range decl.Specs {
+					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
+						ans = append(ans, *s)
+					}
+				}
+			}
+		}
+	}
+	return ans
+}
+
+func newsym(pkg, name, kind, sig string) *symbol {
+	if len(name) == 0 || !ast.IsExported(name) {
+		return nil
+	}
+	sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
+	return &sym
+}
+
+func isDeprecated(doc *ast.CommentGroup) bool {
+	if doc == nil {
+		return false
+	}
+	// go.dev/wiki/Deprecated Paragraph starting 'Deprecated:'
+	// This code fails for /* Deprecated: */, but it's the code from
+	// gopls/internal/analysis/deprecated
+	lines := strings.Split(doc.Text(), "\n\n")
+	for _, line := range lines {
+		if strings.HasPrefix(line, "Deprecated:") {
+			return true
+		}
+	}
+	return false
+}
+
+// processSyms returns the package name and the encoded name strings for the symbols.
+// If there are multiple packages, one is chosen arbitrarily.
+// The returned slice is sorted lexicographically by symbol name.
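+// For example, symbols for os.Getenv and os.Open produce
+// ("os", []string{"Getenv F 1 key string", "Open F 2 name string"}).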
+func processSyms(syms []symbol) (string, []string) {
+	if len(syms) == 0 {
+		return "", nil
+	}
+	slices.SortFunc(syms, func(l, r symbol) int {
+		return strings.Compare(l.name, r.name)
+	})
+	pkg := syms[0].pkg
+	var names []string
+	for _, s := range syms {
+		var nx string
+		if s.pkg == pkg {
+			if s.sig != "" {
+				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
+			} else {
+				nx = fmt.Sprintf("%s %s", s.name, s.kind)
+			}
+			names = append(names, nx)
+		} else {
+			continue // PJW: do we want to keep track of these?
+		}
+	}
+	return pkg, names
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
new file mode 100644
index 00000000..ece44886
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/types.go
@@ -0,0 +1,25 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"strings"
+)
+
+// some special types to avoid confusions
+
+// distinguish various types of directory names. It's easy to get confused.
+type Abspath string // absolute paths
+type Relpath string // paths with GOMODCACHE prefix removed
+
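+// toRelpath strips the cachedir prefix (and its path separator) from s, if
+// present. For example, with cachedir "/home/user/go/pkg/mod" it maps
+// "/home/user/go/pkg/mod/golang.org/x/mod@v0.22.0" to "golang.org/x/mod@v0.22.0".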
+func toRelpath(cachedir Abspath, s string) Relpath {
+	if strings.HasPrefix(s, string(cachedir)) {
+		if s == string(cachedir) {
+			return Relpath("")
+		}
+		return Relpath(s[len(cachedir)+1:])
+	}
+	return Relpath(s)
+}
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 44719de1..78460591 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,8 +5,7 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal
 
-var GetForTest = func(p interface{}) string { return "" }
-var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
+var GetDepsErrors = func(p any) []*PackageError { return nil }
 
 type PackageError struct {
 	ImportStack []string // shortest path from package named on command line to this one
@@ -16,7 +15,6 @@ type PackageError struct {
 
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
-var ForTest int    // must be set as a LoadMode to call GetForTest
 
-var SetModFlag = func(config interface{}, value string) {}
+var SetModFlag = func(config any, value string) {}
 var SetModFile = func(config interface{}, value string) {}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
index 2acd8585..f6cb37c5 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -21,10 +21,7 @@ import (
 // export data.
 type PkgDecoder struct {
 	// version is the file format version.
-	version uint32
-
-	// aliases determines whether types.Aliases should be created
-	aliases bool
+	version Version
 
 	// sync indicates whether the file uses sync markers.
 	sync bool
@@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
 // NewPkgDecoder returns a PkgDecoder initialized to read the Unified
 // IR export data from input. pkgPath is the package path for the
 // compilation unit that produced the export data.
-//
-// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
 func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 	pr := PkgDecoder{
 		pkgPath: pkgPath,
-		//aliases: aliases.Enabled(),
 	}
 
 	// TODO(mdempsky): Implement direct indexing of input string to
@@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 
 	r := strings.NewReader(input)
 
-	assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+	var ver uint32
+	assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
+	pr.version = Version(ver)
 
-	switch pr.version {
-	default:
-		panic(fmt.Errorf("unsupported version: %v", pr.version))
-	case 0:
-		// no flags
-	case 1:
+	if pr.version >= numVersions {
+		panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
+	}
+
+	if pr.version.Has(Flags) {
 		var flags uint32
 		assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
 		pr.sync = flags&flagSyncMarkers != 0
@@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
 	assert(err == nil)
 
 	pr.elemData = input[pos:]
-	assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+	const fingerprintSize = 8
+	assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
 
 	return pr
 }
@@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
 		absIdx += int(pr.elemEndsEnds[k-1])
 	}
 	if absIdx >= int(pr.elemEndsEnds[k]) {
-		errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+		panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
 	}
 	return absIdx
 }
@@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
 		Idx:    idx,
 	}
 
-	// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
-	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
-
+	r.Data.Reset(pr.DataIdx(k, idx))
 	r.Sync(SyncRelocs)
 	r.Relocs = make([]RelocEnt, r.Len())
 	for i := range r.Relocs {
@@ -248,7 +243,7 @@ type Decoder struct {
 
 func (r *Decoder) checkErr(err error) {
 	if err != nil {
-		errorf("unexpected decoding error: %w", err)
+		panicf("unexpected decoding error: %w", err)
 	}
 }
 
@@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
 
 	return path, name, tag
 }
+
+// Version reports the version of the bitstream.
+func (w *Decoder) Version() Version { return w.common.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
index 6482617a..c17a1239 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -12,18 +12,15 @@ import (
 	"io"
 	"math/big"
 	"runtime"
+	"strings"
 )
 
-// currentVersion is the current version number.
-//
-//   - v0: initial prototype
-//
-//   - v1: adds the flags uint32 word
-const currentVersion uint32 = 1
-
 // A PkgEncoder provides methods for encoding a package's Unified IR
 // export data.
 type PkgEncoder struct {
+	// version of the bitstream.
+	version Version
+
 	// elems holds the bitstream for previously encoded elements.
 	elems [numRelocs][]string
 
@@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
 // export data files, but can help diagnosing desync errors in
 // higher-level Unified IR reader/writer code. If syncFrames is
 // negative, then sync markers are omitted entirely.
-func NewPkgEncoder(syncFrames int) PkgEncoder {
+func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
 	return PkgEncoder{
+		version:    version,
 		stringsIdx: make(map[string]Index),
 		syncFrames: syncFrames,
 	}
@@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
 		assert(binary.Write(out, binary.LittleEndian, x) == nil)
 	}
 
-	writeUint32(currentVersion)
+	writeUint32(uint32(pw.version))
 
-	var flags uint32
-	if pw.SyncMarkers() {
-		flags |= flagSyncMarkers
+	if pw.version.Has(Flags) {
+		var flags uint32
+		if pw.SyncMarkers() {
+			flags |= flagSyncMarkers
+		}
+		writeUint32(flags)
 	}
-	writeUint32(flags)
 
 	// Write elemEndsEnds.
 	var sum uint32
@@ -159,7 +159,7 @@ type Encoder struct {
 
 // Flush finalizes the element's bitstream and returns its Index.
 func (w *Encoder) Flush() Index {
-	var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+	var sb strings.Builder
 
 	// Backup the data so we write the relocations at the front.
 	var tmp bytes.Buffer
@@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
 
 func (w *Encoder) checkErr(err error) {
 	if err != nil {
-		errorf("unexpected encoding error: %v", err)
+		panicf("unexpected encoding error: %v", err)
 	}
 }
 
@@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) {
 // section (if not already present), and then writing a relocation
 // into the element bitstream.
 func (w *Encoder) String(s string) {
+	w.StringRef(w.p.StringIdx(s))
+}
+
+// StringRef writes a reference to the given index, which must be a
+// previously encoded string value.
+func (w *Encoder) StringRef(idx Index) {
 	w.Sync(SyncString)
-	w.Reloc(RelocString, w.p.StringIdx(s))
+	w.Reloc(RelocString, idx)
 }
 
 // Strings encodes and writes a variable-length slice of strings into
@@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
 func (w *Encoder) scalar(val constant.Value) {
 	switch v := constant.Val(val).(type) {
 	default:
-		errorf("unhandled %v (%v)", val, val.Kind())
+		panicf("unhandled %v (%v)", val, val.Kind())
 	case bool:
 		w.Code(ValBool)
 		w.Bool(v)
@@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
 	b := v.Append(nil, 'p', -1)
 	w.String(string(b)) // TODO: More efficient encoding.
 }
+
+// Version reports the version of the bitstream.
+func (w *Encoder) Version() Version { return w.p.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
deleted file mode 100644
index 5294f6a6..00000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.7
-// +build !go1.7
-
-// TODO(mdempsky): Remove after #44505 is resolved
-
-package pkgbits
-
-import "runtime"
-
-func walkFrames(pcs []uintptr, visit frameVisitor) {
-	for _, pc := range pcs {
-		fn := runtime.FuncForPC(pc)
-		file, line := fn.FileLine(pc)
-
-		visit(file, line, fn.Name(), pc-fn.Entry())
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
deleted file mode 100644
index 2324ae7a..00000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.7
-// +build go1.7
-
-package pkgbits
-
-import "runtime"
-
-// walkFrames calls visit for each call frame represented by pcs.
-//
-// pcs should be a slice of PCs, as returned by runtime.Callers.
-func walkFrames(pcs []uintptr, visit frameVisitor) {
-	if len(pcs) == 0 {
-		return
-	}
-
-	frames := runtime.CallersFrames(pcs)
-	for {
-		frame, more := frames.Next()
-		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
-		if !more {
-			return
-		}
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
index ad26d3b2..50534a29 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/support.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -12,6 +12,6 @@ func assert(b bool) {
 	}
 }
 
-func errorf(format string, args ...interface{}) {
+func panicf(format string, args ...any) {
 	panic(fmt.Errorf(format, args...))
 }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
index 5bd51ef7..1520b73a 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -6,6 +6,7 @@ package pkgbits
 
 import (
 	"fmt"
+	"runtime"
 	"strings"
 )
 
@@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string {
 
 type frameVisitor func(file string, line int, name string, offset uintptr)
 
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+	if len(pcs) == 0 {
+		return
+	}
+
+	frames := runtime.CallersFrames(pcs)
+	for {
+		frame, more := frames.Next()
+		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+		if !more {
+			return
+		}
+	}
+}
+
 // SyncMarker is an enum type that represents markers that may be
 // written to export data to ensure the reader and writer stay
 // synchronized.
@@ -110,4 +129,8 @@ const (
 	SyncStmtsEnd
 	SyncLabel
 	SyncOptLabel
+
+	SyncMultiExpr
+	SyncRType
+	SyncConvRTTI
 )
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
index 4a5b0ca5..582ad56d 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -74,11 +74,14 @@ func _() {
 	_ = x[SyncStmtsEnd-64]
 	_ = x[SyncLabel-65]
 	_ = x[SyncOptLabel-66]
+	_ = x[SyncMultiExpr-67]
+	_ = x[SyncRType-68]
+	_ = x[SyncConvRTTI-69]
 }
 
-const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
 
-var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
 
 func (i SyncMarker) String() string {
 	i -= 1
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go
new file mode 100644
index 00000000..53af9df2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// Version indicates a version of a unified IR bitstream.
+// Each Version indicates the addition, removal, or change of
+// new data in the bitstream.
+//
+// These are serialized to disk and the interpretation remains fixed.
+type Version uint32
+
+const (
+	// V0: initial prototype.
+	//
+	// All data that is not assigned a Field is in version V0
+	// and has not been deprecated.
+	V0 Version = iota
+
+	// V1: adds the Flags uint32 word
+	V1
+
+	// V2: removes unused legacy fields and supports type parameters for aliases.
+	// - remove the legacy "has init" bool from the public root
+	// - remove obj's "derived func instance" bool
+	// - add a TypeParamNames field to ObjAlias
+	// - remove derived info "needed" bool
+	V2
+
+	numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually like a field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+	// Flags in a uint32 in the header of a bitstream
+	// that is used to indicate whether optional features are enabled.
+	Flags Field = iota
+
+	// Deprecated: HasInit was a bool indicating whether a package
+	// has any init functions.
+	HasInit
+
+	// Deprecated: DerivedFuncInstance was a bool indicating
+	// whether an object was a function instance.
+	DerivedFuncInstance
+
+	// ObjAlias has a list of TypeParamNames.
+	AliasTypeParamNames
+
+	// Deprecated: DerivedInfoNeeded was a bool indicating
+	// whether a type was a derived type.
+	DerivedInfoNeeded
+
+	numFields = iota
+)
+
+// introduced is the version a field was added.
+var introduced = [numFields]Version{
+	Flags:               V1,
+	AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+	HasInit:             V2,
+	DerivedFuncInstance: V2,
+	DerivedInfoNeeded:   V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index fd689207..9f0b871f 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -23,6 +23,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ErrWriteAfterClose", Var, 0},
 		{"ErrWriteTooLong", Var, 0},
 		{"FileInfoHeader", Func, 1},
+		{"FileInfoNames", Type, 23},
 		{"Format", Type, 10},
 		{"FormatGNU", Const, 10},
 		{"FormatPAX", Const, 10},
@@ -267,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"ErrTooLarge", Var, 0},
 		{"Fields", Func, 0},
 		{"FieldsFunc", Func, 0},
+		{"FieldsFuncSeq", Func, 24},
+		{"FieldsSeq", Func, 24},
 		{"HasPrefix", Func, 0},
 		{"HasSuffix", Func, 0},
 		{"Index", Func, 0},
@@ -279,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"LastIndexAny", Func, 0},
 		{"LastIndexByte", Func, 5},
 		{"LastIndexFunc", Func, 0},
+		{"Lines", Func, 24},
 		{"Map", Func, 0},
 		{"MinRead", Const, 0},
 		{"NewBuffer", Func, 0},
@@ -292,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Split", Func, 0},
 		{"SplitAfter", Func, 0},
 		{"SplitAfterN", Func, 0},
+		{"SplitAfterSeq", Func, 24},
 		{"SplitN", Func, 0},
+		{"SplitSeq", Func, 24},
 		{"Title", Func, 0},
 		{"ToLower", Func, 0},
 		{"ToLowerSpecial", Func, 0},
@@ -534,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"NewCTR", Func, 0},
 		{"NewGCM", Func, 2},
 		{"NewGCMWithNonceSize", Func, 5},
+		{"NewGCMWithRandomNonce", Func, 24},
 		{"NewGCMWithTagSize", Func, 11},
 		{"NewOFB", Func, 0},
 		{"Stream", Type, 0},
@@ -672,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{
 		{"Unmarshal", Func, 0},
 		{"UnmarshalCompressed", Func, 15},
 	},
+	"crypto/fips140": {
+		{"Enabled", Func, 24},
+	},
+	"crypto/hkdf": {
+		{"Expand", Func, 24},
+		{"Extract", Func, 24},
+		{"Key", Func, 24},
+	},
 	"crypto/hmac": {
 		{"Equal", Func, 1},
 		{"New", Func, 0},
@@ -682,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{
 		{"Size", Const, 0},
 		{"Sum", Func, 2},
 	},
+	"crypto/mlkem": {
+		{"(*DecapsulationKey1024).Bytes", Method, 24},
+		{"(*DecapsulationKey1024).Decapsulate", Method, 24},
+		{"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
+		{"(*DecapsulationKey768).Bytes", Method, 24},
+		{"(*DecapsulationKey768).Decapsulate", Method, 24},
+		{"(*DecapsulationKey768).EncapsulationKey", Method, 24},
+		{"(*EncapsulationKey1024).Bytes", Method, 24},
+		{"(*EncapsulationKey1024).Encapsulate", Method, 24},
+		{"(*EncapsulationKey768).Bytes", Method, 24},
+		{"(*EncapsulationKey768).Encapsulate", Method, 24},
+		{"CiphertextSize1024", Const, 24},
+		{"CiphertextSize768", Const, 24},
+		{"DecapsulationKey1024", Type, 24},
+		{"DecapsulationKey768", Type, 24},
+		{"EncapsulationKey1024", Type, 24},
+		{"EncapsulationKey768", Type, 24},
+		{"EncapsulationKeySize1024", Const, 24},
+		{"EncapsulationKeySize768", Const, 24},
+		{"GenerateKey1024", Func, 24},
+		{"GenerateKey768", Func, 24},
+		{"NewDecapsulationKey1024", Func, 24},
+		{"NewDecapsulationKey768", Func, 24},
+		{"NewEncapsulationKey1024", Func, 24},
+		{"NewEncapsulationKey768", Func, 24},
+		{"SeedSize", Const, 24},
+		{"SharedKeySize", Const, 24},
+	},
+	"crypto/pbkdf2": {
+		{"Key", Func, 24},
+	},
 	"crypto/rand": {
 		{"Int", Func, 0},
 		{"Prime", Func, 0},
 		{"Read", Func, 0},
 		{"Reader", Var, 0},
+		{"Text", Func, 24},
 	},
 	"crypto/rc4": {
 		{"(*Cipher).Reset", Method, 0},
@@ -765,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{
 		{"Sum224", Func, 2},
 		{"Sum256", Func, 2},
 	},
+	"crypto/sha3": {
+		{"(*SHA3).AppendBinary", Method, 24},
+		{"(*SHA3).BlockSize", Method, 24},
+		{"(*SHA3).MarshalBinary", Method, 24},
+		{"(*SHA3).Reset", Method, 24},
+		{"(*SHA3).Size", Method, 24},
+		{"(*SHA3).Sum", Method, 24},
+		{"(*SHA3).UnmarshalBinary", Method, 24},
+		{"(*SHA3).Write", Method, 24},
+		{"(*SHAKE).AppendBinary", Method, 24},
+		{"(*SHAKE).BlockSize", Method, 24},
+		{"(*SHAKE).MarshalBinary", Method, 24},
+		{"(*SHAKE).Read", Method, 24},
+		{"(*SHAKE).Reset", Method, 24},
+		{"(*SHAKE).UnmarshalBinary", Method, 24},
+		{"(*SHAKE).Write", Method, 24},
+		{"New224", Func, 24},
+		{"New256", Func, 24},
+		{"New384", Func, 24},
+		{"New512", Func, 24},
+		{"NewCSHAKE128", Func, 24},
+		{"NewCSHAKE256", Func, 24},
+		{"NewSHAKE128", Func, 24},
+		{"NewSHAKE256", Func, 24},
+		{"SHA3", Type, 24},
+		{"SHAKE", Type, 24},
+		{"Sum224", Func, 24},
+		{"Sum256", Func, 24},
+		{"Sum384", Func, 24},
+		{"Sum512", Func, 24},
+		{"SumSHAKE128", Func, 24},
+		{"SumSHAKE256", Func, 24},
+	},
 	"crypto/sha512": {
 		{"BlockSize", Const, 0},
 		{"New", Func, 0},
@@ -787,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ConstantTimeEq", Func, 0},
 		{"ConstantTimeLessOrEq", Func, 2},
 		{"ConstantTimeSelect", Func, 0},
+		{"WithDataIndependentTiming", Func, 24},
 		{"XORBytes", Func, 20},
 	},
 	"crypto/tls": {
@@ -820,6 +901,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
 		{"(*Dialer).Dial", Method, 15},
 		{"(*Dialer).DialContext", Method, 15},
+		{"(*ECHRejectionError).Error", Method, 23},
 		{"(*QUICConn).Close", Method, 21},
 		{"(*QUICConn).ConnectionState", Method, 21},
 		{"(*QUICConn).HandleData", Method, 21},
@@ -827,6 +909,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*QUICConn).SendSessionTicket", Method, 21},
 		{"(*QUICConn).SetTransportParameters", Method, 21},
 		{"(*QUICConn).Start", Method, 21},
+		{"(*QUICConn).StoreSession", Method, 23},
 		{"(*SessionState).Bytes", Method, 21},
 		{"(AlertError).Error", Method, 21},
 		{"(ClientAuthType).String", Method, 15},
@@ -861,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ClientHelloInfo", Type, 4},
 		{"ClientHelloInfo.CipherSuites", Field, 4},
 		{"ClientHelloInfo.Conn", Field, 8},
+		{"ClientHelloInfo.Extensions", Field, 24},
 		{"ClientHelloInfo.ServerName", Field, 4},
 		{"ClientHelloInfo.SignatureSchemes", Field, 8},
 		{"ClientHelloInfo.SupportedCurves", Field, 4},
@@ -877,6 +961,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Config.ClientSessionCache", Field, 3},
 		{"Config.CurvePreferences", Field, 3},
 		{"Config.DynamicRecordSizingDisabled", Field, 7},
+		{"Config.EncryptedClientHelloConfigList", Field, 23},
+		{"Config.EncryptedClientHelloKeys", Field, 24},
+		{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
 		{"Config.GetCertificate", Field, 4},
 		{"Config.GetClientCertificate", Field, 8},
 		{"Config.GetConfigForClient", Field, 8},
@@ -902,6 +989,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ConnectionState", Type, 0},
 		{"ConnectionState.CipherSuite", Field, 0},
 		{"ConnectionState.DidResume", Field, 1},
+		{"ConnectionState.ECHAccepted", Field, 23},
 		{"ConnectionState.HandshakeComplete", Field, 0},
 		{"ConnectionState.NegotiatedProtocol", Field, 0},
 		{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
@@ -925,7 +1013,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"ECDSAWithP384AndSHA384", Const, 8},
 		{"ECDSAWithP521AndSHA512", Const, 8},
 		{"ECDSAWithSHA1", Const, 10},
+		{"ECHRejectionError", Type, 23},
+		{"ECHRejectionError.RetryConfigList", Field, 23},
 		{"Ed25519", Const, 13},
+		{"EncryptedClientHelloKey", Type, 24},
+		{"EncryptedClientHelloKey.Config", Field, 24},
+		{"EncryptedClientHelloKey.PrivateKey", Field, 24},
+		{"EncryptedClientHelloKey.SendAsRetry", Field, 24},
 		{"InsecureCipherSuites", Func, 14},
 		{"Listen", Func, 0},
 		{"LoadX509KeyPair", Func, 0},
@@ -943,6 +1037,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParseSessionState", Func, 21},
 		{"QUICClient", Func, 21},
 		{"QUICConfig", Type, 21},
+		{"QUICConfig.EnableSessionEvents", Field, 23},
 		{"QUICConfig.TLSConfig", Field, 21},
 		{"QUICConn", Type, 21},
 		{"QUICEncryptionLevel", Type, 21},
@@ -954,16 +1049,20 @@ var PackageSymbols = map[string][]Symbol{
 		{"QUICEvent.Data", Field, 21},
 		{"QUICEvent.Kind", Field, 21},
 		{"QUICEvent.Level", Field, 21},
+		{"QUICEvent.SessionState", Field, 23},
 		{"QUICEvent.Suite", Field, 21},
 		{"QUICEventKind", Type, 21},
 		{"QUICHandshakeDone", Const, 21},
 		{"QUICNoEvent", Const, 21},
 		{"QUICRejectedEarlyData", Const, 21},
+		{"QUICResumeSession", Const, 23},
 		{"QUICServer", Func, 21},
 		{"QUICSessionTicketOptions", Type, 21},
 		{"QUICSessionTicketOptions.EarlyData", Field, 21},
+		{"QUICSessionTicketOptions.Extra", Field, 23},
 		{"QUICSetReadSecret", Const, 21},
 		{"QUICSetWriteSecret", Const, 21},
+		{"QUICStoreSession", Const, 23},
 		{"QUICTransportParameters", Const, 21},
 		{"QUICTransportParametersRequired", Const, 21},
 		{"QUICWriteData", Const, 21},
@@ -1019,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"VersionTLS12", Const, 2},
 		{"VersionTLS13", Const, 12},
 		{"X25519", Const, 8},
+		{"X25519MLKEM768", Const, 24},
 		{"X509KeyPair", Func, 0},
 	},
 	"crypto/x509": {
@@ -1036,13 +1136,19 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Certificate).Verify", Method, 0},
 		{"(*Certificate).VerifyHostname", Method, 0},
 		{"(*CertificateRequest).CheckSignature", Method, 5},
+		{"(*OID).UnmarshalBinary", Method, 23},
+		{"(*OID).UnmarshalText", Method, 23},
 		{"(*RevocationList).CheckSignatureFrom", Method, 19},
 		{"(CertificateInvalidError).Error", Method, 0},
 		{"(ConstraintViolationError).Error", Method, 0},
 		{"(HostnameError).Error", Method, 0},
 		{"(InsecureAlgorithmError).Error", Method, 6},
+		{"(OID).AppendBinary", Method, 24},
+		{"(OID).AppendText", Method, 24},
 		{"(OID).Equal", Method, 22},
 		{"(OID).EqualASN1OID", Method, 22},
+		{"(OID).MarshalBinary", Method, 23},
+		{"(OID).MarshalText", Method, 23},
 		{"(OID).String", Method, 22},
 		{"(PublicKeyAlgorithm).String", Method, 10},
 		{"(SignatureAlgorithm).String", Method, 6},
@@ -1067,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.Extensions", Field, 2},
 		{"Certificate.ExtraExtensions", Field, 2},
 		{"Certificate.IPAddresses", Field, 1},
+		{"Certificate.InhibitAnyPolicy", Field, 24},
+		{"Certificate.InhibitAnyPolicyZero", Field, 24},
+		{"Certificate.InhibitPolicyMapping", Field, 24},
+		{"Certificate.InhibitPolicyMappingZero", Field, 24},
 		{"Certificate.IsCA", Field, 0},
 		{"Certificate.Issuer", Field, 0},
 		{"Certificate.IssuingCertificateURL", Field, 2},
@@ -1083,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.PermittedURIDomains", Field, 10},
 		{"Certificate.Policies", Field, 22},
 		{"Certificate.PolicyIdentifiers", Field, 0},
+		{"Certificate.PolicyMappings", Field, 24},
 		{"Certificate.PublicKey", Field, 0},
 		{"Certificate.PublicKeyAlgorithm", Field, 0},
 		{"Certificate.Raw", Field, 0},
@@ -1090,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"Certificate.RawSubject", Field, 0},
 		{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
 		{"Certificate.RawTBSCertificate", Field, 0},
+		{"Certificate.RequireExplicitPolicy", Field, 24},
+		{"Certificate.RequireExplicitPolicyZero", Field, 24},
 		{"Certificate.SerialNumber", Field, 0},
 		{"Certificate.Signature", Field, 0},
 		{"Certificate.SignatureAlgorithm", Field, 0},
@@ -1181,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"NameConstraintsWithoutSANs", Const, 10},
 		{"NameMismatch", Const, 8},
 		{"NewCertPool", Func, 0},
+		{"NoValidChains", Const, 24},
 		{"NotAuthorizedToSign", Const, 0},
 		{"OID", Type, 22},
 		{"OIDFromInts", Func, 22},
@@ -1196,11 +1310,15 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParseCertificates", Func, 0},
 		{"ParseDERCRL", Func, 0},
 		{"ParseECPrivateKey", Func, 1},
+		{"ParseOID", Func, 23},
 		{"ParsePKCS1PrivateKey", Func, 0},
 		{"ParsePKCS1PublicKey", Func, 10},
 		{"ParsePKCS8PrivateKey", Func, 0},
 		{"ParsePKIXPublicKey", Func, 0},
 		{"ParseRevocationList", Func, 19},
+		{"PolicyMapping", Type, 24},
+		{"PolicyMapping.IssuerDomainPolicy", Field, 24},
+		{"PolicyMapping.SubjectDomainPolicy", Field, 24},
 		{"PublicKeyAlgorithm", Type, 0},
 		{"PureEd25519", Const, 13},
 		{"RSA", Const, 0},
@@ -1247,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"UnknownPublicKeyAlgorithm", Const, 0},
 		{"UnknownSignatureAlgorithm", Const, 0},
 		{"VerifyOptions", Type, 0},
+		{"VerifyOptions.CertificatePolicies", Field, 24},
 		{"VerifyOptions.CurrentTime", Field, 0},
 		{"VerifyOptions.DNSName", Field, 0},
 		{"VerifyOptions.Intermediates", Field, 0},
@@ -1957,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*File).DynString", Method, 1},
 		{"(*File).DynValue", Method, 21},
 		{"(*File).DynamicSymbols", Method, 4},
+		{"(*File).DynamicVersionNeeds", Method, 24},
+		{"(*File).DynamicVersions", Method, 24},
 		{"(*File).ImportedLibraries", Method, 0},
 		{"(*File).ImportedSymbols", Method, 0},
 		{"(*File).Section", Method, 0},
@@ -2222,6 +2343,19 @@ var PackageSymbols = map[string][]Symbol{
 		{"DynFlag", Type, 0},
 		{"DynFlag1", Type, 21},
 		{"DynTag", Type, 0},
+		{"DynamicVersion", Type, 24},
+		{"DynamicVersion.Deps", Field, 24},
+		{"DynamicVersion.Flags", Field, 24},
+		{"DynamicVersion.Index", Field, 24},
+		{"DynamicVersion.Name", Field, 24},
+		{"DynamicVersionDep", Type, 24},
+		{"DynamicVersionDep.Dep", Field, 24},
+		{"DynamicVersionDep.Flags", Field, 24},
+		{"DynamicVersionDep.Index", Field, 24},
+		{"DynamicVersionFlag", Type, 24},
+		{"DynamicVersionNeed", Type, 24},
+		{"DynamicVersionNeed.Name", Field, 24},
+		{"DynamicVersionNeed.Needs", Field, 24},
 		{"EI_ABIVERSION", Const, 0},
 		{"EI_CLASS", Const, 0},
 		{"EI_DATA", Const, 0},
@@ -2541,6 +2675,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"PT_NOTE", Const, 0},
 		{"PT_NULL", Const, 0},
 		{"PT_OPENBSD_BOOTDATA", Const, 16},
+		{"PT_OPENBSD_NOBTCFI", Const, 23},
 		{"PT_OPENBSD_RANDOMIZE", Const, 16},
 		{"PT_OPENBSD_WXNEEDED", Const, 16},
 		{"PT_PAX_FLAGS", Const, 16},
@@ -3620,13 +3755,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"STT_COMMON", Const, 0},
 		{"STT_FILE", Const, 0},
 		{"STT_FUNC", Const, 0},
+		{"STT_GNU_IFUNC", Const, 23},
 		{"STT_HIOS", Const, 0},
 		{"STT_HIPROC", Const, 0},
 		{"STT_LOOS", Const, 0},
 		{"STT_LOPROC", Const, 0},
 		{"STT_NOTYPE", Const, 0},
 		{"STT_OBJECT", Const, 0},
+		{"STT_RELC", Const, 23},
 		{"STT_SECTION", Const, 0},
+		{"STT_SRELC", Const, 23},
 		{"STT_TLS", Const, 0},
 		{"STV_DEFAULT", Const, 0},
 		{"STV_HIDDEN", Const, 0},
@@ -3704,8 +3842,19 @@ var PackageSymbols = map[string][]Symbol{
 		{"Symbol.Size", Field, 0},
 		{"Symbol.Value", Field, 0},
 		{"Symbol.Version", Field, 13},
+		{"Symbol.VersionIndex", Field, 24},
+		{"Symbol.VersionScope", Field, 24},
+		{"SymbolVersionScope", Type, 24},
 		{"Type", Type, 0},
+		{"VER_FLG_BASE", Const, 24},
+		{"VER_FLG_INFO", Const, 24},
+		{"VER_FLG_WEAK", Const, 24},
 		{"Version", Type, 0},
+		{"VersionScopeGlobal", Const, 24},
+		{"VersionScopeHidden", Const, 24},
+		{"VersionScopeLocal", Const, 24},
+		{"VersionScopeNone", Const, 24},
+		{"VersionScopeSpecific", Const, 24},
 	},
 	"debug/gosym": {
 		{"(*DecodingError).Error", Method, 0},
@@ -4431,8 +4580,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"FS", Type, 16},
 	},
 	"encoding": {
+		{"BinaryAppender", Type, 24},
 		{"BinaryMarshaler", Type, 2},
 		{"BinaryUnmarshaler", Type, 2},
+		{"TextAppender", Type, 24},
 		{"TextMarshaler", Type, 2},
 		{"TextUnmarshaler", Type, 2},
 	},
@@ -4544,11 +4695,14 @@ var PackageSymbols = map[string][]Symbol{
 		{"URLEncoding", Var, 0},
 	},
 	"encoding/binary": {
+		{"Append", Func, 23},
 		{"AppendByteOrder", Type, 19},
 		{"AppendUvarint", Func, 19},
 		{"AppendVarint", Func, 19},
 		{"BigEndian", Var, 0},
 		{"ByteOrder", Type, 0},
+		{"Decode", Func, 23},
+		{"Encode", Func, 23},
 		{"LittleEndian", Var, 0},
 		{"MaxVarintLen16", Const, 0},
 		{"MaxVarintLen32", Const, 0},
@@ -5308,6 +5462,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParenExpr.Rparen", Field, 0},
 		{"ParenExpr.X", Field, 0},
 		{"Pkg", Const, 0},
+		{"Preorder", Func, 23},
 		{"Print", Func, 0},
 		{"RECV", Const, 0},
 		{"RangeStmt", Type, 0},
@@ -5898,7 +6053,12 @@ var PackageSymbols = map[string][]Symbol{
 	},
 	"go/types": {
 		{"(*Alias).Obj", Method, 22},
+		{"(*Alias).Origin", Method, 23},
+		{"(*Alias).Rhs", Method, 23},
+		{"(*Alias).SetTypeParams", Method, 23},
 		{"(*Alias).String", Method, 22},
+		{"(*Alias).TypeArgs", Method, 23},
+		{"(*Alias).TypeParams", Method, 23},
 		{"(*Alias).Underlying", Method, 22},
 		{"(*ArgumentError).Error", Method, 18},
 		{"(*ArgumentError).Unwrap", Method, 18},
@@ -5943,6 +6103,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Func).Pkg", Method, 5},
 		{"(*Func).Pos", Method, 5},
 		{"(*Func).Scope", Method, 5},
+		{"(*Func).Signature", Method, 23},
 		{"(*Func).String", Method, 5},
 		{"(*Func).Type", Method, 5},
 		{"(*Info).ObjectOf", Method, 5},
@@ -5952,13 +6113,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Interface).Complete", Method, 5},
 		{"(*Interface).Embedded", Method, 5},
 		{"(*Interface).EmbeddedType", Method, 11},
+		{"(*Interface).EmbeddedTypes", Method, 24},
 		{"(*Interface).Empty", Method, 5},
 		{"(*Interface).ExplicitMethod", Method, 5},
+		{"(*Interface).ExplicitMethods", Method, 24},
 		{"(*Interface).IsComparable", Method, 18},
 		{"(*Interface).IsImplicit", Method, 18},
 		{"(*Interface).IsMethodSet", Method, 18},
 		{"(*Interface).MarkImplicit", Method, 18},
 		{"(*Interface).Method", Method, 5},
+		{"(*Interface).Methods", Method, 24},
 		{"(*Interface).NumEmbeddeds", Method, 5},
 		{"(*Interface).NumExplicitMethods", Method, 5},
 		{"(*Interface).NumMethods", Method, 5},
@@ -5979,9 +6143,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*MethodSet).At", Method, 5},
 		{"(*MethodSet).Len", Method, 5},
 		{"(*MethodSet).Lookup", Method, 5},
+		{"(*MethodSet).Methods", Method, 24},
 		{"(*MethodSet).String", Method, 5},
 		{"(*Named).AddMethod", Method, 5},
 		{"(*Named).Method", Method, 5},
+		{"(*Named).Methods", Method, 24},
 		{"(*Named).NumMethods", Method, 5},
 		{"(*Named).Obj", Method, 5},
 		{"(*Named).Origin", Method, 18},
@@ -6022,6 +6188,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Pointer).String", Method, 5},
 		{"(*Pointer).Underlying", Method, 5},
 		{"(*Scope).Child", Method, 5},
+		{"(*Scope).Children", Method, 24},
 		{"(*Scope).Contains", Method, 5},
 		{"(*Scope).End", Method, 5},
 		{"(*Scope).Innermost", Method, 5},
@@ -6057,6 +6224,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*StdSizes).Offsetsof", Method, 5},
 		{"(*StdSizes).Sizeof", Method, 5},
 		{"(*Struct).Field", Method, 5},
+		{"(*Struct).Fields", Method, 24},
 		{"(*Struct).NumFields", Method, 5},
 		{"(*Struct).String", Method, 5},
 		{"(*Struct).Tag", Method, 5},
@@ -6068,8 +6236,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Tuple).Len", Method, 5},
 		{"(*Tuple).String", Method, 5},
 		{"(*Tuple).Underlying", Method, 5},
+		{"(*Tuple).Variables", Method, 24},
 		{"(*TypeList).At", Method, 18},
 		{"(*TypeList).Len", Method, 18},
+		{"(*TypeList).Types", Method, 24},
 		{"(*TypeName).Exported", Method, 5},
 		{"(*TypeName).Id", Method, 5},
 		{"(*TypeName).IsAlias", Method, 9},
@@ -6087,9 +6257,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*TypeParam).Underlying", Method, 18},
 		{"(*TypeParamList).At", Method, 18},
 		{"(*TypeParamList).Len", Method, 18},
+		{"(*TypeParamList).TypeParams", Method, 24},
 		{"(*Union).Len", Method, 18},
 		{"(*Union).String", Method, 18},
 		{"(*Union).Term", Method, 18},
+		{"(*Union).Terms", Method, 24},
 		{"(*Union).Underlying", Method, 18},
 		{"(*Var).Anonymous", Method, 5},
 		{"(*Var).Embedded", Method, 11},
@@ -6360,10 +6532,12 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Hash).WriteByte", Method, 14},
 		{"(*Hash).WriteString", Method, 14},
 		{"Bytes", Func, 19},
+		{"Comparable", Func, 24},
 		{"Hash", Type, 14},
 		{"MakeSeed", Func, 14},
 		{"Seed", Type, 14},
 		{"String", Func, 19},
+		{"WriteComparable", Func, 24},
 	},
 	"html": {
 		{"EscapeString", Func, 0},
@@ -6992,6 +7166,12 @@ var PackageSymbols = map[string][]Symbol{
 		{"TempFile", Func, 0},
 		{"WriteFile", Func, 0},
 	},
+	"iter": {
+		{"Pull", Func, 23},
+		{"Pull2", Func, 23},
+		{"Seq", Type, 23},
+		{"Seq2", Type, 23},
+	},
 	"log": {
 		{"(*Logger).Fatal", Method, 0},
 		{"(*Logger).Fatalf", Method, 0},
@@ -7044,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*JSONHandler).WithGroup", Method, 21},
 		{"(*Level).UnmarshalJSON", Method, 21},
 		{"(*Level).UnmarshalText", Method, 21},
+		{"(*LevelVar).AppendText", Method, 24},
 		{"(*LevelVar).Level", Method, 21},
 		{"(*LevelVar).MarshalText", Method, 21},
 		{"(*LevelVar).Set", Method, 21},
@@ -7072,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Attr).Equal", Method, 21},
 		{"(Attr).String", Method, 21},
 		{"(Kind).String", Method, 21},
+		{"(Level).AppendText", Method, 24},
 		{"(Level).Level", Method, 21},
 		{"(Level).MarshalJSON", Method, 21},
 		{"(Level).MarshalText", Method, 21},
@@ -7102,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Debug", Func, 21},
 		{"DebugContext", Func, 21},
 		{"Default", Func, 21},
+		{"DiscardHandler", Var, 24},
 		{"Duration", Func, 21},
 		{"DurationValue", Func, 21},
 		{"Error", Func, 21},
@@ -7222,11 +7405,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"Writer", Type, 0},
 	},
 	"maps": {
+		{"All", Func, 23},
 		{"Clone", Func, 21},
+		{"Collect", Func, 23},
 		{"Copy", Func, 21},
 		{"DeleteFunc", Func, 21},
 		{"Equal", Func, 21},
 		{"EqualFunc", Func, 21},
+		{"Insert", Func, 23},
+		{"Keys", Func, 23},
+		{"Values", Func, 23},
 	},
 	"math": {
 		{"Abs", Func, 0},
@@ -7332,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Float).Acc", Method, 5},
 		{"(*Float).Add", Method, 5},
 		{"(*Float).Append", Method, 5},
+		{"(*Float).AppendText", Method, 24},
 		{"(*Float).Cmp", Method, 5},
 		{"(*Float).Copy", Method, 5},
 		{"(*Float).Float32", Method, 5},
@@ -7378,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Int).And", Method, 0},
 		{"(*Int).AndNot", Method, 0},
 		{"(*Int).Append", Method, 6},
+		{"(*Int).AppendText", Method, 24},
 		{"(*Int).Binomial", Method, 0},
 		{"(*Int).Bit", Method, 0},
 		{"(*Int).BitLen", Method, 0},
@@ -7434,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Int).Xor", Method, 0},
 		{"(*Rat).Abs", Method, 0},
 		{"(*Rat).Add", Method, 0},
+		{"(*Rat).AppendText", Method, 24},
 		{"(*Rat).Cmp", Method, 0},
 		{"(*Rat).Denom", Method, 0},
 		{"(*Rat).Float32", Method, 4},
@@ -7616,10 +7807,13 @@ var PackageSymbols = map[string][]Symbol{
 		{"Zipf", Type, 0},
 	},
 	"math/rand/v2": {
+		{"(*ChaCha8).AppendBinary", Method, 24},
 		{"(*ChaCha8).MarshalBinary", Method, 22},
+		{"(*ChaCha8).Read", Method, 23},
 		{"(*ChaCha8).Seed", Method, 22},
 		{"(*ChaCha8).Uint64", Method, 22},
 		{"(*ChaCha8).UnmarshalBinary", Method, 22},
+		{"(*PCG).AppendBinary", Method, 24},
 		{"(*PCG).MarshalBinary", Method, 22},
 		{"(*PCG).Seed", Method, 22},
 		{"(*PCG).Uint64", Method, 22},
@@ -7636,6 +7830,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Rand).NormFloat64", Method, 22},
 		{"(*Rand).Perm", Method, 22},
 		{"(*Rand).Shuffle", Method, 22},
+		{"(*Rand).Uint", Method, 23},
 		{"(*Rand).Uint32", Method, 22},
 		{"(*Rand).Uint32N", Method, 22},
 		{"(*Rand).Uint64", Method, 22},
@@ -7663,6 +7858,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Rand", Type, 22},
 		{"Shuffle", Func, 22},
 		{"Source", Type, 22},
+		{"Uint", Func, 23},
 		{"Uint32", Func, 22},
 		{"Uint32N", Func, 22},
 		{"Uint64", Func, 22},
@@ -7743,6 +7939,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*DNSError).Error", Method, 0},
 		{"(*DNSError).Temporary", Method, 0},
 		{"(*DNSError).Timeout", Method, 0},
+		{"(*DNSError).Unwrap", Method, 23},
 		{"(*Dialer).Dial", Method, 1},
 		{"(*Dialer).DialContext", Method, 7},
 		{"(*Dialer).MultipathTCP", Method, 21},
@@ -7809,6 +8006,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*TCPConn).RemoteAddr", Method, 0},
 		{"(*TCPConn).SetDeadline", Method, 0},
 		{"(*TCPConn).SetKeepAlive", Method, 0},
+		{"(*TCPConn).SetKeepAliveConfig", Method, 23},
 		{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
 		{"(*TCPConn).SetLinger", Method, 0},
 		{"(*TCPConn).SetNoDelay", Method, 0},
@@ -7883,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*UnixListener).SyscallConn", Method, 10},
 		{"(Flags).String", Method, 0},
 		{"(HardwareAddr).String", Method, 0},
+		{"(IP).AppendText", Method, 24},
 		{"(IP).DefaultMask", Method, 0},
 		{"(IP).Equal", Method, 0},
 		{"(IP).IsGlobalUnicast", Method, 0},
@@ -7922,6 +8121,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"DNSError.IsTimeout", Field, 0},
 		{"DNSError.Name", Field, 0},
 		{"DNSError.Server", Field, 0},
+		{"DNSError.UnwrapErr", Field, 23},
 		{"DefaultResolver", Var, 8},
 		{"Dial", Func, 0},
 		{"DialIP", Func, 0},
@@ -7937,6 +8137,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Dialer.DualStack", Field, 2},
 		{"Dialer.FallbackDelay", Field, 5},
 		{"Dialer.KeepAlive", Field, 3},
+		{"Dialer.KeepAliveConfig", Field, 23},
 		{"Dialer.LocalAddr", Field, 1},
 		{"Dialer.Resolver", Field, 8},
 		{"Dialer.Timeout", Field, 1},
@@ -7989,10 +8190,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"Interfaces", Func, 0},
 		{"InvalidAddrError", Type, 0},
 		{"JoinHostPort", Func, 0},
+		{"KeepAliveConfig", Type, 23},
+		{"KeepAliveConfig.Count", Field, 23},
+		{"KeepAliveConfig.Enable", Field, 23},
+		{"KeepAliveConfig.Idle", Field, 23},
+		{"KeepAliveConfig.Interval", Field, 23},
 		{"Listen", Func, 0},
 		{"ListenConfig", Type, 11},
 		{"ListenConfig.Control", Field, 11},
 		{"ListenConfig.KeepAlive", Field, 13},
+		{"ListenConfig.KeepAliveConfig", Field, 23},
 		{"ListenIP", Func, 0},
 		{"ListenMulticastUDP", Func, 0},
 		{"ListenPacket", Func, 0},
@@ -8075,12 +8282,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*MaxBytesError).Error", Method, 19},
 		{"(*ProtocolError).Error", Method, 0},
 		{"(*ProtocolError).Is", Method, 21},
+		{"(*Protocols).SetHTTP1", Method, 24},
+		{"(*Protocols).SetHTTP2", Method, 24},
+		{"(*Protocols).SetUnencryptedHTTP2", Method, 24},
 		{"(*Request).AddCookie", Method, 0},
 		{"(*Request).BasicAuth", Method, 4},
 		{"(*Request).Clone", Method, 13},
 		{"(*Request).Context", Method, 7},
 		{"(*Request).Cookie", Method, 0},
 		{"(*Request).Cookies", Method, 0},
+		{"(*Request).CookiesNamed", Method, 23},
 		{"(*Request).FormFile", Method, 0},
 		{"(*Request).FormValue", Method, 0},
 		{"(*Request).MultipartReader", Method, 0},
@@ -8133,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Header).Values", Method, 14},
 		{"(Header).Write", Method, 0},
 		{"(Header).WriteSubset", Method, 0},
+		{"(Protocols).HTTP1", Method, 24},
+		{"(Protocols).HTTP2", Method, 24},
+		{"(Protocols).String", Method, 24},
+		{"(Protocols).UnencryptedHTTP2", Method, 24},
 		{"AllowQuerySemicolons", Func, 17},
 		{"CanonicalHeaderKey", Func, 0},
 		{"Client", Type, 0},
@@ -8148,7 +8363,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Cookie.HttpOnly", Field, 0},
 		{"Cookie.MaxAge", Field, 0},
 		{"Cookie.Name", Field, 0},
+		{"Cookie.Partitioned", Field, 23},
 		{"Cookie.Path", Field, 0},
+		{"Cookie.Quoted", Field, 23},
 		{"Cookie.Raw", Field, 0},
 		{"Cookie.RawExpires", Field, 0},
 		{"Cookie.SameSite", Field, 11},
@@ -8193,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{
 		{"FileSystem", Type, 0},
 		{"Flusher", Type, 0},
 		{"Get", Func, 0},
+		{"HTTP2Config", Type, 24},
+		{"HTTP2Config.CountError", Field, 24},
+		{"HTTP2Config.MaxConcurrentStreams", Field, 24},
+		{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
+		{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
+		{"HTTP2Config.MaxReadFrameSize", Field, 24},
+		{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
+		{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
+		{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
+		{"HTTP2Config.PingTimeout", Field, 24},
+		{"HTTP2Config.SendPingTimeout", Field, 24},
+		{"HTTP2Config.WriteByteTimeout", Field, 24},
 		{"Handle", Func, 0},
 		{"HandleFunc", Func, 0},
 		{"Handler", Type, 0},
@@ -8225,12 +8454,15 @@ var PackageSymbols = map[string][]Symbol{
 		{"NoBody", Var, 8},
 		{"NotFound", Func, 0},
 		{"NotFoundHandler", Func, 0},
+		{"ParseCookie", Func, 23},
 		{"ParseHTTPVersion", Func, 0},
+		{"ParseSetCookie", Func, 23},
 		{"ParseTime", Func, 1},
 		{"Post", Func, 0},
 		{"PostForm", Func, 0},
 		{"ProtocolError", Type, 0},
 		{"ProtocolError.ErrorString", Field, 0},
+		{"Protocols", Type, 24},
 		{"ProxyFromEnvironment", Func, 0},
 		{"ProxyURL", Func, 0},
 		{"PushOptions", Type, 8},
@@ -8252,6 +8484,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Request.Host", Field, 0},
 		{"Request.Method", Field, 0},
 		{"Request.MultipartForm", Field, 0},
+		{"Request.Pattern", Field, 23},
 		{"Request.PostForm", Field, 1},
 		{"Request.Proto", Field, 0},
 		{"Request.ProtoMajor", Field, 0},
@@ -8299,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"Server.ConnState", Field, 3},
 		{"Server.DisableGeneralOptionsHandler", Field, 20},
 		{"Server.ErrorLog", Field, 3},
+		{"Server.HTTP2", Field, 24},
 		{"Server.Handler", Field, 0},
 		{"Server.IdleTimeout", Field, 8},
 		{"Server.MaxHeaderBytes", Field, 0},
+		{"Server.Protocols", Field, 24},
 		{"Server.ReadHeaderTimeout", Field, 8},
 		{"Server.ReadTimeout", Field, 0},
 		{"Server.TLSConfig", Field, 0},
@@ -8391,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{
 		{"Transport.ExpectContinueTimeout", Field, 6},
 		{"Transport.ForceAttemptHTTP2", Field, 13},
 		{"Transport.GetProxyConnectHeader", Field, 16},
+		{"Transport.HTTP2", Field, 24},
 		{"Transport.IdleConnTimeout", Field, 7},
 		{"Transport.MaxConnsPerHost", Field, 11},
 		{"Transport.MaxIdleConns", Field, 7},
 		{"Transport.MaxIdleConnsPerHost", Field, 0},
 		{"Transport.MaxResponseHeaderBytes", Field, 7},
 		{"Transport.OnProxyConnectResponse", Field, 20},
+		{"Transport.Protocols", Field, 24},
 		{"Transport.Proxy", Field, 0},
 		{"Transport.ProxyConnectHeader", Field, 8},
 		{"Transport.ReadBufferSize", Field, 13},
@@ -8453,6 +8690,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"DefaultRemoteAddr", Const, 0},
 		{"NewRecorder", Func, 0},
 		{"NewRequest", Func, 7},
+		{"NewRequestWithContext", Func, 23},
 		{"NewServer", Func, 0},
 		{"NewTLSServer", Func, 0},
 		{"NewUnstartedServer", Func, 0},
@@ -8583,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*AddrPort).UnmarshalText", Method, 18},
 		{"(*Prefix).UnmarshalBinary", Method, 18},
 		{"(*Prefix).UnmarshalText", Method, 18},
+		{"(Addr).AppendBinary", Method, 24},
+		{"(Addr).AppendText", Method, 24},
 		{"(Addr).AppendTo", Method, 18},
 		{"(Addr).As16", Method, 18},
 		{"(Addr).As4", Method, 18},
@@ -8613,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Addr).WithZone", Method, 18},
 		{"(Addr).Zone", Method, 18},
 		{"(AddrPort).Addr", Method, 18},
+		{"(AddrPort).AppendBinary", Method, 24},
+		{"(AddrPort).AppendText", Method, 24},
 		{"(AddrPort).AppendTo", Method, 18},
 		{"(AddrPort).Compare", Method, 22},
 		{"(AddrPort).IsValid", Method, 18},
@@ -8621,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(AddrPort).Port", Method, 18},
 		{"(AddrPort).String", Method, 18},
 		{"(Prefix).Addr", Method, 18},
+		{"(Prefix).AppendBinary", Method, 24},
+		{"(Prefix).AppendText", Method, 24},
 		{"(Prefix).AppendTo", Method, 18},
 		{"(Prefix).Bits", Method, 18},
 		{"(Prefix).Contains", Method, 18},
@@ -8805,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Error).Temporary", Method, 6},
 		{"(*Error).Timeout", Method, 6},
 		{"(*Error).Unwrap", Method, 13},
+		{"(*URL).AppendBinary", Method, 24},
 		{"(*URL).EscapedFragment", Method, 15},
 		{"(*URL).EscapedPath", Method, 5},
 		{"(*URL).Hostname", Method, 8},
@@ -8904,6 +9149,17 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*ProcessState).SysUsage", Method, 0},
 		{"(*ProcessState).SystemTime", Method, 0},
 		{"(*ProcessState).UserTime", Method, 0},
+		{"(*Root).Close", Method, 24},
+		{"(*Root).Create", Method, 24},
+		{"(*Root).FS", Method, 24},
+		{"(*Root).Lstat", Method, 24},
+		{"(*Root).Mkdir", Method, 24},
+		{"(*Root).Name", Method, 24},
+		{"(*Root).Open", Method, 24},
+		{"(*Root).OpenFile", Method, 24},
+		{"(*Root).OpenRoot", Method, 24},
+		{"(*Root).Remove", Method, 24},
+		{"(*Root).Stat", Method, 24},
 		{"(*SyscallError).Error", Method, 0},
 		{"(*SyscallError).Timeout", Method, 10},
 		{"(*SyscallError).Unwrap", Method, 13},
@@ -8917,6 +9173,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Chown", Func, 0},
 		{"Chtimes", Func, 0},
 		{"Clearenv", Func, 0},
+		{"CopyFS", Func, 23},
 		{"Create", Func, 0},
 		{"CreateTemp", Func, 16},
 		{"DevNull", Const, 0},
@@ -8996,6 +9253,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"O_WRONLY", Const, 0},
 		{"Open", Func, 0},
 		{"OpenFile", Func, 0},
+		{"OpenInRoot", Func, 24},
+		{"OpenRoot", Func, 24},
 		{"PathError", Type, 0},
 		{"PathError.Err", Field, 0},
 		{"PathError.Op", Field, 0},
@@ -9017,6 +9276,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Remove", Func, 0},
 		{"RemoveAll", Func, 0},
 		{"Rename", Func, 0},
+		{"Root", Type, 24},
 		{"SEEK_CUR", Const, 0},
 		{"SEEK_END", Const, 0},
 		{"SEEK_SET", Const, 0},
@@ -9150,6 +9410,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"IsLocal", Func, 20},
 		{"Join", Func, 0},
 		{"ListSeparator", Const, 0},
+		{"Localize", Func, 23},
 		{"Match", Func, 0},
 		{"Rel", Func, 0},
 		{"Separator", Const, 0},
@@ -9232,6 +9493,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Value).Pointer", Method, 0},
 		{"(Value).Recv", Method, 0},
 		{"(Value).Send", Method, 0},
+		{"(Value).Seq", Method, 23},
+		{"(Value).Seq2", Method, 23},
 		{"(Value).Set", Method, 0},
 		{"(Value).SetBool", Method, 0},
 		{"(Value).SetBytes", Method, 0},
@@ -9314,6 +9577,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"SelectSend", Const, 1},
 		{"SendDir", Const, 0},
 		{"Slice", Const, 0},
+		{"SliceAt", Func, 23},
 		{"SliceHeader", Type, 0},
 		{"SliceHeader.Cap", Field, 0},
 		{"SliceHeader.Data", Field, 0},
@@ -9354,6 +9618,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Zero", Func, 0},
 	},
 	"regexp": {
+		{"(*Regexp).AppendText", Method, 24},
 		{"(*Regexp).Copy", Method, 6},
 		{"(*Regexp).Expand", Method, 0},
 		{"(*Regexp).ExpandString", Method, 0},
@@ -9534,6 +9799,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*StackRecord).Stack", Method, 0},
 		{"(*TypeAssertionError).Error", Method, 0},
 		{"(*TypeAssertionError).RuntimeError", Method, 0},
+		{"(Cleanup).Stop", Method, 24},
+		{"AddCleanup", Func, 24},
 		{"BlockProfile", Func, 1},
 		{"BlockProfileRecord", Type, 1},
 		{"BlockProfileRecord.Count", Field, 1},
@@ -9544,6 +9811,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Caller", Func, 0},
 		{"Callers", Func, 0},
 		{"CallersFrames", Func, 7},
+		{"Cleanup", Type, 24},
 		{"Compiler", Const, 0},
 		{"Error", Type, 0},
 		{"Frame", Type, 7},
@@ -9655,6 +9923,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"BuildSetting", Type, 18},
 		{"BuildSetting.Key", Field, 18},
 		{"BuildSetting.Value", Field, 18},
+		{"CrashOptions", Type, 23},
 		{"FreeOSMemory", Func, 1},
 		{"GCStats", Type, 1},
 		{"GCStats.LastGC", Field, 1},
@@ -9672,6 +9941,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"PrintStack", Func, 0},
 		{"ReadBuildInfo", Func, 12},
 		{"ReadGCStats", Func, 1},
+		{"SetCrashOutput", Func, 23},
 		{"SetGCPercent", Func, 1},
 		{"SetMaxStack", Func, 2},
 		{"SetMaxThreads", Func, 2},
@@ -9742,10 +10012,15 @@ var PackageSymbols = map[string][]Symbol{
 		{"WithRegion", Func, 11},
 	},
 	"slices": {
+		{"All", Func, 23},
+		{"AppendSeq", Func, 23},
+		{"Backward", Func, 23},
 		{"BinarySearch", Func, 21},
 		{"BinarySearchFunc", Func, 21},
+		{"Chunk", Func, 23},
 		{"Clip", Func, 21},
 		{"Clone", Func, 21},
+		{"Collect", Func, 23},
 		{"Compact", Func, 21},
 		{"CompactFunc", Func, 21},
 		{"Compare", Func, 21},
@@ -9767,11 +10042,16 @@ var PackageSymbols = map[string][]Symbol{
 		{"MaxFunc", Func, 21},
 		{"Min", Func, 21},
 		{"MinFunc", Func, 21},
+		{"Repeat", Func, 23},
 		{"Replace", Func, 21},
 		{"Reverse", Func, 21},
 		{"Sort", Func, 21},
 		{"SortFunc", Func, 21},
 		{"SortStableFunc", Func, 21},
+		{"Sorted", Func, 23},
+		{"SortedFunc", Func, 23},
+		{"SortedStableFunc", Func, 23},
+		{"Values", Func, 23},
 	},
 	"sort": {
 		{"(Float64Slice).Len", Method, 0},
@@ -9894,6 +10174,8 @@ var PackageSymbols = map[string][]Symbol{
 		{"EqualFold", Func, 0},
 		{"Fields", Func, 0},
 		{"FieldsFunc", Func, 0},
+		{"FieldsFuncSeq", Func, 24},
+		{"FieldsSeq", Func, 24},
 		{"HasPrefix", Func, 0},
 		{"HasSuffix", Func, 0},
 		{"Index", Func, 0},
@@ -9906,6 +10188,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"LastIndexAny", Func, 0},
 		{"LastIndexByte", Func, 5},
 		{"LastIndexFunc", Func, 0},
+		{"Lines", Func, 24},
 		{"Map", Func, 0},
 		{"NewReader", Func, 0},
 		{"NewReplacer", Func, 0},
@@ -9917,7 +10200,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"Split", Func, 0},
 		{"SplitAfter", Func, 0},
 		{"SplitAfterN", Func, 0},
+		{"SplitAfterSeq", Func, 24},
 		{"SplitN", Func, 0},
+		{"SplitSeq", Func, 24},
 		{"Title", Func, 0},
 		{"ToLower", Func, 0},
 		{"ToLowerSpecial", Func, 0},
@@ -9936,10 +10221,14 @@ var PackageSymbols = map[string][]Symbol{
 		{"TrimSpace", Func, 0},
 		{"TrimSuffix", Func, 1},
 	},
+	"structs": {
+		{"HostLayout", Type, 23},
+	},
 	"sync": {
 		{"(*Cond).Broadcast", Method, 0},
 		{"(*Cond).Signal", Method, 0},
 		{"(*Cond).Wait", Method, 0},
+		{"(*Map).Clear", Method, 23},
 		{"(*Map).CompareAndDelete", Method, 20},
 		{"(*Map).CompareAndSwap", Method, 20},
 		{"(*Map).Delete", Method, 9},
@@ -9986,13 +10275,17 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Bool).Store", Method, 19},
 		{"(*Bool).Swap", Method, 19},
 		{"(*Int32).Add", Method, 19},
+		{"(*Int32).And", Method, 23},
 		{"(*Int32).CompareAndSwap", Method, 19},
 		{"(*Int32).Load", Method, 19},
+		{"(*Int32).Or", Method, 23},
 		{"(*Int32).Store", Method, 19},
 		{"(*Int32).Swap", Method, 19},
 		{"(*Int64).Add", Method, 19},
+		{"(*Int64).And", Method, 23},
 		{"(*Int64).CompareAndSwap", Method, 19},
 		{"(*Int64).Load", Method, 19},
+		{"(*Int64).Or", Method, 23},
 		{"(*Int64).Store", Method, 19},
 		{"(*Int64).Swap", Method, 19},
 		{"(*Pointer).CompareAndSwap", Method, 19},
@@ -10000,18 +10293,24 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*Pointer).Store", Method, 19},
 		{"(*Pointer).Swap", Method, 19},
 		{"(*Uint32).Add", Method, 19},
+		{"(*Uint32).And", Method, 23},
 		{"(*Uint32).CompareAndSwap", Method, 19},
 		{"(*Uint32).Load", Method, 19},
+		{"(*Uint32).Or", Method, 23},
 		{"(*Uint32).Store", Method, 19},
 		{"(*Uint32).Swap", Method, 19},
 		{"(*Uint64).Add", Method, 19},
+		{"(*Uint64).And", Method, 23},
 		{"(*Uint64).CompareAndSwap", Method, 19},
 		{"(*Uint64).Load", Method, 19},
+		{"(*Uint64).Or", Method, 23},
 		{"(*Uint64).Store", Method, 19},
 		{"(*Uint64).Swap", Method, 19},
 		{"(*Uintptr).Add", Method, 19},
+		{"(*Uintptr).And", Method, 23},
 		{"(*Uintptr).CompareAndSwap", Method, 19},
 		{"(*Uintptr).Load", Method, 19},
+		{"(*Uintptr).Or", Method, 23},
 		{"(*Uintptr).Store", Method, 19},
 		{"(*Uintptr).Swap", Method, 19},
 		{"(*Value).CompareAndSwap", Method, 17},
@@ -10023,6 +10322,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"AddUint32", Func, 0},
 		{"AddUint64", Func, 0},
 		{"AddUintptr", Func, 0},
+		{"AndInt32", Func, 23},
+		{"AndInt64", Func, 23},
+		{"AndUint32", Func, 23},
+		{"AndUint64", Func, 23},
+		{"AndUintptr", Func, 23},
 		{"Bool", Type, 19},
 		{"CompareAndSwapInt32", Func, 0},
 		{"CompareAndSwapInt64", Func, 0},
@@ -10038,6 +10342,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"LoadUint32", Func, 0},
 		{"LoadUint64", Func, 0},
 		{"LoadUintptr", Func, 0},
+		{"OrInt32", Func, 23},
+		{"OrInt64", Func, 23},
+		{"OrUint32", Func, 23},
+		{"OrUint64", Func, 23},
+		{"OrUintptr", Func, 23},
 		{"Pointer", Type, 19},
 		{"StoreInt32", Func, 0},
 		{"StoreInt64", Func, 0},
@@ -16200,6 +16509,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"WSAEACCES", Const, 2},
 		{"WSAECONNABORTED", Const, 9},
 		{"WSAECONNRESET", Const, 3},
+		{"WSAENOPROTOOPT", Const, 23},
 		{"WSAEnumProtocols", Func, 2},
 		{"WSAID_CONNECTEX", Var, 1},
 		{"WSAIoctl", Func, 0},
@@ -16308,7 +16618,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"ValueOf", Func, 0},
 	},
 	"testing": {
+		{"(*B).Chdir", Method, 24},
 		{"(*B).Cleanup", Method, 14},
+		{"(*B).Context", Method, 24},
 		{"(*B).Elapsed", Method, 20},
 		{"(*B).Error", Method, 0},
 		{"(*B).Errorf", Method, 0},
@@ -16320,6 +16632,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*B).Helper", Method, 9},
 		{"(*B).Log", Method, 0},
 		{"(*B).Logf", Method, 0},
+		{"(*B).Loop", Method, 24},
 		{"(*B).Name", Method, 8},
 		{"(*B).ReportAllocs", Method, 1},
 		{"(*B).ReportMetric", Method, 13},
@@ -16337,7 +16650,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*B).StopTimer", Method, 0},
 		{"(*B).TempDir", Method, 15},
 		{"(*F).Add", Method, 18},
+		{"(*F).Chdir", Method, 24},
 		{"(*F).Cleanup", Method, 18},
+		{"(*F).Context", Method, 24},
 		{"(*F).Error", Method, 18},
 		{"(*F).Errorf", Method, 18},
 		{"(*F).Fail", Method, 18},
@@ -16358,7 +16673,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"(*F).TempDir", Method, 18},
 		{"(*M).Run", Method, 4},
 		{"(*PB).Next", Method, 3},
+		{"(*T).Chdir", Method, 24},
 		{"(*T).Cleanup", Method, 14},
+		{"(*T).Context", Method, 24},
 		{"(*T).Deadline", Method, 15},
 		{"(*T).Error", Method, 0},
 		{"(*T).Errorf", Method, 0},
@@ -16849,7 +17166,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"(Time).Add", Method, 0},
 		{"(Time).AddDate", Method, 0},
 		{"(Time).After", Method, 0},
+		{"(Time).AppendBinary", Method, 24},
 		{"(Time).AppendFormat", Method, 5},
+		{"(Time).AppendText", Method, 24},
 		{"(Time).Before", Method, 0},
 		{"(Time).Clock", Method, 0},
 		{"(Time).Compare", Method, 20},
@@ -17284,6 +17603,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"Encode", Func, 0},
 		{"EncodeRune", Func, 0},
 		{"IsSurrogate", Func, 0},
+		{"RuneLen", Func, 23},
 	},
 	"unicode/utf8": {
 		{"AppendRune", Func, 18},
@@ -17306,6 +17626,11 @@ var PackageSymbols = map[string][]Symbol{
 		{"ValidRune", Func, 1},
 		{"ValidString", Func, 0},
 	},
+	"unique": {
+		{"(Handle).Value", Method, 23},
+		{"Handle", Type, 23},
+		{"Make", Func, 23},
+	},
 	"unsafe": {
 		{"Add", Func, 0},
 		{"Alignof", Func, 0},
@@ -17317,4 +17642,9 @@ var PackageSymbols = map[string][]Symbol{
 		{"String", Func, 0},
 		{"StringData", Func, 0},
 	},
+	"weak": {
+		{"(Pointer).Value", Method, 24},
+		{"Make", Func, 24},
+		{"Pointer", Type, 24},
+	},
 }
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
deleted file mode 100644
index ff9437a3..00000000
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package tokeninternal provides access to some internal features of the token
-// package.
-package tokeninternal
-
-import (
-	"fmt"
-	"go/token"
-	"sort"
-	"sync"
-	"unsafe"
-)
-
-// GetLines returns the table of line-start offsets from a token.File.
-func GetLines(file *token.File) []int {
-	// token.File has a Lines method on Go 1.21 and later.
-	if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
-		return file.Lines()
-	}
-
-	// This declaration must match that of token.File.
-	// This creates a risk of dependency skew.
-	// For now we check that the size of the two
-	// declarations is the same, on the (fragile) assumption
-	// that future changes would add fields.
-	type tokenFile119 struct {
-		_     string
-		_     int
-		_     int
-		mu    sync.Mutex // we're not complete monsters
-		lines []int
-		_     []struct{}
-	}
-
-	if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
-		panic("unexpected token.File size")
-	}
-	var ptr *tokenFile119
-	type uP = unsafe.Pointer
-	*(*uP)(uP(&ptr)) = uP(file)
-	ptr.mu.Lock()
-	defer ptr.mu.Unlock()
-	return ptr.lines
-}
-
-// AddExistingFiles adds the specified files to the FileSet if they
-// are not already present. It panics if any pair of files in the
-// resulting FileSet would overlap.
-func AddExistingFiles(fset *token.FileSet, files []*token.File) {
-	// Punch through the FileSet encapsulation.
-	type tokenFileSet struct {
-		// This type remained essentially consistent from go1.16 to go1.21.
-		mutex sync.RWMutex
-		base  int
-		files []*token.File
-		_     *token.File // changed to atomic.Pointer[token.File] in go1.19
-	}
-
-	// If the size of token.FileSet changes, this will fail to compile.
-	const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
-	var _ [-delta * delta]int
-
-	type uP = unsafe.Pointer
-	var ptr *tokenFileSet
-	*(*uP)(uP(&ptr)) = uP(fset)
-	ptr.mutex.Lock()
-	defer ptr.mutex.Unlock()
-
-	// Merge and sort.
-	newFiles := append(ptr.files, files...)
-	sort.Slice(newFiles, func(i, j int) bool {
-		return newFiles[i].Base() < newFiles[j].Base()
-	})
-
-	// Reject overlapping files.
-	// Discard adjacent identical files.
-	out := newFiles[:0]
-	for i, file := range newFiles {
-		if i > 0 {
-			prev := newFiles[i-1]
-			if file == prev {
-				continue
-			}
-			if prev.Base()+prev.Size()+1 > file.Base() {
-				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
-					prev.Name(), prev.Base(), prev.Base()+prev.Size(),
-					file.Name(), file.Base(), file.Base()+file.Size()))
-			}
-		}
-		out = append(out, file)
-	}
-	newFiles = out
-
-	ptr.files = newFiles
-
-	// Advance FileSet.Base().
-	if len(newFiles) > 0 {
-		last := newFiles[len(newFiles)-1]
-		newBase := last.Base() + last.Size() + 1
-		if ptr.base < newBase {
-			ptr.base = newBase
-		}
-	}
-}
-
-// FileSetFor returns a new FileSet containing a sequence of new Files with
-// the same base, size, and line as the input files, for use in APIs that
-// require a FileSet.
-//
-// Precondition: the input files must be non-overlapping, and sorted in order
-// of their Base.
-func FileSetFor(files ...*token.File) *token.FileSet {
-	fset := token.NewFileSet()
-	for _, f := range files {
-		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
-		lines := GetLines(f)
-		f2.SetLines(lines)
-	}
-	return fset
-}
-
-// CloneFileSet creates a new FileSet holding all files in fset. It does not
-// create copies of the token.Files in fset: they are added to the resulting
-// FileSet unmodified.
-func CloneFileSet(fset *token.FileSet) *token.FileSet {
-	var files []*token.File
-	fset.Iterate(func(f *token.File) bool {
-		files = append(files, f)
-		return true
-	})
-	newFileSet := token.NewFileSet()
-	AddExistingFiles(newFileSet, files)
-	return newFileSet
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 00000000..cdae2b8e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,68 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that
+// interact with generic Go code, as introduced with Go 1.18. It
+// supplements the standard library APIs. Notably, the StructuralTerms
+// API computes a minimal representation of the structural
+// restrictions on a type parameter.
+//
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
+package typeparams
+
+import (
+	"go/ast"
+	"go/token"
+	"go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+	switch e := n.(type) {
+	case *ast.IndexExpr:
+		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+	case *ast.IndexListExpr:
+		return e.X, e.Lbrack, e.Indices, e.Rbrack
+	}
+	return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+	switch len(indices) {
+	case 0:
+		panic("empty indices")
+	case 1:
+		return &ast.IndexExpr{
+			X:      x,
+			Lbrack: lbrack,
+			Index:  indices[0],
+			Rbrack: rbrack,
+		}
+	default:
+		return &ast.IndexListExpr{
+			X:       x,
+			Lbrack:  lbrack,
+			Indices: indices,
+			Rbrack:  rbrack,
+		}
+	}
+}
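+
+// A minimal usage sketch (illustrative only, not part of the upstream
+// package; the node n is hypothetical): UnpackIndexExpr and PackIndexExpr
+// round-trip a generic instantiation expression such as Pair[int, string].
+//
+//	if x, lbrack, indices, rbrack := UnpackIndexExpr(n); x != nil {
+//		rebuilt := PackIndexExpr(x, lbrack, indices, rbrack)
+//		_ = rebuilt // an *ast.IndexExpr or *ast.IndexListExpr, like n
+//	}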
+
+// IsTypeParam reports whether t is a type parameter (or an alias of one).
+func IsTypeParam(t types.Type) bool {
+	_, ok := types.Unalias(t).(*types.TypeParam)
+	return ok
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 00000000..27a2b179
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,155 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"fmt"
+	"go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+	U := T.Underlying()
+	if _, ok := U.(*types.Interface); !ok {
+		return U // for non-interface types, the core type is the underlying type
+	}
+
+	terms, err := NormalTerms(U)
+	if len(terms) == 0 || err != nil {
+		// len(terms) == 0 => the interface has no structural restrictions.
+		// err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+		return nil // no core type.
+	}
+
+	U = terms[0].Type().Underlying()
+	var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+	for identical = 1; identical < len(terms); identical++ {
+		if !types.Identical(U, terms[identical].Type().Underlying()) {
+			break
+		}
+	}
+
+	if identical == len(terms) {
+		// https://go.dev/ref/spec#Core_types
+		// "There is a single type U which is the underlying type of all types in the type set of T"
+		return U
+	}
+	ch, ok := U.(*types.Chan)
+	if !ok {
+		return nil // no core type as identical < len(terms) and U is not a channel.
+	}
+	// https://go.dev/ref/spec#Core_types
+	// "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+	// <-chan E depending on the direction of the directional channels present."
+	for chans := identical; chans < len(terms); chans++ {
+		curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+		if !ok {
+			return nil
+		}
+		if !types.Identical(ch.Elem(), curr.Elem()) {
+			return nil // channel elements are not identical.
+		}
+		if ch.Dir() == types.SendRecv {
+			// ch is bidirectional. We can safely always use curr's direction.
+			ch = curr
+		} else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+			// ch and curr are not bidirectional and not the same direction.
+			return nil
+		}
+	}
+	return ch
+}
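+
+// Illustrative examples (derived from the spec, not part of the upstream
+// package): the core type of interface{ ~[]int } is []int; the core type of
+// interface{ chan int | <-chan int } is <-chan int, since channel terms with
+// an identical element type collapse to the most restrictive direction; and
+// interface{ int | string } has no core type, so CoreType returns nil.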
+
+// NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+//	type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+//	type A interface{ ~string|~[]byte }
+//
+//	type B interface{ int|string }
+//
+//	type C interface { ~string|~int }
+//
+//	type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, NormalTerms returns ErrEmptyTypeSet.
+//
+// NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func NormalTerms(T types.Type) ([]*types.Term, error) {
+	// typeSetOf(T) == typeSetOf(Unalias(T))
+	typ := types.Unalias(T)
+	if named, ok := typ.(*types.Named); ok {
+		typ = named.Underlying()
+	}
+	switch typ := typ.(type) {
+	case *types.TypeParam:
+		return StructuralTerms(typ)
+	case *types.Union:
+		return UnionTermSet(typ)
+	case *types.Interface:
+		return InterfaceTermSet(typ)
+	default:
+		return []*types.Term{types.NewTerm(false, T)}, nil
+	}
+}
+
+// Deref returns the type of the variable pointed to by t,
+// if t's core type is a pointer; otherwise it returns t.
+//
+// Do not assume that Deref(T)==T implies T is not a pointer:
+// consider "type T *T", for example.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func Deref(t types.Type) types.Type {
+	if ptr, ok := CoreType(t).(*types.Pointer); ok {
+		return ptr.Elem()
+	}
+	return t
+}
+
+// MustDeref returns the type of the variable pointed to by t.
+// It panics if t's core type is not a pointer.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func MustDeref(t types.Type) types.Type {
+	if ptr, ok := CoreType(t).(*types.Pointer); ok {
+		return ptr.Elem()
+	}
+	panic(fmt.Sprintf("%v is not a pointer", t))
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
new file mode 100644
index 00000000..0ade5c29
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
@@ -0,0 +1,131 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"go/types"
+
+	"golang.org/x/tools/internal/aliases"
+)
+
+// Free is a memoization of the set of free type parameters within a
+// type. It makes a sequence of calls to [Free.Has] for overlapping
+// types more efficient. The zero value is ready for use.
+//
+// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
+type Free struct {
+	seen map[types.Type]bool
+}
+
+// Has reports whether the specified type has a free type parameter.
+func (w *Free) Has(typ types.Type) (res bool) {
+	// detect cycles
+	if x, ok := w.seen[typ]; ok {
+		return x
+	}
+	if w.seen == nil {
+		w.seen = make(map[types.Type]bool)
+	}
+	w.seen[typ] = false
+	defer func() {
+		w.seen[typ] = res
+	}()
+
+	switch t := typ.(type) {
+	case nil, *types.Basic: // TODO(gri) should nil be handled here?
+		break
+
+	case *types.Alias:
+		if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
+			return true // This is an uninstantiated Alias.
+		}
+		// The expansion of an alias can have free type parameters,
+		// whether or not the alias itself has type parameters:
+		//
+		//   func _[K comparable]() {
+		//     type Set      = map[K]bool // free(Set)      = {K}
+		//     type MapTo[V] = map[K]V    // free(MapTo[foo]) = {K}
+		//   }
+		//
+		// So, we must Unalias.
+		return w.Has(types.Unalias(t))
+
+	case *types.Array:
+		return w.Has(t.Elem())
+
+	case *types.Slice:
+		return w.Has(t.Elem())
+
+	case *types.Struct:
+		for i, n := 0, t.NumFields(); i < n; i++ {
+			if w.Has(t.Field(i).Type()) {
+				return true
+			}
+		}
+
+	case *types.Pointer:
+		return w.Has(t.Elem())
+
+	case *types.Tuple:
+		n := t.Len()
+		for i := 0; i < n; i++ {
+			if w.Has(t.At(i).Type()) {
+				return true
+			}
+		}
+
+	case *types.Signature:
+		// t.tparams may not be nil if we are looking at a signature
+		// of a generic function type (or an interface method) that is
+		// part of the type we're testing. We don't care about these type
+		// parameters.
+		// Similarly, the receiver of a method may declare (rather than
+		// use) type parameters, we don't care about those either.
+		// Thus, we only need to look at the input and result parameters.
+		return w.Has(t.Params()) || w.Has(t.Results())
+
+	case *types.Interface:
+		for i, n := 0, t.NumMethods(); i < n; i++ {
+			if w.Has(t.Method(i).Type()) {
+				return true
+			}
+		}
+		terms, err := InterfaceTermSet(t)
+		if err != nil {
+			return false // ill typed
+		}
+		for _, term := range terms {
+			if w.Has(term.Type()) {
+				return true
+			}
+		}
+
+	case *types.Map:
+		return w.Has(t.Key()) || w.Has(t.Elem())
+
+	case *types.Chan:
+		return w.Has(t.Elem())
+
+	case *types.Named:
+		args := t.TypeArgs()
+		if params := t.TypeParams(); params.Len() > args.Len() {
+			return true // this is an uninstantiated named type.
+		}
+		for i, n := 0, args.Len(); i < n; i++ {
+			if w.Has(args.At(i)) {
+				return true
+			}
+		}
+		return w.Has(t.Underlying()) // recurse for types local to parameterized functions
+
+	case *types.TypeParam:
+		return true
+
+	default:
+		panic(t) // unreachable
+	}
+
+	return false
+}
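+
+// Example use (illustrative; sig is a hypothetical *types.Signature): a
+// single Free value can be reused across overlapping types so that shared
+// sub-terms are only walked once.
+//
+//	var free Free
+//	hasFree := free.Has(sig.Params()) || free.Has(sig.Results())
+//	_ = hasFree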
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 00000000..93c80fdc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+	"errors"
+	"fmt"
+	"go/types"
+	"os"
+	"strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+//	type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+//	type A interface{ ~string|~[]byte }
+//
+//	type B interface{ int|string }
+//
+//	type C interface { ~string|~int }
+//
+//	type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
+	constraint := tparam.Constraint()
+	if constraint == nil {
+		return nil, fmt.Errorf("%s has nil constraint", tparam)
+	}
+	iface, _ := constraint.Underlying().(*types.Interface)
+	if iface == nil {
+		return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+	}
+	return InterfaceTermSet(iface)
+}
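+
+// Illustrative call (tparamP is a hypothetical *types.TypeParam for P in
+// the example above): the resulting terms are equivalent to ~string | int,
+// though their order is unspecified.
+//
+//	terms, err := StructuralTerms(tparamP)
+//	if err == nil {
+//		for _, term := range terms {
+//			fmt.Println(term.Tilde(), term.Type())
+//		}
+//	}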
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
+	return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
+	return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
+	tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+	if err != nil {
+		return nil, err
+	}
+	if tset.terms.isEmpty() {
+		return nil, ErrEmptyTypeSet
+	}
+	if tset.terms.isAll() {
+		return nil, nil
+	}
+	var terms []*types.Term
+	for _, term := range tset.terms {
+		terms = append(terms, types.NewTerm(term.tilde, term.typ))
+	}
+	return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+	complete bool
+	terms    termlist
+}
+
+func indentf(depth int, format string, args ...interface{}) {
+	fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+	if t == nil {
+		panic("nil type")
+	}
+
+	if debug {
+		indentf(depth, "%s", t.String())
+		defer func() {
+			if err != nil {
+				indentf(depth, "=> %s", err)
+			} else {
+				indentf(depth, "=> %s", res.terms.String())
+			}
+		}()
+	}
+
+	const maxTermCount = 100
+	if tset, ok := seen[t]; ok {
+		if !tset.complete {
+			return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+		}
+		return tset, nil
+	}
+
+	// Mark the current type as seen to avoid infinite recursion.
+	tset := new(termSet)
+	defer func() {
+		tset.complete = true
+	}()
+	seen[t] = tset
+
+	switch u := t.Underlying().(type) {
+	case *types.Interface:
+		// The term set of an interface is the intersection of the term sets of its
+		// embedded types.
+		tset.terms = allTermlist
+		for i := 0; i < u.NumEmbeddeds(); i++ {
+			embedded := u.EmbeddedType(i)
+			if _, ok := embedded.Underlying().(*types.TypeParam); ok {
+				return nil, fmt.Errorf("invalid embedded type %T", embedded)
+			}
+			tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+			if err != nil {
+				return nil, err
+			}
+			tset.terms = tset.terms.intersect(tset2.terms)
+		}
+	case *types.Union:
+		// The term set of a union is the union of term sets of its terms.
+		tset.terms = nil
+		for i := 0; i < u.Len(); i++ {
+			t := u.Term(i)
+			var terms termlist
+			switch t.Type().Underlying().(type) {
+			case *types.Interface:
+				tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+				if err != nil {
+					return nil, err
+				}
+				terms = tset2.terms
+			case *types.TypeParam, *types.Union:
+				// A stand-alone type parameter or union is not permitted as union
+				// term.
+				return nil, fmt.Errorf("invalid union term %T", t)
+			default:
+				if t.Type() == types.Typ[types.Invalid] {
+					continue
+				}
+				terms = termlist{{t.Tilde(), t.Type()}}
+			}
+			tset.terms = tset.terms.union(terms)
+			if len(tset.terms) > maxTermCount {
+				return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+			}
+		}
+	case *types.TypeParam:
+		panic("unreachable")
+	default:
+		// For all other types, the term set is just a single non-tilde term
+		// holding the type itself.
+		if u != types.Typ[types.Invalid] {
+			tset.terms = termlist{{false, t}}
+		}
+	}
+	return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+	return t.Underlying()
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 00000000..cbd12f80
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+	"bytes"
+	"go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+	if len(xl) == 0 {
+		return "∅"
+	}
+	var buf bytes.Buffer
+	for i, x := range xl {
+		if i > 0 {
+			buf.WriteString(" | ")
+		}
+		buf.WriteString(x.String())
+	}
+	return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+	// If there's a non-nil term, the entire list is not empty.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil {
+			return false
+		}
+	}
+	return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+	// If there's a 𝓤 term, the entire list is 𝓤.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil && x.typ == nil {
+			return true
+		}
+	}
+	return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	used := make([]bool, len(xl))
+	var rl termlist
+	for i, xi := range xl {
+		if xi == nil || used[i] {
+			continue
+		}
+		for j := i + 1; j < len(xl); j++ {
+			xj := xl[j]
+			if xj == nil || used[j] {
+				continue
+			}
+			if u1, u2 := xi.union(xj); u2 == nil {
+				// If we encounter a 𝓤 term, the entire list is 𝓤.
+				// Exit early.
+				// (Note that this is not just an optimization;
+				// if we continue, we may end up with a 𝓤 term
+				// and other terms and the result would not be
+				// in normal form.)
+				if u1.typ == nil {
+					return allTermlist
+				}
+				xi = u1
+				used[j] = true // xj is now unioned into xi - ignore it in future iterations
+			}
+		}
+		rl = append(rl, xi)
+	}
+	return rl
+}
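+
+// For example (illustrative): the list {int, ~int, string} is not in normal
+// form because its first two terms overlap; norm() merges them and returns
+// a list equivalent to {~int, string}.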
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+	return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+	if xl.isEmpty() || yl.isEmpty() {
+		return nil
+	}
+
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	var rl termlist
+	for _, x := range xl {
+		for _, y := range yl {
+			if r := x.intersect(y); r != nil {
+				rl = append(rl, r)
+			}
+		}
+	}
+	return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+	// TODO(gri) this should be more efficient
+	return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+	for _, x := range xl {
+		if x.includes(t) {
+			return true
+		}
+	}
+	return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+	for _, x := range xl {
+		if y.subsetOf(x) {
+			return true
+		}
+	}
+	return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+	if yl.isEmpty() {
+		return xl.isEmpty()
+	}
+
+	// each term x of xl must be a subset of yl
+	for _, x := range xl {
+		if !yl.supersetOf(x) {
+			return false // x is not a subset of yl
+		}
+	}
+	return true
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 00000000..7350bb70
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,169 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+//	 ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
+//	 𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
+//	 T:  &term{false, T}  == {T}                    // set of type T
+//	~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
+type term struct {
+	tilde bool // valid if typ != nil
+	typ   types.Type
+}
+
+func (x *term) String() string {
+	switch {
+	case x == nil:
+		return "∅"
+	case x.typ == nil:
+		return "𝓤"
+	case x.tilde:
+		return "~" + x.typ.String()
+	default:
+		return x.typ.String()
+	}
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return x == y
+	case x.typ == nil || y.typ == nil:
+		return x.typ == y.typ
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+	// easy cases
+	switch {
+	case x == nil && y == nil:
+		return nil, nil // ∅ ∪ ∅ == ∅
+	case x == nil:
+		return y, nil // ∅ ∪ y == y
+	case y == nil:
+		return x, nil // x ∪ ∅ == x
+	case x.typ == nil:
+		return x, nil // 𝓤 ∪ y == 𝓤
+	case y.typ == nil:
+		return y, nil // x ∪ 𝓤 == 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∪ ~t == ~t
+	// ~t ∪  T == ~t
+	//  T ∪ ~t == ~t
+	//  T ∪  T ==  T
+	if x.tilde || !y.tilde {
+		return x, nil
+	}
+	return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+	case x.typ == nil:
+		return y // 𝓤 ∩ y == y
+	case y.typ == nil:
+		return x // x ∩ 𝓤 == x
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return nil // x ∩ y == ∅ if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∩ ~t == ~t
+	// ~t ∩  T ==  T
+	//  T ∩ ~t ==  T
+	//  T ∩  T ==  T
+	if !x.tilde || y.tilde {
+		return x
+	}
+	return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return false // t ∈ ∅ == false
+	case x.typ == nil:
+		return true // t ∈ 𝓤 == true
+	}
+	// ∅ ⊂ x ⊂ 𝓤
+
+	u := t
+	if x.tilde {
+		u = under(u)
+	}
+	return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil:
+		return true // ∅ ⊆ y == true
+	case y == nil:
+		return false // x ⊆ ∅ == false since x != ∅
+	case y.typ == nil:
+		return true // x ⊆ 𝓤 == true
+	case x.typ == nil:
+		return false // 𝓤 ⊆ y == false since y != 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return false // x ⊆ y == false if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ⊆ ~t == true
+	// ~t ⊆ T == false
+	//  T ⊆ ~t == true
+	//  T ⊆  T == true
+	return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+	if debug && (x.typ == nil || y.typ == nil) {
+		panic("invalid argument(s)")
+	}
+	ux := x.typ
+	if y.tilde {
+		ux = under(ux)
+	}
+	uy := y.typ
+	if x.tilde {
+		uy = under(uy)
+	}
+	return !types.Identical(ux, uy)
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go
new file mode 100644
index 00000000..4957f021
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/types"
+
+	"golang.org/x/tools/go/types/typeutil"
+)
+
+// ForEachElement calls f for type T and each type reachable from its
+// type through reflection. It does this by recursively stripping off
+// type constructors; in addition, for each named type N, the type *N
+// is added to the result as it may have additional methods.
+//
+// The caller must provide an initially empty set used to de-duplicate
+// identical types, potentially across multiple calls to ForEachElement.
+// (Its final value holds all the elements seen, matching the arguments
+// passed to f.)
+//
+// TODO(adonovan): share/harmonize with go/callgraph/rta.
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
+	var visit func(T types.Type, skip bool)
+	visit = func(T types.Type, skip bool) {
+		if !skip {
+			if seen, _ := rtypes.Set(T, true).(bool); seen {
+				return // de-dup
+			}
+
+			f(T) // notify caller of new element type
+		}
+
+		// Recursion over signatures of each method.
+		tmset := msets.MethodSet(T)
+		for i := 0; i < tmset.Len(); i++ {
+			sig := tmset.At(i).Type().(*types.Signature)
+			// It is tempting to call visit(sig, false)
+			// but, as noted in golang.org/cl/65450043,
+			// the Signature.Recv field is ignored by
+			// types.Identical and typeutil.Map, which
+			// is confusing at best.
+			//
+			// More importantly, the true signature rtype
+			// reachable from a method using reflection
+			// has no receiver but an extra ordinary parameter.
+			// For the Read method of io.Reader we want:
+			//   func(Reader, []byte) (int, error)
+			// but here sig is:
+			//   func([]byte) (int, error)
+			// with .Recv = Reader (though it is hard to
+			// notice because it doesn't affect Signature.String
+			// or types.Identical).
+			//
+			// TODO(adonovan): construct and visit the correct
+			// non-method signature with an extra parameter
+			// (though since unnamed func types have no methods
+			// there is essentially no actual demand for this).
+			//
+			// TODO(adonovan): document whether or not it is
+			// safe to skip non-exported methods (as RTA does).
+			visit(sig.Params(), true)  // skip the Tuple
+			visit(sig.Results(), true) // skip the Tuple
+		}
+
+		switch T := T.(type) {
+		case *types.Alias:
+			visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
+
+		case *types.Basic:
+			// nop
+
+		case *types.Interface:
+			// nop---handled by recursion over method set.
+
+		case *types.Pointer:
+			visit(T.Elem(), false)
+
+		case *types.Slice:
+			visit(T.Elem(), false)
+
+		case *types.Chan:
+			visit(T.Elem(), false)
+
+		case *types.Map:
+			visit(T.Key(), false)
+			visit(T.Elem(), false)
+
+		case *types.Signature:
+			if T.Recv() != nil {
+				panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
+			}
+			visit(T.Params(), true)  // skip the Tuple
+			visit(T.Results(), true) // skip the Tuple
+
+		case *types.Named:
+			// A pointer-to-named type can be derived from a named
+			// type via reflection.  It may have methods too.
+			visit(types.NewPointer(T), false)
+
+			// Consider 'type T struct{S}' where S has methods.
+			// Reflection provides no way to get from T to struct{S},
+			// only to S, so the method set of struct{S} is unwanted,
+			// so set 'skip' flag during recursion.
+			visit(T.Underlying(), true) // skip the unnamed type
+
+		case *types.Array:
+			visit(T.Elem(), false)
+
+		case *types.Struct:
+			for i, n := 0, T.NumFields(); i < n; i++ {
+				// TODO(adonovan): document whether or not
+				// it is safe to skip non-exported fields.
+				visit(T.Field(i).Type(), false)
+			}
+
+		case *types.Tuple:
+			for i, n := 0, T.Len(); i < n; i++ {
+				visit(T.At(i).Type(), false)
+			}
+
+		case *types.TypeParam, *types.Union:
+			// forEachReachable must not be called on parameterized types.
+			panic(T)
+
+		default:
+			panic(T)
+		}
+	}
+	visit(T, false)
+}
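+
+// Sketch of a typical call (illustrative; T is a hypothetical types.Type):
+// collect every type reachable from T via reflection.
+//
+//	var rtypes typeutil.Map
+//	var msets typeutil.MethodSetCache
+//	var all []types.Type
+//	ForEachElement(&rtypes, &msets, T, func(t types.Type) { all = append(all, t) })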
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 834e0538..235a6def 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -838,7 +838,7 @@ const (
 	// InvalidCap occurs when an argument to the cap built-in function is not of
 	// supported type.
 	//
-	// See https://golang.org/ref/spec#Lengthand_capacity for information on
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
 	// which underlying types are supported as arguments to cap and len.
 	//
 	// Example:
@@ -859,7 +859,7 @@ const (
 	// InvalidCopy occurs when the arguments are not of slice type or do not
 	// have compatible type.
 	//
-	// See https://golang.org/ref/spec#Appendingand_copying_slices for more
+	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
 	// information on the type requirements for the copy built-in.
 	//
 	// Example:
@@ -897,7 +897,7 @@ const (
 	// InvalidLen occurs when an argument to the len built-in function is not of
 	// supported type.
 	//
-	// See https://golang.org/ref/spec#Lengthand_capacity for information on
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
 	// which underlying types are supported as arguments to cap and len.
 	//
 	// Example:
@@ -914,7 +914,7 @@ const (
 
 	// InvalidMake occurs when make is called with an unsupported type argument.
 	//
-	// See https://golang.org/ref/spec#Makingslices_maps_and_channels for
+	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
 	// information on the types that may be created using make.
 	//
 	// Example:
@@ -966,7 +966,7 @@ const (
 	//  var _ = string(x)
 	InvalidConversion
 
-	// InvalidUntypedConversion occurs when an there is no valid implicit
+	// InvalidUntypedConversion occurs when there is no valid implicit
 	// conversion from an untyped value satisfying the type constraints of the
 	// context in which it is used.
 	//
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
new file mode 100644
index 00000000..b64f714e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -0,0 +1,46 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"go/ast"
+	"go/types"
+	"strconv"
+)
+
+// FileQualifier returns a [types.Qualifier] function that qualifies
+// imported symbols appropriately based on the import environment of a given
+// file.
+// If the same package is imported multiple times, the last appearance is
+// recorded.
+func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
+	// Construct mapping of import paths to their defined names.
+	// It is only necessary to look at renaming imports.
+	imports := make(map[string]string)
+	for _, imp := range f.Imports {
+		if imp.Name != nil && imp.Name.Name != "_" {
+			path, _ := strconv.Unquote(imp.Path.Value)
+			imports[path] = imp.Name.Name
+		}
+	}
+
+	// Define qualifier to replace full package paths with names of the imports.
+	return func(p *types.Package) string {
+		if p == nil || p == pkg {
+			return ""
+		}
+
+		if name, ok := imports[p.Path()]; ok {
+			if name == "." {
+				return ""
+			}
+			return name
+		}
+
+		// If there is no local renaming, fall back to the package name.
+		return p.Name()
+	}
+}
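+
+// Typical use (illustrative; file, pkg and v are hypothetical): render a
+// type using the import names that are in effect in a particular file.
+//
+//	qual := FileQualifier(file, pkg)
+//	s := types.TypeString(v.Type(), qual)
+//	_ = s // e.g. "bytespkg.Buffer" if the file imports "bytes" as bytespkg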
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
index fea7c8b7..8352ea76 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -6,20 +6,21 @@ package typesinternal
 
 import (
 	"go/types"
-
-	"golang.org/x/tools/internal/aliases"
 )
 
 // ReceiverNamed returns the named type (if any) associated with the
 // type of recv, which may be of the form N or *N, or aliases thereof.
 // It also reports whether a Pointer was present.
+//
+// The named result may be nil if recv is from a method on an
+// anonymous interface or struct type, or in ill-typed code.
 func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
 	t := recv.Type()
-	if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
 		isPtr = true
 		t = ptr.Elem()
 	}
-	named, _ = aliases.Unalias(t).(*types.Named)
+	named, _ = types.Unalias(t).(*types.Named)
 	return
 }
 
@@ -36,7 +37,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
 // indirection from the type, regardless of named types (analogous to
 // a LOAD instruction).
 func Unpointer(t types.Type) types.Type {
-	if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
 		return ptr.Elem()
 	}
 	return t
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index 7c77c2fb..34534879 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -11,6 +11,8 @@ import (
 	"go/types"
 	"reflect"
 	"unsafe"
+
+	"golang.org/x/tools/internal/aliases"
 )
 
 func SetUsesCgo(conf *types.Config) bool {
@@ -48,3 +50,78 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
 	}
 	return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
 }
+
+// NameRelativeTo returns a types.Qualifier that qualifies members of
+// all packages other than pkg, using only the package name.
+// (By contrast, [types.RelativeTo] uses the complete package path,
+// which is often excessive.)
+//
+// If pkg is nil, it is equivalent to [*types.Package.Name].
+func NameRelativeTo(pkg *types.Package) types.Qualifier {
+	return func(other *types.Package) string {
+		if pkg != nil && pkg == other {
+			return "" // same package; unqualified
+		}
+		return other.Name()
+	}
+}
+
+// A NamedOrAlias is a [types.Type] that is named (as
+// defined by the spec) and capable of bearing type parameters: it
+// abstracts aliases ([types.Alias]) and defined types
+// ([types.Named]).
+//
+// Every type declared by an explicit "type" declaration is a
+// NamedOrAlias. (Built-in type symbols may additionally
+// have type [types.Basic], which is not a NamedOrAlias,
+// though the spec regards them as "named".)
+//
+// NamedOrAlias cannot expose the Origin method, because
+// [types.Alias.Origin] and [types.Named.Origin] have different
+// (covariant) result types; use [Origin] instead.
+type NamedOrAlias interface {
+	types.Type
+	Obj() *types.TypeName
+	// TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22.
+}
+
+// TypeParams is a light shim around t.TypeParams().
+// (go/types.Alias).TypeParams requires >= 1.23.
+func TypeParams(t NamedOrAlias) *types.TypeParamList {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.TypeParams(t)
+	case *types.Named:
+		return t.TypeParams()
+	}
+	return nil
+}
+
+// TypeArgs is a light shim around t.TypeArgs().
+// (go/types.Alias).TypeArgs requires >= 1.23.
+func TypeArgs(t NamedOrAlias) *types.TypeList {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.TypeArgs(t)
+	case *types.Named:
+		return t.TypeArgs()
+	}
+	return nil
+}
+
+// Origin returns the generic type of the Named or Alias type t if it
+// is instantiated, otherwise it returns t.
+func Origin(t NamedOrAlias) NamedOrAlias {
+	switch t := t.(type) {
+	case *types.Alias:
+		return aliases.Origin(t)
+	case *types.Named:
+		return t.Origin()
+	}
+	return t
+}
+
+// IsPackageLevel reports whether obj is a package-level symbol.
+func IsPackageLevel(obj types.Object) bool {
+	return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
new file mode 100644
index 00000000..e5da0495
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
@@ -0,0 +1,40 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
+// this API that actually does something.
+
+import "go/types"
+
+type VarKind uint8
+
+const (
+	_          VarKind = iota // (not meaningful)
+	PackageVar                // a package-level variable
+	LocalVar                  // a local variable
+	RecvVar                   // a method receiver variable
+	ParamVar                  // a function parameter variable
+	ResultVar                 // a function result variable
+	FieldVar                  // a struct field
+)
+
+func (kind VarKind) String() string {
+	return [...]string{
+		0:          "VarKind(0)",
+		PackageVar: "PackageVar",
+		LocalVar:   "LocalVar",
+		RecvVar:    "RecvVar",
+		ParamVar:   "ParamVar",
+		ResultVar:  "ResultVar",
+		FieldVar:   "FieldVar",
+	}[kind]
+}
+
+// GetVarKind returns an invalid VarKind.
+func GetVarKind(v *types.Var) VarKind { return 0 }
+
+// SetVarKind has no effect.
+func SetVarKind(v *types.Var, kind VarKind) {}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 00000000..d272949c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,392 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strings"
+)
+
+// ZeroString returns the string representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroString may return a partially correct
+// string representation. The caller should use the returned isValid boolean
+// to determine the validity of the expression.
+//
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+//
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// References to named types are qualified by an appropriate (optional)
+// qualifier function.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+//
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return "false", true
+		case t.Info()&types.IsNumeric != 0:
+			return "0", true
+		case t.Info()&types.IsString != 0:
+			return `""`, true
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return "nil", true
+		case t.Kind() == types.Invalid:
+			return "invalid", false
+		default:
+			panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+		return "nil", true
+
+	case *types.Interface:
+		if !t.IsMethodSet() {
+			return "invalid", false
+		}
+		return "nil", true
+
+	case *types.Named:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return types.TypeString(t, qual) + "{}", true
+		default:
+			return ZeroString(under, qual)
+		}
+
+	case *types.Alias:
+		switch t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return types.TypeString(t, qual) + "{}", true
+		default:
+			// A type parameter can have an alias, but an alias type's underlying
+			// type can never be a type parameter.
+			// Use types.Unalias to preserve the type parameter information
+			// instead of calling Underlying(), which would go right through to
+			// the type parameter's underlying type, which is always an interface.
+			return ZeroString(types.Unalias(t), qual)
+		}
+
+	case *types.Array, *types.Struct:
+		return types.TypeString(t, qual) + "{}", true
+
+	case *types.TypeParam:
+		// Assumes func new is not shadowed.
+		return "*new(" + types.TypeString(t, qual) + ")", true
+
+	case *types.Tuple:
+		// Tuples are not normal values.
+		// We currently format this as "(t[0], ..., t[n])". Could be something else.
+		isValid := true
+		components := make([]string, t.Len())
+		for i := 0; i < t.Len(); i++ {
+			comp, ok := ZeroString(t.At(i).Type(), qual)
+
+			components[i] = comp
+			isValid = isValid && ok
+		}
+		return "(" + strings.Join(components, ", ") + ")", isValid
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
+// representation. The caller should use the returned isValid boolean to determine
+// the validity of the expression.
+//
+// This function is designed for types suitable for variables and should not be
+// used with Tuple or Union types. References to named types are qualified by an
+// appropriate (optional) qualifier function.
+//
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return &ast.Ident{Name: "false"}, true
+		case t.Info()&types.IsNumeric != 0:
+			return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
+		case t.Info()&types.IsString != 0:
+			return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return ast.NewIdent("nil"), true
+		case t.Kind() == types.Invalid:
+			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+		default:
+			panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+		return ast.NewIdent("nil"), true
+
+	case *types.Interface:
+		if !t.IsMethodSet() {
+			return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+		}
+		return ast.NewIdent("nil"), true
+
+	case *types.Named:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return &ast.CompositeLit{
+				Type: TypeExpr(t, qual),
+			}, true
+		default:
+			return ZeroExpr(under, qual)
+		}
+
+	case *types.Alias:
+		switch t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return &ast.CompositeLit{
+				Type: TypeExpr(t, qual),
+			}, true
+		default:
+			return ZeroExpr(types.Unalias(t), qual)
+		}
+
+	case *types.Array, *types.Struct:
+		return &ast.CompositeLit{
+			Type: TypeExpr(t, qual),
+		}, true
+
+	case *types.TypeParam:
+		return &ast.StarExpr{ // *new(T)
+			X: &ast.CallExpr{
+				// Assumes func new is not shadowed.
+				Fun: ast.NewIdent("new"),
+				Args: []ast.Expr{
+					ast.NewIdent(t.Obj().Name()),
+				},
+			},
+		}, true
+
+	case *types.Tuple:
+		// Unlike ZeroString, there is no ast.Expr that can express a tuple as
+		// "(t[0], ..., t[n])".
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// IsZeroExpr uses simple syntactic heuristics to report whether expr
+// is an obvious zero value, such as 0, "", nil, or false.
+// It cannot do better without type information.
+func IsZeroExpr(expr ast.Expr) bool {
+	switch e := expr.(type) {
+	case *ast.BasicLit:
+		return e.Value == "0" || e.Value == `""`
+	case *ast.Ident:
+		return e.Name == "nil" || e.Name == "false"
+	default:
+		return false
+	}
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// are qualified by an appropriate (optional) qualifier function.
+// It may panic for types such as Tuple or Union.
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch t.Kind() {
+		case types.UnsafePointer:
+			return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
+		default:
+			return ast.NewIdent(t.Name())
+		}
+
+	case *types.Pointer:
+		return &ast.UnaryExpr{
+			Op: token.MUL,
+			X:  TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Array:
+		return &ast.ArrayType{
+			Len: &ast.BasicLit{
+				Kind:  token.INT,
+				Value: fmt.Sprintf("%d", t.Len()),
+			},
+			Elt: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Slice:
+		return &ast.ArrayType{
+			Elt: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Map:
+		return &ast.MapType{
+			Key:   TypeExpr(t.Key(), qual),
+			Value: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Chan:
+		dir := ast.ChanDir(t.Dir())
+		if t.Dir() == types.SendRecv {
+			dir = ast.SEND | ast.RECV
+		}
+		return &ast.ChanType{
+			Dir:   dir,
+			Value: TypeExpr(t.Elem(), qual),
+		}
+
+	case *types.Signature:
+		var params []*ast.Field
+		for i := 0; i < t.Params().Len(); i++ {
+			params = append(params, &ast.Field{
+				Type: TypeExpr(t.Params().At(i).Type(), qual),
+				Names: []*ast.Ident{
+					{
+						Name: t.Params().At(i).Name(),
+					},
+				},
+			})
+		}
+		if t.Variadic() {
+			last := params[len(params)-1]
+			last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+		}
+		var returns []*ast.Field
+		for i := 0; i < t.Results().Len(); i++ {
+			returns = append(returns, &ast.Field{
+				Type: TypeExpr(t.Results().At(i).Type(), qual),
+			})
+		}
+		return &ast.FuncType{
+			Params: &ast.FieldList{
+				List: params,
+			},
+			Results: &ast.FieldList{
+				List: returns,
+			},
+		}
+
+	case *types.TypeParam:
+		pkgName := qual(t.Obj().Pkg())
+		if pkgName == "" || t.Obj().Pkg() == nil {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		return &ast.SelectorExpr{
+			X:   ast.NewIdent(pkgName),
+			Sel: ast.NewIdent(t.Obj().Name()),
+		}
+
+	// types.TypeParam also implements interface NamedOrAlias. To differentiate,
+	// case TypeParam needs to be present before case NamedOrAlias.
+	// TODO(hxjiang): remove this comment once TypeArgs() is added to interface
+	// NamedOrAlias.
+	case NamedOrAlias:
+		var expr ast.Expr = ast.NewIdent(t.Obj().Name())
+		if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
+			expr = &ast.SelectorExpr{
+				X:   ast.NewIdent(pkgName),
+				Sel: expr.(*ast.Ident),
+			}
+		}
+
+		// TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
+		// typesinternal.NamedOrAlias.
+		if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
+			if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
+				var indices []ast.Expr
+				for i := range typeArgs.Len() {
+					indices = append(indices, TypeExpr(typeArgs.At(i), qual))
+				}
+				expr = &ast.IndexListExpr{
+					X:       expr,
+					Indices: indices,
+				}
+			}
+		}
+
+		return expr
+
+	case *types.Struct:
+		return ast.NewIdent(t.String())
+
+	case *types.Interface:
+		return ast.NewIdent(t.String())
+
+	case *types.Union:
+		if t.Len() == 0 {
+			panic("Union type should have at least one term")
+		}
+		// As in go/ast, the returned expression puts the last term in the
+		// Y field at the topmost level of the BinaryExpr.
+		// For union of type "float32 | float64 | int64", the structure looks
+		// similar to:
+		// {
+		// 	X: {
+		// 		X: float32,
+		// 		Op: |
+		// 		Y: float64,
+		// 	}
+		// 	Op: |,
+		// 	Y: int64,
+		// }
+		var union ast.Expr
+		for i := range t.Len() {
+			term := t.Term(i)
+			termExpr := TypeExpr(term.Type(), qual)
+			if term.Tilde() {
+				termExpr = &ast.UnaryExpr{
+					Op: token.TILDE,
+					X:  termExpr,
+				}
+			}
+			if i == 0 {
+				union = termExpr
+			} else {
+				union = &ast.BinaryExpr{
+					X:  union,
+					Op: token.OR,
+					Y:  termExpr,
+				}
+			}
+		}
+		return union
+
+	case *types.Tuple:
+		panic("invalid input type types.Tuple")
+
+	default:
+		panic("unreachable")
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
deleted file mode 100644
index 377bf7a5..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-// toolchain is maximum version (<1.22) that the go toolchain used
-// to build the current tool is known to support.
-//
-// When a tool is built with >=1.22, the value of toolchain is unused.
-//
-// x/tools does not support building with go <1.18. So we take this
-// as the minimum possible maximum.
-var toolchain string = Go1_18
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
deleted file mode 100644
index f65beed9..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19
-// +build go1.19
-
-package versions
-
-func init() {
-	if Compare(toolchain, Go1_19) < 0 {
-		toolchain = Go1_19
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
deleted file mode 100644
index 1a9efa12..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-// +build go1.20
-
-package versions
-
-func init() {
-	if Compare(toolchain, Go1_20) < 0 {
-		toolchain = Go1_20
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
deleted file mode 100644
index b7ef216d..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-// +build go1.21
-
-package versions
-
-func init() {
-	if Compare(toolchain, Go1_21) < 0 {
-		toolchain = Go1_21
-	}
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
index 562eef21..0fc10ce4 100644
--- a/vendor/golang.org/x/tools/internal/versions/types.go
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -5,15 +5,29 @@
 package versions
 
 import (
+	"go/ast"
 	"go/types"
 )
 
-// GoVersion returns the Go version of the type package.
-// It returns zero if no version can be determined.
-func GoVersion(pkg *types.Package) string {
-	// TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
-	if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
-		return pkg.GoVersion()
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In tools built with Go >= 1.22, the Go version of a file
+	// follows a cascade of sources:
+	// 1) types.Info.FileVersion, which follows the cascade:
+	//   1.a) file version (ast.File.GoVersion),
+	//   1.b) the package version (types.Config.GoVersion), or
+	// 2) is some unknown Future version.
+	//
+	// File versions require a valid package version to be provided to types
+	// in Config.GoVersion. Config.GoVersion is either from the package's module
+	// or the toolchain (go run). This value should be provided by go/packages
+	// or unitchecker.Config.GoVersion.
+	if v := info.FileVersions[file]; IsValid(v) {
+		return v
 	}
-	return ""
+	// Note: we could instead return runtime.Version() [if valid].
+	// This would act as a max version on what a tool can support.
+	return Future
 }
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
deleted file mode 100644
index b4345d33..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a language version (<=1.21) derived from runtime.Version()
-// or an unknown future version.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
-	// available. We use a go version derived from the toolchain used to
-	// compile the tool by default.
-	// This will be <= go1.21. We take this as the maximum version that
-	// this tool can support.
-	//
-	// There are no features currently in x/tools that need to tell fine grained
-	// differences for versions <1.22.
-	return toolchain
-}
-
-// InitFileVersions is a noop when compiled with this Go version.
-func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
deleted file mode 100644
index aac5db62..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.22
-// +build go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a file's Go version.
-// The reported version is an unknown Future version if a
-// version cannot be determined.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In tools built with Go >= 1.22, the Go version of a file
-	// follow a cascades of sources:
-	// 1) types.Info.FileVersion, which follows the cascade:
-	//   1.a) file version (ast.File.GoVersion),
-	//   1.b) the package version (types.Config.GoVersion), or
-	// 2) is some unknown Future version.
-	//
-	// File versions require a valid package version to be provided to types
-	// in Config.GoVersion. Config.GoVersion is either from the package's module
-	// or the toolchain (go run). This value should be provided by go/packages
-	// or unitchecker.Config.GoVersion.
-	if v := info.FileVersions[file]; IsValid(v) {
-		return v
-	}
-	// Note: we could instead return runtime.Version() [if valid].
-	// This would act as a max version on what a tool can support.
-	return Future
-}
-
-// InitFileVersions initializes info to record Go versions for Go files.
-func InitFileVersions(info *types.Info) {
-	info.FileVersions = make(map[*ast.File]string)
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d44ea78a..d0a1277d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# github.com/BurntSushi/toml v1.4.0
+# github.com/BurntSushi/toml v1.5.0
 ## explicit; go 1.18
 github.com/BurntSushi/toml
 github.com/BurntSushi/toml/internal
@@ -68,8 +68,8 @@ github.com/kardianos/service
 # github.com/lifenjoiner/dhcpdns v0.0.7
 ## explicit; go 1.20
 github.com/lifenjoiner/dhcpdns
-# github.com/miekg/dns v1.1.63
-## explicit; go 1.19
+# github.com/miekg/dns v1.1.64
+## explicit; go 1.22.0
 github.com/miekg/dns
 # github.com/onsi/ginkgo/v2 v2.9.5
 ## explicit; go 1.18
@@ -107,7 +107,7 @@ github.com/powerman/deepequal
 # github.com/quic-go/qpack v0.5.1
 ## explicit; go 1.22
 github.com/quic-go/qpack
-# github.com/quic-go/quic-go v0.50.0
+# github.com/quic-go/quic-go v0.50.1
 ## explicit; go 1.23
 github.com/quic-go/quic-go
 github.com/quic-go/quic-go/http3
@@ -148,8 +148,8 @@ golang.org/x/crypto/salsa20/salsa
 # golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
 ## explicit; go 1.20
 golang.org/x/exp/rand
-# golang.org/x/mod v0.18.0
-## explicit; go 1.18
+# golang.org/x/mod v0.23.0
+## explicit; go 1.22.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
@@ -186,16 +186,17 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.22.0
-## explicit; go 1.19
+# golang.org/x/tools v0.30.0
+## explicit; go 1.22.0
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/go/gcexportdata
-golang.org/x/tools/go/internal/packagesdriver
 golang.org/x/tools/go/packages
 golang.org/x/tools/go/types/objectpath
+golang.org/x/tools/go/types/typeutil
 golang.org/x/tools/imports
 golang.org/x/tools/internal/aliases
+golang.org/x/tools/internal/astutil/edge
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
 golang.org/x/tools/internal/event/keys
@@ -204,10 +205,11 @@ golang.org/x/tools/internal/gcimporter
 golang.org/x/tools/internal/gocommand
 golang.org/x/tools/internal/gopathwalk
 golang.org/x/tools/internal/imports
+golang.org/x/tools/internal/modindex
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/stdlib
-golang.org/x/tools/internal/tokeninternal
+golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
 # google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1

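The vendored typesinternal helpers added earlier in this patch (FileQualifier, NameRelativeTo, ZeroString/ZeroExpr, TypeExpr) all take or return a types.Qualifier that renders references to other packages by bare package name rather than full import path. The following standalone sketch, using only the public go/types API, shows the effect of such a qualifier compared to types.RelativeTo; it is not part of the patch, and the package paths and the VarKind name are reused purely for illustration:

    package main

    import (
    	"fmt"
    	"go/types"
    )

    func main() {
    	pkg := types.NewPackage("example.com/demo", "demo")
    	other := types.NewPackage("golang.org/x/tools/internal/typesinternal", "typesinternal")

    	// NameRelativeTo-style qualifier: members of pkg stay unqualified,
    	// every other package is qualified by its bare name, not its import path.
    	qual := func(p *types.Package) string {
    		if p == pkg {
    			return ""
    		}
    		return p.Name()
    	}

    	varKind := types.NewNamed(types.NewTypeName(0, other, "VarKind", nil), types.Typ[types.Uint8], nil)

    	fmt.Println(types.TypeString(varKind, qual))                  // typesinternal.VarKind
    	fmt.Println(types.TypeString(varKind, types.RelativeTo(pkg))) // golang.org/x/tools/internal/typesinternal.VarKind
    }

ZeroString and TypeExpr thread the same kind of qualifier through, so composite zero values such as struct literals are rendered with the same short package references.
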
From c030e346b53a651f724d1fefd80be56b9af1b05a Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Wed, 26 Mar 2025 23:00:30 +0100
Subject: [PATCH 44/48] Add a bit of jitter to the TTL of xtransport cached IP
 addresses

---
 dnscrypt-proxy/xtransport.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/dnscrypt-proxy/xtransport.go b/dnscrypt-proxy/xtransport.go
index a362298b..ed504245 100644
--- a/dnscrypt-proxy/xtransport.go
+++ b/dnscrypt-proxy/xtransport.go
@@ -36,6 +36,7 @@ const (
 	DefaultTimeout           = 30 * time.Second
 	SystemResolverIPTTL      = 12 * time.Hour
 	MinResolverIPTTL         = 4 * time.Hour
+	ResolverIPTTLMaxJitter   = 15 * time.Minute
 	ExpiredCachedIPGraceTTL  = 15 * time.Minute
 )
 
@@ -111,6 +112,7 @@ func (xTransport *XTransport) saveCachedIP(host string, ip net.IP, ttl time.Dura
 		if ttl < MinResolverIPTTL {
 			ttl = MinResolverIPTTL
 		}
+		ttl += time.Duration(rand.Int63n(int64(ResolverIPTTLMaxJitter)))
 		expiration := time.Now().Add(ttl)
 		item.expiration = &expiration
 	}

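For context, the new ResolverIPTTLMaxJitter is added after the existing MinResolverIPTTL floor is applied, so each cached resolver IP gets a slightly different expiration and the entries don't all need to be re-resolved at the same instant. A minimal standalone sketch of the idea, reusing the constants from the hunk above (not the proxy's actual cache code):

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    const (
    	MinResolverIPTTL       = 4 * time.Hour
    	ResolverIPTTLMaxJitter = 15 * time.Minute
    )

    func main() {
    	// Clamp the advertised TTL to the floor, then add up to 15 minutes of
    	// random jitter so cached resolver IPs don't all expire at the same time.
    	ttl := 1 * time.Hour
    	if ttl < MinResolverIPTTL {
    		ttl = MinResolverIPTTL
    	}
    	ttl += time.Duration(rand.Int63n(int64(ResolverIPTTLMaxJitter)))
    	fmt.Println("cached IP expires at:", time.Now().Add(ttl).Round(time.Second))
    }
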
From de0dd385ce0138d32db7e0f4f8e8bcd9ffcbf7e4 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Wed, 26 Mar 2025 23:03:30 +0100
Subject: [PATCH 45/48] Update ChangeLog

---
 ChangeLog | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index a961dea6..9b0a15f9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,21 @@
+# Version 2.1.8
+ - Dependencies have been updated, notably the QUIC implementation,
+whose previous version could be vulnerable to denial-of-service attacks.
+ - In forwarding rules, the target can now optionally include a
+non-standard DNS port number. The port number is also now optional when
+using IPv6.
+ - An annoying log message related to permissions on Windows has been
+suppressed.
+ - Resolver IP addresses can now be refreshed more frequently.
+Additionally, jitter has been introduced to prevent all resolvers from
+being refreshed simultaneously. Further changes have been implemented
+to mitigate issues arising from multiple concurrent attempts to resolve
+a resolver's IP address.
+ - An empty value for "tls_cipher_suite" is now equivalent to leaving
+the property undefined. Previously, it disabled all TLS cipher suites,
+which had little practical justification.
+ - In forwarding rules, an optional `*.` prefix is now accepted.
+
 # Version 2.1.7
  - This version reintroduces support for XSalsa20 encryption in DNSCrypt,
 which was removed in 2.1.6. Unfortunately, a bunch of servers still

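To make the forwarding-rule changes listed above concrete, here is a hypothetical forwarding_rules.txt fragment; the domains and addresses are placeholders, and the exact accepted syntax is defined by plugin_forward.go:

    # optional *. prefix in front of a zone
    *.internal.example   10.0.0.1,10.0.0.254
    # target with a non-standard DNS port
    example.com          192.168.1.1:5353
    # IPv6 target; the port number is now optional
    ipv6.example.com     2001:db8::53
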
From 67c121317701b01409279a312d4d06fd51123557 Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Wed, 26 Mar 2025 23:05:30 +0100
Subject: [PATCH 46/48] Bump

---
 dnscrypt-proxy/main.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dnscrypt-proxy/main.go b/dnscrypt-proxy/main.go
index 3121bcd0..dbaff747 100644
--- a/dnscrypt-proxy/main.go
+++ b/dnscrypt-proxy/main.go
@@ -15,7 +15,7 @@ import (
 )
 
 const (
-	AppVersion            = "2.1.7"
+	AppVersion            = "2.1.8"
 	DefaultConfigFileName = "dnscrypt-proxy.toml"
 )
 

From 19f240fb3d83aab06b3afcddf2b72a382ba1897e Mon Sep 17 00:00:00 2001
From: YX Hao 
Date: Thu, 6 Feb 2025 19:26:27 +0800
Subject: [PATCH 47/48] Reduce DHCP DNS detector instances

---
 dnscrypt-proxy/plugin_forward.go | 44 +++++++++++++-------------------
 1 file changed, 18 insertions(+), 26 deletions(-)

diff --git a/dnscrypt-proxy/plugin_forward.go b/dnscrypt-proxy/plugin_forward.go
index ff144c1a..03672b8d 100644
--- a/dnscrypt-proxy/plugin_forward.go
+++ b/dnscrypt-proxy/plugin_forward.go
@@ -54,6 +54,7 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
 	if err != nil {
 		return err
 	}
+	requiresDHCP := false
 	for lineNo, line := range strings.Split(lines, "\n") {
 		line = TrimAndStripInlineComments(line)
 		if len(line) == 0 {
@@ -71,7 +72,6 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
 			)
 		}
 		domain = strings.ToLower(domain)
-		requiresDHCP := false
 		var sequence []SearchSequenceItem
 		for _, server := range strings.Split(serversStr, ",") {
 			server = strings.TrimSpace(server)
@@ -120,36 +120,28 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
 				dlog.Infof("Forwarding [%s] to [%s]", domain, server)
 			}
 		}
-		if requiresDHCP {
-			if len(proxy.userName) > 0 {
-				dlog.Warn("DHCP/DNS detection may not work when 'user_name' is set or when starting as a non-root user")
-			}
-			if proxy.SourceIPv6 {
-				dlog.Notice("Starting a DHCP/DNS detector for IPv6")
-				d6 := &dhcpdns.Detector{RemoteIPPort: "[2001:DB8::53]:80"}
-				if err := d6.Detect(); err != nil {
-					dlog.Criticalf("Failed to start the DHCP/DNS IPv6 server: %s", err)
-					continue
-				}
-				go d6.Serve(9, 10)
-				plugin.dhcpdns = append(plugin.dhcpdns, d6)
-			}
-			if proxy.SourceIPv4 {
-				dlog.Notice("Starting a DHCP/DNS detector for IPv4")
-				d4 := &dhcpdns.Detector{RemoteIPPort: "192.0.2.53:80"}
-				if err := d4.Detect(); err != nil {
-					dlog.Criticalf("Failed to start the DHCP/DNS IPv4 server: %s", err)
-					continue
-				}
-				go d4.Serve(9, 10)
-				plugin.dhcpdns = append(plugin.dhcpdns, d4)
-			}
-		}
 		plugin.forwardMap = append(plugin.forwardMap, PluginForwardEntry{
 			domain:   domain,
 			sequence: sequence,
 		})
 	}
+	if requiresDHCP {
+		if len(proxy.userName) > 0 {
+			dlog.Warn("DHCP/DNS detection may not work when 'user_name' is set or when starting as a non-root user")
+		}
+		if proxy.SourceIPv6 {
+			dlog.Notice("Starting a DHCP/DNS detector for IPv6")
+			d6 := &dhcpdns.Detector{RemoteIPPort: "[2001:DB8::53]:80"}
+			go d6.Serve(9, 10)
+			plugin.dhcpdns = append(plugin.dhcpdns, d6)
+		}
+		if proxy.SourceIPv4 {
+			dlog.Notice("Starting a DHCP/DNS detector for IPv4")
+			d4 := &dhcpdns.Detector{RemoteIPPort: "192.0.2.53:80"}
+			go d4.Serve(9, 10)
+			plugin.dhcpdns = append(plugin.dhcpdns, d4)
+		}
+	}
 	return nil
 }
 

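The refactor above moves detector startup out of the per-rule loop: requiresDHCP is now set while parsing all rules and checked once afterwards, so at most one dhcpdns detector is started per address family no matter how many rules reference $DHCP. A toy sketch of the hoisted-flag pattern (illustrative only, not the plugin's actual parsing code):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	rules := []string{"example.com $DHCP", "corp.test 10.0.0.1", "lan.test $DHCP"}

    	// Remember that at least one rule needs DHCP-provided resolvers,
    	// instead of starting a detector inside the loop for every match.
    	requiresDHCP := false
    	for _, rule := range rules {
    		if strings.Contains(rule, "$DHCP") {
    			requiresDHCP = true
    		}
    	}
    	// Start the detector(s) once, after all rules have been parsed.
    	if requiresDHCP {
    		fmt.Println("starting a single DHCP/DNS detector per address family")
    	}
    }
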
From ae2d7d24e8e6c28281514c601912680cb6d3460b Mon Sep 17 00:00:00 2001
From: Frank Denis 
Date: Fri, 28 Mar 2025 12:17:02 +0100
Subject: [PATCH 48/48] Update x/net

---
 go.mod                                  |  2 +-
 go.sum                                  |  4 ++--
 vendor/golang.org/x/net/http2/frame.go  | 11 +++++++++++
 vendor/golang.org/x/net/http2/server.go |  5 ++++-
 vendor/modules.txt                      |  2 +-
 5 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/go.mod b/go.mod
index 54bb6326..97c55a1e 100644
--- a/go.mod
+++ b/go.mod
@@ -23,7 +23,7 @@ require (
 	github.com/powerman/check v1.8.0
 	github.com/quic-go/quic-go v0.50.1
 	golang.org/x/crypto v0.36.0
-	golang.org/x/net v0.37.0
+	golang.org/x/net v0.38.0
 	golang.org/x/sys v0.31.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )
diff --git a/go.sum b/go.sum
index 58dd76de..655a71c0 100644
--- a/go.sum
+++ b/go.sum
@@ -93,8 +93,8 @@ golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJ
 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
 golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
 golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
-golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
-golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
 golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 81faec7e..97bd8b06 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -225,6 +225,11 @@ var fhBytes = sync.Pool{
 	},
 }
 
+func invalidHTTP1LookingFrameHeader() FrameHeader {
+	fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 "))
+	return fh
+}
+
 // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
 // Most users should use Framer.ReadFrame instead.
 func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
@@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) {
 		return nil, err
 	}
 	if fh.Length > fr.maxReadSize {
+		if fh == invalidHTTP1LookingFrameHeader() {
+			return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+		}
 		return nil, ErrFrameTooLarge
 	}
 	payload := fr.getReadBuf(fh.Length)
 	if _, err := io.ReadFull(fr.r, payload); err != nil {
+		if fh == invalidHTTP1LookingFrameHeader() {
+			return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+		}
 		return nil, err
 	}
 	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b640deb0..51fca38f 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) {
 
 func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
 	if sc.pingSent {
-		sc.vlogf("timeout waiting for PING response")
+		sc.logf("timeout waiting for PING response")
+		if f := sc.countErrorFunc; f != nil {
+			f("conn_close_lost_ping")
+		}
 		sc.conn.Close()
 		return
 	}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d0a1277d..a90cc2d1 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -154,7 +154,7 @@ golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.37.0
+# golang.org/x/net v0.38.0
 ## explicit; go 1.23.0
 golang.org/x/net/bpf
 golang.org/x/net/http/httpguts