Update deps

Frank Denis 2025-01-10 12:16:20 +01:00
parent 4fbdf2cfcc
commit cd3cb2e98b
35 changed files with 1112 additions and 2457 deletions


@@ -1,182 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package box authenticates and encrypts small messages using public-key cryptography.
Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate
messages. The length of messages is not hidden.
It is the caller's responsibility to ensure the uniqueness of nonces; for
example, by using nonce 1 for the first message, nonce 2 for the second
message, etc. Nonces are long enough that randomly generated nonces have
negligible risk of collision.
Messages should be small because:
1. The whole message needs to be held in memory to be processed.
2. Using large messages pressures implementations on small machines to decrypt
and process plaintext before authenticating it. This is very dangerous, and
this API does not allow it, but a protocol that uses excessive message sizes
might present some implementations with no other choice.
3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
4. Performance may be improved by working with messages that fit into data caches.
Thus large amounts of data should be chunked so that each message is small.
(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
chunk size.
This package is interoperable with NaCl: https://nacl.cr.yp.to/box.html.
Anonymous sealing/opening is an extension of NaCl defined by and interoperable
with libsodium:
https://libsodium.gitbook.io/doc/public-key_cryptography/sealed_boxes.
*/
package box
import (
cryptorand "crypto/rand"
"io"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/salsa20/salsa"
)
const (
// Overhead is the number of bytes of overhead when boxing a message.
Overhead = secretbox.Overhead
// AnonymousOverhead is the number of bytes of overhead when using anonymous
// sealed boxes.
AnonymousOverhead = Overhead + 32
)
// GenerateKey generates a new public/private key pair suitable for use with
// Seal and Open.
func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) {
publicKey = new([32]byte)
privateKey = new([32]byte)
_, err = io.ReadFull(rand, privateKey[:])
if err != nil {
publicKey = nil
privateKey = nil
return
}
curve25519.ScalarBaseMult(publicKey, privateKey)
return
}
var zeros [16]byte
// Precompute calculates the shared key between peersPublicKey and privateKey
// and writes it to sharedKey. The shared key can be used with
// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing
// when using the same pair of keys repeatedly.
func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) {
curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey)
salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma)
}
// Seal appends an encrypted and authenticated copy of message to out, which
// will be Overhead bytes longer than the original and must not overlap it. The
// nonce must be unique for each distinct message for a given pair of keys.
func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte {
var sharedKey [32]byte
Precompute(&sharedKey, peersPublicKey, privateKey)
return secretbox.Seal(out, message, nonce, &sharedKey)
}
// SealAfterPrecomputation performs the same actions as Seal, but takes a
// shared key as generated by Precompute.
func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte {
return secretbox.Seal(out, message, nonce, sharedKey)
}
// Open authenticates and decrypts a box produced by Seal and appends the
// message to out, which must not overlap box. The output will be Overhead
// bytes smaller than box.
func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) {
var sharedKey [32]byte
Precompute(&sharedKey, peersPublicKey, privateKey)
return secretbox.Open(out, box, nonce, &sharedKey)
}
// OpenAfterPrecomputation performs the same actions as Open, but takes a
// shared key as generated by Precompute.
func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) {
return secretbox.Open(out, box, nonce, sharedKey)
}
// SealAnonymous appends an encrypted and authenticated copy of message to out,
// which will be AnonymousOverhead bytes longer than the original and must not
// overlap it. This differs from Seal in that the sender is not required to
// provide a private key.
func SealAnonymous(out, message []byte, recipient *[32]byte, rand io.Reader) ([]byte, error) {
if rand == nil {
rand = cryptorand.Reader
}
ephemeralPub, ephemeralPriv, err := GenerateKey(rand)
if err != nil {
return nil, err
}
var nonce [24]byte
if err := sealNonce(ephemeralPub, recipient, &nonce); err != nil {
return nil, err
}
if total := len(out) + AnonymousOverhead + len(message); cap(out) < total {
original := out
out = make([]byte, 0, total)
out = append(out, original...)
}
out = append(out, ephemeralPub[:]...)
return Seal(out, message, &nonce, recipient, ephemeralPriv), nil
}
// OpenAnonymous authenticates and decrypts a box produced by SealAnonymous and
// appends the message to out, which must not overlap box. The output will be
// AnonymousOverhead bytes smaller than box.
func OpenAnonymous(out, box []byte, publicKey, privateKey *[32]byte) (message []byte, ok bool) {
if len(box) < AnonymousOverhead {
return nil, false
}
var ephemeralPub [32]byte
copy(ephemeralPub[:], box[:32])
var nonce [24]byte
if err := sealNonce(&ephemeralPub, publicKey, &nonce); err != nil {
return nil, false
}
return Open(out, box[32:], &nonce, &ephemeralPub, privateKey)
}
// sealNonce generates a 24 byte nonce that is a blake2b digest of the
// ephemeral public key and the receiver's public key.
func sealNonce(ephemeralPub, peersPublicKey *[32]byte, nonce *[24]byte) error {
h, err := blake2b.New(24, nil)
if err != nil {
return err
}
if _, err = h.Write(ephemeralPub[:]); err != nil {
return err
}
if _, err = h.Write(peersPublicKey[:]); err != nil {
return err
}
h.Sum(nonce[:0])
return nil
}
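
For reference, since this commit removes the vendored copy, a minimal usage sketch of the package as it is typically called (illustrative only, not part of the diff; prepending the nonce to the ciphertext is a common convention, not an API requirement):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	senderPub, senderPriv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	recipientPub, recipientPriv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Random nonces are acceptable; see the package comment on uniqueness.
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Prepend the nonce so the recipient can recover it.
	sealed := box.Seal(nonce[:], []byte("hello"), &nonce, recipientPub, senderPriv)

	var peerNonce [24]byte
	copy(peerNonce[:], sealed[:24])
	plain, ok := box.Open(nil, sealed[24:], &peerNonce, senderPub, recipientPriv)
	fmt.Println(ok, string(plain)) // true hello
}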


@@ -1,173 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package secretbox encrypts and authenticates small messages.
Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
secret-key cryptography. The length of messages is not hidden.
It is the caller's responsibility to ensure the uniqueness of nonces; for
example, by using nonce 1 for the first message, nonce 2 for the second
message, etc. Nonces are long enough that randomly generated nonces have
negligible risk of collision.
Messages should be small because:
1. The whole message needs to be held in memory to be processed.
2. Using large messages pressures implementations on small machines to decrypt
and process plaintext before authenticating it. This is very dangerous, and
this API does not allow it, but a protocol that uses excessive message sizes
might present some implementations with no other choice.
3. Fixed overheads will be sufficiently amortised by messages as small as 8KB.
4. Performance may be improved by working with messages that fit into data caches.
Thus large amounts of data should be chunked so that each message is small.
(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable
chunk size.
This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
*/
package secretbox
import (
"golang.org/x/crypto/internal/alias"
"golang.org/x/crypto/internal/poly1305"
"golang.org/x/crypto/salsa20/salsa"
)
// Overhead is the number of bytes of overhead when boxing a message.
const Overhead = poly1305.TagSize
// setup produces a sub-key and Salsa20 counter given a nonce and key.
func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
// We use XSalsa20 for encryption so first we need to generate a
// key and nonce with HSalsa20.
var hNonce [16]byte
copy(hNonce[:], nonce[:])
salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
// The final 8 bytes of the original nonce form the new nonce.
copy(counter[:], nonce[16:])
}
// sliceForAppend takes a slice and a requested number of bytes. It returns a
// slice with the contents of the given slice followed by that many bytes and a
// second slice that aliases into it and contains only the extra bytes. If the
// original slice has sufficient capacity then no allocation is performed.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
if total := len(in) + n; cap(in) >= total {
head = in[:total]
} else {
head = make([]byte, total)
copy(head, in)
}
tail = head[len(in):]
return
}
// Seal appends an encrypted and authenticated copy of message to out, which
// must not overlap message. The key and nonce pair must be unique for each
// distinct message and the output will be Overhead bytes longer than message.
func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
var subKey [32]byte
var counter [16]byte
setup(&subKey, &counter, nonce, key)
// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
// keystream as a side effect.
var firstBlock [64]byte
salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
var poly1305Key [32]byte
copy(poly1305Key[:], firstBlock[:])
ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
if alias.AnyOverlap(out, message) {
panic("nacl: invalid buffer overlap")
}
// We XOR up to 32 bytes of message with the keystream generated from
// the first block.
firstMessageBlock := message
if len(firstMessageBlock) > 32 {
firstMessageBlock = firstMessageBlock[:32]
}
tagOut := out
out = out[poly1305.TagSize:]
for i, x := range firstMessageBlock {
out[i] = firstBlock[32+i] ^ x
}
message = message[len(firstMessageBlock):]
ciphertext := out
out = out[len(firstMessageBlock):]
// Now encrypt the rest.
counter[8] = 1
salsa.XORKeyStream(out, message, &counter, &subKey)
var tag [poly1305.TagSize]byte
poly1305.Sum(&tag, ciphertext, &poly1305Key)
copy(tagOut, tag[:])
return ret
}
// Open authenticates and decrypts a box produced by Seal and appends the
// message to out, which must not overlap box. The output will be Overhead
// bytes smaller than box.
func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
if len(box) < Overhead {
return nil, false
}
var subKey [32]byte
var counter [16]byte
setup(&subKey, &counter, nonce, key)
// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
// keystream as a side effect.
var firstBlock [64]byte
salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
var poly1305Key [32]byte
copy(poly1305Key[:], firstBlock[:])
var tag [poly1305.TagSize]byte
copy(tag[:], box)
if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
return nil, false
}
ret, out := sliceForAppend(out, len(box)-Overhead)
if alias.AnyOverlap(out, box) {
panic("nacl: invalid buffer overlap")
}
// We XOR up to 32 bytes of box with the keystream generated from
// the first block.
box = box[Overhead:]
firstMessageBlock := box
if len(firstMessageBlock) > 32 {
firstMessageBlock = firstMessageBlock[:32]
}
for i, x := range firstMessageBlock {
out[i] = firstBlock[32+i] ^ x
}
box = box[len(firstMessageBlock):]
out = out[len(firstMessageBlock):]
// Now decrypt the rest.
counter[8] = 1
salsa.XORKeyStream(out, box, &counter, &subKey)
return ret, true
}
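
A matching usage sketch for secretbox (illustrative, not part of the diff); the sealed output is Overhead (16) bytes longer than the message, plus the 24-byte nonce this example chooses to prepend:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Prepend the nonce so the receiver can recover it.
	sealed := secretbox.Seal(nonce[:], []byte("hello"), &nonce, &key)

	var openNonce [24]byte
	copy(openNonce[:], sealed[:24])
	plain, ok := secretbox.Open(nil, sealed[24:], &openNonce, &key)
	fmt.Println(ok, string(plain)) // true hello
}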


@@ -1,146 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package salsa provides low-level access to functions in the Salsa family.
package salsa
import "math/bits"
// Sigma is the Salsa20 constant for 256-bit keys.
var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
// key k, and 16-byte constant c, and puts the result into the 32-byte array
// out.
func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
for i := 0; i < 20; i += 2 {
u := x0 + x12
x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
x15 ^= bits.RotateLeft32(u, 18)
}
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x5)
out[5] = byte(x5 >> 8)
out[6] = byte(x5 >> 16)
out[7] = byte(x5 >> 24)
out[8] = byte(x10)
out[9] = byte(x10 >> 8)
out[10] = byte(x10 >> 16)
out[11] = byte(x10 >> 24)
out[12] = byte(x15)
out[13] = byte(x15 >> 8)
out[14] = byte(x15 >> 16)
out[15] = byte(x15 >> 24)
out[16] = byte(x6)
out[17] = byte(x6 >> 8)
out[18] = byte(x6 >> 16)
out[19] = byte(x6 >> 24)
out[20] = byte(x7)
out[21] = byte(x7 >> 8)
out[22] = byte(x7 >> 16)
out[23] = byte(x7 >> 24)
out[24] = byte(x8)
out[25] = byte(x8 >> 8)
out[26] = byte(x8 >> 16)
out[27] = byte(x8 >> 24)
out[28] = byte(x9)
out[29] = byte(x9 >> 8)
out[30] = byte(x9 >> 16)
out[31] = byte(x9 >> 24)
}
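
HSalsa20 is the key-derivation step used by box.Precompute (with a zero input) and by secretbox.setup (with the first 16 nonce bytes), both earlier in this diff. A sketch of the nonce-based derivation, using only the exported salsa API:

package example

import "golang.org/x/crypto/salsa20/salsa"

// deriveSubKey mirrors secretbox.setup: the first 16 nonce bytes go through
// HSalsa20, and the remaining 8 become the Salsa20 counter prefix.
func deriveSubKey(key *[32]byte, nonce *[24]byte) (subKey [32]byte, counter [16]byte) {
	var hNonce [16]byte
	copy(hNonce[:], nonce[:16])
	salsa.HSalsa20(&subKey, &hNonce, key, &salsa.Sigma)
	copy(counter[:8], nonce[16:])
	return
}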


@@ -1,201 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package salsa
import "math/bits"
// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
// the result into the 64-byte array out. The input and output may be the same array.
func Core208(out *[64]byte, in *[64]byte) {
j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
for i := 0; i < 8; i += 2 {
u := x0 + x12
x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
x15 ^= bits.RotateLeft32(u, 18)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += j4
x5 += j5
x6 += j6
x7 += j7
x8 += j8
x9 += j9
x10 += j10
x11 += j11
x12 += j12
x13 += j13
x14 += j14
x15 += j15
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x1)
out[5] = byte(x1 >> 8)
out[6] = byte(x1 >> 16)
out[7] = byte(x1 >> 24)
out[8] = byte(x2)
out[9] = byte(x2 >> 8)
out[10] = byte(x2 >> 16)
out[11] = byte(x2 >> 24)
out[12] = byte(x3)
out[13] = byte(x3 >> 8)
out[14] = byte(x3 >> 16)
out[15] = byte(x3 >> 24)
out[16] = byte(x4)
out[17] = byte(x4 >> 8)
out[18] = byte(x4 >> 16)
out[19] = byte(x4 >> 24)
out[20] = byte(x5)
out[21] = byte(x5 >> 8)
out[22] = byte(x5 >> 16)
out[23] = byte(x5 >> 24)
out[24] = byte(x6)
out[25] = byte(x6 >> 8)
out[26] = byte(x6 >> 16)
out[27] = byte(x6 >> 24)
out[28] = byte(x7)
out[29] = byte(x7 >> 8)
out[30] = byte(x7 >> 16)
out[31] = byte(x7 >> 24)
out[32] = byte(x8)
out[33] = byte(x8 >> 8)
out[34] = byte(x8 >> 16)
out[35] = byte(x8 >> 24)
out[36] = byte(x9)
out[37] = byte(x9 >> 8)
out[38] = byte(x9 >> 16)
out[39] = byte(x9 >> 24)
out[40] = byte(x10)
out[41] = byte(x10 >> 8)
out[42] = byte(x10 >> 16)
out[43] = byte(x10 >> 24)
out[44] = byte(x11)
out[45] = byte(x11 >> 8)
out[46] = byte(x11 >> 16)
out[47] = byte(x11 >> 24)
out[48] = byte(x12)
out[49] = byte(x12 >> 8)
out[50] = byte(x12 >> 16)
out[51] = byte(x12 >> 24)
out[52] = byte(x13)
out[53] = byte(x13 >> 8)
out[54] = byte(x13 >> 16)
out[55] = byte(x13 >> 24)
out[56] = byte(x14)
out[57] = byte(x14 >> 8)
out[58] = byte(x14 >> 16)
out[59] = byte(x14 >> 24)
out[60] = byte(x15)
out[61] = byte(x15 >> 8)
out[62] = byte(x15 >> 16)
out[63] = byte(x15 >> 24)
}
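
Core208 is the Salsa20/8 core, exported for callers such as scrypt's block mixing; a minimal call, noting that in-place use is allowed (sketch):

package example

import "golang.org/x/crypto/salsa20/salsa"

// mixBlock runs one Salsa20/8 core over a 64-byte block, in place.
func mixBlock(block *[64]byte) {
	salsa.Core208(block, block) // input and output may be the same array
}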


@@ -1,23 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
package salsa
//go:noescape
// salsa2020XORKeyStream is implemented in salsa20_amd64.s.
func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
// XORKeyStream crypts bytes from in to out using the given key and counters.
// In and out must overlap entirely or not at all. Counter
// contains the raw salsa20 counter bytes (both nonce and block counter).
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
if len(in) == 0 {
return
}
_ = out[len(in)-1]
salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
}


@@ -1,880 +0,0 @@
// Code generated by command: go run salsa20_amd64_asm.go -out ../salsa20_amd64.s -pkg salsa. DO NOT EDIT.
//go:build amd64 && !purego && gc
// func salsa2020XORKeyStream(out *byte, in *byte, n uint64, nonce *byte, key *byte)
// Requires: SSE2
TEXT ·salsa2020XORKeyStream(SB), $456-40
// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size.
MOVQ out+0(FP), DI
MOVQ in+8(FP), SI
MOVQ n+16(FP), DX
MOVQ nonce+24(FP), CX
MOVQ key+32(FP), R8
MOVQ SP, R12
ADDQ $0x1f, R12
ANDQ $-32, R12
MOVQ DX, R9
MOVQ CX, DX
MOVQ R8, R10
CMPQ R9, $0x00
JBE DONE
MOVL 20(R10), CX
MOVL (R10), R8
MOVL (DX), AX
MOVL 16(R10), R11
MOVL CX, (R12)
MOVL R8, 4(R12)
MOVL AX, 8(R12)
MOVL R11, 12(R12)
MOVL 8(DX), CX
MOVL 24(R10), R8
MOVL 4(R10), AX
MOVL 4(DX), R11
MOVL CX, 16(R12)
MOVL R8, 20(R12)
MOVL AX, 24(R12)
MOVL R11, 28(R12)
MOVL 12(DX), CX
MOVL 12(R10), DX
MOVL 28(R10), R8
MOVL 8(R10), AX
MOVL DX, 32(R12)
MOVL CX, 36(R12)
MOVL R8, 40(R12)
MOVL AX, 44(R12)
MOVQ $0x61707865, DX
MOVQ $0x3320646e, CX
MOVQ $0x79622d32, R8
MOVQ $0x6b206574, AX
MOVL DX, 48(R12)
MOVL CX, 52(R12)
MOVL R8, 56(R12)
MOVL AX, 60(R12)
CMPQ R9, $0x00000100
JB BYTESBETWEEN1AND255
MOVOA 48(R12), X0
PSHUFL $0x55, X0, X1
PSHUFL $0xaa, X0, X2
PSHUFL $0xff, X0, X3
PSHUFL $0x00, X0, X0
MOVOA X1, 64(R12)
MOVOA X2, 80(R12)
MOVOA X3, 96(R12)
MOVOA X0, 112(R12)
MOVOA (R12), X0
PSHUFL $0xaa, X0, X1
PSHUFL $0xff, X0, X2
PSHUFL $0x00, X0, X3
PSHUFL $0x55, X0, X0
MOVOA X1, 128(R12)
MOVOA X2, 144(R12)
MOVOA X3, 160(R12)
MOVOA X0, 176(R12)
MOVOA 16(R12), X0
PSHUFL $0xff, X0, X1
PSHUFL $0x55, X0, X2
PSHUFL $0xaa, X0, X0
MOVOA X1, 192(R12)
MOVOA X2, 208(R12)
MOVOA X0, 224(R12)
MOVOA 32(R12), X0
PSHUFL $0x00, X0, X1
PSHUFL $0xaa, X0, X2
PSHUFL $0xff, X0, X0
MOVOA X1, 240(R12)
MOVOA X2, 256(R12)
MOVOA X0, 272(R12)
BYTESATLEAST256:
MOVL 16(R12), DX
MOVL 36(R12), CX
MOVL DX, 288(R12)
MOVL CX, 304(R12)
SHLQ $0x20, CX
ADDQ CX, DX
ADDQ $0x01, DX
MOVQ DX, CX
SHRQ $0x20, CX
MOVL DX, 292(R12)
MOVL CX, 308(R12)
ADDQ $0x01, DX
MOVQ DX, CX
SHRQ $0x20, CX
MOVL DX, 296(R12)
MOVL CX, 312(R12)
ADDQ $0x01, DX
MOVQ DX, CX
SHRQ $0x20, CX
MOVL DX, 300(R12)
MOVL CX, 316(R12)
ADDQ $0x01, DX
MOVQ DX, CX
SHRQ $0x20, CX
MOVL DX, 16(R12)
MOVL CX, 36(R12)
MOVQ R9, 352(R12)
MOVQ $0x00000014, DX
MOVOA 64(R12), X0
MOVOA 80(R12), X1
MOVOA 96(R12), X2
MOVOA 256(R12), X3
MOVOA 272(R12), X4
MOVOA 128(R12), X5
MOVOA 144(R12), X6
MOVOA 176(R12), X7
MOVOA 192(R12), X8
MOVOA 208(R12), X9
MOVOA 224(R12), X10
MOVOA 304(R12), X11
MOVOA 112(R12), X12
MOVOA 160(R12), X13
MOVOA 240(R12), X14
MOVOA 288(R12), X15
MAINLOOP1:
MOVOA X1, 320(R12)
MOVOA X2, 336(R12)
MOVOA X13, X1
PADDL X12, X1
MOVOA X1, X2
PSLLL $0x07, X1
PXOR X1, X14
PSRLL $0x19, X2
PXOR X2, X14
MOVOA X7, X1
PADDL X0, X1
MOVOA X1, X2
PSLLL $0x07, X1
PXOR X1, X11
PSRLL $0x19, X2
PXOR X2, X11
MOVOA X12, X1
PADDL X14, X1
MOVOA X1, X2
PSLLL $0x09, X1
PXOR X1, X15
PSRLL $0x17, X2
PXOR X2, X15
MOVOA X0, X1
PADDL X11, X1
MOVOA X1, X2
PSLLL $0x09, X1
PXOR X1, X9
PSRLL $0x17, X2
PXOR X2, X9
MOVOA X14, X1
PADDL X15, X1
MOVOA X1, X2
PSLLL $0x0d, X1
PXOR X1, X13
PSRLL $0x13, X2
PXOR X2, X13
MOVOA X11, X1
PADDL X9, X1
MOVOA X1, X2
PSLLL $0x0d, X1
PXOR X1, X7
PSRLL $0x13, X2
PXOR X2, X7
MOVOA X15, X1
PADDL X13, X1
MOVOA X1, X2
PSLLL $0x12, X1
PXOR X1, X12
PSRLL $0x0e, X2
PXOR X2, X12
MOVOA 320(R12), X1
MOVOA X12, 320(R12)
MOVOA X9, X2
PADDL X7, X2
MOVOA X2, X12
PSLLL $0x12, X2
PXOR X2, X0
PSRLL $0x0e, X12
PXOR X12, X0
MOVOA X5, X2
PADDL X1, X2
MOVOA X2, X12
PSLLL $0x07, X2
PXOR X2, X3
PSRLL $0x19, X12
PXOR X12, X3
MOVOA 336(R12), X2
MOVOA X0, 336(R12)
MOVOA X6, X0
PADDL X2, X0
MOVOA X0, X12
PSLLL $0x07, X0
PXOR X0, X4
PSRLL $0x19, X12
PXOR X12, X4
MOVOA X1, X0
PADDL X3, X0
MOVOA X0, X12
PSLLL $0x09, X0
PXOR X0, X10
PSRLL $0x17, X12
PXOR X12, X10
MOVOA X2, X0
PADDL X4, X0
MOVOA X0, X12
PSLLL $0x09, X0
PXOR X0, X8
PSRLL $0x17, X12
PXOR X12, X8
MOVOA X3, X0
PADDL X10, X0
MOVOA X0, X12
PSLLL $0x0d, X0
PXOR X0, X5
PSRLL $0x13, X12
PXOR X12, X5
MOVOA X4, X0
PADDL X8, X0
MOVOA X0, X12
PSLLL $0x0d, X0
PXOR X0, X6
PSRLL $0x13, X12
PXOR X12, X6
MOVOA X10, X0
PADDL X5, X0
MOVOA X0, X12
PSLLL $0x12, X0
PXOR X0, X1
PSRLL $0x0e, X12
PXOR X12, X1
MOVOA 320(R12), X0
MOVOA X1, 320(R12)
MOVOA X4, X1
PADDL X0, X1
MOVOA X1, X12
PSLLL $0x07, X1
PXOR X1, X7
PSRLL $0x19, X12
PXOR X12, X7
MOVOA X8, X1
PADDL X6, X1
MOVOA X1, X12
PSLLL $0x12, X1
PXOR X1, X2
PSRLL $0x0e, X12
PXOR X12, X2
MOVOA 336(R12), X12
MOVOA X2, 336(R12)
MOVOA X14, X1
PADDL X12, X1
MOVOA X1, X2
PSLLL $0x07, X1
PXOR X1, X5
PSRLL $0x19, X2
PXOR X2, X5
MOVOA X0, X1
PADDL X7, X1
MOVOA X1, X2
PSLLL $0x09, X1
PXOR X1, X10
PSRLL $0x17, X2
PXOR X2, X10
MOVOA X12, X1
PADDL X5, X1
MOVOA X1, X2
PSLLL $0x09, X1
PXOR X1, X8
PSRLL $0x17, X2
PXOR X2, X8
MOVOA X7, X1
PADDL X10, X1
MOVOA X1, X2
PSLLL $0x0d, X1
PXOR X1, X4
PSRLL $0x13, X2
PXOR X2, X4
MOVOA X5, X1
PADDL X8, X1
MOVOA X1, X2
PSLLL $0x0d, X1
PXOR X1, X14
PSRLL $0x13, X2
PXOR X2, X14
MOVOA X10, X1
PADDL X4, X1
MOVOA X1, X2
PSLLL $0x12, X1
PXOR X1, X0
PSRLL $0x0e, X2
PXOR X2, X0
MOVOA 320(R12), X1
MOVOA X0, 320(R12)
MOVOA X8, X0
PADDL X14, X0
MOVOA X0, X2
PSLLL $0x12, X0
PXOR X0, X12
PSRLL $0x0e, X2
PXOR X2, X12
MOVOA X11, X0
PADDL X1, X0
MOVOA X0, X2
PSLLL $0x07, X0
PXOR X0, X6
PSRLL $0x19, X2
PXOR X2, X6
MOVOA 336(R12), X2
MOVOA X12, 336(R12)
MOVOA X3, X0
PADDL X2, X0
MOVOA X0, X12
PSLLL $0x07, X0
PXOR X0, X13
PSRLL $0x19, X12
PXOR X12, X13
MOVOA X1, X0
PADDL X6, X0
MOVOA X0, X12
PSLLL $0x09, X0
PXOR X0, X15
PSRLL $0x17, X12
PXOR X12, X15
MOVOA X2, X0
PADDL X13, X0
MOVOA X0, X12
PSLLL $0x09, X0
PXOR X0, X9
PSRLL $0x17, X12
PXOR X12, X9
MOVOA X6, X0
PADDL X15, X0
MOVOA X0, X12
PSLLL $0x0d, X0
PXOR X0, X11
PSRLL $0x13, X12
PXOR X12, X11
MOVOA X13, X0
PADDL X9, X0
MOVOA X0, X12
PSLLL $0x0d, X0
PXOR X0, X3
PSRLL $0x13, X12
PXOR X12, X3
MOVOA X15, X0
PADDL X11, X0
MOVOA X0, X12
PSLLL $0x12, X0
PXOR X0, X1
PSRLL $0x0e, X12
PXOR X12, X1
MOVOA X9, X0
PADDL X3, X0
MOVOA X0, X12
PSLLL $0x12, X0
PXOR X0, X2
PSRLL $0x0e, X12
PXOR X12, X2
MOVOA 320(R12), X12
MOVOA 336(R12), X0
SUBQ $0x02, DX
JA MAINLOOP1
PADDL 112(R12), X12
PADDL 176(R12), X7
PADDL 224(R12), X10
PADDL 272(R12), X4
MOVD X12, DX
MOVD X7, CX
MOVD X10, R8
MOVD X4, R9
PSHUFL $0x39, X12, X12
PSHUFL $0x39, X7, X7
PSHUFL $0x39, X10, X10
PSHUFL $0x39, X4, X4
XORL (SI), DX
XORL 4(SI), CX
XORL 8(SI), R8
XORL 12(SI), R9
MOVL DX, (DI)
MOVL CX, 4(DI)
MOVL R8, 8(DI)
MOVL R9, 12(DI)
MOVD X12, DX
MOVD X7, CX
MOVD X10, R8
MOVD X4, R9
PSHUFL $0x39, X12, X12
PSHUFL $0x39, X7, X7
PSHUFL $0x39, X10, X10
PSHUFL $0x39, X4, X4
XORL 64(SI), DX
XORL 68(SI), CX
XORL 72(SI), R8
XORL 76(SI), R9
MOVL DX, 64(DI)
MOVL CX, 68(DI)
MOVL R8, 72(DI)
MOVL R9, 76(DI)
MOVD X12, DX
MOVD X7, CX
MOVD X10, R8
MOVD X4, R9
PSHUFL $0x39, X12, X12
PSHUFL $0x39, X7, X7
PSHUFL $0x39, X10, X10
PSHUFL $0x39, X4, X4
XORL 128(SI), DX
XORL 132(SI), CX
XORL 136(SI), R8
XORL 140(SI), R9
MOVL DX, 128(DI)
MOVL CX, 132(DI)
MOVL R8, 136(DI)
MOVL R9, 140(DI)
MOVD X12, DX
MOVD X7, CX
MOVD X10, R8
MOVD X4, R9
XORL 192(SI), DX
XORL 196(SI), CX
XORL 200(SI), R8
XORL 204(SI), R9
MOVL DX, 192(DI)
MOVL CX, 196(DI)
MOVL R8, 200(DI)
MOVL R9, 204(DI)
PADDL 240(R12), X14
PADDL 64(R12), X0
PADDL 128(R12), X5
PADDL 192(R12), X8
MOVD X14, DX
MOVD X0, CX
MOVD X5, R8
MOVD X8, R9
PSHUFL $0x39, X14, X14
PSHUFL $0x39, X0, X0
PSHUFL $0x39, X5, X5
PSHUFL $0x39, X8, X8
XORL 16(SI), DX
XORL 20(SI), CX
XORL 24(SI), R8
XORL 28(SI), R9
MOVL DX, 16(DI)
MOVL CX, 20(DI)
MOVL R8, 24(DI)
MOVL R9, 28(DI)
MOVD X14, DX
MOVD X0, CX
MOVD X5, R8
MOVD X8, R9
PSHUFL $0x39, X14, X14
PSHUFL $0x39, X0, X0
PSHUFL $0x39, X5, X5
PSHUFL $0x39, X8, X8
XORL 80(SI), DX
XORL 84(SI), CX
XORL 88(SI), R8
XORL 92(SI), R9
MOVL DX, 80(DI)
MOVL CX, 84(DI)
MOVL R8, 88(DI)
MOVL R9, 92(DI)
MOVD X14, DX
MOVD X0, CX
MOVD X5, R8
MOVD X8, R9
PSHUFL $0x39, X14, X14
PSHUFL $0x39, X0, X0
PSHUFL $0x39, X5, X5
PSHUFL $0x39, X8, X8
XORL 144(SI), DX
XORL 148(SI), CX
XORL 152(SI), R8
XORL 156(SI), R9
MOVL DX, 144(DI)
MOVL CX, 148(DI)
MOVL R8, 152(DI)
MOVL R9, 156(DI)
MOVD X14, DX
MOVD X0, CX
MOVD X5, R8
MOVD X8, R9
XORL 208(SI), DX
XORL 212(SI), CX
XORL 216(SI), R8
XORL 220(SI), R9
MOVL DX, 208(DI)
MOVL CX, 212(DI)
MOVL R8, 216(DI)
MOVL R9, 220(DI)
PADDL 288(R12), X15
PADDL 304(R12), X11
PADDL 80(R12), X1
PADDL 144(R12), X6
MOVD X15, DX
MOVD X11, CX
MOVD X1, R8
MOVD X6, R9
PSHUFL $0x39, X15, X15
PSHUFL $0x39, X11, X11
PSHUFL $0x39, X1, X1
PSHUFL $0x39, X6, X6
XORL 32(SI), DX
XORL 36(SI), CX
XORL 40(SI), R8
XORL 44(SI), R9
MOVL DX, 32(DI)
MOVL CX, 36(DI)
MOVL R8, 40(DI)
MOVL R9, 44(DI)
MOVD X15, DX
MOVD X11, CX
MOVD X1, R8
MOVD X6, R9
PSHUFL $0x39, X15, X15
PSHUFL $0x39, X11, X11
PSHUFL $0x39, X1, X1
PSHUFL $0x39, X6, X6
XORL 96(SI), DX
XORL 100(SI), CX
XORL 104(SI), R8
XORL 108(SI), R9
MOVL DX, 96(DI)
MOVL CX, 100(DI)
MOVL R8, 104(DI)
MOVL R9, 108(DI)
MOVD X15, DX
MOVD X11, CX
MOVD X1, R8
MOVD X6, R9
PSHUFL $0x39, X15, X15
PSHUFL $0x39, X11, X11
PSHUFL $0x39, X1, X1
PSHUFL $0x39, X6, X6
XORL 160(SI), DX
XORL 164(SI), CX
XORL 168(SI), R8
XORL 172(SI), R9
MOVL DX, 160(DI)
MOVL CX, 164(DI)
MOVL R8, 168(DI)
MOVL R9, 172(DI)
MOVD X15, DX
MOVD X11, CX
MOVD X1, R8
MOVD X6, R9
XORL 224(SI), DX
XORL 228(SI), CX
XORL 232(SI), R8
XORL 236(SI), R9
MOVL DX, 224(DI)
MOVL CX, 228(DI)
MOVL R8, 232(DI)
MOVL R9, 236(DI)
PADDL 160(R12), X13
PADDL 208(R12), X9
PADDL 256(R12), X3
PADDL 96(R12), X2
MOVD X13, DX
MOVD X9, CX
MOVD X3, R8
MOVD X2, R9
PSHUFL $0x39, X13, X13
PSHUFL $0x39, X9, X9
PSHUFL $0x39, X3, X3
PSHUFL $0x39, X2, X2
XORL 48(SI), DX
XORL 52(SI), CX
XORL 56(SI), R8
XORL 60(SI), R9
MOVL DX, 48(DI)
MOVL CX, 52(DI)
MOVL R8, 56(DI)
MOVL R9, 60(DI)
MOVD X13, DX
MOVD X9, CX
MOVD X3, R8
MOVD X2, R9
PSHUFL $0x39, X13, X13
PSHUFL $0x39, X9, X9
PSHUFL $0x39, X3, X3
PSHUFL $0x39, X2, X2
XORL 112(SI), DX
XORL 116(SI), CX
XORL 120(SI), R8
XORL 124(SI), R9
MOVL DX, 112(DI)
MOVL CX, 116(DI)
MOVL R8, 120(DI)
MOVL R9, 124(DI)
MOVD X13, DX
MOVD X9, CX
MOVD X3, R8
MOVD X2, R9
PSHUFL $0x39, X13, X13
PSHUFL $0x39, X9, X9
PSHUFL $0x39, X3, X3
PSHUFL $0x39, X2, X2
XORL 176(SI), DX
XORL 180(SI), CX
XORL 184(SI), R8
XORL 188(SI), R9
MOVL DX, 176(DI)
MOVL CX, 180(DI)
MOVL R8, 184(DI)
MOVL R9, 188(DI)
MOVD X13, DX
MOVD X9, CX
MOVD X3, R8
MOVD X2, R9
XORL 240(SI), DX
XORL 244(SI), CX
XORL 248(SI), R8
XORL 252(SI), R9
MOVL DX, 240(DI)
MOVL CX, 244(DI)
MOVL R8, 248(DI)
MOVL R9, 252(DI)
MOVQ 352(R12), R9
SUBQ $0x00000100, R9
ADDQ $0x00000100, SI
ADDQ $0x00000100, DI
CMPQ R9, $0x00000100
JAE BYTESATLEAST256
CMPQ R9, $0x00
JBE DONE
BYTESBETWEEN1AND255:
CMPQ R9, $0x40
JAE NOCOPY
MOVQ DI, DX
LEAQ 360(R12), DI
MOVQ R9, CX
REP; MOVSB
LEAQ 360(R12), DI
LEAQ 360(R12), SI
NOCOPY:
MOVQ R9, 352(R12)
MOVOA 48(R12), X0
MOVOA (R12), X1
MOVOA 16(R12), X2
MOVOA 32(R12), X3
MOVOA X1, X4
MOVQ $0x00000014, CX
MAINLOOP2:
PADDL X0, X4
MOVOA X0, X5
MOVOA X4, X6
PSLLL $0x07, X4
PSRLL $0x19, X6
PXOR X4, X3
PXOR X6, X3
PADDL X3, X5
MOVOA X3, X4
MOVOA X5, X6
PSLLL $0x09, X5
PSRLL $0x17, X6
PXOR X5, X2
PSHUFL $0x93, X3, X3
PXOR X6, X2
PADDL X2, X4
MOVOA X2, X5
MOVOA X4, X6
PSLLL $0x0d, X4
PSRLL $0x13, X6
PXOR X4, X1
PSHUFL $0x4e, X2, X2
PXOR X6, X1
PADDL X1, X5
MOVOA X3, X4
MOVOA X5, X6
PSLLL $0x12, X5
PSRLL $0x0e, X6
PXOR X5, X0
PSHUFL $0x39, X1, X1
PXOR X6, X0
PADDL X0, X4
MOVOA X0, X5
MOVOA X4, X6
PSLLL $0x07, X4
PSRLL $0x19, X6
PXOR X4, X1
PXOR X6, X1
PADDL X1, X5
MOVOA X1, X4
MOVOA X5, X6
PSLLL $0x09, X5
PSRLL $0x17, X6
PXOR X5, X2
PSHUFL $0x93, X1, X1
PXOR X6, X2
PADDL X2, X4
MOVOA X2, X5
MOVOA X4, X6
PSLLL $0x0d, X4
PSRLL $0x13, X6
PXOR X4, X3
PSHUFL $0x4e, X2, X2
PXOR X6, X3
PADDL X3, X5
MOVOA X1, X4
MOVOA X5, X6
PSLLL $0x12, X5
PSRLL $0x0e, X6
PXOR X5, X0
PSHUFL $0x39, X3, X3
PXOR X6, X0
PADDL X0, X4
MOVOA X0, X5
MOVOA X4, X6
PSLLL $0x07, X4
PSRLL $0x19, X6
PXOR X4, X3
PXOR X6, X3
PADDL X3, X5
MOVOA X3, X4
MOVOA X5, X6
PSLLL $0x09, X5
PSRLL $0x17, X6
PXOR X5, X2
PSHUFL $0x93, X3, X3
PXOR X6, X2
PADDL X2, X4
MOVOA X2, X5
MOVOA X4, X6
PSLLL $0x0d, X4
PSRLL $0x13, X6
PXOR X4, X1
PSHUFL $0x4e, X2, X2
PXOR X6, X1
PADDL X1, X5
MOVOA X3, X4
MOVOA X5, X6
PSLLL $0x12, X5
PSRLL $0x0e, X6
PXOR X5, X0
PSHUFL $0x39, X1, X1
PXOR X6, X0
PADDL X0, X4
MOVOA X0, X5
MOVOA X4, X6
PSLLL $0x07, X4
PSRLL $0x19, X6
PXOR X4, X1
PXOR X6, X1
PADDL X1, X5
MOVOA X1, X4
MOVOA X5, X6
PSLLL $0x09, X5
PSRLL $0x17, X6
PXOR X5, X2
PSHUFL $0x93, X1, X1
PXOR X6, X2
PADDL X2, X4
MOVOA X2, X5
MOVOA X4, X6
PSLLL $0x0d, X4
PSRLL $0x13, X6
PXOR X4, X3
PSHUFL $0x4e, X2, X2
PXOR X6, X3
SUBQ $0x04, CX
PADDL X3, X5
MOVOA X1, X4
MOVOA X5, X6
PSLLL $0x12, X5
PXOR X7, X7
PSRLL $0x0e, X6
PXOR X5, X0
PSHUFL $0x39, X3, X3
PXOR X6, X0
JA MAINLOOP2
PADDL 48(R12), X0
PADDL (R12), X1
PADDL 16(R12), X2
PADDL 32(R12), X3
MOVD X0, CX
MOVD X1, R8
MOVD X2, R9
MOVD X3, AX
PSHUFL $0x39, X0, X0
PSHUFL $0x39, X1, X1
PSHUFL $0x39, X2, X2
PSHUFL $0x39, X3, X3
XORL (SI), CX
XORL 48(SI), R8
XORL 32(SI), R9
XORL 16(SI), AX
MOVL CX, (DI)
MOVL R8, 48(DI)
MOVL R9, 32(DI)
MOVL AX, 16(DI)
MOVD X0, CX
MOVD X1, R8
MOVD X2, R9
MOVD X3, AX
PSHUFL $0x39, X0, X0
PSHUFL $0x39, X1, X1
PSHUFL $0x39, X2, X2
PSHUFL $0x39, X3, X3
XORL 20(SI), CX
XORL 4(SI), R8
XORL 52(SI), R9
XORL 36(SI), AX
MOVL CX, 20(DI)
MOVL R8, 4(DI)
MOVL R9, 52(DI)
MOVL AX, 36(DI)
MOVD X0, CX
MOVD X1, R8
MOVD X2, R9
MOVD X3, AX
PSHUFL $0x39, X0, X0
PSHUFL $0x39, X1, X1
PSHUFL $0x39, X2, X2
PSHUFL $0x39, X3, X3
XORL 40(SI), CX
XORL 24(SI), R8
XORL 8(SI), R9
XORL 56(SI), AX
MOVL CX, 40(DI)
MOVL R8, 24(DI)
MOVL R9, 8(DI)
MOVL AX, 56(DI)
MOVD X0, CX
MOVD X1, R8
MOVD X2, R9
MOVD X3, AX
XORL 60(SI), CX
XORL 44(SI), R8
XORL 28(SI), R9
XORL 12(SI), AX
MOVL CX, 60(DI)
MOVL R8, 44(DI)
MOVL R9, 28(DI)
MOVL AX, 12(DI)
MOVQ 352(R12), R9
MOVL 16(R12), CX
MOVL 36(R12), R8
ADDQ $0x01, CX
SHLQ $0x20, R8
ADDQ R8, CX
MOVQ CX, R8
SHRQ $0x20, R8
MOVL CX, 16(R12)
MOVL R8, 36(R12)
CMPQ R9, $0x40
JA BYTESATLEAST65
JAE BYTESATLEAST64
MOVQ DI, SI
MOVQ DX, DI
MOVQ R9, CX
REP; MOVSB
BYTESATLEAST64:
DONE:
RET
BYTESATLEAST65:
SUBQ $0x40, R9
ADDQ $0x40, DI
ADDQ $0x40, SI
JMP BYTESBETWEEN1AND255


@@ -1,14 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 || purego || !gc
package salsa
// XORKeyStream crypts bytes from in to out using the given key and counters.
// In and out must overlap entirely or not at all. Counter
// contains the raw salsa20 counter bytes (both nonce and block counter).
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
genericXORKeyStream(out, in, counter, key)
}


@@ -1,233 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package salsa
import "math/bits"
const rounds = 20
// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
// and 16-byte constant c, and puts the result into 64-byte array out.
func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
for i := 0; i < rounds; i += 2 {
u := x0 + x12
x4 ^= bits.RotateLeft32(u, 7)
u = x4 + x0
x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x4
x12 ^= bits.RotateLeft32(u, 13)
u = x12 + x8
x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x1
x9 ^= bits.RotateLeft32(u, 7)
u = x9 + x5
x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x9
x1 ^= bits.RotateLeft32(u, 13)
u = x1 + x13
x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x6
x14 ^= bits.RotateLeft32(u, 7)
u = x14 + x10
x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x14
x6 ^= bits.RotateLeft32(u, 13)
u = x6 + x2
x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x11
x3 ^= bits.RotateLeft32(u, 7)
u = x3 + x15
x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x3
x11 ^= bits.RotateLeft32(u, 13)
u = x11 + x7
x15 ^= bits.RotateLeft32(u, 18)
u = x0 + x3
x1 ^= bits.RotateLeft32(u, 7)
u = x1 + x0
x2 ^= bits.RotateLeft32(u, 9)
u = x2 + x1
x3 ^= bits.RotateLeft32(u, 13)
u = x3 + x2
x0 ^= bits.RotateLeft32(u, 18)
u = x5 + x4
x6 ^= bits.RotateLeft32(u, 7)
u = x6 + x5
x7 ^= bits.RotateLeft32(u, 9)
u = x7 + x6
x4 ^= bits.RotateLeft32(u, 13)
u = x4 + x7
x5 ^= bits.RotateLeft32(u, 18)
u = x10 + x9
x11 ^= bits.RotateLeft32(u, 7)
u = x11 + x10
x8 ^= bits.RotateLeft32(u, 9)
u = x8 + x11
x9 ^= bits.RotateLeft32(u, 13)
u = x9 + x8
x10 ^= bits.RotateLeft32(u, 18)
u = x15 + x14
x12 ^= bits.RotateLeft32(u, 7)
u = x12 + x15
x13 ^= bits.RotateLeft32(u, 9)
u = x13 + x12
x14 ^= bits.RotateLeft32(u, 13)
u = x14 + x13
x15 ^= bits.RotateLeft32(u, 18)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += j4
x5 += j5
x6 += j6
x7 += j7
x8 += j8
x9 += j9
x10 += j10
x11 += j11
x12 += j12
x13 += j13
x14 += j14
x15 += j15
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x1)
out[5] = byte(x1 >> 8)
out[6] = byte(x1 >> 16)
out[7] = byte(x1 >> 24)
out[8] = byte(x2)
out[9] = byte(x2 >> 8)
out[10] = byte(x2 >> 16)
out[11] = byte(x2 >> 24)
out[12] = byte(x3)
out[13] = byte(x3 >> 8)
out[14] = byte(x3 >> 16)
out[15] = byte(x3 >> 24)
out[16] = byte(x4)
out[17] = byte(x4 >> 8)
out[18] = byte(x4 >> 16)
out[19] = byte(x4 >> 24)
out[20] = byte(x5)
out[21] = byte(x5 >> 8)
out[22] = byte(x5 >> 16)
out[23] = byte(x5 >> 24)
out[24] = byte(x6)
out[25] = byte(x6 >> 8)
out[26] = byte(x6 >> 16)
out[27] = byte(x6 >> 24)
out[28] = byte(x7)
out[29] = byte(x7 >> 8)
out[30] = byte(x7 >> 16)
out[31] = byte(x7 >> 24)
out[32] = byte(x8)
out[33] = byte(x8 >> 8)
out[34] = byte(x8 >> 16)
out[35] = byte(x8 >> 24)
out[36] = byte(x9)
out[37] = byte(x9 >> 8)
out[38] = byte(x9 >> 16)
out[39] = byte(x9 >> 24)
out[40] = byte(x10)
out[41] = byte(x10 >> 8)
out[42] = byte(x10 >> 16)
out[43] = byte(x10 >> 24)
out[44] = byte(x11)
out[45] = byte(x11 >> 8)
out[46] = byte(x11 >> 16)
out[47] = byte(x11 >> 24)
out[48] = byte(x12)
out[49] = byte(x12 >> 8)
out[50] = byte(x12 >> 16)
out[51] = byte(x12 >> 24)
out[52] = byte(x13)
out[53] = byte(x13 >> 8)
out[54] = byte(x13 >> 16)
out[55] = byte(x13 >> 24)
out[56] = byte(x14)
out[57] = byte(x14 >> 8)
out[58] = byte(x14 >> 16)
out[59] = byte(x14 >> 24)
out[60] = byte(x15)
out[61] = byte(x15 >> 8)
out[62] = byte(x15 >> 16)
out[63] = byte(x15 >> 24)
}
// genericXORKeyStream is the generic implementation of XORKeyStream to be used
// when no assembly implementation is available.
func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
var block [64]byte
var counterCopy [16]byte
copy(counterCopy[:], counter[:])
for len(in) >= 64 {
core(&block, &counterCopy, key, &Sigma)
for i, x := range block {
out[i] = in[i] ^ x
}
u := uint32(1)
for i := 8; i < 16; i++ {
u += uint32(counterCopy[i])
counterCopy[i] = byte(u)
u >>= 8
}
in = in[64:]
out = out[64:]
}
if len(in) > 0 {
core(&block, &counterCopy, key, &Sigma)
for i, v := range in {
out[i] = v ^ block[i]
}
}
}
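
All XORKeyStream variants in this diff share one contract: counter holds the raw Salsa20 input, i.e. the 8-byte nonce followed by the little-endian block counter. A usage sketch (illustrative):

package example

import "golang.org/x/crypto/salsa20/salsa"

// xorStream XORs src with the Salsa20 keystream for (key, nonce), starting at
// block 0. Applying it twice with the same key and nonce restores src.
func xorStream(key *[32]byte, nonce [8]byte, src []byte) []byte {
	var counter [16]byte // nonce in bytes 0..7, block counter (initially 0) in bytes 8..15
	copy(counter[:8], nonce[:])
	dst := make([]byte, len(src))
	salsa.XORKeyStream(dst, src, &counter, key)
	return dst
}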


@@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
return conf
}
// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{


@@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
fillNetHTTPConfig(conf, srv.HTTP2)
}
// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
fillNetHTTPConfig(conf, tr.HTTP2)
}


@@ -375,6 +375,7 @@ type ClientConn struct {
doNotReuse bool // whether conn is marked to not be reused for any future requests
closing bool
closed bool
closedOnIdle bool // true if conn was closed for idleness
seenSettings bool // true if we've seen a settings frame, false otherwise
seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
@@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
// If this connection has never been used for a request and is closed,
// then let it take a request (which will fail).
// If the conn was closed for idleness, we're racing the idle timer;
// don't try to use the conn. (Issue #70515.)
//
// This avoids a situation where an error early in a connection's lifetime
// goes unreported.
if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle {
st.canTakeNewRequest = true
}
@@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() {
return
}
cc.closed = true
cc.closedOnIdle = true
nextID := cc.nextStreamID
// TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock()
@@ -2434,9 +2438,12 @@ func (rl *clientConnReadLoop) cleanup() {
// This avoids a situation where new connections are constantly created,
// added to the pool, fail, and are removed from the pool, without any error
// being surfaced to the user.
const unusedWaitTime = 5 * time.Second
unusedWaitTime := 5 * time.Second
if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
unusedWaitTime = cc.idleTimeout
}
idleTime := cc.t.now().Sub(cc.lastActive)
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})


@@ -36,6 +36,7 @@ package inspector
import (
"go/ast"
_ "unsafe"
)
// An Inspector provides methods for inspecting
@@ -44,6 +45,9 @@ type Inspector struct {
events []event
}
//go:linkname events
func events(in *Inspector) []event { return in.events }
// New returns an Inspector for the specified syntax trees.
func New(files []*ast.File) *Inspector {
return &Inspector{traverse(files)}
@@ -52,9 +56,10 @@ func New(files []*ast.File) *Inspector {
// An event represents a push or a pop
// of an ast.Node during a traversal.
type event struct {
node ast.Node
typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
index int // index of corresponding push or pop event
node ast.Node
typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
index int32 // index of corresponding push or pop event
parent int32 // index of parent's push node (defined for push nodes only)
}
// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
@@ -83,7 +88,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
// })
mask := maskOf(types)
for i := 0; i < len(in.events); {
for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -113,7 +118,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
// matches an element of the types slice.
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
mask := maskOf(types)
for i := 0; i < len(in.events); {
for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -147,7 +152,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
mask := maskOf(types)
var stack []ast.Node
for i := 0; i < len(in.events); {
for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -196,18 +201,24 @@ func traverse(files []*ast.File) []event {
events := make([]event, 0, capacity)
var stack []event
stack = append(stack, event{}) // include an extra event so file nodes have a parent
stack = append(stack, event{index: -1}) // include an extra event so file nodes have a parent
for _, f := range files {
ast.Inspect(f, func(n ast.Node) bool {
if n != nil {
// push
ev := event{
node: n,
typ: 0, // temporarily used to accumulate type bits of subtree
index: len(events), // push event temporarily holds own index
node: n,
typ: 0, // temporarily used to accumulate type bits of subtree
index: int32(len(events)), // push event temporarily holds own index
parent: stack[len(stack)-1].index,
}
stack = append(stack, ev)
events = append(events, ev)
// 2B nodes ought to be enough for anyone!
if int32(len(events)) < 0 {
panic("event index exceeded int32")
}
} else {
// pop
top := len(stack) - 1
@@ -216,9 +227,9 @@ func traverse(files []*ast.File) []event {
push := ev.index
parent := top - 1
events[push].typ = typ // set type of push
stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs.
events[push].index = len(events) // make push refer to pop
events[push].typ = typ // set type of push
stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs.
events[push].index = int32(len(events)) // make push refer to pop
stack = stack[:top]
events = append(events, ev)
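
The changes above (int32 indices and a parent index per push event) are internal; the exported traversal API is unchanged. For orientation, a typical use of the package (illustrative, not from this diff):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", "package p; func f() { println(1) }", 0)
	if err != nil {
		panic(err)
	}

	in := inspector.New([]*ast.File{f})
	// Visit only call expressions, in preorder.
	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
		call := n.(*ast.CallExpr)
		fmt.Println(fset.Position(call.Lparen), "call")
	})
}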


@@ -26,7 +26,7 @@ func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] {
return func(yield func(ast.Node) bool) {
mask := maskOf(types)
for i := 0; i < len(in.events); {
for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -63,7 +63,7 @@ func All[N interface {
mask := typeOf((N)(nil))
return func(yield func(N) bool) {
for i := 0; i < len(in.events); {
for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
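
The iterator-based API gets the same int32 treatment; usage is unchanged. Continuing the inspector sketch from the previous file, and assuming a toolchain with range-over-func support (Go 1.23+):

	// Typed preorder iteration over the same Inspector.
	for call := range inspector.All[*ast.CallExpr](in) {
		fmt.Println(fset.Position(call.Lparen), "call via iterator")
	}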


@@ -12,6 +12,8 @@ package inspector
import (
"go/ast"
"math"
_ "unsafe"
)
const (
@@ -215,6 +217,7 @@ func typeOf(n ast.Node) uint64 {
return 0
}
//go:linkname maskOf
func maskOf(nodes []ast.Node) uint64 {
if nodes == nil {
return math.MaxUint64 // match all node types


@@ -322,6 +322,7 @@ type jsonPackage struct {
ImportPath string
Dir string
Name string
Target string
Export string
GoFiles []string
CompiledGoFiles []string
@@ -506,6 +507,7 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
Name: p.Name,
ID: p.ImportPath,
Dir: p.Dir,
Target: p.Target,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
@@ -811,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string {
if cfg.Mode&NeedEmbedPatterns != 0 {
addFields("EmbedPatterns")
}
if cfg.Mode&NeedTarget != 0 {
addFields("Target")
}
return "-json=" + strings.Join(fields, ",")
}


@@ -27,6 +27,7 @@ var modes = [...]struct {
{NeedModule, "NeedModule"},
{NeedEmbedFiles, "NeedEmbedFiles"},
{NeedEmbedPatterns, "NeedEmbedPatterns"},
{NeedTarget, "NeedTarget"},
}
func (mode LoadMode) String() string {


@@ -118,6 +118,9 @@ const (
// NeedEmbedPatterns adds EmbedPatterns.
NeedEmbedPatterns
// NeedTarget adds Target.
NeedTarget
// Be sure to update loadmode_string.go when adding new items!
)
@@ -479,6 +482,10 @@ type Package struct {
// information for the package as provided by the build system.
ExportFile string
// Target is the absolute install path of the .a file, for libraries,
// and of the executable file, for binaries.
Target string
// Imports maps import paths appearing in the package's Go source files
// to corresponding loaded Packages.
Imports map[string]*Package
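
A sketch of requesting the new field, assuming the NeedTarget bit and Target field added in this diff; Target mirrors the go list Target value (the install path), which may be empty for packages that are not installed:

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedTarget}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		panic(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, p.Target)
	}
}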


@@ -2,30 +2,35 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typeutil defines various utilities for types, such as Map,
// a mapping from types.Type to any values.
package typeutil // import "golang.org/x/tools/go/types/typeutil"
// Package typeutil defines various utilities for types, such as [Map],
// a hash table that maps [types.Type] to any value.
package typeutil
import (
"bytes"
"fmt"
"go/types"
"reflect"
"hash/maphash"
"unsafe"
"golang.org/x/tools/internal/typeparams"
)
// Map is a hash-table-based mapping from types (types.Type) to
// arbitrary any values. The concrete types that implement
// arbitrary values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
// Not thread-safe.
// Read-only map operations ([Map.At], [Map.Len], and so on) may
// safely be called concurrently.
//
// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
// and 69559, if the latter proposals for a generic hash-map type and
// a types.Hash function are accepted.
type Map struct {
hasher Hasher // shared by many Maps
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
length int // number of map entries
}
@@ -36,35 +41,17 @@ type entry struct {
value any
}
// SetHasher sets the hasher used by Map.
// SetHasher has no effect.
//
// All Hashers are functionally equivalent but contain internal state
// used to cache the results of hashing previously seen types.
//
// A single Hasher created by MakeHasher() may be shared among many
// Maps. This is recommended if the instances have many keys in
// common, as it will amortize the cost of hash computation.
//
// A Hasher may grow without bound as new types are seen. Even when a
// type is deleted from the map, the Hasher never shrinks, since other
// types in the map may reference the deleted type indirectly.
//
// Hashers are not thread-safe, and read-only operations such as
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
// read-lock) is require around all Map operations if a shared
// hasher is accessed from multiple threads.
//
// If SetHasher is not called, the Map will create a private hasher at
// the first call to Insert.
func (m *Map) SetHasher(hasher Hasher) {
m.hasher = hasher
}
// It is a relic of an optimization that is no longer profitable. Do
// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
func (m *Map) SetHasher(Hasher) {}
// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
hash := m.hasher.Hash(key)
hash := hash(key)
bucket := m.table[hash]
for i, e := range bucket {
if e.key != nil && types.Identical(key, e.key) {
@@ -83,7 +70,7 @@ func (m *Map) Delete(key types.Type) bool {
// The result is nil if the entry is not present.
func (m *Map) At(key types.Type) any {
if m != nil && m.table != nil {
for _, e := range m.table[m.hasher.Hash(key)] {
for _, e := range m.table[hash(key)] {
if e.key != nil && types.Identical(key, e.key) {
return e.value
}
@@ -96,7 +83,7 @@ func (m *Map) At(key types.Type) any {
// and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value any) (prev any) {
if m.table != nil {
hash := m.hasher.Hash(key)
hash := hash(key)
bucket := m.table[hash]
var hole *entry
for i, e := range bucket {
@@ -115,10 +102,7 @@ func (m *Map) Set(key types.Type, value any) (prev any) {
m.table[hash] = append(bucket, entry{key, value})
}
} else {
if m.hasher.memo == nil {
m.hasher = MakeHasher()
}
hash := m.hasher.Hash(key)
hash := hash(key)
m.table = map[uint32][]entry{hash: {entry{key, value}}}
}
@@ -195,53 +179,35 @@ func (m *Map) KeysString() string {
return m.toString(false)
}
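
The rewrite keeps the Map API intact (the zero value is still ready to use); only the hashing strategy behind it changes. A small usage sketch (illustrative, not from this diff):

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // zero value is an empty map, ready for use
	m.Set(types.Typ[types.String], "string key")
	m.Set(types.NewSlice(types.Typ[types.Int]), "[]int key")

	// Lookup uses types.Identical rather than ==, so a distinct but
	// equivalent *types.Slice still finds the entry.
	fmt.Println(m.At(types.NewSlice(types.Typ[types.Int]))) // []int key
	fmt.Println(m.Len())                                    // 2
}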
////////////////////////////////////////////////////////////////////////
// Hasher
// -- Hasher --
// A Hasher maps each type to its hash value.
// For efficiency, a hasher uses memoization; thus its memory
// footprint grows monotonically over time.
// Hashers are not thread-safe.
// Hashers have reference semantics.
// Call MakeHasher to create a Hasher.
type Hasher struct {
memo map[types.Type]uint32
// ptrMap records pointer identity.
ptrMap map[any]uint32
// sigTParams holds type parameters from the signature being hashed.
// Signatures are considered identical modulo renaming of type parameters, so
// within the scope of a signature type the identity of the signature's type
// parameters is just their index.
//
// Since the language does not currently support referring to uninstantiated
// generic types or functions, and instantiated signatures do not have type
// parameter lists, we should never encounter a second non-empty type
// parameter list when hashing a generic signature.
sigTParams *types.TypeParamList
// hash returns the hash of type t.
// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
func hash(t types.Type) uint32 {
return theHasher.Hash(t)
}
// MakeHasher returns a new Hasher instance.
func MakeHasher() Hasher {
return Hasher{
memo: make(map[types.Type]uint32),
ptrMap: make(map[any]uint32),
sigTParams: nil,
}
}
// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
// Hashers are stateless, and all are equivalent.
type Hasher struct{}
var theHasher Hasher
// MakeHasher returns Hasher{}.
// Hashers are stateless; all are equivalent.
func MakeHasher() Hasher { return theHasher }
// Hash computes a hash value for the given type t such that
// Identical(t, t') => Hash(t) == Hash(t').
func (h Hasher) Hash(t types.Type) uint32 {
hash, ok := h.memo[t]
if !ok {
hash = h.hashFor(t)
h.memo[t] = hash
}
return hash
return hasher{inGenericSig: false}.hash(t)
}
// hasher holds the state of a single Hash traversal: whether we are
// inside the signature of a generic function; this is used to
// optimize [hasher.hashTypeParam].
type hasher struct{ inGenericSig bool }
// hashString computes the Fowler–Noll–Vo hash of s.
func hashString(s string) uint32 {
var h uint32
@@ -252,21 +218,21 @@ func hashString(s string) uint32 {
return h
}
// hashFor computes the hash of t.
func (h Hasher) hashFor(t types.Type) uint32 {
// hash computes the hash of t.
func (h hasher) hash(t types.Type) uint32 {
// See Identical for rationale.
switch t := t.(type) {
case *types.Basic:
return uint32(t.Kind())
case *types.Alias:
return h.Hash(types.Unalias(t))
return h.hash(types.Unalias(t))
case *types.Array:
return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())
case *types.Slice:
return 9049 + 2*h.Hash(t.Elem())
return 9049 + 2*h.hash(t.Elem())
case *types.Struct:
var hash uint32 = 9059
@ -277,12 +243,12 @@ func (h Hasher) hashFor(t types.Type) uint32 {
}
hash += hashString(t.Tag(i))
hash += hashString(f.Name()) // (ignore f.Pkg)
hash += h.Hash(f.Type())
hash += h.hash(f.Type())
}
return hash
case *types.Pointer:
return 9067 + 2*h.Hash(t.Elem())
return 9067 + 2*h.hash(t.Elem())
case *types.Signature:
var hash uint32 = 9091
@ -290,33 +256,11 @@ func (h Hasher) hashFor(t types.Type) uint32 {
hash *= 8863
}
// Use a separate hasher for types inside of the signature, where type
// parameter identity is modified to be (index, constraint). We must use a
// new memo for this hasher as type identity may be affected by this
// masking. For example, in func[T any](*T), the identity of *T depends on
// whether we are mapping the argument in isolation, or recursively as part
// of hashing the signature.
//
// We should never encounter a generic signature while hashing another
// generic signature, but defensively set sigTParams only if h.mask is
// unset.
tparams := t.TypeParams()
if h.sigTParams == nil && tparams.Len() != 0 {
h = Hasher{
// There may be something more efficient than discarding the existing
// memo, but it would require detecting whether types are 'tainted' by
// references to type parameters.
memo: make(map[types.Type]uint32),
// Re-using ptrMap ensures that pointer identity is preserved in this
// hasher.
ptrMap: h.ptrMap,
sigTParams: tparams,
}
}
for i := 0; i < tparams.Len(); i++ {
for i := range tparams.Len() {
h.inGenericSig = true
tparam := tparams.At(i)
hash += 7 * h.Hash(tparam.Constraint())
hash += 7 * h.hash(tparam.Constraint())
}
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
@ -350,17 +294,17 @@ func (h Hasher) hashFor(t types.Type) uint32 {
return hash
case *types.Map:
return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())
case *types.Chan:
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())
case *types.Named:
hash := h.hashPtr(t.Obj())
hash := h.hashTypeName(t.Obj())
targs := t.TypeArgs()
for i := 0; i < targs.Len(); i++ {
targ := targs.At(i)
hash += 2 * h.Hash(targ)
hash += 2 * h.hash(targ)
}
return hash
@ -374,17 +318,17 @@ func (h Hasher) hashFor(t types.Type) uint32 {
panic(fmt.Sprintf("%T: %v", t, t))
}
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
// See go/types.identicalTypes for rationale.
n := tuple.Len()
hash := 9137 + 2*uint32(n)
for i := 0; i < n; i++ {
hash += 3 * h.Hash(tuple.At(i).Type())
for i := range n {
hash += 3 * h.hash(tuple.At(i).Type())
}
return hash
}
func (h Hasher) hashUnion(t *types.Union) uint32 {
func (h hasher) hashUnion(t *types.Union) uint32 {
// Hash type restrictions.
terms, err := typeparams.UnionTermSet(t)
// if err != nil t has invalid type restrictions. Fall back on a non-zero
@ -395,11 +339,11 @@ func (h Hasher) hashUnion(t *types.Union) uint32 {
return h.hashTermSet(terms)
}
func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
func (h hasher) hashTermSet(terms []*types.Term) uint32 {
hash := 9157 + 2*uint32(len(terms))
for _, term := range terms {
// term order is not significant.
termHash := h.Hash(term.Type())
termHash := h.hash(term.Type())
if term.Tilde() {
termHash *= 9161
}
@ -408,36 +352,42 @@ func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
return hash
}
// hashTypeParam returns a hash of the type parameter t, with a hash value
// depending on whether t is contained in h.sigTParams.
//
// If h.sigTParams is set and contains t, then we are in the process of hashing
// a signature, and the hash value of t must depend only on t's index and
// constraint: signatures are considered identical modulo type parameter
// renaming. To avoid infinite recursion, we only hash the type parameter
// index, and rely on types.Identical to handle signatures where constraints
// are not identical.
//
// Otherwise the hash of t depends only on t's pointer identity.
func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
if h.sigTParams != nil {
i := t.Index()
if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
return 9173 + 3*uint32(i)
}
// hashTypeParam returns the hash of a type parameter.
func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
// Within the signature of a generic function, TypeParams are
// identical if they have the same index and constraint, so we
// hash them based on index.
//
// When we are outside a generic function, free TypeParams are
// identical iff they are the same object, so we can use a
// more discriminating hash consistent with object identity.
// This optimization saves [Map] about 4% when hashing all the
// types.Info.Types in the forward closure of net/http.
if !h.inGenericSig {
// Optimization: outside a generic function signature,
// use a more discriminating hash consistent with object identity.
return h.hashTypeName(t.Obj())
}
return h.hashPtr(t.Obj())
return 9173 + 3*uint32(t.Index())
}
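// A standalone sketch of the renaming rule above, assuming the exported
// go/types and golang.org/x/tools/go/types/typeutil APIs; the helper and
// identifiers below are illustrative.
package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

// genericID builds the signature func[P any](P) P with the given
// type-parameter name.
func genericID(name string) *types.Signature {
	tn := types.NewTypeName(token.NoPos, nil, name, nil)
	tp := types.NewTypeParam(tn, types.Universe.Lookup("any").Type())
	params := types.NewTuple(types.NewVar(token.NoPos, nil, "x", tp))
	results := types.NewTuple(types.NewVar(token.NoPos, nil, "", tp))
	return types.NewSignatureType(nil, nil, []*types.TypeParam{tp}, params, results, false)
}

func main() {
	f, g := genericID("T"), genericID("U") // differ only in the parameter's name
	h := typeutil.MakeHasher()
	fmt.Println(types.Identical(f, g))  // true: identical modulo renaming
	fmt.Println(h.Hash(f) == h.Hash(g)) // true: so the hashes must agree
}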
// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
// pointer values are not dependent on the GC.
func (h Hasher) hashPtr(ptr any) uint32 {
if hash, ok := h.ptrMap[ptr]; ok {
return hash
}
hash := uint32(reflect.ValueOf(ptr).Pointer())
h.ptrMap[ptr] = hash
return hash
var theSeed = maphash.MakeSeed()
// hashTypeName hashes the pointer of tname.
func (hasher) hashTypeName(tname *types.TypeName) uint32 {
// Since types.Identical uses == to compare TypeNames,
// the Hash function uses maphash.Comparable.
// TODO(adonovan): or will, when it becomes available in go1.24.
// In the meantime we use the pointer's numeric value.
//
// hash := maphash.Comparable(theSeed, tname)
//
// (Another approach would be to hash the name and package
// path, and whether or not it is a package-level typename. It
// is rare for a package to define multiple local types with
// the same name.)
hash := uintptr(unsafe.Pointer(tname))
return uint32(hash ^ (hash >> 32))
}
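// A sketch of the go1.24 replacement described in the TODO above, using
// hash/maphash.Comparable; the seed and function names below are
// illustrative, not part of this package:
//
//	var comparableSeed = maphash.MakeSeed()
//
//	func hashTypeNameComparable(tname *types.TypeName) uint32 {
//		h := maphash.Comparable(comparableSeed, tname) // hashes by == identity
//		return uint32(h ^ (h >> 32))
//	}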
// shallowHash computes a hash of t without looking at any of its
@ -454,7 +404,7 @@ func (h Hasher) hashPtr(ptr any) uint32 {
// include m itself; there is no mention of the named type X that
// might help us break the cycle.
// (See comment in go/types.identical, case *Interface, for more.)
func (h Hasher) shallowHash(t types.Type) uint32 {
func (h hasher) shallowHash(t types.Type) uint32 {
// t is the type of an interface method (Signature),
// its params or results (Tuples), or their immediate
// elements (mostly Slice, Pointer, Basic, Named),
@ -475,7 +425,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 {
case *types.Tuple:
n := t.Len()
hash := 9137 + 2*uint32(n)
for i := 0; i < n; i++ {
for i := range n {
hash += 53471161 * h.shallowHash(t.At(i).Type())
}
return hash
@ -508,10 +458,10 @@ func (h Hasher) shallowHash(t types.Type) uint32 {
return 9127
case *types.Named:
return h.hashPtr(t.Obj())
return h.hashTypeName(t.Obj())
case *types.TypeParam:
return h.hashPtr(t.Obj())
return h.hashTypeParam(t)
}
panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
}

View file

@ -2,52 +2,183 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
// This file implements FindExportData.
// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
// This file additionally implements FindExportData for gcexportdata.NewReader.
package gcimporter
import (
"bufio"
"bytes"
"errors"
"fmt"
"go/build"
"io"
"strconv"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)
func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
// See $GOROOT/include/ar.h.
hdr := make([]byte, 16+12+6+6+8+10+2)
_, err = io.ReadFull(r, hdr)
if err != nil {
return
}
// leave for debugging
if false {
fmt.Printf("header: %s", hdr)
}
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
length, err := strconv.Atoi(s)
size = int64(length)
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
err = fmt.Errorf("invalid archive header")
return
}
name = strings.TrimSpace(string(hdr[:16]))
return
}
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying cmd/compile created archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function.
// The size result is the length of the export data in bytes.
// This returns the length of the export data in bytes.
//
// This function is needed by [gcexportdata.Read], which must
// accept inputs produced by the last two releases of cmd/compile,
// plus tip.
func FindExportData(r *bufio.Reader) (size int64, err error) {
arsize, err := FindPackageDefinition(r)
if err != nil {
return
}
size = int64(arsize)
objapi, headers, err := ReadObjectHeaders(r)
if err != nil {
return
}
size -= int64(len(objapi))
for _, h := range headers {
size -= int64(len(h))
}
// Check for the binary export data section header "$$B\n".
// TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
line, err := r.ReadSlice('\n')
if err != nil {
return
}
hdr := string(line)
if hdr != "$$B\n" {
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
size -= int64(len(hdr))
// For files with a binary export data header "$$B\n",
// these are always terminated by an end-of-section marker "\n$$\n".
// So the last bytes must always be this constant.
//
// The end-of-section marker is not a part of the export data itself.
// Do not include these in size.
//
// It would be nice to have a sanity check that the final bytes after
// the export data are indeed the end-of-section marker. The split
// between gcexportdata.NewReader and gcexportdata.Read makes checking this
// ugly, so gcimporter gives up enforcing it. The compiler and go/types
// importer do enforce this, which seems good enough.
const endofsection = "\n$$\n"
size -= int64(len(endofsection))
if size < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
return
}
return
}
// ReadUnified reads the contents of the unified export data from a reader r
// that contains the contents of a GC-created archive file.
//
// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
//
// Supported GC-created archive files have 4 layers of nesting:
// - An archive file containing a package definition file.
// - The package definition file contains headers followed by a data section.
// Headers are lines (≤ 4kb) that do not start with "$$".
// - The data section starts with "$$B\n" followed by export data followed
// by an end of section marker "\n$$\n". (The section start "$$\n" is no
// longer supported.)
// - The export data starts with a format byte ('u') followed by the <data> in
// the given format. (See ReadExportDataHeader for older formats.)
//
// Putting this together, the bytes in a GC-created archive file are expected
// to look like the following.
// See cmd/internal/archive for more details on ar file headers.
//
// | <!arch>\n | ar file signature
// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
// | go object <...>\n | objabi header
// | <optional headers>\n | other headers such as build id
// | $$B\n | binary format marker
// | u<data>\n | unified export <data>
// | $$\n | end-of-section marker
// | [optional padding] | padding byte (0x0A) if size is odd
// | [ar file header] | other ar files
// | [ar file data] |
func ReadUnified(r *bufio.Reader) (data []byte, err error) {
// We have historically guaranteed that headers fit within the default buffer size (4096).
// This ensures we can use ReadSlice throughout.
const minBufferSize = 4096
r = bufio.NewReaderSize(r, minBufferSize)
size, err := FindPackageDefinition(r)
if err != nil {
return
}
n := size
objapi, headers, err := ReadObjectHeaders(r)
if err != nil {
return
}
n -= len(objapi)
for _, h := range headers {
n -= len(h)
}
hdrlen, err := ReadExportDataHeader(r)
if err != nil {
return
}
n -= hdrlen
// size also includes the end of section marker. Remove that many bytes from the end.
const marker = "\n$$\n"
n -= len(marker)
if n < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
return
}
// Read n bytes from buf.
data = make([]byte, n)
_, err = io.ReadFull(r, data)
if err != nil {
return
}
// Check for marker at the end.
var suffix [len(marker)]byte
_, err = io.ReadFull(r, suffix[:])
if err != nil {
return
}
if s := string(suffix[:]); s != marker {
err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
return
}
return
}
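// A standalone sketch of a typical consumer of the layout above: the exported
// golang.org/x/tools/go/gcexportdata package locates and decodes the export
// data in a compiler-created archive. The file path below is illustrative.
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	f, err := os.Open("fmt.a") // hypothetical archive produced by the gc compiler
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader skips the ar and object headers and stops at the export data.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), len(pkg.Scope().Names())) // package path and number of exported names
}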
// FindPackageDefinition positions the reader r at the beginning of a package
// definition file ("__.PKGDEF") within a GC-created archive by reading
// from it, and returns the size of the package definition file in the archive.
//
// The reader must be positioned at the start of the archive file before calling
// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
//
// See cmd/internal/archive for details on the archive format.
func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
// Uses ReadSlice to limit risk of malformed inputs.
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
@ -61,56 +192,230 @@ func FindExportData(r *bufio.Reader) (size int64, err error) {
return
}
// Archive file. Scan to __.PKGDEF.
var name string
if name, size, err = readGopackHeader(r); err != nil {
return
}
arsize := size
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
err = fmt.Errorf("not a Go object file")
return
}
// Skip over object headers to get to the export data section header "$$B\n".
// Object headers are lines that do not start with '$'.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
}
// Check for the binary export data section header "$$B\n".
hdr := string(line)
if hdr != "$$B\n" {
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
// TODO(taking): Remove end-of-section marker "\n$$\n" from size.
if size < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
// package export block should be first
size = readArchiveHeader(r, "__.PKGDEF")
if size <= 0 {
err = fmt.Errorf("not a package file")
return
}
return
}
// ReadObjectHeaders reads object headers from the reader. Object headers are
// lines that do not start with an end-of-section marker "$$". The first header
// is the objabi header. On success, the reader will be positioned at the beginning
// of the end-of-section marker.
//
// It returns an error if any header does not fit in r.Size() bytes.
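// For reference (illustrative; contents vary by toolchain and platform),
// the headers typically look like:
//
//	go object <GOOS> <GOARCH> <go version> X:<experiments>
//	build id "<id>"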
func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
// line is a temporary buffer for headers.
// Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
var line []byte
// objapi header should be the first line
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
objapi = string(line)
// objapi header begins with "go object ".
if !strings.HasPrefix(objapi, "go object ") {
err = fmt.Errorf("not a go object file: %s", objapi)
return
}
// process remaining object header lines
for {
// check for an end of section marker "$$"
line, err = r.Peek(2)
if err != nil {
return
}
if string(line) == "$$" {
return // stop
}
// read next header
line, err = r.ReadSlice('\n')
if err != nil {
return
}
headers = append(headers, string(line))
}
}
// ReadExportDataHeader reads the export data header and format from r.
// It returns the number of bytes read, or an error if the format is no longer
// supported or it failed to read.
//
// The only currently supported format is binary export data in the
// unified export format.
func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
// Read export data header.
line, err := r.ReadSlice('\n')
if err != nil {
return
}
hdr := string(line)
switch hdr {
case "$$\n":
err = fmt.Errorf("old textual export format no longer supported (recompile package)")
return
case "$$B\n":
var format byte
format, err = r.ReadByte()
if err != nil {
return
}
// The unified export format starts with a 'u'.
switch format {
case 'u':
default:
// Older no longer supported export formats include:
// indexed export format which started with an 'i'; and
// the older binary export format which started with a 'c',
// 'd', or 'v' (from "version").
err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
return
}
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
n = len(hdr) + 1 // + 1 is for 'u'
return
}
// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
//
// FindPkg is only used in tests within x/tools.
func FindPkg(path, srcDir string) (filename, id string, err error) {
// TODO(taking): Move internal/exportdata.FindPkg into its own file,
// and then this copy into a _test package.
if path == "" {
return "", "", errors.New("path is empty")
}
var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
var bp *build.Package
bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
if bp.Goroot && bp.Dir != "" {
filename, err = lookupGorootExport(bp.Dir)
if err == nil {
_, err = os.Stat(filename)
}
if err == nil {
return filename, bp.ImportPath, nil
}
}
goto notfound
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
}
id = bp.ImportPath
case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext
case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}
if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}
// try extensions
for _, ext := range pkgExts {
filename = noext + ext
f, statErr := os.Stat(filename)
if statErr == nil && !f.IsDir() {
return filename, id, nil
}
if err == nil {
err = statErr
}
}
notfound:
if err == nil {
return "", path, fmt.Errorf("can't find import: %q", path)
}
return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
}
var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
var exportMap sync.Map // package dir → func() (string, error)
// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
//
// lookupGorootExport is only used in tests within x/tools.
func lookupGorootExport(pkgDir string) (string, error) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
err error
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
listOnce.Do(func() {
cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
var output []byte
output, err = cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
err = errors.New(string(ee.Stderr))
}
return
}
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
return
}
exportPath = exports[0]
})
return exportPath, err
})
}
return f.(func() (string, error))()
}
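// For reference, the query above is roughly equivalent to running
// (the directory shown is illustrative):
//
//	cd $(go env GOROOT) && go list -export -f '{{.Export}}' ./src/strings
//
// which prints the build-cache location of that package's export data.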

View file

@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import (
"bufio"
"bytes"
"fmt"
"go/build"
"go/token"
"go/types"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)
const (
@ -45,127 +39,14 @@ const (
trace = false
)
var exportMap sync.Map // package dir → func() (string, bool)
// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
func lookupGorootExport(pkgDir string) (string, bool) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
listOnce.Do(func() {
cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
var output []byte
output, err := cmd.Output()
if err != nil {
return
}
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
return
}
exportPath = exports[0]
})
return exportPath, exportPath != ""
})
}
return f.(func() (string, bool))()
}
var pkgExts = [...]string{".a", ".o"}
// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
// If no file was found, an empty filename is returned.
func FindPkg(path, srcDir string) (filename, id string) {
if path == "" {
return
}
var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
var ok bool
if bp.Goroot && bp.Dir != "" {
filename, ok = lookupGorootExport(bp.Dir)
}
if !ok {
id = path // make sure we have an id to print in error message
return
}
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
id = bp.ImportPath
}
case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext
case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}
if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}
if filename != "" {
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}
// try extensions
for _, ext := range pkgExts {
filename = noext + ext
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}
filename = "" // not found
return
}
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
//
// TODO(taking): Import is only used in tests. Move to gcimporter_test.
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
// Import is only used in tests.
func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var filename, id string
var id string
if lookup != nil {
// With custom lookup specified, assume that caller has
// converted path to a canonical import path for use in the map.
@ -184,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
rc = f
} else {
filename, id = FindPkg(path, srcDir)
var filename string
filename, id, err = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %q", id)
return nil, err
}
// no need to re-import if the package was imported completely before
@ -212,54 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
defer rc.Close()
var size int64
buf := bufio.NewReader(rc)
if size, err = FindExportData(buf); err != nil {
return
}
var data []byte
data, err = io.ReadAll(buf)
data, err := ReadUnified(buf)
if err != nil {
err = fmt.Errorf("import %q: %v", path, err)
return
}
if len(data) == 0 {
return nil, fmt.Errorf("no data to load a package from for path %s", id)
}
// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// unified: emitted by cmd/compile since go1.20.
_, pkg, err = UImportData(fset, packages, data, id)
// Select appropriate importer.
switch data[0] {
case 'v', 'c', 'd':
// binary: emitted by cmd/compile till go1.10; obsolete.
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i':
// indexed: emitted by cmd/compile till go1.19;
// now used only for serializing go/types.
// See https://github.com/golang/go/issues/69491.
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
case 'u':
// unified: emitted by cmd/compile since go1.20.
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
return
}
type byPath []*types.Package
func (a byPath) Len() int { return len(a) }
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }

View file

@ -5,8 +5,6 @@
// Indexed package import.
// See iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
package gcimporter
import (
@ -1111,3 +1109,9 @@ func (r *importReader) byte() byte {
}
return x
}
type byPath []*types.Package
func (a byPath) Len() int { return len(a) }
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }

View file

@ -0,0 +1,30 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcimporter
import (
"bufio"
"io"
"strconv"
"strings"
)
// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader.
func readArchiveHeader(b *bufio.Reader, name string) int {
// architecture-independent object file output
const HeaderSize = 60
var buf [HeaderSize]byte
if _, err := io.ReadFull(b, buf[:]); err != nil {
return -1
}
aname := strings.Trim(string(buf[0:16]), " ")
if !strings.HasPrefix(aname, name) {
return -1
}
asize := strings.Trim(string(buf[48:58]), " ")
i, _ := strconv.Atoi(asize)
return i
}

View file

@ -11,7 +11,6 @@ import (
"go/token"
"go/types"
"sort"
"strings"
"golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/pkgbits"
@ -71,7 +70,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []
}
s := string(data)
s = s[:strings.LastIndex(s, "\n$$\n")]
input := pkgbits.NewPkgDecoder(path, s)
pkg = readUnifiedPackage(fset, nil, imports, input)
return
@ -266,7 +264,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
func (r *reader) doPkg() *types.Package {
path := r.String()
switch path {
case "":
// cmd/compile emits path="main" for main packages because
// that's the linker symbol prefix it used; but we need
// the package's path as it would be reported by go list,
// hence "main" below.
// See test at go/packages.TestMainPackagePathInModeTypes.
case "", "main":
path = r.p.PkgPath()
case "builtin":
return nil // universe

View file

@ -17,6 +17,7 @@ import (
"path/filepath"
"strconv"
"strings"
"testing"
"time"
)
@ -85,6 +86,28 @@ type Entry struct {
Names []string // exported names and information
}
// IndexDir is where the module index is stored.
var IndexDir string
// init sets IndexDir to the on-disk location of the module index.
func init() {
var dir string
var err error
if testing.Testing() {
dir = os.TempDir()
} else {
dir, err = os.UserCacheDir()
// shouldn't happen, but TempDir is better than
// creating ./go/imports
if err != nil {
dir = os.TempDir()
}
}
dir = filepath.Join(dir, "go", "imports")
os.MkdirAll(dir, 0777)
IndexDir = dir
}
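// A quick standalone check of where the index lands on a given machine,
// mirroring the init above (output is OS-dependent; illustrative only):
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.UserCacheDir()
	if err != nil {
		dir = os.TempDir() // same fallback as above
	}
	fmt.Println(filepath.Join(dir, "go", "imports")) // e.g. ~/.cache/go/imports on Linux
}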
// ReadIndex reads the latest version of the on-disk index
// for the cache directory cd.
// It returns (nil, nil) if there is no index, but returns
@ -95,10 +118,7 @@ func ReadIndex(cachedir string) (*Index, error) {
return nil, err
}
cd := Abspath(cachedir)
dir, err := IndexDir()
if err != nil {
return nil, err
}
dir := IndexDir
base := indexNameBase(cd)
iname := filepath.Join(dir, base)
buf, err := os.ReadFile(iname)
@ -185,12 +205,8 @@ func readIndexFrom(cd Abspath, bx io.Reader) (*Index, error) {
// write the index as a text file
func writeIndex(cachedir Abspath, ix *Index) error {
dir, err := IndexDir()
if err != nil {
return err
}
ipat := fmt.Sprintf("index-%d-*", CurrentVersion)
fd, err := os.CreateTemp(dir, ipat)
fd, err := os.CreateTemp(IndexDir, ipat)
if err != nil {
return err // can this happen?
}
@ -201,7 +217,7 @@ func writeIndex(cachedir Abspath, ix *Index) error {
content := fd.Name()
content = filepath.Base(content)
base := indexNameBase(cachedir)
nm := filepath.Join(dir, base)
nm := filepath.Join(IndexDir, base)
err = os.WriteFile(nm, []byte(content), 0666)
if err != nil {
return err
@ -241,18 +257,6 @@ func writeIndexToFile(x *Index, fd *os.File) error {
return nil
}
// tests can override this
var IndexDir = indexDir
// IndexDir computes the directory containing the index
func indexDir() (string, error) {
dir, err := os.UserCacheDir()
if err != nil {
return "", fmt.Errorf("cannot open UserCacheDir, %w", err)
}
return filepath.Join(dir, "go", "imports"), nil
}
// return the base name of the file containing the name of the current index
func indexNameBase(cachedir Abspath) string {
// crc64 is a way to convert path names into 16 hex digits.

View file

@ -16,6 +16,7 @@ type Candidate struct {
Dir string
ImportPath string
Type LexType
Deprecated bool
// information for Funcs
Results int16 // how many results
Sig []Field // arg names and types
@ -79,8 +80,9 @@ func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
Dir: string(e.Dir),
ImportPath: e.ImportPath,
Type: asLexType(flds[1][0]),
Deprecated: len(flds[1]) > 1 && flds[1][1] == 'D',
}
if flds[1] == "F" {
if px.Type == Func {
n, err := strconv.Atoi(flds[2])
if err != nil {
continue // should never happen
@ -111,6 +113,7 @@ func toFields(sig []string) []Field {
}
// benchmarks show this is measurably better than strings.Split
// split into first 4 fields separated by single space
func fastSplit(x string) []string {
ans := make([]string, 0, 4)
nxt := 0

View file

@ -19,12 +19,13 @@ import (
)
// The name of a symbol contains information about the symbol:
// <name> T for types
// <name> C for consts
// <name> V for vars
// <name> T for types, TD if the type is deprecated
// <name> C for consts, CD if the const is deprecated
// <name> V for vars, VD if the var is deprecated
// and for funcs: <name> F <num of return values> (<arg-name> <arg-type>)*
// any spaces in <arg-type> are replaced by $s so that the fields
// of the name are space separated
// of the name are space separated. F is replaced by FD if the func
// is deprecated.
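// For example (identifiers are illustrative): a function
//
//	func Split(s, sep string) []string
//
// is encoded roughly as "Split F 1 s string sep string", and a deprecated
// type Foo is encoded as "Foo TD".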
type symbol struct {
pkg string // name of the symbol's package
name string // declared name
@ -41,7 +42,7 @@ func getSymbols(cd Abspath, dirs map[string][]*directory) {
d := vv[0]
g.Go(func() error {
thedir := filepath.Join(string(cd), string(d.path))
mode := parser.SkipObjectResolution
mode := parser.SkipObjectResolution | parser.ParseComments
fi, err := os.ReadDir(thedir)
if err != nil {
@ -84,6 +85,9 @@ func getFileExports(f *ast.File) []symbol {
// generic functions just like non-generic ones.
sig := dtype.Params
kind := "F"
if isDeprecated(decl.Doc) {
kind += "D"
}
result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
for _, x := range sig.List {
// This code creates a string representing the type.
@ -127,12 +131,16 @@ func getFileExports(f *ast.File) []symbol {
ans = append(ans, *s)
}
case *ast.GenDecl:
depr := isDeprecated(decl.Doc)
switch decl.Tok {
case token.CONST, token.VAR:
tp := "V"
if decl.Tok == token.CONST {
tp = "C"
}
if depr {
tp += "D"
}
for _, sp := range decl.Specs {
for _, x := range sp.(*ast.ValueSpec).Names {
if s := newsym(pkg, x.Name, tp, ""); s != nil {
@ -141,8 +149,12 @@ func getFileExports(f *ast.File) []symbol {
}
}
case token.TYPE:
tp := "T"
if depr {
tp += "D"
}
for _, sp := range decl.Specs {
if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil {
if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
ans = append(ans, *s)
}
}
@ -160,6 +172,22 @@ func newsym(pkg, name, kind, sig string) *symbol {
return &sym
}
func isDeprecated(doc *ast.CommentGroup) bool {
if doc == nil {
return false
}
// Per go.dev/wiki/Deprecated, a paragraph starting 'Deprecated:' marks a symbol as deprecated.
// This code fails for /* Deprecated: */ comments, but it is the same code used by
// gopls/internal/analysis/deprecated.
lines := strings.Split(doc.Text(), "\n\n")
for _, line := range lines {
if strings.HasPrefix(line, "Deprecated:") {
return true
}
}
return false
}
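// For example (identifiers are illustrative), a declaration documented as
//
//	// Parse parses the input.
//	//
//	// Deprecated: use ParseContext instead.
//
// is reported as deprecated, so its kind is recorded as "FD" rather than "F".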
// return the package name and the value for the symbols.
// if there are multiple packages, choose one arbitrarily
// the returned slice is sorted lexicographically

View file

@ -268,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{
{"ErrTooLarge", Var, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
{"FieldsFuncSeq", Func, 24},
{"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},
@ -280,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
{"Lines", Func, 24},
{"Map", Func, 0},
{"MinRead", Const, 0},
{"NewBuffer", Func, 0},
@ -293,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
{"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
{"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},
@ -535,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{
{"NewCTR", Func, 0},
{"NewGCM", Func, 2},
{"NewGCMWithNonceSize", Func, 5},
{"NewGCMWithRandomNonce", Func, 24},
{"NewGCMWithTagSize", Func, 11},
{"NewOFB", Func, 0},
{"Stream", Type, 0},
@ -673,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{
{"Unmarshal", Func, 0},
{"UnmarshalCompressed", Func, 15},
},
"crypto/fips140": {
{"Enabled", Func, 24},
},
"crypto/hkdf": {
{"Expand", Func, 24},
{"Extract", Func, 24},
{"Key", Func, 24},
},
"crypto/hmac": {
{"Equal", Func, 1},
{"New", Func, 0},
@ -683,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{
{"Size", Const, 0},
{"Sum", Func, 2},
},
"crypto/mlkem": {
{"(*DecapsulationKey1024).Bytes", Method, 24},
{"(*DecapsulationKey1024).Decapsulate", Method, 24},
{"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
{"(*DecapsulationKey768).Bytes", Method, 24},
{"(*DecapsulationKey768).Decapsulate", Method, 24},
{"(*DecapsulationKey768).EncapsulationKey", Method, 24},
{"(*EncapsulationKey1024).Bytes", Method, 24},
{"(*EncapsulationKey1024).Encapsulate", Method, 24},
{"(*EncapsulationKey768).Bytes", Method, 24},
{"(*EncapsulationKey768).Encapsulate", Method, 24},
{"CiphertextSize1024", Const, 24},
{"CiphertextSize768", Const, 24},
{"DecapsulationKey1024", Type, 24},
{"DecapsulationKey768", Type, 24},
{"EncapsulationKey1024", Type, 24},
{"EncapsulationKey768", Type, 24},
{"EncapsulationKeySize1024", Const, 24},
{"EncapsulationKeySize768", Const, 24},
{"GenerateKey1024", Func, 24},
{"GenerateKey768", Func, 24},
{"NewDecapsulationKey1024", Func, 24},
{"NewDecapsulationKey768", Func, 24},
{"NewEncapsulationKey1024", Func, 24},
{"NewEncapsulationKey768", Func, 24},
{"SeedSize", Const, 24},
{"SharedKeySize", Const, 24},
},
"crypto/pbkdf2": {
{"Key", Func, 24},
},
"crypto/rand": {
{"Int", Func, 0},
{"Prime", Func, 0},
{"Read", Func, 0},
{"Reader", Var, 0},
{"Text", Func, 24},
},
"crypto/rc4": {
{"(*Cipher).Reset", Method, 0},
@ -766,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{
{"Sum224", Func, 2},
{"Sum256", Func, 2},
},
"crypto/sha3": {
{"(*SHA3).AppendBinary", Method, 24},
{"(*SHA3).BlockSize", Method, 24},
{"(*SHA3).MarshalBinary", Method, 24},
{"(*SHA3).Reset", Method, 24},
{"(*SHA3).Size", Method, 24},
{"(*SHA3).Sum", Method, 24},
{"(*SHA3).UnmarshalBinary", Method, 24},
{"(*SHA3).Write", Method, 24},
{"(*SHAKE).AppendBinary", Method, 24},
{"(*SHAKE).BlockSize", Method, 24},
{"(*SHAKE).MarshalBinary", Method, 24},
{"(*SHAKE).Read", Method, 24},
{"(*SHAKE).Reset", Method, 24},
{"(*SHAKE).UnmarshalBinary", Method, 24},
{"(*SHAKE).Write", Method, 24},
{"New224", Func, 24},
{"New256", Func, 24},
{"New384", Func, 24},
{"New512", Func, 24},
{"NewCSHAKE128", Func, 24},
{"NewCSHAKE256", Func, 24},
{"NewSHAKE128", Func, 24},
{"NewSHAKE256", Func, 24},
{"SHA3", Type, 24},
{"SHAKE", Type, 24},
{"Sum224", Func, 24},
{"Sum256", Func, 24},
{"Sum384", Func, 24},
{"Sum512", Func, 24},
{"SumSHAKE128", Func, 24},
{"SumSHAKE256", Func, 24},
},
"crypto/sha512": {
{"BlockSize", Const, 0},
{"New", Func, 0},
@ -788,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{
{"ConstantTimeEq", Func, 0},
{"ConstantTimeLessOrEq", Func, 2},
{"ConstantTimeSelect", Func, 0},
{"WithDataIndependentTiming", Func, 24},
{"XORBytes", Func, 20},
},
"crypto/tls": {
@ -864,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{
{"ClientHelloInfo", Type, 4},
{"ClientHelloInfo.CipherSuites", Field, 4},
{"ClientHelloInfo.Conn", Field, 8},
{"ClientHelloInfo.Extensions", Field, 24},
{"ClientHelloInfo.ServerName", Field, 4},
{"ClientHelloInfo.SignatureSchemes", Field, 8},
{"ClientHelloInfo.SupportedCurves", Field, 4},
@ -881,6 +962,7 @@ var PackageSymbols = map[string][]Symbol{
{"Config.CurvePreferences", Field, 3},
{"Config.DynamicRecordSizingDisabled", Field, 7},
{"Config.EncryptedClientHelloConfigList", Field, 23},
{"Config.EncryptedClientHelloKeys", Field, 24},
{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
{"Config.GetCertificate", Field, 4},
{"Config.GetClientCertificate", Field, 8},
@ -934,6 +1016,10 @@ var PackageSymbols = map[string][]Symbol{
{"ECHRejectionError", Type, 23},
{"ECHRejectionError.RetryConfigList", Field, 23},
{"Ed25519", Const, 13},
{"EncryptedClientHelloKey", Type, 24},
{"EncryptedClientHelloKey.Config", Field, 24},
{"EncryptedClientHelloKey.PrivateKey", Field, 24},
{"EncryptedClientHelloKey.SendAsRetry", Field, 24},
{"InsecureCipherSuites", Func, 14},
{"Listen", Func, 0},
{"LoadX509KeyPair", Func, 0},
@ -1032,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{
{"VersionTLS12", Const, 2},
{"VersionTLS13", Const, 12},
{"X25519", Const, 8},
{"X25519MLKEM768", Const, 24},
{"X509KeyPair", Func, 0},
},
"crypto/x509": {
@ -1056,6 +1143,8 @@ var PackageSymbols = map[string][]Symbol{
{"(ConstraintViolationError).Error", Method, 0},
{"(HostnameError).Error", Method, 0},
{"(InsecureAlgorithmError).Error", Method, 6},
{"(OID).AppendBinary", Method, 24},
{"(OID).AppendText", Method, 24},
{"(OID).Equal", Method, 22},
{"(OID).EqualASN1OID", Method, 22},
{"(OID).MarshalBinary", Method, 23},
@ -1084,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.Extensions", Field, 2},
{"Certificate.ExtraExtensions", Field, 2},
{"Certificate.IPAddresses", Field, 1},
{"Certificate.InhibitAnyPolicy", Field, 24},
{"Certificate.InhibitAnyPolicyZero", Field, 24},
{"Certificate.InhibitPolicyMapping", Field, 24},
{"Certificate.InhibitPolicyMappingZero", Field, 24},
{"Certificate.IsCA", Field, 0},
{"Certificate.Issuer", Field, 0},
{"Certificate.IssuingCertificateURL", Field, 2},
@ -1100,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.PermittedURIDomains", Field, 10},
{"Certificate.Policies", Field, 22},
{"Certificate.PolicyIdentifiers", Field, 0},
{"Certificate.PolicyMappings", Field, 24},
{"Certificate.PublicKey", Field, 0},
{"Certificate.PublicKeyAlgorithm", Field, 0},
{"Certificate.Raw", Field, 0},
@ -1107,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.RawSubject", Field, 0},
{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
{"Certificate.RawTBSCertificate", Field, 0},
{"Certificate.RequireExplicitPolicy", Field, 24},
{"Certificate.RequireExplicitPolicyZero", Field, 24},
{"Certificate.SerialNumber", Field, 0},
{"Certificate.Signature", Field, 0},
{"Certificate.SignatureAlgorithm", Field, 0},
@ -1198,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{
{"NameConstraintsWithoutSANs", Const, 10},
{"NameMismatch", Const, 8},
{"NewCertPool", Func, 0},
{"NoValidChains", Const, 24},
{"NotAuthorizedToSign", Const, 0},
{"OID", Type, 22},
{"OIDFromInts", Func, 22},
@ -1219,6 +1316,9 @@ var PackageSymbols = map[string][]Symbol{
{"ParsePKCS8PrivateKey", Func, 0},
{"ParsePKIXPublicKey", Func, 0},
{"ParseRevocationList", Func, 19},
{"PolicyMapping", Type, 24},
{"PolicyMapping.IssuerDomainPolicy", Field, 24},
{"PolicyMapping.SubjectDomainPolicy", Field, 24},
{"PublicKeyAlgorithm", Type, 0},
{"PureEd25519", Const, 13},
{"RSA", Const, 0},
@ -1265,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{
{"UnknownPublicKeyAlgorithm", Const, 0},
{"UnknownSignatureAlgorithm", Const, 0},
{"VerifyOptions", Type, 0},
{"VerifyOptions.CertificatePolicies", Field, 24},
{"VerifyOptions.CurrentTime", Field, 0},
{"VerifyOptions.DNSName", Field, 0},
{"VerifyOptions.Intermediates", Field, 0},
@ -1975,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*File).DynString", Method, 1},
{"(*File).DynValue", Method, 21},
{"(*File).DynamicSymbols", Method, 4},
{"(*File).DynamicVersionNeeds", Method, 24},
{"(*File).DynamicVersions", Method, 24},
{"(*File).ImportedLibraries", Method, 0},
{"(*File).ImportedSymbols", Method, 0},
{"(*File).Section", Method, 0},
@ -2240,6 +2343,19 @@ var PackageSymbols = map[string][]Symbol{
{"DynFlag", Type, 0},
{"DynFlag1", Type, 21},
{"DynTag", Type, 0},
{"DynamicVersion", Type, 24},
{"DynamicVersion.Deps", Field, 24},
{"DynamicVersion.Flags", Field, 24},
{"DynamicVersion.Index", Field, 24},
{"DynamicVersion.Name", Field, 24},
{"DynamicVersionDep", Type, 24},
{"DynamicVersionDep.Dep", Field, 24},
{"DynamicVersionDep.Flags", Field, 24},
{"DynamicVersionDep.Index", Field, 24},
{"DynamicVersionFlag", Type, 24},
{"DynamicVersionNeed", Type, 24},
{"DynamicVersionNeed.Name", Field, 24},
{"DynamicVersionNeed.Needs", Field, 24},
{"EI_ABIVERSION", Const, 0},
{"EI_CLASS", Const, 0},
{"EI_DATA", Const, 0},
@ -3726,8 +3842,19 @@ var PackageSymbols = map[string][]Symbol{
{"Symbol.Size", Field, 0},
{"Symbol.Value", Field, 0},
{"Symbol.Version", Field, 13},
{"Symbol.VersionIndex", Field, 24},
{"Symbol.VersionScope", Field, 24},
{"SymbolVersionScope", Type, 24},
{"Type", Type, 0},
{"VER_FLG_BASE", Const, 24},
{"VER_FLG_INFO", Const, 24},
{"VER_FLG_WEAK", Const, 24},
{"Version", Type, 0},
{"VersionScopeGlobal", Const, 24},
{"VersionScopeHidden", Const, 24},
{"VersionScopeLocal", Const, 24},
{"VersionScopeNone", Const, 24},
{"VersionScopeSpecific", Const, 24},
},
"debug/gosym": {
{"(*DecodingError).Error", Method, 0},
@ -4453,8 +4580,10 @@ var PackageSymbols = map[string][]Symbol{
{"FS", Type, 16},
},
"encoding": {
{"BinaryAppender", Type, 24},
{"BinaryMarshaler", Type, 2},
{"BinaryUnmarshaler", Type, 2},
{"TextAppender", Type, 24},
{"TextMarshaler", Type, 2},
{"TextUnmarshaler", Type, 2},
},
@ -5984,13 +6113,16 @@ var PackageSymbols = map[string][]Symbol{
{"(*Interface).Complete", Method, 5},
{"(*Interface).Embedded", Method, 5},
{"(*Interface).EmbeddedType", Method, 11},
{"(*Interface).EmbeddedTypes", Method, 24},
{"(*Interface).Empty", Method, 5},
{"(*Interface).ExplicitMethod", Method, 5},
{"(*Interface).ExplicitMethods", Method, 24},
{"(*Interface).IsComparable", Method, 18},
{"(*Interface).IsImplicit", Method, 18},
{"(*Interface).IsMethodSet", Method, 18},
{"(*Interface).MarkImplicit", Method, 18},
{"(*Interface).Method", Method, 5},
{"(*Interface).Methods", Method, 24},
{"(*Interface).NumEmbeddeds", Method, 5},
{"(*Interface).NumExplicitMethods", Method, 5},
{"(*Interface).NumMethods", Method, 5},
@ -6011,9 +6143,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*MethodSet).At", Method, 5},
{"(*MethodSet).Len", Method, 5},
{"(*MethodSet).Lookup", Method, 5},
{"(*MethodSet).Methods", Method, 24},
{"(*MethodSet).String", Method, 5},
{"(*Named).AddMethod", Method, 5},
{"(*Named).Method", Method, 5},
{"(*Named).Methods", Method, 24},
{"(*Named).NumMethods", Method, 5},
{"(*Named).Obj", Method, 5},
{"(*Named).Origin", Method, 18},
@ -6054,6 +6188,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Pointer).String", Method, 5},
{"(*Pointer).Underlying", Method, 5},
{"(*Scope).Child", Method, 5},
{"(*Scope).Children", Method, 24},
{"(*Scope).Contains", Method, 5},
{"(*Scope).End", Method, 5},
{"(*Scope).Innermost", Method, 5},
@ -6089,6 +6224,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*StdSizes).Offsetsof", Method, 5},
{"(*StdSizes).Sizeof", Method, 5},
{"(*Struct).Field", Method, 5},
{"(*Struct).Fields", Method, 24},
{"(*Struct).NumFields", Method, 5},
{"(*Struct).String", Method, 5},
{"(*Struct).Tag", Method, 5},
@ -6100,8 +6236,10 @@ var PackageSymbols = map[string][]Symbol{
{"(*Tuple).Len", Method, 5},
{"(*Tuple).String", Method, 5},
{"(*Tuple).Underlying", Method, 5},
{"(*Tuple).Variables", Method, 24},
{"(*TypeList).At", Method, 18},
{"(*TypeList).Len", Method, 18},
{"(*TypeList).Types", Method, 24},
{"(*TypeName).Exported", Method, 5},
{"(*TypeName).Id", Method, 5},
{"(*TypeName).IsAlias", Method, 9},
@ -6119,9 +6257,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*TypeParam).Underlying", Method, 18},
{"(*TypeParamList).At", Method, 18},
{"(*TypeParamList).Len", Method, 18},
{"(*TypeParamList).TypeParams", Method, 24},
{"(*Union).Len", Method, 18},
{"(*Union).String", Method, 18},
{"(*Union).Term", Method, 18},
{"(*Union).Terms", Method, 24},
{"(*Union).Underlying", Method, 18},
{"(*Var).Anonymous", Method, 5},
{"(*Var).Embedded", Method, 11},
@ -6392,10 +6532,12 @@ var PackageSymbols = map[string][]Symbol{
{"(*Hash).WriteByte", Method, 14},
{"(*Hash).WriteString", Method, 14},
{"Bytes", Func, 19},
{"Comparable", Func, 24},
{"Hash", Type, 14},
{"MakeSeed", Func, 14},
{"Seed", Type, 14},
{"String", Func, 19},
{"WriteComparable", Func, 24},
},
"html": {
{"EscapeString", Func, 0},
@ -7082,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*JSONHandler).WithGroup", Method, 21},
{"(*Level).UnmarshalJSON", Method, 21},
{"(*Level).UnmarshalText", Method, 21},
{"(*LevelVar).AppendText", Method, 24},
{"(*LevelVar).Level", Method, 21},
{"(*LevelVar).MarshalText", Method, 21},
{"(*LevelVar).Set", Method, 21},
@ -7110,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{
{"(Attr).Equal", Method, 21},
{"(Attr).String", Method, 21},
{"(Kind).String", Method, 21},
{"(Level).AppendText", Method, 24},
{"(Level).Level", Method, 21},
{"(Level).MarshalJSON", Method, 21},
{"(Level).MarshalText", Method, 21},
@ -7140,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{
{"Debug", Func, 21},
{"DebugContext", Func, 21},
{"Default", Func, 21},
{"DiscardHandler", Var, 24},
{"Duration", Func, 21},
{"DurationValue", Func, 21},
{"Error", Func, 21},
@ -7375,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Float).Acc", Method, 5},
{"(*Float).Add", Method, 5},
{"(*Float).Append", Method, 5},
{"(*Float).AppendText", Method, 24},
{"(*Float).Cmp", Method, 5},
{"(*Float).Copy", Method, 5},
{"(*Float).Float32", Method, 5},
@ -7421,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Int).And", Method, 0},
{"(*Int).AndNot", Method, 0},
{"(*Int).Append", Method, 6},
{"(*Int).AppendText", Method, 24},
{"(*Int).Binomial", Method, 0},
{"(*Int).Bit", Method, 0},
{"(*Int).BitLen", Method, 0},
@ -7477,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Int).Xor", Method, 0},
{"(*Rat).Abs", Method, 0},
{"(*Rat).Add", Method, 0},
{"(*Rat).AppendText", Method, 24},
{"(*Rat).Cmp", Method, 0},
{"(*Rat).Denom", Method, 0},
{"(*Rat).Float32", Method, 4},
@ -7659,11 +7807,13 @@ var PackageSymbols = map[string][]Symbol{
{"Zipf", Type, 0},
},
"math/rand/v2": {
{"(*ChaCha8).AppendBinary", Method, 24},
{"(*ChaCha8).MarshalBinary", Method, 22},
{"(*ChaCha8).Read", Method, 23},
{"(*ChaCha8).Seed", Method, 22},
{"(*ChaCha8).Uint64", Method, 22},
{"(*ChaCha8).UnmarshalBinary", Method, 22},
{"(*PCG).AppendBinary", Method, 24},
{"(*PCG).MarshalBinary", Method, 22},
{"(*PCG).Seed", Method, 22},
{"(*PCG).Uint64", Method, 22},
@ -7931,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*UnixListener).SyscallConn", Method, 10},
{"(Flags).String", Method, 0},
{"(HardwareAddr).String", Method, 0},
{"(IP).AppendText", Method, 24},
{"(IP).DefaultMask", Method, 0},
{"(IP).Equal", Method, 0},
{"(IP).IsGlobalUnicast", Method, 0},
@ -8131,6 +8282,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*MaxBytesError).Error", Method, 19},
{"(*ProtocolError).Error", Method, 0},
{"(*ProtocolError).Is", Method, 21},
{"(*Protocols).SetHTTP1", Method, 24},
{"(*Protocols).SetHTTP2", Method, 24},
{"(*Protocols).SetUnencryptedHTTP2", Method, 24},
{"(*Request).AddCookie", Method, 0},
{"(*Request).BasicAuth", Method, 4},
{"(*Request).Clone", Method, 13},
@ -8190,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{
{"(Header).Values", Method, 14},
{"(Header).Write", Method, 0},
{"(Header).WriteSubset", Method, 0},
{"(Protocols).HTTP1", Method, 24},
{"(Protocols).HTTP2", Method, 24},
{"(Protocols).String", Method, 24},
{"(Protocols).UnencryptedHTTP2", Method, 24},
{"AllowQuerySemicolons", Func, 17},
{"CanonicalHeaderKey", Func, 0},
{"Client", Type, 0},
@ -8252,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{
{"FileSystem", Type, 0},
{"Flusher", Type, 0},
{"Get", Func, 0},
{"HTTP2Config", Type, 24},
{"HTTP2Config.CountError", Field, 24},
{"HTTP2Config.MaxConcurrentStreams", Field, 24},
{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
{"HTTP2Config.MaxReadFrameSize", Field, 24},
{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
{"HTTP2Config.PingTimeout", Field, 24},
{"HTTP2Config.SendPingTimeout", Field, 24},
{"HTTP2Config.WriteByteTimeout", Field, 24},
{"Handle", Func, 0},
{"HandleFunc", Func, 0},
{"Handler", Type, 0},
@ -8292,6 +8462,7 @@ var PackageSymbols = map[string][]Symbol{
{"PostForm", Func, 0},
{"ProtocolError", Type, 0},
{"ProtocolError.ErrorString", Field, 0},
{"Protocols", Type, 24},
{"ProxyFromEnvironment", Func, 0},
{"ProxyURL", Func, 0},
{"PushOptions", Type, 8},
@ -8361,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{
{"Server.ConnState", Field, 3},
{"Server.DisableGeneralOptionsHandler", Field, 20},
{"Server.ErrorLog", Field, 3},
{"Server.HTTP2", Field, 24},
{"Server.Handler", Field, 0},
{"Server.IdleTimeout", Field, 8},
{"Server.MaxHeaderBytes", Field, 0},
{"Server.Protocols", Field, 24},
{"Server.ReadHeaderTimeout", Field, 8},
{"Server.ReadTimeout", Field, 0},
{"Server.TLSConfig", Field, 0},
@ -8453,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{
{"Transport.ExpectContinueTimeout", Field, 6},
{"Transport.ForceAttemptHTTP2", Field, 13},
{"Transport.GetProxyConnectHeader", Field, 16},
{"Transport.HTTP2", Field, 24},
{"Transport.IdleConnTimeout", Field, 7},
{"Transport.MaxConnsPerHost", Field, 11},
{"Transport.MaxIdleConns", Field, 7},
{"Transport.MaxIdleConnsPerHost", Field, 0},
{"Transport.MaxResponseHeaderBytes", Field, 7},
{"Transport.OnProxyConnectResponse", Field, 20},
{"Transport.Protocols", Field, 24},
{"Transport.Proxy", Field, 0},
{"Transport.ProxyConnectHeader", Field, 8},
{"Transport.ReadBufferSize", Field, 13},
@ -8646,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*AddrPort).UnmarshalText", Method, 18},
{"(*Prefix).UnmarshalBinary", Method, 18},
{"(*Prefix).UnmarshalText", Method, 18},
{"(Addr).AppendBinary", Method, 24},
{"(Addr).AppendText", Method, 24},
{"(Addr).AppendTo", Method, 18},
{"(Addr).As16", Method, 18},
{"(Addr).As4", Method, 18},
@ -8676,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{
{"(Addr).WithZone", Method, 18},
{"(Addr).Zone", Method, 18},
{"(AddrPort).Addr", Method, 18},
{"(AddrPort).AppendBinary", Method, 24},
{"(AddrPort).AppendText", Method, 24},
{"(AddrPort).AppendTo", Method, 18},
{"(AddrPort).Compare", Method, 22},
{"(AddrPort).IsValid", Method, 18},
@ -8684,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{
{"(AddrPort).Port", Method, 18},
{"(AddrPort).String", Method, 18},
{"(Prefix).Addr", Method, 18},
{"(Prefix).AppendBinary", Method, 24},
{"(Prefix).AppendText", Method, 24},
{"(Prefix).AppendTo", Method, 18},
{"(Prefix).Bits", Method, 18},
{"(Prefix).Contains", Method, 18},
@ -8868,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Error).Temporary", Method, 6},
{"(*Error).Timeout", Method, 6},
{"(*Error).Unwrap", Method, 13},
{"(*URL).AppendBinary", Method, 24},
{"(*URL).EscapedFragment", Method, 15},
{"(*URL).EscapedPath", Method, 5},
{"(*URL).Hostname", Method, 8},
@ -8967,6 +9149,17 @@ var PackageSymbols = map[string][]Symbol{
{"(*ProcessState).SysUsage", Method, 0},
{"(*ProcessState).SystemTime", Method, 0},
{"(*ProcessState).UserTime", Method, 0},
{"(*Root).Close", Method, 24},
{"(*Root).Create", Method, 24},
{"(*Root).FS", Method, 24},
{"(*Root).Lstat", Method, 24},
{"(*Root).Mkdir", Method, 24},
{"(*Root).Name", Method, 24},
{"(*Root).Open", Method, 24},
{"(*Root).OpenFile", Method, 24},
{"(*Root).OpenRoot", Method, 24},
{"(*Root).Remove", Method, 24},
{"(*Root).Stat", Method, 24},
{"(*SyscallError).Error", Method, 0},
{"(*SyscallError).Timeout", Method, 10},
{"(*SyscallError).Unwrap", Method, 13},
@ -9060,6 +9253,8 @@ var PackageSymbols = map[string][]Symbol{
{"O_WRONLY", Const, 0},
{"Open", Func, 0},
{"OpenFile", Func, 0},
{"OpenInRoot", Func, 24},
{"OpenRoot", Func, 24},
{"PathError", Type, 0},
{"PathError.Err", Field, 0},
{"PathError.Op", Field, 0},
@ -9081,6 +9276,7 @@ var PackageSymbols = map[string][]Symbol{
{"Remove", Func, 0},
{"RemoveAll", Func, 0},
{"Rename", Func, 0},
{"Root", Type, 24},
{"SEEK_CUR", Const, 0},
{"SEEK_END", Const, 0},
{"SEEK_SET", Const, 0},
@ -9422,6 +9618,7 @@ var PackageSymbols = map[string][]Symbol{
{"Zero", Func, 0},
},
"regexp": {
{"(*Regexp).AppendText", Method, 24},
{"(*Regexp).Copy", Method, 6},
{"(*Regexp).Expand", Method, 0},
{"(*Regexp).ExpandString", Method, 0},
@ -9602,6 +9799,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*StackRecord).Stack", Method, 0},
{"(*TypeAssertionError).Error", Method, 0},
{"(*TypeAssertionError).RuntimeError", Method, 0},
{"(Cleanup).Stop", Method, 24},
{"AddCleanup", Func, 24},
{"BlockProfile", Func, 1},
{"BlockProfileRecord", Type, 1},
{"BlockProfileRecord.Count", Field, 1},
@ -9612,6 +9811,7 @@ var PackageSymbols = map[string][]Symbol{
{"Caller", Func, 0},
{"Callers", Func, 0},
{"CallersFrames", Func, 7},
{"Cleanup", Type, 24},
{"Compiler", Const, 0},
{"Error", Type, 0},
{"Frame", Type, 7},
@ -9974,6 +10174,8 @@ var PackageSymbols = map[string][]Symbol{
{"EqualFold", Func, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
{"FieldsFuncSeq", Func, 24},
{"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},
@ -9986,6 +10188,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
{"Lines", Func, 24},
{"Map", Func, 0},
{"NewReader", Func, 0},
{"NewReplacer", Func, 0},
@@ -9997,7 +10200,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
{"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
{"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},
@@ -16413,7 +16618,9 @@ var PackageSymbols = map[string][]Symbol{
{"ValueOf", Func, 0},
},
"testing": {
{"(*B).Chdir", Method, 24},
{"(*B).Cleanup", Method, 14},
{"(*B).Context", Method, 24},
{"(*B).Elapsed", Method, 20},
{"(*B).Error", Method, 0},
{"(*B).Errorf", Method, 0},
@@ -16425,6 +16632,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).Helper", Method, 9},
{"(*B).Log", Method, 0},
{"(*B).Logf", Method, 0},
{"(*B).Loop", Method, 24},
{"(*B).Name", Method, 8},
{"(*B).ReportAllocs", Method, 1},
{"(*B).ReportMetric", Method, 13},
@@ -16442,7 +16650,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).StopTimer", Method, 0},
{"(*B).TempDir", Method, 15},
{"(*F).Add", Method, 18},
{"(*F).Chdir", Method, 24},
{"(*F).Cleanup", Method, 18},
{"(*F).Context", Method, 24},
{"(*F).Error", Method, 18},
{"(*F).Errorf", Method, 18},
{"(*F).Fail", Method, 18},
@@ -16463,7 +16673,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*F).TempDir", Method, 18},
{"(*M).Run", Method, 4},
{"(*PB).Next", Method, 3},
{"(*T).Chdir", Method, 24},
{"(*T).Cleanup", Method, 14},
{"(*T).Context", Method, 24},
{"(*T).Deadline", Method, 15},
{"(*T).Error", Method, 0},
{"(*T).Errorf", Method, 0},
@@ -16954,7 +17166,9 @@ var PackageSymbols = map[string][]Symbol{
{"(Time).Add", Method, 0},
{"(Time).AddDate", Method, 0},
{"(Time).After", Method, 0},
{"(Time).AppendBinary", Method, 24},
{"(Time).AppendFormat", Method, 5},
{"(Time).AppendText", Method, 24},
{"(Time).Before", Method, 0},
{"(Time).Clock", Method, 0},
{"(Time).Compare", Method, 20},
@@ -17428,4 +17642,9 @@ var PackageSymbols = map[string][]Symbol{
{"String", Func, 0},
{"StringData", Func, 0},
},
"weak": {
{"(Pointer).Value", Method, 24},
{"Make", Func, 24},
{"Pointer", Type, 24},
},
}


@@ -66,75 +66,3 @@ func IsTypeParam(t types.Type) bool {
_, ok := types.Unalias(t).(*types.TypeParam)
return ok
}
// GenericAssignableTo is a generalization of types.AssignableTo that
// implements the following rule for uninstantiated generic types:
//
// If V and T are generic named types, then V is considered assignable to T if,
// for every possible instantiation of V[A_1, ..., A_N], the instantiation
// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
//
// If T has structural constraints, they must be satisfied by V.
//
// For example, consider the following type declarations:
//
// type Interface[T any] interface {
// Accept(T)
// }
//
// type Container[T any] struct {
// Element T
// }
//
// func (c Container[T]) Accept(t T) { c.Element = t }
//
// In this case, GenericAssignableTo reports that instantiations of Container
// are assignable to the corresponding instantiation of Interface.
func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
V = types.Unalias(V)
T = types.Unalias(T)
// If V and T are not both named, or do not have matching non-empty type
// parameter lists, fall back on types.AssignableTo.
VN, Vnamed := V.(*types.Named)
TN, Tnamed := T.(*types.Named)
if !Vnamed || !Tnamed {
return types.AssignableTo(V, T)
}
vtparams := VN.TypeParams()
ttparams := TN.TypeParams()
if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
return types.AssignableTo(V, T)
}
// V and T have the same (non-zero) number of type params. Instantiate both
// with the type parameters of V. This must always succeed for V, and will
// succeed for T if and only if the type set of each type parameter of V is a
// subset of the type set of the corresponding type parameter of T, meaning
// that every instantiation of V corresponds to a valid instantiation of T.
// Minor optimization: ensure we share a context across the two
// instantiations below.
if ctxt == nil {
ctxt = types.NewContext()
}
var targs []types.Type
for i := 0; i < vtparams.Len(); i++ {
targs = append(targs, vtparams.At(i))
}
vinst, err := types.Instantiate(ctxt, V, targs, true)
if err != nil {
panic("type parameters should satisfy their own constraints")
}
tinst, err := types.Instantiate(ctxt, T, targs, true)
if err != nil {
return false
}
return types.AssignableTo(vinst, tinst)
}
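
For context on the rule described in the comment above, here is a minimal, self-contained sketch using only the public go/types API (the package name "demo" and the choice of int as the shared type argument are assumptions for illustration); it performs the same instantiate-then-compare check that GenericAssignableTo automates:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package demo

type Interface[T any] interface{ Accept(T) }

type Container[T any] struct{ Element T }

func (c Container[T]) Accept(t T) { c.Element = t }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("demo", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// Instantiate both generic types with the same type argument and compare
	// the results, which is the per-instantiation rule described above.
	ctxt := types.NewContext()
	args := []types.Type{types.Typ[types.Int]}
	vinst, _ := types.Instantiate(ctxt, pkg.Scope().Lookup("Container").Type(), args, true)
	tinst, _ := types.Instantiate(ctxt, pkg.Scope().Lookup("Interface").Type(), args, true)
	fmt.Println(types.AssignableTo(vinst, tinst)) // true: Container[int] implements Interface[int]
}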


@@ -0,0 +1,46 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typesinternal
import (
"go/ast"
"go/types"
"strconv"
)
// FileQualifier returns a [types.Qualifier] function that qualifies
// imported symbols appropriately based on the import environment of a given
// file.
// If the same package is imported multiple times, the last appearance is
// recorded.
func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
// Construct mapping of import paths to their defined names.
// It is only necessary to look at renaming imports.
imports := make(map[string]string)
for _, imp := range f.Imports {
if imp.Name != nil && imp.Name.Name != "_" {
path, _ := strconv.Unquote(imp.Path.Value)
imports[path] = imp.Name.Name
}
}
// Define qualifier to replace full package paths with names of the imports.
return func(p *types.Package) string {
if p == nil || p == pkg {
return ""
}
if name, ok := imports[p.Path()]; ok {
if name == "." {
return ""
} else {
return name
}
}
// If there is no local renaming, fall back to the package name.
return p.Name()
}
}
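
A rough usage sketch, written as if it lived inside package typesinternal (the file source, the package path "example.com/p", and the stand-in bytes package below are assumptions for illustration): build the qualifier from a file's imports, then pass it to types.TypeString so that renaming imports are respected.

package typesinternal

import (
	"fmt"
	"go/parser"
	"go/token"
	"go/types"
)

// fileQualifierSketch shows FileQualifier resolving a renamed import:
// the file imports "bytes" as bb, so types from that package print as bb.Buffer.
func fileQualifierSketch() {
	src := `package p

import bb "bytes"

var _ bb.Buffer
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	self := types.NewPackage("example.com/p", "p") // the package containing f (assumed)
	bytesPkg := types.NewPackage("bytes", "bytes") // stand-in for the imported package
	buffer := types.NewNamed(
		types.NewTypeName(token.NoPos, bytesPkg, "Buffer", nil),
		types.NewStruct(nil, nil), nil)

	qual := FileQualifier(f, self)
	fmt.Println(types.TypeString(buffer, qual)) // prints "bb.Buffer"
}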


@@ -11,6 +11,8 @@ import (
// ReceiverNamed returns the named type (if any) associated with the
// type of recv, which may be of the form N or *N, or aliases thereof.
// It also reports whether a Pointer was present.
//
// The named result may be nil in ill-typed code.
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
t := recv.Type()
if ptr, ok := types.Unalias(t).(*types.Pointer); ok {


@@ -82,6 +82,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
type NamedOrAlias interface {
types.Type
Obj() *types.TypeName
// TODO(hxjiang): add method TypeArgs() *types.TypeList after we stop supporting go1.22.
}
// TypeParams is a light shim around t.TypeParams().


@@ -9,62 +9,97 @@ import (
"go/ast"
"go/token"
"go/types"
"strconv"
"strings"
)
// ZeroString returns the string representation of the "zero" value of the type t.
// This string can be used on the right-hand side of an assignment where the
// left-hand side has that explicit type.
// Exception: This does not apply to tuples. Their string representation is
// informational only and cannot be used in an assignment.
// ZeroString returns the string representation of the zero value for any type t.
// The boolean result indicates whether the type is or contains an invalid type
// or a non-basic (constraint) interface type.
//
// Even for invalid input types, ZeroString may return a partially correct
// string representation. The caller should use the returned isValid boolean
// to determine the validity of the expression.
//
// When assigning to a wider type (such as 'any'), it's the caller's
// responsibility to handle any necessary type conversions.
//
// This string can be used on the right-hand side of an assignment where the
// left-hand side has that explicit type.
// References to named types are qualified by an appropriate (optional)
// qualifier function.
// Exception: This does not apply to tuples. Their string representation is
// informational only and cannot be used in an assignment.
//
// See [ZeroExpr] for a variant that returns an [ast.Expr].
func ZeroString(t types.Type, qf types.Qualifier) string {
func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return "false"
return "false", true
case t.Info()&types.IsNumeric != 0:
return "0"
return "0", true
case t.Info()&types.IsString != 0:
return `""`
return `""`, true
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return "nil"
return "nil", true
case t.Kind() == types.Invalid:
return "invalid", false
default:
panic(fmt.Sprint("ZeroString for unexpected type:", t))
panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
}
case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return "nil"
case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
return "nil", true
case *types.Named, *types.Alias:
case *types.Interface:
if !t.IsMethodSet() {
return "invalid", false
}
return "nil", true
case *types.Named:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return types.TypeString(t, qf) + "{}"
return types.TypeString(t, qual) + "{}", true
default:
return ZeroString(under, qf)
return ZeroString(under, qual)
}
case *types.Alias:
switch t.Underlying().(type) {
case *types.Struct, *types.Array:
return types.TypeString(t, qual) + "{}", true
default:
// A type parameter can have an alias, but an alias type's underlying
// type can never be a type parameter.
// Use types.Unalias to preserve the type parameter information instead
// of calling Underlying(), which would go right through to the
// underlying type of the type parameter (always an interface).
return ZeroString(types.Unalias(t), qual)
}
case *types.Array, *types.Struct:
return types.TypeString(t, qf) + "{}"
return types.TypeString(t, qual) + "{}", true
case *types.TypeParam:
// Assumes func new is not shadowed.
return "*new(" + types.TypeString(t, qf) + ")"
return "*new(" + types.TypeString(t, qual) + ")", true
case *types.Tuple:
// Tuples are not normal values.
// We currently format them as "(t[0], ..., t[n])". Could be something else.
isValid := true
components := make([]string, t.Len())
for i := 0; i < t.Len(); i++ {
components[i] = ZeroString(t.At(i).Type(), qf)
comp, ok := ZeroString(t.At(i).Type(), qual)
components[i] = comp
isValid = isValid && ok
}
return "(" + strings.Join(components, ", ") + ")"
return "(" + strings.Join(components, ", ") + ")", isValid
case *types.Union:
// Variables of these types cannot be created, so it makes
@@ -76,45 +111,72 @@ func ZeroString(t types.Type, qf types.Qualifier) string {
}
}
// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
// ZeroExpr is defined for types that are suitable for variables.
// It may panic for other types such as Tuple or Union.
// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
// The boolean result indicates whether the type is or contains an invalid type
// or a non-basic (constraint) interface type.
//
// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
// representation. The caller should use the returned isValid boolean to determine
// the validity of the expression.
//
// This function is designed for types suitable for variables and should not be
// used with Tuple or Union types. References to named types are qualified by an
// appropriate (optional) qualifier function.
//
// See [ZeroString] for a variant that returns a string.
func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return &ast.Ident{Name: "false"}
return &ast.Ident{Name: "false"}, true
case t.Info()&types.IsNumeric != 0:
return &ast.BasicLit{Kind: token.INT, Value: "0"}
return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
case t.Info()&types.IsString != 0:
return &ast.BasicLit{Kind: token.STRING, Value: `""`}
return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return ast.NewIdent("nil")
return ast.NewIdent("nil"), true
case t.Kind() == types.Invalid:
return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
default:
panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
}
case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return ast.NewIdent("nil")
case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
return ast.NewIdent("nil"), true
case *types.Named, *types.Alias:
case *types.Interface:
if !t.IsMethodSet() {
return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
}
return ast.NewIdent("nil"), true
case *types.Named:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return &ast.CompositeLit{
Type: TypeExpr(f, pkg, typ),
}
Type: TypeExpr(t, qual),
}, true
default:
return ZeroExpr(f, pkg, under)
return ZeroExpr(under, qual)
}
case *types.Alias:
switch t.Underlying().(type) {
case *types.Struct, *types.Array:
return &ast.CompositeLit{
Type: TypeExpr(t, qual),
}, true
default:
return ZeroExpr(types.Unalias(t), qual)
}
case *types.Array, *types.Struct:
return &ast.CompositeLit{
Type: TypeExpr(f, pkg, typ),
}
Type: TypeExpr(t, qual),
}, true
case *types.TypeParam:
return &ast.StarExpr{ // *new(T)
@@ -125,7 +187,7 @@ func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
ast.NewIdent(t.Obj().Name()),
},
},
}
}, true
case *types.Tuple:
// Unlike ZeroString, there is no ast.Expr can express tuple by
@@ -157,16 +219,14 @@ func IsZeroExpr(expr ast.Expr) bool {
}
// TypeExpr returns syntax for the specified type. References to named types
// from packages other than pkg are qualified by an appropriate package name, as
// defined by the import environment of file.
// are qualified by an appropriate (optional) qualifier function.
// It may panic for types such as Tuple or Union.
func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
switch t := t.(type) {
case *types.Basic:
switch t.Kind() {
case types.UnsafePointer:
// TODO(hxjiang): replace the implementation with types.Qualifier.
return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
default:
return ast.NewIdent(t.Name())
}
@@ -174,7 +234,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
case *types.Pointer:
return &ast.UnaryExpr{
Op: token.MUL,
X: TypeExpr(f, pkg, t.Elem()),
X: TypeExpr(t.Elem(), qual),
}
case *types.Array:
@@ -183,18 +243,18 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
Kind: token.INT,
Value: fmt.Sprintf("%d", t.Len()),
},
Elt: TypeExpr(f, pkg, t.Elem()),
Elt: TypeExpr(t.Elem(), qual),
}
case *types.Slice:
return &ast.ArrayType{
Elt: TypeExpr(f, pkg, t.Elem()),
Elt: TypeExpr(t.Elem(), qual),
}
case *types.Map:
return &ast.MapType{
Key: TypeExpr(f, pkg, t.Key()),
Value: TypeExpr(f, pkg, t.Elem()),
Key: TypeExpr(t.Key(), qual),
Value: TypeExpr(t.Elem(), qual),
}
case *types.Chan:
@@ -204,14 +264,14 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
}
return &ast.ChanType{
Dir: dir,
Value: TypeExpr(f, pkg, t.Elem()),
Value: TypeExpr(t.Elem(), qual),
}
case *types.Signature:
var params []*ast.Field
for i := 0; i < t.Params().Len(); i++ {
params = append(params, &ast.Field{
Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
Type: TypeExpr(t.Params().At(i).Type(), qual),
Names: []*ast.Ident{
{
Name: t.Params().At(i).Name(),
@@ -226,7 +286,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
var returns []*ast.Field
for i := 0; i < t.Results().Len(); i++ {
returns = append(returns, &ast.Field{
Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
Type: TypeExpr(t.Results().At(i).Type(), qual),
})
}
return &ast.FuncType{
@@ -238,23 +298,9 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
},
}
case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
switch t.Obj().Pkg() {
case pkg, nil:
return ast.NewIdent(t.Obj().Name())
}
pkgName := t.Obj().Pkg().Name()
// TODO(hxjiang): replace the implementation with types.Qualifier.
// If the file already imports the package under another name, use that.
for _, cand := range f.Imports {
if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
if cand.Name != nil && cand.Name.Name != "" {
pkgName = cand.Name.Name
}
}
}
if pkgName == "." {
case *types.TypeParam:
pkgName := qual(t.Obj().Pkg())
if pkgName == "" || t.Obj().Pkg() == nil {
return ast.NewIdent(t.Obj().Name())
}
return &ast.SelectorExpr{
@@ -262,6 +308,36 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
Sel: ast.NewIdent(t.Obj().Name()),
}
// types.TypeParam also implements interface NamedOrAlias. To differentiate,
// case TypeParam needs to be present before case NamedOrAlias.
// TODO(hxjiang): remove this comment once TypeArgs() is added to interface
// NamedOrAlias.
case NamedOrAlias:
var expr ast.Expr = ast.NewIdent(t.Obj().Name())
if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
expr = &ast.SelectorExpr{
X: ast.NewIdent(pkgName),
Sel: expr.(*ast.Ident),
}
}
// TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
// typesinternal.NamedOrAlias.
if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
var indices []ast.Expr
for i := range typeArgs.Len() {
indices = append(indices, TypeExpr(typeArgs.At(i), qual))
}
expr = &ast.IndexListExpr{
X: expr,
Indices: indices,
}
}
}
return expr
case *types.Struct:
return ast.NewIdent(t.String())
@@ -269,9 +345,43 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
return ast.NewIdent(t.String())
case *types.Union:
// TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
// Remove nil check when calling typesinternal.TypeExpr.
return nil
if t.Len() == 0 {
panic("Union type should have at least one term")
}
// As in go/ast, the returned expression puts the last term in the
// Y field at the topmost level of the BinaryExpr.
// For union of type "float32 | float64 | int64", the structure looks
// similar to:
// {
// X: {
// X: float32,
// Op: |
// Y: float64,
// }
// Op: |,
// Y: int64,
// }
var union ast.Expr
for i := range t.Len() {
term := t.Term(i)
termExpr := TypeExpr(term.Type(), qual)
if term.Tilde() {
termExpr = &ast.UnaryExpr{
Op: token.TILDE,
X: termExpr,
}
}
if i == 0 {
union = termExpr
} else {
union = &ast.BinaryExpr{
X: union,
Op: token.OR,
Y: termExpr,
}
}
}
return union
case *types.Tuple:
panic("invalid input type types.Tuple")

vendor/modules.txt

@@ -139,11 +139,8 @@ golang.org/x/crypto/ed25519
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/nacl/box
golang.org/x/crypto/nacl/secretbox
golang.org/x/crypto/poly1305
golang.org/x/crypto/salsa20/salsa
# golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329
# golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
## explicit; go 1.22.0
golang.org/x/exp/rand
# golang.org/x/mod v0.22.0
@@ -152,7 +149,7 @@ golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.33.0
# golang.org/x/net v0.34.0
## explicit; go 1.18
golang.org/x/net/bpf
golang.org/x/net/http/httpguts
@@ -183,7 +180,7 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/tools v0.28.0
# golang.org/x/tools v0.29.0
## explicit; go 1.22.0
golang.org/x/tools/cover
golang.org/x/tools/go/ast/astutil