Update deps

Frank Denis 2018-05-16 11:39:59 +02:00
parent 281c2fa7f4
commit 49e5c87f8d
22 changed files with 685 additions and 296 deletions

Gopkg.lock generated (14 changed lines)

@ -92,7 +92,7 @@
branch = "master" branch = "master"
name = "github.com/jedisct1/dlog" name = "github.com/jedisct1/dlog"
packages = ["."] packages = ["."]
revision = "08cef41542e5c78041f764962bea3d30dc5d4a64" revision = "d9d2d2575aef4e29f8a9b7d75a75a1d0b146f1af"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -110,7 +110,7 @@
branch = "master" branch = "master"
name = "github.com/jedisct1/go-minisign" name = "github.com/jedisct1/go-minisign"
packages = ["."] packages = ["."]
revision = "9db77cefece9267a416903f994c5dda9da849d65" revision = "f4dbde220b4f73d450949b9ba27fa941faa05a78"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -139,8 +139,8 @@
[[projects]] [[projects]]
name = "github.com/miekg/dns" name = "github.com/miekg/dns"
packages = ["."] packages = ["."]
revision = "eac804ceef194db2da6ee80c728d7658c8c805ff" revision = "e57bf427e68187a27e22adceac868350d7a7079b"
version = "v1.0.6" version = "v1.0.7"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -154,7 +154,7 @@
"poly1305", "poly1305",
"salsa20/salsa" "salsa20/salsa"
] ]
revision = "2d027ae1dddd4694d54f7a8b6cbe78dca8720226" revision = "1a580b3eff7814fc9b40602fd35256c63b50f491"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -170,7 +170,7 @@
"ipv4", "ipv4",
"ipv6" "ipv6"
] ]
revision = "f73e4c9ed3b7ebdd5f699a16a880c2b1994e50dd" revision = "2491c5de3490fced2f6cff376127c667efeed857"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -183,7 +183,7 @@
"windows/svc/eventlog", "windows/svc/eventlog",
"windows/svc/mgr" "windows/svc/mgr"
] ]
revision = "7dfd1290c7917b7ba22824b9d24954ab3002fe24" revision = "7c87d13f8e835d2fb3a70a2912c811ed0c1d241b"
[[projects]] [[projects]]
name = "golang.org/x/text" name = "golang.org/x/text"


@ -8,7 +8,7 @@
"ed25519", "ed25519",
"ed25519/internal/edwards25519" "ed25519/internal/edwards25519"
] ]
revision = "2d027ae1dddd4694d54f7a8b6cbe78dca8720226" revision = "1a580b3eff7814fc9b40602fd35256c63b50f491"
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"


@ -7,8 +7,12 @@ import (
"context" "context"
"crypto/tls" "crypto/tls"
"encoding/binary" "encoding/binary"
"fmt"
"io" "io"
"io/ioutil"
"net" "net"
"net/http"
"net/url"
"strings" "strings"
"time" "time"
) )
@ -16,6 +20,8 @@ import (
const dnsTimeout time.Duration = 2 * time.Second const dnsTimeout time.Duration = 2 * time.Second
const tcpIdleTimeout time.Duration = 8 * time.Second const tcpIdleTimeout time.Duration = 8 * time.Second
const dohMimeType = "application/dns-udpwireformat"
// A Conn represents a connection to a DNS server. // A Conn represents a connection to a DNS server.
type Conn struct { type Conn struct {
net.Conn // a net.Conn holding the connection net.Conn // a net.Conn holding the connection
@ -37,6 +43,7 @@ type Client struct {
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
HTTPClient *http.Client // The http.Client to use for DNS-over-HTTPS
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight group singleflight
@ -134,6 +141,11 @@ func (c *Client) Dial(address string) (conn *Conn, err error) {
// attribute appropriately // attribute appropriately
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight { if !c.SingleInflight {
if c.Net == "https" {
// TODO(tmthrgd): pipe timeouts into exchangeDOH
return c.exchangeDOH(context.TODO(), m, address)
}
return c.exchange(m, address) return c.exchange(m, address)
} }
@ -146,6 +158,11 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
cl = cl1 cl = cl1
} }
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
if c.Net == "https" {
// TODO(tmthrgd): pipe timeouts into exchangeDOH
return c.exchangeDOH(context.TODO(), m, address)
}
return c.exchange(m, address) return c.exchange(m, address)
}) })
if r != nil && shared { if r != nil && shared {
@ -191,6 +208,77 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
return r, rtt, err return r, rtt, err
} }
func (c *Client) exchangeDOH(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
p, err := m.Pack()
if err != nil {
return nil, 0, err
}
// TODO(tmthrgd): Allow the path to be customised?
u := &url.URL{
Scheme: "https",
Host: a,
Path: "/.well-known/dns-query",
}
if u.Port() == "443" {
u.Host = u.Hostname()
}
req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewReader(p))
if err != nil {
return nil, 0, err
}
req.Header.Set("Content-Type", dohMimeType)
req.Header.Set("Accept", dohMimeType)
t := time.Now()
hc := http.DefaultClient
if c.HTTPClient != nil {
hc = c.HTTPClient
}
if ctx != context.Background() && ctx != context.TODO() {
req = req.WithContext(ctx)
}
resp, err := hc.Do(req)
if err != nil {
return nil, 0, err
}
defer closeHTTPBody(resp.Body)
if resp.StatusCode != http.StatusOK {
return nil, 0, fmt.Errorf("dns: server returned HTTP %d error: %q", resp.StatusCode, resp.Status)
}
if ct := resp.Header.Get("Content-Type"); ct != dohMimeType {
return nil, 0, fmt.Errorf("dns: unexpected Content-Type %q; expected %q", ct, dohMimeType)
}
p, err = ioutil.ReadAll(resp.Body)
if err != nil {
return nil, 0, err
}
rtt = time.Since(t)
r = new(Msg)
if err := r.Unpack(p); err != nil {
return r, 0, err
}
// TODO: TSIG? Is it even supported over DoH?
return r, rtt, nil
}
func closeHTTPBody(r io.ReadCloser) error {
io.Copy(ioutil.Discard, io.LimitReader(r, 8<<20))
return r.Close()
}
// ReadMsg reads a message from the connection co. // ReadMsg reads a message from the connection co.
// If the received message contains a TSIG record the transaction signature // If the received message contains a TSIG record the transaction signature
// is verified. This method always tries to return the message, however if an // is verified. This method always tries to return the message, however if an
@ -490,6 +578,10 @@ func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout
// context, if present. If there is both a context deadline and a configured // context, if present. If there is both a context deadline and a configured
// timeout on the client, the earliest of the two takes effect. // timeout on the client, the earliest of the two takes effect.
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight && c.Net == "https" {
return c.exchangeDOH(ctx, m, a)
}
var timeout time.Duration var timeout time.Duration
if deadline, ok := ctx.Deadline(); !ok { if deadline, ok := ctx.Deadline(); !ok {
timeout = 0 timeout = 0
@ -498,6 +590,7 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg,
} }
// not passing the context to the underlying calls, as the API does not support // not passing the context to the underlying calls, as the API does not support
// context. For timeouts you should set up Client.Dialer and call Client.Exchange. // context. For timeouts you should set up Client.Dialer and call Client.Exchange.
// TODO(tmthrgd): this is a race condition
c.Dialer = &net.Dialer{Timeout: timeout} c.Dialer = &net.Dialer{Timeout: timeout}
return c.Exchange(m, a) return c.Exchange(m, a)
} }
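
A minimal sketch of driving the new DNS-over-HTTPS path from application code, assuming the vendored miekg/dns version in this commit; the resolver address is only an example:

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    // Net: "https" makes Exchange take the new exchangeDOH path, which POSTs
    // the packed query to https://<address>/.well-known/dns-query with the
    // "application/dns-udpwireformat" media type.
    c := &dns.Client{Net: "https"}

    m := new(dns.Msg)
    m.SetQuestion("example.com.", dns.TypeA)

    r, rtt, err := c.Exchange(m, "dns.cloudflare.com:443")
    if err != nil {
        log.Fatalf("DoH exchange failed: %v", err)
    }
    fmt.Printf("rcode=%d rtt=%v answers=%d\n", r.Rcode, rtt, len(r.Answer))
}

Per the TODOs above, read/write timeouts are not yet piped into exchangeDOH; an http.Client with its own Timeout can be supplied via Client.HTTPClient instead.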


@ -588,3 +588,25 @@ func TestConcurrentExchanges(t *testing.T) {
} }
} }
} }
func TestDoHExchange(t *testing.T) {
const addrstr = "dns.cloudflare.com:443"
m := new(Msg)
m.SetQuestion("miek.nl.", TypeSOA)
cl := &Client{Net: "https"}
r, _, err := cl.Exchange(m, addrstr)
if err != nil {
t.Fatalf("failed to exchange: %v", err)
}
if r == nil || r.Rcode != RcodeSuccess {
t.Errorf("failed to get an valid answer\n%v", r)
}
t.Log(r)
// TODO: proper tests for this
}


@ -101,7 +101,8 @@ Names:
// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names // compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n") fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR, initLen int) int {\n")
fmt.Fprint(b, "currentLen := initLen\n")
fmt.Fprint(b, "switch x := r.(type) {\n") fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range domainTypes { for _, name := range domainTypes {
o := scope.Lookup(name) o := scope.Lookup(name)
@ -109,7 +110,10 @@ Names:
fmt.Fprintf(b, "case *%s:\n", name) fmt.Fprintf(b, "case *%s:\n", name)
for i := 1; i < st.NumFields(); i++ { for i := 1; i < st.NumFields(); i++ {
out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) } out := func(s string) {
fmt.Fprintf(b, "currentLen -= len(x.%s) + 1\n", st.Field(i).Name())
fmt.Fprintf(b, "currentLen += compressionLenHelper(c, x.%s, currentLen)\n", st.Field(i).Name())
}
if _, ok := st.Field(i).Type().(*types.Slice); ok { if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) { switch st.Tag(i) {
@ -118,8 +122,12 @@ Names:
case `dns:"cdomain-name"`: case `dns:"cdomain-name"`:
// For HIP we need to slice over the elements in this slice. // For HIP we need to slice over the elements in this slice.
fmt.Fprintf(b, `for i := range x.%s { fmt.Fprintf(b, `for i := range x.%s {
compressionLenHelper(c, x.%s[i]) currentLen -= len(x.%s[i]) + 1
} }
`, st.Field(i).Name(), st.Field(i).Name())
fmt.Fprintf(b, `for i := range x.%s {
currentLen += compressionLenHelper(c, x.%s[i], currentLen)
}
`, st.Field(i).Name(), st.Field(i).Name()) `, st.Field(i).Name(), st.Field(i).Name())
} }
continue continue
@ -133,11 +141,11 @@ Names:
} }
} }
} }
fmt.Fprintln(b, "}\n}\n\n") fmt.Fprintln(b, "}\nreturn currentLen - initLen\n}\n\n")
// compressionLenSearchType - search cdomain-tags types for compressible names. // compressionLenSearchType - search cdomain-tags types for compressible names.
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n") fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {\n")
fmt.Fprint(b, "switch x := r.(type) {\n") fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range cdomainTypes { for _, name := range cdomainTypes {
o := scope.Lookup(name) o := scope.Lookup(name)
@ -147,7 +155,7 @@ Names:
j := 1 j := 1
for i := 1; i < st.NumFields(); i++ { for i := 1; i < st.NumFields(); i++ {
out := func(s string, j int) { out := func(s string, j int) {
fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name()) fmt.Fprintf(b, "k%d, ok%d, sz%d := compressionLenSearch(c, x.%s)\n", j, j, j, st.Field(i).Name())
} }
// There are no slice types with names that can be compressed. // There are no slice types with names that can be compressed.
@ -160,13 +168,15 @@ Names:
} }
k := "k1" k := "k1"
ok := "ok1" ok := "ok1"
sz := "sz1"
for i := 2; i < j; i++ { for i := 2; i < j; i++ {
k += fmt.Sprintf(" + k%d", i) k += fmt.Sprintf(" + k%d", i)
ok += fmt.Sprintf(" && ok%d", i) ok += fmt.Sprintf(" && ok%d", i)
sz += fmt.Sprintf(" + sz%d", i)
} }
fmt.Fprintf(b, "return %s, %s\n", k, ok) fmt.Fprintf(b, "return %s, %s, %s\n", k, ok, sz)
} }
fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n") fmt.Fprintln(b, "}\nreturn 0, false, 0\n}\n\n")
// gofmt // gofmt
res, err := format.Source(b.Bytes()) res, err := format.Source(b.Bytes())

vendor/github.com/miekg/dns/dns.go generated vendored (10 changed lines)

@ -55,16 +55,6 @@ func (h *RR_Header) Header() *RR_Header { return h }
// Just to implement the RR interface. // Just to implement the RR interface.
func (h *RR_Header) copy() RR { return nil } func (h *RR_Header) copy() RR { return nil }
func (h *RR_Header) copyHeader() *RR_Header {
r := new(RR_Header)
r.Name = h.Name
r.Rrtype = h.Rrtype
r.Class = h.Class
r.Ttl = h.Ttl
r.Rdlength = h.Rdlength
return r
}
func (h *RR_Header) String() string { func (h *RR_Header) String() string {
var s string var s string


@ -240,7 +240,7 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. // ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
c := &CDNSKEY{DNSKEY: *k} c := &CDNSKEY{DNSKEY: *k}
c.Hdr = *k.Hdr.copyHeader() c.Hdr = k.Hdr
c.Hdr.Rrtype = TypeCDNSKEY c.Hdr.Rrtype = TypeCDNSKEY
return c return c
} }
@ -248,7 +248,7 @@ func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
// ToCDS converts a DS record to a CDS record. // ToCDS converts a DS record to a CDS record.
func (d *DS) ToCDS() *CDS { func (d *DS) ToCDS() *CDS {
c := &CDS{DS: *d} c := &CDS{DS: *d}
c.Hdr = *d.Hdr.copyHeader() c.Hdr = d.Hdr
c.Hdr.Rrtype = TypeCDS c.Hdr.Rrtype = TypeCDS
return c return c
} }
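
Dropping copyHeader() is safe because RR_Header contains only value fields (a string plus fixed-width integers), so a plain struct assignment already produces an independent copy. A small sketch of the property this change relies on:

package main

import (
    "fmt"

    "github.com/miekg/dns"
)

func main() {
    orig := dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}

    // Assignment copies every field; there is nothing to alias.
    h := orig
    h.Rrtype = dns.TypeCDNSKEY

    fmt.Println(orig.Rrtype == dns.TypeDNSKEY) // true: the original header is untouched
}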


@ -29,6 +29,7 @@ func interestingGoroutines() (gs []string) {
strings.Contains(stack, "closeWriteAndWait") || strings.Contains(stack, "closeWriteAndWait") ||
strings.Contains(stack, "testing.Main(") || strings.Contains(stack, "testing.Main(") ||
strings.Contains(stack, "testing.(*T).Run(") || strings.Contains(stack, "testing.(*T).Run(") ||
strings.Contains(stack, "created by net/http.(*http2Transport).newClientConn") ||
// These only show up with GOTRACEBACK=2; Issue 5005 (comment 28) // These only show up with GOTRACEBACK=2; Issue 5005 (comment 28)
strings.Contains(stack, "runtime.goexit") || strings.Contains(stack, "runtime.goexit") ||
strings.Contains(stack, "created by runtime.gc") || strings.Contains(stack, "created by runtime.gc") ||


@ -4,6 +4,8 @@ import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"net" "net"
"reflect"
"strings"
"testing" "testing"
) )
@ -52,6 +54,7 @@ func TestMsgCompressLength(t *testing.T) {
func TestMsgLength(t *testing.T) { func TestMsgLength(t *testing.T) {
makeMsg := func(question string, ans, ns, e []RR) *Msg { makeMsg := func(question string, ans, ns, e []RR) *Msg {
msg := new(Msg) msg := new(Msg)
msg.Compress = true
msg.SetQuestion(Fqdn(question), TypeANY) msg.SetQuestion(Fqdn(question), TypeANY)
msg.Answer = append(msg.Answer, ans...) msg.Answer = append(msg.Answer, ans...)
msg.Ns = append(msg.Ns, ns...) msg.Ns = append(msg.Ns, ns...)
@ -79,6 +82,92 @@ func TestMsgLength(t *testing.T) {
} }
} }
func TestCompressionLenHelper(t *testing.T) {
c := make(map[string]int)
compressionLenHelper(c, "example.com", 12)
if c["example.com"] != 12 {
t.Errorf("bad %d", c["example.com"])
}
if c["com"] != 20 {
t.Errorf("bad %d", c["com"])
}
// Test boundaries
c = make(map[string]int)
// foo label starts at 16379
// com label starts at 16384
compressionLenHelper(c, "foo.com", 16379)
if c["foo.com"] != 16379 {
t.Errorf("bad %d", c["foo.com"])
}
// com label is accessible
if c["com"] != 16383 {
t.Errorf("bad %d", c["com"])
}
c = make(map[string]int)
// foo label starts at 16379
// com label starts at 16385 => outside range
compressionLenHelper(c, "foo.com", 16380)
if c["foo.com"] != 16380 {
t.Errorf("bad %d", c["foo.com"])
}
// com label is NOT accessible
if c["com"] != 0 {
t.Errorf("bad %d", c["com"])
}
c = make(map[string]int)
compressionLenHelper(c, "example.com", 16375)
if c["example.com"] != 16375 {
t.Errorf("bad %d", c["example.com"])
}
// com starts AFTER 16384
if c["com"] != 16383 {
t.Errorf("bad %d", c["com"])
}
c = make(map[string]int)
compressionLenHelper(c, "example.com", 16376)
if c["example.com"] != 16376 {
t.Errorf("bad %d", c["example.com"])
}
// com starts AFTER 16384
if c["com"] != 0 {
t.Errorf("bad %d", c["com"])
}
}
func TestCompressionLenSearch(t *testing.T) {
c := make(map[string]int)
compressed, ok, fullSize := compressionLenSearch(c, "a.b.org.")
if compressed != 0 || ok || fullSize != 14 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
c["org."] = 3
compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
if compressed != 4 || !ok || fullSize != 8 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
c["b.org."] = 5
compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
if compressed != 6 || !ok || fullSize != 4 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
// Not found long compression
c["x.b.org."] = 5
compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
if compressed != 6 || !ok || fullSize != 4 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
// Found long compression
c["a.b.org."] = 5
compressed, ok, fullSize = compressionLenSearch(c, "a.b.org.")
if compressed != 8 || !ok || fullSize != 0 {
panic(fmt.Errorf("Failed: compressed:=%d, ok:=%v, fullSize:=%d", compressed, ok, fullSize))
}
}
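
The 16383/16384 boundary exercised above follows from the wire format of DNS compression pointers, which leave 14 bits for the target offset. A quick illustrative sketch of the arithmetic (not part of the library):

package main

import "fmt"

func main() {
    // A compression pointer is two bytes whose top two bits are set; the
    // remaining 14 bits hold the offset of the name being referenced.
    const offsetBits = 14
    maxReachable := 1<<offsetBits - 1 // 16383

    fmt.Println(maxReachable)
    // A label whose first byte is at offset 16383 can still be the target of a
    // pointer; one starting at 16384 cannot, which is exactly what the tests
    // above probe with compressionLenHelper(c, "foo.com", 16379) and 16380.
}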
func TestMsgLength2(t *testing.T) { func TestMsgLength2(t *testing.T) {
// Serialized replies // Serialized replies
var testMessages = []string{ var testMessages = []string{
@ -159,7 +248,7 @@ func TestMsgCompressLengthLargeRecords(t *testing.T) {
msg.SetQuestion("my.service.acme.", TypeSRV) msg.SetQuestion("my.service.acme.", TypeSRV)
j := 1 j := 1
for i := 0; i < 250; i++ { for i := 0; i < 250; i++ {
target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", j, i) target := fmt.Sprintf("host-redis-1-%d.test.acme.com.node.dc1.consul.", i)
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target}) msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: 1, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", j, i)}) msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: 1, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", j, i)})
} }
@ -172,3 +261,111 @@ func TestMsgCompressLengthLargeRecords(t *testing.T) {
t.Fatalf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) t.Fatalf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
} }
} }
func TestCompareCompressionMapsForANY(t *testing.T) {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion("a.service.acme.", TypeANY)
// Be sure to have more than 14bits
for i := 0; i < 2000; i++ {
target := fmt.Sprintf("host.app-%d.x%d.test.acme.", i%250, i)
msg.Answer = append(msg.Answer, &AAAA{Hdr: RR_Header{Name: target, Rrtype: TypeAAAA, Class: ClassINET, Ttl: 0x3c}, AAAA: net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, byte(i / 255), byte(i % 255)}})
msg.Answer = append(msg.Answer, &A{Hdr: RR_Header{Name: target, Rrtype: TypeA, Class: ClassINET, Ttl: 0x3c}, A: net.IP{127, 0, byte(i / 255), byte(i % 255)}})
if msg.Len() > 16384 {
break
}
}
for labelSize := 0; labelSize < 63; labelSize++ {
msg.SetQuestion(fmt.Sprintf("a%s.service.acme.", strings.Repeat("x", labelSize)), TypeANY)
compressionFake := make(map[string]int)
lenFake := compressedLenWithCompressionMap(msg, compressionFake)
compressionReal := make(map[string]int)
buf, err := msg.packBufferWithCompressionMap(nil, compressionReal)
if err != nil {
t.Fatal(err)
}
if lenFake != len(buf) {
t.Fatalf("padding= %d ; Predicted len := %d != real:= %d", labelSize, lenFake, len(buf))
}
if !reflect.DeepEqual(compressionFake, compressionReal) {
t.Fatalf("padding= %d ; Fake Compression Map != Real Compression Map\n*** Real:= %v\n\n***Fake:= %v", labelSize, compressionReal, compressionFake)
}
}
}
func TestCompareCompressionMapsForSRV(t *testing.T) {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion("a.service.acme.", TypeSRV)
// Be sure to have more than 14bits
for i := 0; i < 2000; i++ {
target := fmt.Sprintf("host.app-%d.x%d.test.acme.", i%250, i)
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: ClassINET, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: target, Rrtype: TypeA, Class: ClassINET, Ttl: 0x3c}, A: net.IP{127, 0, byte(i / 255), byte(i % 255)}})
if msg.Len() > 16384 {
break
}
}
for labelSize := 0; labelSize < 63; labelSize++ {
msg.SetQuestion(fmt.Sprintf("a%s.service.acme.", strings.Repeat("x", labelSize)), TypeAAAA)
compressionFake := make(map[string]int)
lenFake := compressedLenWithCompressionMap(msg, compressionFake)
compressionReal := make(map[string]int)
buf, err := msg.packBufferWithCompressionMap(nil, compressionReal)
if err != nil {
t.Fatal(err)
}
if lenFake != len(buf) {
t.Fatalf("padding= %d ; Predicted len := %d != real:= %d", labelSize, lenFake, len(buf))
}
if !reflect.DeepEqual(compressionFake, compressionReal) {
t.Fatalf("padding= %d ; Fake Compression Map != Real Compression Map\n*** Real:= %v\n\n***Fake:= %v", labelSize, compressionReal, compressionFake)
}
}
}
func TestMsgCompressLengthLargeRecordsWithPaddingPermutation(t *testing.T) {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion("my.service.acme.", TypeSRV)
for i := 0; i < 250; i++ {
target := fmt.Sprintf("host-redis-x-%d.test.acme.com.node.dc1.consul.", i)
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: ClassINET, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.x.%d.", i)})
}
for labelSize := 1; labelSize < 63; labelSize++ {
msg.SetQuestion(fmt.Sprintf("my.%s.service.acme.", strings.Repeat("x", labelSize)), TypeSRV)
predicted := msg.Len()
buf, err := msg.Pack()
if err != nil {
t.Error(err)
}
if predicted != len(buf) {
t.Fatalf("padding= %d ; predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", labelSize, msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
}
}
}
func TestMsgCompressLengthLargeRecordsAllValues(t *testing.T) {
msg := new(Msg)
msg.Compress = true
msg.SetQuestion("redis.service.consul.", TypeSRV)
for i := 0; i < 900; i++ {
target := fmt.Sprintf("host-redis-%d-%d.test.acme.com.node.dc1.consul.", i/256, i%256)
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "redis.service.consul.", Class: 1, Rrtype: TypeSRV, Ttl: 0x3c}, Port: 0x4c57, Target: target})
msg.Extra = append(msg.Extra, &CNAME{Hdr: RR_Header{Name: target, Class: ClassINET, Rrtype: TypeCNAME, Ttl: 0x3c}, Target: fmt.Sprintf("fx.168.%d.%d.", i/256, i%256)})
predicted := msg.Len()
buf, err := msg.Pack()
if err != nil {
t.Error(err)
}
if predicted != len(buf) {
t.Fatalf("predicted compressed length is wrong for %d records: predicted %s (len=%d) %d, actual %d", i, msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
}
}
}

vendor/github.com/miekg/dns/msg.go generated vendored (123 changed lines)

@ -691,18 +691,20 @@ func (dns *Msg) Pack() (msg []byte, err error) {
return dns.PackBuffer(nil) return dns.PackBuffer(nil)
} }
// PackBuffer packs a Msg, using the given buffer buf. If buf is too small // PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
// a new buffer is allocated.
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
// We use a similar function in tsig.go's stripTsig. var compression map[string]int
var (
dh Header
compression map[string]int
)
if dns.Compress { if dns.Compress {
compression = make(map[string]int) // Compression pointer mappings compression = make(map[string]int) // Compression pointer mappings.
} }
return dns.packBufferWithCompressionMap(buf, compression)
}
// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) {
// We use a similar function in tsig.go's stripTsig.
var dh Header
if dns.Rcode < 0 || dns.Rcode > 0xFFF { if dns.Rcode < 0 || dns.Rcode > 0xFFF {
return nil, ErrRcode return nil, ErrRcode
@ -714,12 +716,11 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
return nil, ErrExtendedRcode return nil, ErrExtendedRcode
} }
opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) opt.SetExtendedRcode(uint8(dns.Rcode >> 4))
dns.Rcode &= 0xF
} }
// Convert convenient Msg into wire-like Header. // Convert convenient Msg into wire-like Header.
dh.Id = dns.Id dh.Id = dns.Id
dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
if dns.Response { if dns.Response {
dh.Bits |= _QR dh.Bits |= _QR
} }
@ -922,23 +923,27 @@ func (dns *Msg) String() string {
// than packing it, measuring the size and discarding the buffer. // than packing it, measuring the size and discarding the buffer.
func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) } func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
// compressedLen returns the message length when in compressed wire format func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int {
// when compress is true, otherwise the uncompressed length is returned.
func compressedLen(dns *Msg, compress bool) int {
// We always return one more than needed.
l := 12 // Message header is always 12 bytes l := 12 // Message header is always 12 bytes
if compress {
compression := map[string]int{}
for _, r := range dns.Question { for _, r := range dns.Question {
compressionLenHelper(compression, r.Name, l)
l += r.len() l += r.len()
compressionLenHelper(compression, r.Name)
} }
l += compressionLenSlice(l, compression, dns.Answer) l += compressionLenSlice(l, compression, dns.Answer)
l += compressionLenSlice(l, compression, dns.Ns) l += compressionLenSlice(l, compression, dns.Ns)
l += compressionLenSlice(l, compression, dns.Extra) l += compressionLenSlice(l, compression, dns.Extra)
return l return l
}
// compressedLen returns the message length when in compressed wire format
// when compress is true, otherwise the uncompressed length is returned.
func compressedLen(dns *Msg, compress bool) int {
// We always return one more than needed.
if compress {
compression := map[string]int{}
return compressedLenWithCompressionMap(dns, compression)
} }
l := 12 // Message header is always 12 bytes
for _, r := range dns.Question { for _, r := range dns.Question {
l += r.len() l += r.len()
@ -962,70 +967,94 @@ func compressedLen(dns *Msg, compress bool) int {
return l return l
} }
func compressionLenSlice(len int, c map[string]int, rs []RR) int { func compressionLenSlice(lenp int, c map[string]int, rs []RR) int {
var l int initLen := lenp
for _, r := range rs { for _, r := range rs {
if r == nil { if r == nil {
continue continue
} }
// track this length, and the global length in len, while taking compression into account for both. // tmpLen tracks the length of the record at 14-bit compression boundaries
tmpLen := lenp
x := r.len() x := r.len()
l += x // track this length, and the global length in len, while taking compression into account for both.
len += x k, ok, _ := compressionLenSearch(c, r.Header().Name)
k, ok := compressionLenSearch(c, r.Header().Name)
if ok { if ok {
l += 1 - k // Size of x is reduced by k, but we add 1 since k includes the '.' and the label descriptor takes 2 bytes
len += 1 - k // so, basically x:= x - k - 1 + 2
x += 1 - k
} }
if len < maxCompressionOffset { tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen)
compressionLenHelper(c, r.Header().Name) k, ok, _ = compressionLenSearchType(c, r)
}
k, ok = compressionLenSearchType(c, r)
if ok { if ok {
l += 1 - k x += 1 - k
len += 1 - k
} }
lenp += x
tmpLen = lenp
tmpLen += compressionLenHelperType(c, r, tmpLen)
if len < maxCompressionOffset {
compressionLenHelperType(c, r)
} }
} return lenp - initLen
return l
} }
// Put the parts of the name in the compression map. // Put the parts of the name in the compression map, return the size in bytes added in payload
func compressionLenHelper(c map[string]int, s string) { func compressionLenHelper(c map[string]int, s string, currentLen int) int {
if currentLen > maxCompressionOffset {
// We won't be able to add any label that could be re-used later anyway
return 0
}
if _, ok := c[s]; ok {
return 0
}
initLen := currentLen
pref := "" pref := ""
prev := s
lbs := Split(s) lbs := Split(s)
for j := len(lbs) - 1; j >= 0; j-- { for j := 0; j < len(lbs); j++ {
pref = s[lbs[j]:] pref = s[lbs[j]:]
currentLen += len(prev) - len(pref)
prev = pref
if _, ok := c[pref]; !ok { if _, ok := c[pref]; !ok {
c[pref] = len(pref) // If first byte label is within the first 14bits, it might be re-used later
if currentLen < maxCompressionOffset {
c[pref] = currentLen
}
} else {
added := currentLen - initLen
if j > 0 {
// We added a new PTR
added += 2
}
return added
} }
} }
return currentLen - initLen
} }
// Look for each part in the compression map and returns its length, // Look for each part in the compression map and returns its length,
// keep on searching so we get the longest match. // keep on searching so we get the longest match.
func compressionLenSearch(c map[string]int, s string) (int, bool) { // Will return the size of compression found, whether a match has been
// found and the size of record if added in payload
func compressionLenSearch(c map[string]int, s string) (int, bool, int) {
off := 0 off := 0
end := false end := false
if s == "" { // don't bork on bogus data if s == "" { // don't bork on bogus data
return 0, false return 0, false, 0
} }
fullSize := 0
for { for {
if _, ok := c[s[off:]]; ok { if _, ok := c[s[off:]]; ok {
return len(s[off:]), true return len(s[off:]), true, fullSize + off
} }
if end { if end {
break break
} }
// Each label descriptor takes 2 bytes, add it
fullSize += 2
off, end = NextLabel(s, off) off, end = NextLabel(s, off)
} }
return 0, false return 0, false, fullSize + len(s)
} }
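
The net effect of the reworked helpers is that Msg.Len() now predicts the compressed wire size closely enough to match Msg.Pack(), which is what the new tests assert. A hedged sketch of checking that property on a small message:

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    m := new(dns.Msg)
    m.Compress = true
    m.SetQuestion("redis.service.consul.", dns.TypeSRV)
    m.Answer = append(m.Answer, &dns.SRV{
        Hdr:    dns.RR_Header{Name: "redis.service.consul.", Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: 60},
        Port:   6379,
        Target: "host-1.node.dc1.consul.",
    })

    predicted := m.Len() // estimated compressed length
    packed, err := m.Pack()
    if err != nil {
        log.Fatal(err)
    }
    // With this change the two values are expected to agree for compressed
    // messages like the ones the new tests build.
    fmt.Println(predicted, len(packed))
}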
// Copy returns a new RR which is a deep-copy of r. // Copy returns a new RR which is a deep-copy of r.


@ -26,6 +26,28 @@ var (
}) })
) )
func TestPackNoSideEffect(t *testing.T) {
m := new(Msg)
m.SetQuestion(Fqdn("example.com."), TypeNS)
a := new(Msg)
o := &OPT{
Hdr: RR_Header{
Name: ".",
Rrtype: TypeOPT,
},
}
o.SetUDPSize(DefaultMsgSize)
a.Extra = append(a.Extra, o)
a.SetRcode(m, RcodeBadVers)
a.Pack()
if a.Rcode != RcodeBadVers {
t.Errorf("after pack: Rcode is expected to be BADVERS")
}
}
func TestUnpackDomainName(t *testing.T) { func TestUnpackDomainName(t *testing.T) {
var cases = []struct { var cases = []struct {
label string label string


@ -131,3 +131,11 @@ func TestNsec3(t *testing.T) {
} }
} }
} }
func TestNsec3EmptySalt(t *testing.T) {
rr, _ := NewRR("CK0POJMG874LJREF7EFN8430QVIT8BSM.com. 86400 IN NSEC3 1 1 0 - CK0Q1GIN43N1ARRC9OSM6QPQR81H5M9A NS SOA RRSIG DNSKEY NSEC3PARAM")
if !rr.(*NSEC3).Match("com.") {
t.Fatalf("expected record to match com. label")
}
}


@ -56,8 +56,7 @@ func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() }
func (r *PrivateRR) copy() RR { func (r *PrivateRR) copy() RR {
// make new RR like this: // make new RR like this:
rr := mkPrivateRR(r.Hdr.Rrtype) rr := mkPrivateRR(r.Hdr.Rrtype)
newh := r.Hdr.copyHeader() rr.Hdr = r.Hdr
rr.Hdr = *newh
err := r.Data.Copy(rr.Data) err := r.Data.Copy(rr.Data)
if err != nil { if err != nil {


@ -1255,8 +1255,10 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
if len(l.token) == 0 || l.err { if len(l.token) == 0 || l.err {
return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" return nil, &ParseError{f, "bad NSEC3 Salt", l}, ""
} }
if l.token != "-" {
rr.SaltLength = uint8(len(l.token)) / 2 rr.SaltLength = uint8(len(l.token)) / 2
rr.Salt = l.token rr.Salt = l.token
}
<-c <-c
l = <-c l = <-c
@ -1321,8 +1323,10 @@ func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin
rr.Iterations = uint16(i) rr.Iterations = uint16(i)
<-c <-c
l = <-c l = <-c
if l.token != "-" {
rr.SaltLength = uint8(len(l.token)) rr.SaltLength = uint8(len(l.token))
rr.Salt = l.token rr.Salt = l.token
}
return rr, nil, "" return rr, nil, ""
} }
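
The parser change above treats the "-" presentation form as an empty salt instead of storing the literal dash. A small sketch of the resulting behaviour, assuming this vendored version:

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    // "-" in the salt field means "no salt" in presentation format.
    rr, err := dns.NewRR("example.com. 3600 IN NSEC3PARAM 1 0 5 -")
    if err != nil {
        log.Fatal(err)
    }
    p := rr.(*dns.NSEC3PARAM)
    fmt.Println(p.SaltLength, p.Salt == "") // 0 true
}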


@ -13,7 +13,7 @@ import (
"time" "time"
) )
// Maximum number of TCP queries before we close the socket. // Default maximum number of TCP queries before we close the socket.
const maxTCPQueries = 128 const maxTCPQueries = 128
// Interval for stop worker if no load // Interval for stop worker if no load
@ -303,6 +303,8 @@ type Server struct {
DecorateReader DecorateReader DecorateReader DecorateReader
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages. // DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
DecorateWriter DecorateWriter DecorateWriter DecorateWriter
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
MaxTCPQueries int
// UDP packet or TCP connection queue // UDP packet or TCP connection queue
queue chan *response queue chan *response
@ -593,8 +595,12 @@ func (srv *Server) serve(w *response) {
timeout := srv.getReadTimeout() timeout := srv.getReadTimeout()
// TODO(miek): make maxTCPQueries configurable? limit := srv.MaxTCPQueries
for q := 0; q < maxTCPQueries; q++ { if limit == 0 {
limit = maxTCPQueries
}
for q := 0; q < limit || limit == -1; q++ {
var err error var err error
w.msg, err = reader.ReadTCP(w.tcp, timeout) w.msg, err = reader.ReadTCP(w.tcp, timeout)
if err != nil { if err != nil {
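
A minimal sketch of the new knob: leaving MaxTCPQueries at zero keeps the previous hard-coded limit of 128 queries per TCP connection, while -1 removes the limit.

package main

import (
    "log"

    "github.com/miekg/dns"
)

func main() {
    srv := &dns.Server{
        Addr:          "127.0.0.1:8053",
        Net:           "tcp",
        MaxTCPQueries: -1, // serve an unlimited number of queries per TCP connection
    }
    log.Fatal(srv.ListenAndServe())
}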


@ -226,7 +226,7 @@ func main() {
continue continue
} }
fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name)
fields := []string{"*rr.Hdr.copyHeader()"} fields := []string{"rr.Hdr"}
for i := 1; i < st.NumFields(); i++ { for i := 1; i < st.NumFields(); i++ {
f := st.Field(i).Name() f := st.Field(i).Name()
if sl, ok := st.Field(i).Type().(*types.Slice); ok { if sl, ok := st.Field(i).Type().(*types.Slice); ok {


@ -3,7 +3,7 @@ package dns
import "fmt" import "fmt"
// Version is current version of this library. // Version is current version of this library.
var Version = V{1, 0, 6} var Version = V{1, 0, 7}
// V holds the version of this library. // V holds the version of this library.
type V struct { type V struct {


@ -2,117 +2,154 @@
package dns package dns
func compressionLenHelperType(c map[string]int, r RR) { func compressionLenHelperType(c map[string]int, r RR, initLen int) int {
currentLen := initLen
switch x := r.(type) { switch x := r.(type) {
case *AFSDB: case *AFSDB:
compressionLenHelper(c, x.Hostname) currentLen -= len(x.Hostname) + 1
currentLen += compressionLenHelper(c, x.Hostname, currentLen)
case *CNAME: case *CNAME:
compressionLenHelper(c, x.Target) currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *DNAME: case *DNAME:
compressionLenHelper(c, x.Target) currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *HIP: case *HIP:
for i := range x.RendezvousServers { for i := range x.RendezvousServers {
compressionLenHelper(c, x.RendezvousServers[i]) currentLen -= len(x.RendezvousServers[i]) + 1
}
for i := range x.RendezvousServers {
currentLen += compressionLenHelper(c, x.RendezvousServers[i], currentLen)
} }
case *KX: case *KX:
compressionLenHelper(c, x.Exchanger) currentLen -= len(x.Exchanger) + 1
currentLen += compressionLenHelper(c, x.Exchanger, currentLen)
case *LP: case *LP:
compressionLenHelper(c, x.Fqdn) currentLen -= len(x.Fqdn) + 1
currentLen += compressionLenHelper(c, x.Fqdn, currentLen)
case *MB: case *MB:
compressionLenHelper(c, x.Mb) currentLen -= len(x.Mb) + 1
currentLen += compressionLenHelper(c, x.Mb, currentLen)
case *MD: case *MD:
compressionLenHelper(c, x.Md) currentLen -= len(x.Md) + 1
currentLen += compressionLenHelper(c, x.Md, currentLen)
case *MF: case *MF:
compressionLenHelper(c, x.Mf) currentLen -= len(x.Mf) + 1
currentLen += compressionLenHelper(c, x.Mf, currentLen)
case *MG: case *MG:
compressionLenHelper(c, x.Mg) currentLen -= len(x.Mg) + 1
currentLen += compressionLenHelper(c, x.Mg, currentLen)
case *MINFO: case *MINFO:
compressionLenHelper(c, x.Rmail) currentLen -= len(x.Rmail) + 1
compressionLenHelper(c, x.Email) currentLen += compressionLenHelper(c, x.Rmail, currentLen)
currentLen -= len(x.Email) + 1
currentLen += compressionLenHelper(c, x.Email, currentLen)
case *MR: case *MR:
compressionLenHelper(c, x.Mr) currentLen -= len(x.Mr) + 1
currentLen += compressionLenHelper(c, x.Mr, currentLen)
case *MX: case *MX:
compressionLenHelper(c, x.Mx) currentLen -= len(x.Mx) + 1
currentLen += compressionLenHelper(c, x.Mx, currentLen)
case *NAPTR: case *NAPTR:
compressionLenHelper(c, x.Replacement) currentLen -= len(x.Replacement) + 1
currentLen += compressionLenHelper(c, x.Replacement, currentLen)
case *NS: case *NS:
compressionLenHelper(c, x.Ns) currentLen -= len(x.Ns) + 1
currentLen += compressionLenHelper(c, x.Ns, currentLen)
case *NSAPPTR: case *NSAPPTR:
compressionLenHelper(c, x.Ptr) currentLen -= len(x.Ptr) + 1
currentLen += compressionLenHelper(c, x.Ptr, currentLen)
case *NSEC: case *NSEC:
compressionLenHelper(c, x.NextDomain) currentLen -= len(x.NextDomain) + 1
currentLen += compressionLenHelper(c, x.NextDomain, currentLen)
case *PTR: case *PTR:
compressionLenHelper(c, x.Ptr) currentLen -= len(x.Ptr) + 1
currentLen += compressionLenHelper(c, x.Ptr, currentLen)
case *PX: case *PX:
compressionLenHelper(c, x.Map822) currentLen -= len(x.Map822) + 1
compressionLenHelper(c, x.Mapx400) currentLen += compressionLenHelper(c, x.Map822, currentLen)
currentLen -= len(x.Mapx400) + 1
currentLen += compressionLenHelper(c, x.Mapx400, currentLen)
case *RP: case *RP:
compressionLenHelper(c, x.Mbox) currentLen -= len(x.Mbox) + 1
compressionLenHelper(c, x.Txt) currentLen += compressionLenHelper(c, x.Mbox, currentLen)
currentLen -= len(x.Txt) + 1
currentLen += compressionLenHelper(c, x.Txt, currentLen)
case *RRSIG: case *RRSIG:
compressionLenHelper(c, x.SignerName) currentLen -= len(x.SignerName) + 1
currentLen += compressionLenHelper(c, x.SignerName, currentLen)
case *RT: case *RT:
compressionLenHelper(c, x.Host) currentLen -= len(x.Host) + 1
currentLen += compressionLenHelper(c, x.Host, currentLen)
case *SIG: case *SIG:
compressionLenHelper(c, x.SignerName) currentLen -= len(x.SignerName) + 1
currentLen += compressionLenHelper(c, x.SignerName, currentLen)
case *SOA: case *SOA:
compressionLenHelper(c, x.Ns) currentLen -= len(x.Ns) + 1
compressionLenHelper(c, x.Mbox) currentLen += compressionLenHelper(c, x.Ns, currentLen)
currentLen -= len(x.Mbox) + 1
currentLen += compressionLenHelper(c, x.Mbox, currentLen)
case *SRV: case *SRV:
compressionLenHelper(c, x.Target) currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *TALINK: case *TALINK:
compressionLenHelper(c, x.PreviousName) currentLen -= len(x.PreviousName) + 1
compressionLenHelper(c, x.NextName) currentLen += compressionLenHelper(c, x.PreviousName, currentLen)
currentLen -= len(x.NextName) + 1
currentLen += compressionLenHelper(c, x.NextName, currentLen)
case *TKEY: case *TKEY:
compressionLenHelper(c, x.Algorithm) currentLen -= len(x.Algorithm) + 1
currentLen += compressionLenHelper(c, x.Algorithm, currentLen)
case *TSIG: case *TSIG:
compressionLenHelper(c, x.Algorithm) currentLen -= len(x.Algorithm) + 1
currentLen += compressionLenHelper(c, x.Algorithm, currentLen)
} }
return currentLen - initLen
} }
func compressionLenSearchType(c map[string]int, r RR) (int, bool) { func compressionLenSearchType(c map[string]int, r RR) (int, bool, int) {
switch x := r.(type) { switch x := r.(type) {
case *AFSDB: case *AFSDB:
k1, ok1 := compressionLenSearch(c, x.Hostname) k1, ok1, sz1 := compressionLenSearch(c, x.Hostname)
return k1, ok1 return k1, ok1, sz1
case *CNAME: case *CNAME:
k1, ok1 := compressionLenSearch(c, x.Target) k1, ok1, sz1 := compressionLenSearch(c, x.Target)
return k1, ok1 return k1, ok1, sz1
case *MB: case *MB:
k1, ok1 := compressionLenSearch(c, x.Mb) k1, ok1, sz1 := compressionLenSearch(c, x.Mb)
return k1, ok1 return k1, ok1, sz1
case *MD: case *MD:
k1, ok1 := compressionLenSearch(c, x.Md) k1, ok1, sz1 := compressionLenSearch(c, x.Md)
return k1, ok1 return k1, ok1, sz1
case *MF: case *MF:
k1, ok1 := compressionLenSearch(c, x.Mf) k1, ok1, sz1 := compressionLenSearch(c, x.Mf)
return k1, ok1 return k1, ok1, sz1
case *MG: case *MG:
k1, ok1 := compressionLenSearch(c, x.Mg) k1, ok1, sz1 := compressionLenSearch(c, x.Mg)
return k1, ok1 return k1, ok1, sz1
case *MINFO: case *MINFO:
k1, ok1 := compressionLenSearch(c, x.Rmail) k1, ok1, sz1 := compressionLenSearch(c, x.Rmail)
k2, ok2 := compressionLenSearch(c, x.Email) k2, ok2, sz2 := compressionLenSearch(c, x.Email)
return k1 + k2, ok1 && ok2 return k1 + k2, ok1 && ok2, sz1 + sz2
case *MR: case *MR:
k1, ok1 := compressionLenSearch(c, x.Mr) k1, ok1, sz1 := compressionLenSearch(c, x.Mr)
return k1, ok1 return k1, ok1, sz1
case *MX: case *MX:
k1, ok1 := compressionLenSearch(c, x.Mx) k1, ok1, sz1 := compressionLenSearch(c, x.Mx)
return k1, ok1 return k1, ok1, sz1
case *NS: case *NS:
k1, ok1 := compressionLenSearch(c, x.Ns) k1, ok1, sz1 := compressionLenSearch(c, x.Ns)
return k1, ok1 return k1, ok1, sz1
case *PTR: case *PTR:
k1, ok1 := compressionLenSearch(c, x.Ptr) k1, ok1, sz1 := compressionLenSearch(c, x.Ptr)
return k1, ok1 return k1, ok1, sz1
case *RT: case *RT:
k1, ok1 := compressionLenSearch(c, x.Host) k1, ok1, sz1 := compressionLenSearch(c, x.Host)
return k1, ok1 return k1, ok1, sz1
case *SOA: case *SOA:
k1, ok1 := compressionLenSearch(c, x.Ns) k1, ok1, sz1 := compressionLenSearch(c, x.Ns)
k2, ok2 := compressionLenSearch(c, x.Mbox) k2, ok2, sz2 := compressionLenSearch(c, x.Mbox)
return k1 + k2, ok1 && ok2 return k1 + k2, ok1 && ok2, sz1 + sz2
} }
return 0, false return 0, false, 0
} }

vendor/github.com/miekg/dns/ztypes.go generated vendored (130 changed lines)

@ -649,215 +649,215 @@ func (rr *X25) len() int {
// copy() functions // copy() functions
func (rr *A) copy() RR { func (rr *A) copy() RR {
return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} return &A{rr.Hdr, copyIP(rr.A)}
} }
func (rr *AAAA) copy() RR { func (rr *AAAA) copy() RR {
return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} return &AAAA{rr.Hdr, copyIP(rr.AAAA)}
} }
func (rr *AFSDB) copy() RR { func (rr *AFSDB) copy() RR {
return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname}
} }
func (rr *ANY) copy() RR { func (rr *ANY) copy() RR {
return &ANY{*rr.Hdr.copyHeader()} return &ANY{rr.Hdr}
} }
func (rr *AVC) copy() RR { func (rr *AVC) copy() RR {
Txt := make([]string, len(rr.Txt)) Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt) copy(Txt, rr.Txt)
return &AVC{*rr.Hdr.copyHeader(), Txt} return &AVC{rr.Hdr, Txt}
} }
func (rr *CAA) copy() RR { func (rr *CAA) copy() RR {
return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
} }
func (rr *CERT) copy() RR { func (rr *CERT) copy() RR {
return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
} }
func (rr *CNAME) copy() RR { func (rr *CNAME) copy() RR {
return &CNAME{*rr.Hdr.copyHeader(), rr.Target} return &CNAME{rr.Hdr, rr.Target}
} }
func (rr *CSYNC) copy() RR { func (rr *CSYNC) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap)) TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap) copy(TypeBitMap, rr.TypeBitMap)
return &CSYNC{*rr.Hdr.copyHeader(), rr.Serial, rr.Flags, TypeBitMap} return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap}
} }
func (rr *DHCID) copy() RR { func (rr *DHCID) copy() RR {
return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} return &DHCID{rr.Hdr, rr.Digest}
} }
func (rr *DNAME) copy() RR { func (rr *DNAME) copy() RR {
return &DNAME{*rr.Hdr.copyHeader(), rr.Target} return &DNAME{rr.Hdr, rr.Target}
} }
func (rr *DNSKEY) copy() RR { func (rr *DNSKEY) copy() RR {
return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
} }
func (rr *DS) copy() RR { func (rr *DS) copy() RR {
return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
} }
func (rr *EID) copy() RR { func (rr *EID) copy() RR {
return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} return &EID{rr.Hdr, rr.Endpoint}
} }
func (rr *EUI48) copy() RR { func (rr *EUI48) copy() RR {
return &EUI48{*rr.Hdr.copyHeader(), rr.Address} return &EUI48{rr.Hdr, rr.Address}
} }
func (rr *EUI64) copy() RR { func (rr *EUI64) copy() RR {
return &EUI64{*rr.Hdr.copyHeader(), rr.Address} return &EUI64{rr.Hdr, rr.Address}
} }
func (rr *GID) copy() RR { func (rr *GID) copy() RR {
return &GID{*rr.Hdr.copyHeader(), rr.Gid} return &GID{rr.Hdr, rr.Gid}
} }
func (rr *GPOS) copy() RR { func (rr *GPOS) copy() RR {
return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude}
} }
func (rr *HINFO) copy() RR { func (rr *HINFO) copy() RR {
return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} return &HINFO{rr.Hdr, rr.Cpu, rr.Os}
} }
func (rr *HIP) copy() RR { func (rr *HIP) copy() RR {
RendezvousServers := make([]string, len(rr.RendezvousServers)) RendezvousServers := make([]string, len(rr.RendezvousServers))
copy(RendezvousServers, rr.RendezvousServers) copy(RendezvousServers, rr.RendezvousServers)
return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
} }
func (rr *KX) copy() RR { func (rr *KX) copy() RR {
return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
} }
func (rr *L32) copy() RR { func (rr *L32) copy() RR {
return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)}
} }
func (rr *L64) copy() RR { func (rr *L64) copy() RR {
return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} return &L64{rr.Hdr, rr.Preference, rr.Locator64}
} }
func (rr *LOC) copy() RR { func (rr *LOC) copy() RR {
return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
} }
func (rr *LP) copy() RR { func (rr *LP) copy() RR {
return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} return &LP{rr.Hdr, rr.Preference, rr.Fqdn}
} }
func (rr *MB) copy() RR { func (rr *MB) copy() RR {
return &MB{*rr.Hdr.copyHeader(), rr.Mb} return &MB{rr.Hdr, rr.Mb}
} }
func (rr *MD) copy() RR { func (rr *MD) copy() RR {
return &MD{*rr.Hdr.copyHeader(), rr.Md} return &MD{rr.Hdr, rr.Md}
} }
func (rr *MF) copy() RR { func (rr *MF) copy() RR {
return &MF{*rr.Hdr.copyHeader(), rr.Mf} return &MF{rr.Hdr, rr.Mf}
} }
func (rr *MG) copy() RR { func (rr *MG) copy() RR {
return &MG{*rr.Hdr.copyHeader(), rr.Mg} return &MG{rr.Hdr, rr.Mg}
} }
func (rr *MINFO) copy() RR { func (rr *MINFO) copy() RR {
return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} return &MINFO{rr.Hdr, rr.Rmail, rr.Email}
} }
func (rr *MR) copy() RR { func (rr *MR) copy() RR {
return &MR{*rr.Hdr.copyHeader(), rr.Mr} return &MR{rr.Hdr, rr.Mr}
} }
func (rr *MX) copy() RR { func (rr *MX) copy() RR {
return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} return &MX{rr.Hdr, rr.Preference, rr.Mx}
} }
func (rr *NAPTR) copy() RR { func (rr *NAPTR) copy() RR {
return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
} }
func (rr *NID) copy() RR { func (rr *NID) copy() RR {
return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} return &NID{rr.Hdr, rr.Preference, rr.NodeID}
} }
func (rr *NIMLOC) copy() RR { func (rr *NIMLOC) copy() RR {
return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} return &NIMLOC{rr.Hdr, rr.Locator}
} }
func (rr *NINFO) copy() RR { func (rr *NINFO) copy() RR {
ZSData := make([]string, len(rr.ZSData)) ZSData := make([]string, len(rr.ZSData))
copy(ZSData, rr.ZSData) copy(ZSData, rr.ZSData)
return &NINFO{*rr.Hdr.copyHeader(), ZSData} return &NINFO{rr.Hdr, ZSData}
} }
func (rr *NS) copy() RR { func (rr *NS) copy() RR {
return &NS{*rr.Hdr.copyHeader(), rr.Ns} return &NS{rr.Hdr, rr.Ns}
} }
func (rr *NSAPPTR) copy() RR { func (rr *NSAPPTR) copy() RR {
return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} return &NSAPPTR{rr.Hdr, rr.Ptr}
} }
func (rr *NSEC) copy() RR { func (rr *NSEC) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap)) TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap) copy(TypeBitMap, rr.TypeBitMap)
return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap}
} }
func (rr *NSEC3) copy() RR { func (rr *NSEC3) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap)) TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap) copy(TypeBitMap, rr.TypeBitMap)
return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
} }
func (rr *NSEC3PARAM) copy() RR { func (rr *NSEC3PARAM) copy() RR {
return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
} }
func (rr *OPENPGPKEY) copy() RR { func (rr *OPENPGPKEY) copy() RR {
return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} return &OPENPGPKEY{rr.Hdr, rr.PublicKey}
} }
func (rr *OPT) copy() RR { func (rr *OPT) copy() RR {
Option := make([]EDNS0, len(rr.Option)) Option := make([]EDNS0, len(rr.Option))
copy(Option, rr.Option) copy(Option, rr.Option)
return &OPT{*rr.Hdr.copyHeader(), Option} return &OPT{rr.Hdr, Option}
} }
func (rr *PTR) copy() RR { func (rr *PTR) copy() RR {
return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} return &PTR{rr.Hdr, rr.Ptr}
} }
func (rr *PX) copy() RR { func (rr *PX) copy() RR {
return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400}
} }
func (rr *RFC3597) copy() RR { func (rr *RFC3597) copy() RR {
return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} return &RFC3597{rr.Hdr, rr.Rdata}
} }
func (rr *RKEY) copy() RR { func (rr *RKEY) copy() RR {
return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
} }
func (rr *RP) copy() RR { func (rr *RP) copy() RR {
return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} return &RP{rr.Hdr, rr.Mbox, rr.Txt}
} }
func (rr *RRSIG) copy() RR { func (rr *RRSIG) copy() RR {
return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
} }
func (rr *RT) copy() RR { func (rr *RT) copy() RR {
return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} return &RT{rr.Hdr, rr.Preference, rr.Host}
} }
func (rr *SMIMEA) copy() RR { func (rr *SMIMEA) copy() RR {
return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
} }
func (rr *SOA) copy() RR { func (rr *SOA) copy() RR {
return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
} }
func (rr *SPF) copy() RR { func (rr *SPF) copy() RR {
Txt := make([]string, len(rr.Txt)) Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt) copy(Txt, rr.Txt)
return &SPF{*rr.Hdr.copyHeader(), Txt} return &SPF{rr.Hdr, Txt}
} }
func (rr *SRV) copy() RR { func (rr *SRV) copy() RR {
return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target}
} }
func (rr *SSHFP) copy() RR { func (rr *SSHFP) copy() RR {
return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint}
} }
func (rr *TA) copy() RR { func (rr *TA) copy() RR {
return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
} }
func (rr *TALINK) copy() RR { func (rr *TALINK) copy() RR {
return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName}
} }
func (rr *TKEY) copy() RR { func (rr *TKEY) copy() RR {
return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
} }
func (rr *TLSA) copy() RR { func (rr *TLSA) copy() RR {
return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
} }
func (rr *TSIG) copy() RR { func (rr *TSIG) copy() RR {
return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
} }
func (rr *TXT) copy() RR { func (rr *TXT) copy() RR {
Txt := make([]string, len(rr.Txt)) Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt) copy(Txt, rr.Txt)
return &TXT{*rr.Hdr.copyHeader(), Txt} return &TXT{rr.Hdr, Txt}
} }
func (rr *UID) copy() RR { func (rr *UID) copy() RR {
return &UID{*rr.Hdr.copyHeader(), rr.Uid} return &UID{rr.Hdr, rr.Uid}
} }
func (rr *UINFO) copy() RR { func (rr *UINFO) copy() RR {
return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} return &UINFO{rr.Hdr, rr.Uinfo}
} }
func (rr *URI) copy() RR { func (rr *URI) copy() RR {
return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target}
} }
func (rr *X25) copy() RR { func (rr *X25) copy() RR {
return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} return &X25{rr.Hdr, rr.PSDNAddress}
} }


@ -5,7 +5,6 @@
package poly1305 package poly1305
import ( import (
"bytes"
"encoding/hex" "encoding/hex"
"flag" "flag"
"testing" "testing"
@ -14,80 +13,51 @@ import (
var stressFlag = flag.Bool("stress", false, "run slow stress tests") var stressFlag = flag.Bool("stress", false, "run slow stress tests")
var testData = []struct { type test struct {
in, k, correct []byte in string
}{ key string
{ tag string
[]byte("Hello world!"),
[]byte("this is 32-byte key for Poly1305"),
[]byte{0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0},
},
{
make([]byte, 32),
[]byte("this is 32-byte key for Poly1305"),
[]byte{0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07},
},
{
make([]byte, 2007),
[]byte("this is 32-byte key for Poly1305"),
[]byte{0xda, 0x84, 0xbc, 0xab, 0x02, 0x67, 0x6c, 0x38, 0xcd, 0xb0, 0x15, 0x60, 0x42, 0x74, 0xc2, 0xaa},
},
{
make([]byte, 2007),
make([]byte, 32),
make([]byte, 16),
},
{
// This test triggers an edge-case. See https://go-review.googlesource.com/#/c/30101/.
[]byte{0x81, 0xd8, 0xb2, 0xe4, 0x6a, 0x25, 0x21, 0x3b, 0x58, 0xfe, 0xe4, 0x21, 0x3a, 0x2a, 0x28, 0xe9, 0x21, 0xc1, 0x2a, 0x96, 0x32, 0x51, 0x6d, 0x3b, 0x73, 0x27, 0x27, 0x27, 0xbe, 0xcf, 0x21, 0x29},
[]byte{0x3b, 0x3a, 0x29, 0xe9, 0x3b, 0x21, 0x3a, 0x5c, 0x5c, 0x3b, 0x3b, 0x05, 0x3a, 0x3a, 0x8c, 0x0d},
[]byte{0x6d, 0xc1, 0x8b, 0x8c, 0x34, 0x4c, 0xd7, 0x99, 0x27, 0x11, 0x8b, 0xbe, 0x84, 0xb7, 0xf3, 0x14},
},
{
// This test generates a result of (2^130-1) % (2^130-5).
[]byte{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
[]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
[]byte{4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
},
{
// This test generates a result of (2^130-6) % (2^130-5).
[]byte{
0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
[]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
[]byte{0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
},
{
// This test generates a result of (2^130-5) % (2^130-5).
[]byte{
0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
[]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
},
} }
func testSum(t *testing.T, unaligned bool) { func (t *test) Input() []byte {
var out [16]byte in, err := hex.DecodeString(t.in)
var key [32]byte if err != nil {
panic(err)
}
return in
}
func (t *test) Key() [32]byte {
buf, err := hex.DecodeString(t.key)
if err != nil {
panic(err)
}
var key [32]byte
copy(key[:], buf[:32])
return key
}
func (t *test) Tag() [16]byte {
buf, err := hex.DecodeString(t.tag)
if err != nil {
panic(err)
}
var tag [16]byte
copy(tag[:], buf[:16])
return tag
}
func testSum(t *testing.T, unaligned bool, sumImpl func(tag *[TagSize]byte, msg []byte, key *[32]byte)) {
var tag [16]byte
for i, v := range testData { for i, v := range testData {
in := v.in in := v.Input()
if unaligned { if unaligned {
in = unalignBytes(in) in = unalignBytes(in)
} }
copy(key[:], v.k) key := v.Key()
Sum(&out, in, &key) sumImpl(&tag, in, &key)
if !bytes.Equal(out[:], v.correct) { if tag != v.Tag() {
t.Errorf("%d: expected %x, got %x", i, v.correct, out[:]) t.Errorf("%d: expected %x, got %x", i, v.Tag(), tag[:])
} }
} }
} }
@ -125,8 +95,10 @@ func TestBurnin(t *testing.T) {
} }
} }
func TestSum(t *testing.T) { testSum(t, false) } func TestSum(t *testing.T) { testSum(t, false, Sum) }
func TestSumUnaligned(t *testing.T) { testSum(t, true) } func TestSumUnaligned(t *testing.T) { testSum(t, true, Sum) }
func TestSumGeneric(t *testing.T) { testSum(t, false, sumGeneric) }
func TestSumGenericUnaligned(t *testing.T) { testSum(t, true, sumGeneric) }
func benchmark(b *testing.B, size int, unaligned bool) { func benchmark(b *testing.B, size int, unaligned bool) {
var out [16]byte var out [16]byte
@ -146,6 +118,7 @@ func Benchmark64(b *testing.B) { benchmark(b, 64, false) }
func Benchmark1K(b *testing.B) { benchmark(b, 1024, false) } func Benchmark1K(b *testing.B) { benchmark(b, 1024, false) }
func Benchmark64Unaligned(b *testing.B) { benchmark(b, 64, true) } func Benchmark64Unaligned(b *testing.B) { benchmark(b, 64, true) }
func Benchmark1KUnaligned(b *testing.B) { benchmark(b, 1024, true) } func Benchmark1KUnaligned(b *testing.B) { benchmark(b, 1024, true) }
func Benchmark2M(b *testing.B) { benchmark(b, 2097152, true) }
func unalignBytes(in []byte) []byte { func unalignBytes(in []byte) []byte {
out := make([]byte, len(in)+1) out := make([]byte, len(in)+1)


@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build !amd64,!arm gccgo appengine nacl
package poly1305 package poly1305
import "encoding/binary" import "encoding/binary"
// Sum generates an authenticator for msg using a one-time key and puts the // sumGeneric generates an authenticator for msg using a one-time key and
// 16-byte result into out. Authenticating two different messages with the same // puts the 16-byte result into out. This is the generic implementation of
// key allows an attacker to forge messages at will. // Sum and should be called if no assembly implementation is available.
func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
var ( var (
h0, h1, h2, h3, h4 uint32 // the hash accumulators h0, h1, h2, h3, h4 uint32 // the hash accumulators
r0, r1, r2, r3, r4 uint64 // the r part of the key r0, r1, r2, r3, r4 uint64 // the r part of the key

View file

@ -98,7 +98,7 @@ func (h *Header) Marshal() ([]byte, error) {
return b, nil return b, nil
} }
// Parse parses b as an IPv4 header and sotres the result in h. // Parse parses b as an IPv4 header and stores the result in h.
func (h *Header) Parse(b []byte) error { func (h *Header) Parse(b []byte) error {
if h == nil || len(b) < HeaderLen { if h == nil || len(b) < HeaderLen {
return errHeaderTooShort return errHeaderTooShort
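
A small round-trip sketch of the Marshal/Parse pair whose doc comment is fixed above; the addresses are documentation examples:

package main

import (
    "fmt"
    "log"
    "net"

    "golang.org/x/net/ipv4"
)

func main() {
    h := &ipv4.Header{
        Version:  ipv4.Version,
        Len:      ipv4.HeaderLen,
        TotalLen: ipv4.HeaderLen,
        TTL:      64,
        Protocol: 17, // UDP
        Src:      net.IPv4(192, 0, 2, 1),
        Dst:      net.IPv4(192, 0, 2, 2),
    }
    b, err := h.Marshal()
    if err != nil {
        log.Fatal(err)
    }

    var parsed ipv4.Header
    if err := parsed.Parse(b); err != nil { // Parse stores the result in parsed
        log.Fatal(err)
    }
    fmt.Println(parsed.TTL, parsed.Protocol, parsed.Dst)
}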