Update toml dependency

This commit is contained in:
Frank Denis 2021-08-08 10:21:25 +02:00
parent 1052fa6323
commit c9d5d81e6a
43 changed files with 1455 additions and 825 deletions

6
go.mod
View file

@ -3,7 +3,7 @@ module github.com/dnscrypt/dnscrypt-proxy
go 1.16 go 1.16
require ( require (
github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml v0.4.1
github.com/VividCortex/ewma v1.2.0 github.com/VividCortex/ewma v1.2.0
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185 github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
@ -21,7 +21,7 @@ require (
github.com/miekg/dns v1.1.43 github.com/miekg/dns v1.1.43
github.com/powerman/check v1.5.0 github.com/powerman/check v1.5.0
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069
gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0
) )

11
go.sum
View file

@ -36,8 +36,9 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
@ -798,8 +799,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 h1:4CSI6oo7cOjJKajidEljs9h+uP0rRZBPPPhcCbj5mw8= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -876,8 +877,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View file

@ -1,5 +1,2 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test toml.test
/toml-test

View file

@ -1,15 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

View file

@ -1,3 +1 @@
Compatible with TOML version Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)

View file

@ -1,19 +0,0 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

View file

@ -6,27 +6,22 @@ packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data `encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.) representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Compatible with TOML version Documentation: https://godocs.io/github.com/BurntSushi/toml
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
Installation: This library requires Go 1.13 or newer; install it with:
```bash $ go get github.com/BurntSushi/toml
go get github.com/BurntSushi/toml
```
Try the toml validator: It also comes with a TOML validator CLI tool:
```bash $ go get github.com/BurntSushi/toml/cmd/tomlv
go get github.com/BurntSushi/toml/cmd/tomlv $ tomlv some-toml-file.toml
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing ### Testing
@ -36,8 +31,8 @@ and the encoder.
### Examples ### Examples
This package works similarly to how the Go standard library handles `XML` This package works similarly to how the Go standard library handles XML and
and `JSON`. Namely, data is loaded into Go values via reflection. JSON. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys For the simplest example, consider some TOML file as just a list of keys
and values: and values:
@ -54,11 +49,11 @@ Which could be defined in Go as:
```go ```go
type Config struct { type Config struct {
Age int Age int
Cats []string Cats []string
Pi float64 Pi float64
Perfection []int Perfection []int
DOB time.Time // requires `import time` DOB time.Time // requires `import time`
} }
``` ```
@ -84,6 +79,9 @@ type TOML struct {
} }
``` ```
Beware that like other most other decoders **only exported fields** are
considered when encoding and decoding; private fields are silently ignored.
### Using the `encoding.TextUnmarshaler` interface ### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into Here's an example that automatically parses duration strings into
@ -103,19 +101,19 @@ Which can be decoded with:
```go ```go
type song struct { type song struct {
Name string Name string
Duration duration Duration duration
} }
type songs struct { type songs struct {
Song []song Song []song
} }
var favorites songs var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil { if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err) log.Fatal(err)
} }
for _, s := range favorites.Song { for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration) fmt.Printf("%s (%s)\n", s.Name, s.Duration)
} }
``` ```
@ -134,6 +132,9 @@ func (d *duration) UnmarshalText(text []byte) error {
} }
``` ```
To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.
### More complex usage ### More complex usage
Here's an example of how to load the example from the official spec page: Here's an example of how to load the example from the official spec page:
@ -180,23 +181,23 @@ And the corresponding Go types are:
```go ```go
type tomlConfig struct { type tomlConfig struct {
Title string Title string
Owner ownerInfo Owner ownerInfo
DB database `toml:"database"` DB database `toml:"database"`
Servers map[string]server Servers map[string]server
Clients clients Clients clients
} }
type ownerInfo struct { type ownerInfo struct {
Name string Name string
Org string `toml:"organization"` Org string `toml:"organization"`
Bio string Bio string
DOB time.Time DOB time.Time
} }
type database struct { type database struct {
Server string Server string
Ports []int Ports []int
ConnMax int `toml:"connection_max"` ConnMax int `toml:"connection_max"`
Enabled bool Enabled bool
} }
@ -207,7 +208,7 @@ type server struct {
} }
type clients struct { type clients struct {
Data [][]interface{} Data [][]interface{}
Hosts []string Hosts []string
} }
``` ```
@ -216,3 +217,4 @@ Note that a case insensitive match will be tried if an exact match can't be
found. found.
A working example of the above can be found in `_examples/example.{go,toml}`. A working example of the above can be found in `_examples/example.{go,toml}`.

View file

@ -1,19 +1,17 @@
package toml package toml
import ( import (
"encoding"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math" "math"
"os"
"reflect" "reflect"
"strings" "strings"
"time" "time"
) )
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a // Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves. // TOML description of themselves.
type Unmarshaler interface { type Unmarshaler interface {
@ -27,30 +25,21 @@ func Unmarshal(p []byte, v interface{}) error {
} }
// Primitive is a TOML value that hasn't been decoded into a Go value. // Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
// //
// A `Primitive` value can be decoded using the `PrimitiveDecode` function. // This type can be used for any value, which will cause decoding to be delayed.
// You can use the PrimitiveDecode() function to "manually" decode these values.
// //
// The underlying representation of a `Primitive` value is subject to change. // NOTE: The underlying representation of a `Primitive` value is subject to
// Do not rely on it. // change. Do not rely on it.
// //
// N.B. Primitive values are still parsed, so using them will only avoid // NOTE: Primitive values are still parsed, so using them will only avoid the
// the overhead of reflection. They can be useful when you don't know the // overhead of reflection. They can be useful when you don't know the exact type
// exact type of TOML data until run time. // of TOML data until runtime.
type Primitive struct { type Primitive struct {
undecoded interface{} undecoded interface{}
context Key context Key
} }
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it // PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values // decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions, // can *only* be obtained from values filled by the decoder functions,
@ -68,43 +57,51 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
return md.unify(primValue.undecoded, rvalue(v)) return md.unify(primValue.undecoded, rvalue(v))
} }
// Decode will decode the contents of `data` in TOML format into a pointer // Decoder decodes TOML data.
// `v`.
// //
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be // TOML tables correspond to Go structs or maps (dealer's choice – they can be
// used interchangeably.) // used interchangeably).
// //
// TOML arrays of tables correspond to either a slice of structs or a slice // TOML table arrays correspond to either a slice of structs or a slice of maps.
// of maps.
// //
// TOML datetimes correspond to Go `time.Time` values. // TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
// //
// All other TOML types (float, string, int, bool and array) correspond // All other TOML types (float, string, int, bool and array) correspond to the
// to the obvious Go types. // obvious Go types.
// //
// An exception to the above rules is if a type implements the // An exception to the above rules is if a type implements the TextUnmarshaler
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value // interface, in which case any primitive TOML value (floats, strings, integers,
// (floats, strings, integers, booleans and datetimes) will be converted to // booleans, datetimes) will be converted to a []byte and given to the value's
// a byte string and given to the value's UnmarshalText method. See the // UnmarshalText method. See the Unmarshaler example for a demonstration with
// Unmarshaler example for a demonstration with time duration strings. // time duration strings.
// //
// Key mapping // Key mapping
// //
// TOML keys can map to either keys in a Go map or field names in a Go // TOML keys can map to either keys in a Go map or field names in a Go struct.
// struct. The special `toml` struct tag may be used to map TOML keys to // The special `toml` struct tag can be used to map TOML keys to struct fields
// struct fields that don't match the key name exactly. (See the example.) // that don't match the key name exactly (see the example). A case insensitive
// A case insensitive match to struct names will be tried if an exact match // match to struct names will be tried if an exact match can't be found.
// can't be found.
// //
// The mapping between TOML values and Go values is loose. That is, there // The mapping between TOML values and Go values is loose. That is, there may
// may exist TOML values that cannot be placed into your representation, and // exist TOML values that cannot be placed into your representation, and there
// there may be parts of your representation that do not correspond to // may be parts of your representation that do not correspond to TOML values.
// TOML values. This loose mapping can be made stricter by using the IsDefined // This loose mapping can be made stricter by using the IsDefined and/or
// and/or Undecoded methods on the MetaData returned. // Undecoded methods on the MetaData returned.
// //
// This decoder will not handle cyclic types. If a cyclic type is passed, // This decoder does not handle cyclic types. Decode will not terminate if a
// `Decode` will not terminate. // cyclic type is passed.
func Decode(data string, v interface{}) (MetaData, error) { type Decoder struct {
r io.Reader
}
// NewDecoder creates a new Decoder.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// Decode TOML data in to the pointer `v`.
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr { if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
@ -112,7 +109,15 @@ func Decode(data string, v interface{}) (MetaData, error) {
if rv.IsNil() { if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
} }
p, err := parse(data)
// TODO: have parser should read from io.Reader? Or at the very least, make
// it read from []byte rather than string
data, err := ioutil.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
}
p, err := parse(string(data))
if err != nil { if err != nil {
return MetaData{}, err return MetaData{}, err
} }
@ -123,24 +128,22 @@ func Decode(data string, v interface{}) (MetaData, error) {
return md, md.unify(p.mapping, indirect(rv)) return md, md.unify(p.mapping, indirect(rv))
} }
// DecodeFile is just like Decode, except it will automatically read the // Decode the TOML data in to the pointer v.
// contents of the file at `fpath` and decode it for you. //
func DecodeFile(fpath string, v interface{}) (MetaData, error) { // See the documentation on Decoder for a description of the decoding process.
bs, err := ioutil.ReadFile(fpath) func Decode(data string, v interface{}) (MetaData, error) {
if err != nil { return NewDecoder(strings.NewReader(data)).Decode(v)
return MetaData{}, err
}
return Decode(string(bs), v)
} }
// DecodeReader is just like Decode, except it will consume all bytes // DecodeFile is just like Decode, except it will automatically read the
// from the reader and decode it for you. // contents of the file at path and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { func DecodeFile(path string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r) fp, err := os.Open(path)
if err != nil { if err != nil {
return MetaData{}, err return MetaData{}, err
} }
return Decode(string(bs), v) defer fp.Close()
return NewDecoder(fp).Decode(v)
} }
// unify performs a sort of type unification based on the structure of `rv`, // unify performs a sort of type unification based on the structure of `rv`,
@ -149,8 +152,8 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
// Any type mismatch produces an error. Finding a type that we don't know // Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error. // how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error { func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value. // Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive // Save the undecoded data and the key context into the primitive
// value. // value.
@ -170,25 +173,17 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
} }
} }
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface. // Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok { if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v) return md.unifyText(data, v)
} }
// BUG(burntsushi) // TODO:
// The behavior here is incorrect whenever a Go type satisfies the // The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
// hash or array. In particular, the unmarshaler should only be applied // array. In particular, the unmarshaler should only be applied to primitive
// to primitive TOML values. But at this point, it will be applied to // TOML values. But at this point, it will be applied to all kinds of values
// all kinds of values and produce an incorrect error whenever those values // and produce an incorrect error whenever those values are hashes or arrays
// are hashes or arrays (including arrays of tables). // (including arrays of tables).
k := rv.Kind() k := rv.Kind()
@ -277,6 +272,12 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
} }
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
if k := rv.Type().Key().Kind(); k != reflect.String {
return fmt.Errorf(
"toml: cannot decode to a map with non-string key type (%s in %q)",
k, rv.Type())
}
tmap, ok := mapping.(map[string]interface{}) tmap, ok := mapping.(map[string]interface{})
if !ok { if !ok {
if tmap == nil { if tmap == nil {
@ -312,10 +313,8 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
} }
return badtype("slice", data) return badtype("slice", data)
} }
sliceLen := datav.Len() if l := datav.Len(); l != rv.Len() {
if sliceLen != rv.Len() { return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
} }
return md.unifySliceArray(datav, rv) return md.unifySliceArray(datav, rv)
} }
@ -337,11 +336,10 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
} }
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len() l := data.Len()
for i := 0; i < sliceLen; i++ { for i := 0; i < l; i++ {
v := data.Index(i).Interface() err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
sliceval := indirect(rv.Index(i)) if err != nil {
if err := md.unify(v, sliceval); err != nil {
return err return err
} }
} }
@ -439,7 +437,7 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
return nil return nil
} }
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string var s string
switch sdata := data.(type) { switch sdata := data.(type) {
case TextMarshaler: case TextMarshaler:
@ -482,7 +480,7 @@ func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr { if v.Kind() != reflect.Ptr {
if v.CanSet() { if v.CanSet() {
pv := v.Addr() pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok { if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
return pv return pv
} }
} }
@ -498,12 +496,16 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() { if rv.CanSet() {
return true return true
} }
if _, ok := rv.Interface().(TextUnmarshaler); ok { if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return true return true
} }
return false return false
} }
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
func badtype(expected string, data interface{}) error { func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected) return e("cannot load TOML value of type %T into a Go %s", data, expected)
} }

18
vendor/github.com/BurntSushi/toml/decode_go116.go generated vendored Normal file
View file

@ -0,0 +1,18 @@
// +build go1.16
package toml
import (
"io/fs"
)
// DecodeFS is just like Decode, except it will automatically read the contents
// of the file at `path` from a fs.FS instance.
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
fp, err := fsys.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}

View file

@ -2,9 +2,9 @@ package toml
import "strings" import "strings"
// MetaData allows access to meta information about TOML data that may not // MetaData allows access to meta information about TOML data that may not be
// be inferrable via reflection. In particular, whether a key has been defined // inferable via reflection. In particular, whether a key has been defined and
// and the TOML type of a key. // the TOML type of a key.
type MetaData struct { type MetaData struct {
mapping map[string]interface{} mapping map[string]interface{}
types map[string]tomlType types map[string]tomlType
@ -13,10 +13,11 @@ type MetaData struct {
context Key // Used only during decoding. context Key // Used only during decoding.
} }
// IsDefined returns true if the key given exists in the TOML data. The key // IsDefined reports if the key exists in the TOML data.
// should be specified hierarchially. e.g., //
// The key should be specified hierarchically, for example to access the TOML
// key "a.b.c" you would use:
// //
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c") // IsDefined("a", "b", "c")
// //
// IsDefined will return false if an empty key given. Keys are case sensitive. // IsDefined will return false if an empty key given. Keys are case sensitive.
@ -41,8 +42,8 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type returns a string representation of the type of the key specified. // Type returns a string representation of the type of the key specified.
// //
// Type will return the empty string if given an empty key or a key that // Type will return the empty string if given an empty key or a key that does
// does not exist. Keys are case sensitive. // not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string { func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".") fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok { if typ, ok := md.types[fullkey]; ok {
@ -51,13 +52,11 @@ func (md *MetaData) Type(key ...string) string {
return "" return ""
} }
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys // Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// to get values of this type. // values of this type.
type Key []string type Key []string
func (k Key) String() string { func (k Key) String() string { return strings.Join(k, ".") }
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string { func (k Key) maybeQuotedAll() string {
var ss []string var ss []string
@ -68,6 +67,9 @@ func (k Key) maybeQuotedAll() string {
} }
func (k Key) maybeQuoted(i int) string { func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
quote := false quote := false
for _, c := range k[i] { for _, c := range k[i] {
if !isBareKeyChar(c) { if !isBareKeyChar(c) {
@ -76,7 +78,7 @@ func (k Key) maybeQuoted(i int) string {
} }
} }
if quote { if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" return `"` + quotedReplacer.Replace(k[i]) + `"`
} }
return k[i] return k[i]
} }
@ -89,10 +91,10 @@ func (k Key) add(piece string) Key {
} }
// Keys returns a slice of every key in the TOML data, including key groups. // Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
// //
// The list will have the same order as the keys appeared in the TOML data. // Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific. The list will have the same
// order as the keys appeared in the TOML data.
// //
// All keys returned are non-empty. // All keys returned are non-empty.
func (md *MetaData) Keys() []Key { func (md *MetaData) Keys() []Key {

33
vendor/github.com/BurntSushi/toml/deprecated.go generated vendored Normal file
View file

@ -0,0 +1,33 @@
package toml
import (
"encoding"
"io"
)
// DEPRECATED!
//
// Use the identical encoding.TextMarshaler instead. It is defined here to
// support Go 1.1 and older.
type TextMarshaler encoding.TextMarshaler
// DEPRECATED!
//
// Use the identical encoding.TextUnmarshaler instead. It is defined here to
// support Go 1.1 and older.
type TextUnmarshaler encoding.TextUnmarshaler
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// DEPRECATED!
//
// Use NewDecoder(reader).Decode(&v) instead.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
return NewDecoder(r).Decode(v)
}

View file

@ -1,27 +1,13 @@
/* /*
Package toml provides facilities for decoding and encoding TOML configuration Package toml implements decoding and encoding of TOML files.
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml This package supports TOML v1.0.0, as listed on https://toml.io
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify There is also support for delaying decoding with the Primitive type, and
whether a file is a valid TOML document. It can also be used to print the querying the set of keys in a TOML document with the MetaData type.
type of each key in a TOML document.
Testing The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
and can be used to verify if TOML document is valid. It can also be used to
There are two important types of tests used for this package. The first is print the type of each key.
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/ */
package toml package toml

View file

@ -2,48 +2,92 @@ package toml
import ( import (
"bufio" "bufio"
"encoding"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math"
"reflect" "reflect"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/BurntSushi/toml/internal"
) )
type tomlEncodeError struct{ error } type tomlEncodeError struct{ error }
var ( var (
errArrayMixedElementTypes = errors.New( errArrayNilElement = errors.New("toml: cannot encode array with nil element")
"toml: cannot encode array with mixed element types") errNonString = errors.New("toml: cannot encode a map with non-string key type")
errArrayNilElement = errors.New( errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct")
"toml: cannot encode array with nil element") errNoKey = errors.New("toml: top-level values must be Go maps or structs")
errNonString = errors.New( errAnything = errors.New("") // used in testing
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
) )
var quotedReplacer = strings.NewReplacer( var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"", "\"", "\\\"",
"\\", "\\\\", "\\", "\\\\",
"\x00", `\u0000`,
"\x01", `\u0001`,
"\x02", `\u0002`,
"\x03", `\u0003`,
"\x04", `\u0004`,
"\x05", `\u0005`,
"\x06", `\u0006`,
"\x07", `\u0007`,
"\b", `\b`,
"\t", `\t`,
"\n", `\n`,
"\x0b", `\u000b`,
"\f", `\f`,
"\r", `\r`,
"\x0e", `\u000e`,
"\x0f", `\u000f`,
"\x10", `\u0010`,
"\x11", `\u0011`,
"\x12", `\u0012`,
"\x13", `\u0013`,
"\x14", `\u0014`,
"\x15", `\u0015`,
"\x16", `\u0016`,
"\x17", `\u0017`,
"\x18", `\u0018`,
"\x19", `\u0019`,
"\x1a", `\u001a`,
"\x1b", `\u001b`,
"\x1c", `\u001c`,
"\x1d", `\u001d`,
"\x1e", `\u001e`,
"\x1f", `\u001f`,
"\x7f", `\u007f`,
) )
// Encoder controls the encoding of Go values to a TOML document to some // Encoder encodes a Go to a TOML document.
// io.Writer.
// //
// The indentation level can be controlled with the Indent field. // The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.
//
// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
// are encoded first.
//
// Go maps will be sorted alphabetically by key for deterministic output.
//
// Encoding Go values without a corresponding TOML representation will return an
// error. Examples of this includes maps with non-string keys, slices with nil
// elements, embedded non-struct types, and nested slices containing maps or
// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
// is okay, as is []map[string][]string).
//
// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct { type Encoder struct {
// A single indentation level. By default it is two spaces. // The string to use for a single indentation level. The default is two
// spaces.
Indent string Indent string
// hasWritten is whether we have written any output to w yet. // hasWritten is whether we have written any output to w yet.
@ -51,8 +95,7 @@ type Encoder struct {
w *bufio.Writer w *bufio.Writer
} }
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer // NewEncoder create a new Encoder.
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder { func NewEncoder(w io.Writer) *Encoder {
return &Encoder{ return &Encoder{
w: bufio.NewWriter(w), w: bufio.NewWriter(w),
@ -60,29 +103,10 @@ func NewEncoder(w io.Writer) *Encoder {
} }
} }
// Encode writes a TOML representation of the Go value to the underlying // Encode writes a TOML representation of the Go value to the Encoder's writer.
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
// //
// The mapping between Go values and TOML values should be precisely the same // An error is returned if the value given cannot be encoded to a valid TOML
// as for the Decode* functions. Similarly, the TextMarshaler interface is // document.
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error { func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v)) rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil { if err := enc.safeEncode(Key([]string{}), rv); err != nil {
@ -110,9 +134,13 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. If we can marshal the type to text, then we used that. // Special case. If we can marshal the type to text, then we used that.
// Basically, this prevents the encoder for handling these types as // Basically, this prevents the encoder for handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is). // generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) { switch t := rv.Interface().(type) {
case time.Time, TextMarshaler: case time.Time, encoding.TextMarshaler:
enc.keyEqElement(key, rv) enc.writeKeyValue(key, rv, false)
return
// TODO: #76 would make this superfluous after implemented.
case Primitive:
enc.encode(key, reflect.ValueOf(t.undecoded))
return return
} }
@ -123,12 +151,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64, reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv) enc.writeKeyValue(key, rv, false)
case reflect.Array, reflect.Slice: case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv) enc.eArrayOfTables(key, rv)
} else { } else {
enc.keyEqElement(key, rv) enc.writeKeyValue(key, rv, false)
} }
case reflect.Interface: case reflect.Interface:
if rv.IsNil() { if rv.IsNil() {
@ -148,22 +176,32 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct: case reflect.Struct:
enc.eTable(key, rv) enc.eTable(key, rv)
default: default:
panic(e("unsupported type for key '%s': %s", key, k)) encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
} }
} }
// eElement encodes any value that can be an array element (primitives and // eElement encodes any value that can be an array element.
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) { func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) { switch v := rv.Interface().(type) {
case time.Time: case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
// Special case time.Time as a primitive. Has to come before format := time.RFC3339Nano
// TextMarshaler below because time.Time implements switch v.Location() {
// encoding.TextMarshaler, but we need to always use UTC. case internal.LocalDatetime:
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) format = "2006-01-02T15:04:05.999999999"
case internal.LocalDate:
format = "2006-01-02"
case internal.LocalTime:
format = "15:04:05.999999999"
}
switch v.Location() {
default:
enc.wf(v.Format(format))
case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
enc.wf(v.In(time.UTC).Format(format))
}
return return
case TextMarshaler: case encoding.TextMarshaler:
// Special case. Use text marshaler if it's available for this value. // Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil { if s, err := v.MarshalText(); err != nil {
encPanic(err) encPanic(err)
} else { } else {
@ -171,32 +209,49 @@ func (enc *Encoder) eElement(rv reflect.Value) {
} }
return return
} }
switch rv.Kind() { switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String: case reflect.String:
enc.writeQuoted(rv.String()) enc.writeQuoted(rv.String())
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
enc.wf("nan")
} else if math.IsInf(f, 0) {
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
enc.wf("nan")
} else if math.IsInf(f, 0) {
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
}
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Struct:
enc.eStruct(nil, rv, true)
case reflect.Map:
enc.eMap(nil, rv, true)
case reflect.Interface:
enc.eElement(rv.Elem())
default: default:
panic(e("unexpected primitive type: %s", rv.Kind())) encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
} }
} }
// By the TOML spec, all floats must have a decimal with at least one // By the TOML spec, all floats must have a decimal with at least one number on
// number on either side. // either side.
func floatAddDecimal(fstr string) string { func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") { if !strings.Contains(fstr, ".") {
return fstr + ".0" return fstr + ".0"
@ -230,16 +285,14 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if isNil(trv) { if isNil(trv) {
continue continue
} }
panicIfInvalidKey(key)
enc.newline() enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline() enc.newline()
enc.eMapOrStruct(key, trv) enc.eMapOrStruct(key, trv, false)
} }
} }
func (enc *Encoder) eTable(key Key, rv reflect.Value) { func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 { if len(key) == 1 {
// Output an extra newline between top-level tables. // Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.) // (The newline isn't written if nothing else has been written though.)
@ -249,21 +302,22 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline() enc.newline()
} }
enc.eMapOrStruct(key, rv) enc.eMapOrStruct(key, rv, false)
} }
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
switch rv := eindirect(rv); rv.Kind() { switch rv := eindirect(rv); rv.Kind() {
case reflect.Map: case reflect.Map:
enc.eMap(key, rv) enc.eMap(key, rv, inline)
case reflect.Struct: case reflect.Struct:
enc.eStruct(key, rv) enc.eStruct(key, rv, inline)
default: default:
// Should never happen?
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
} }
} }
func (enc *Encoder) eMap(key Key, rv reflect.Value) { func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
rt := rv.Type() rt := rv.Type()
if rt.Key().Kind() != reflect.String { if rt.Key().Kind() != reflect.String {
encPanic(errNonString) encPanic(errNonString)
@ -281,57 +335,76 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) {
} }
} }
var writeMapKeys = func(mapKeys []string) { var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys) sort.Strings(mapKeys)
for _, mapKey := range mapKeys { for i, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey)) val := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) { if isNil(val) {
// Don't write anything for nil fields.
continue continue
} }
enc.encode(key.add(mapKey), mrv)
if inline {
enc.writeKeyValue(Key{mapKey}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(mapKey), val)
}
} }
} }
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub) if inline {
enc.wf("{")
}
writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
writeMapKeys(mapKeysSub, false)
if inline {
enc.wf("}")
}
} }
func (enc *Encoder) eStruct(key Key, rv reflect.Value) { func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write // Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that // a field that creates a new table then all keys under it will be in that
// table (not the one we're writing here). // table (not the one we're writing here).
rt := rv.Type() //
var fieldsDirect, fieldsSub [][]int // Fields is a [][]int: for fieldsDirect this always has one entry (the
var addFields func(rt reflect.Type, rv reflect.Value, start []int) // struct index). For fieldsSub it contains two entries: the parent field
// index from tv, and the field indexes for the fields of the sub.
var (
rt = rv.Type()
fieldsDirect, fieldsSub [][]int
addFields func(rt reflect.Type, rv reflect.Value, start []int)
)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) { addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ { for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i) f := rt.Field(i)
// skip unexported fields if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
if f.PkgPath != "" && !f.Anonymous {
continue continue
} }
frv := rv.Field(i) frv := rv.Field(i)
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
// Non-struct anonymous fields use the normal encoding logic.
if f.Anonymous { if f.Anonymous {
t := f.Type t := f.Type
switch t.Kind() { switch t.Kind() {
case reflect.Struct: case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" { if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index) addFields(t, frv, append(start, f.Index...))
continue continue
} }
case reflect.Ptr: case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct && if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
getOptions(f.Tag).name == "" {
if !frv.IsNil() { if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index) addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
} }
continue continue
} }
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
} }
} }
@ -344,35 +417,49 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
} }
addFields(rt, rv, nil) addFields(rt, rv, nil)
var writeFields = func(fields [][]int) { writeFields := func(fields [][]int) {
for _, fieldIndex := range fields { for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex) fieldType := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields. if isNil(fieldVal) { /// Don't write anything for nil fields.
continue continue
} }
opts := getOptions(sft.Tag) opts := getOptions(fieldType.Tag)
if opts.skip { if opts.skip {
continue continue
} }
keyName := sft.Name keyName := fieldType.Name
if opts.name != "" { if opts.name != "" {
keyName = opts.name keyName = opts.name
} }
if opts.omitempty && isEmpty(sf) { if opts.omitempty && isEmpty(fieldVal) {
continue continue
} }
if opts.omitzero && isZero(sf) { if opts.omitzero && isZero(fieldVal) {
continue continue
} }
enc.encode(key.add(keyName), sf) if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != len(fields)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(keyName), fieldVal)
}
} }
} }
if inline {
enc.wf("{")
}
writeFields(fieldsDirect) writeFields(fieldsDirect)
writeFields(fieldsSub) writeFields(fieldsSub)
if inline {
enc.wf("}")
}
} }
// tomlTypeName returns the TOML type name of the Go value's type. It is // tomlTypeName returns the TOML type name of the Go value's type. It is
@ -411,13 +498,26 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
switch rv.Interface().(type) { switch rv.Interface().(type) {
case time.Time: case time.Time:
return tomlDatetime return tomlDatetime
case TextMarshaler: case encoding.TextMarshaler:
return tomlString return tomlString
default: default:
// Someone used a pointer receiver: we can make it work for pointer
// values.
if rv.CanAddr() {
_, ok := rv.Addr().Interface().(encoding.TextMarshaler)
if ok {
return tomlString
}
}
return tomlHash return tomlHash
} }
default: default:
panic("unexpected reflect.Kind: " + rv.Kind().String()) _, ok := rv.Interface().(encoding.TextMarshaler)
if ok {
return tomlString
}
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
panic("") // Need *some* return value
} }
} }
@ -430,30 +530,19 @@ func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil return nil
} }
/// Don't allow nil.
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
if tomlTypeOfGo(rv.Index(i)) == nil {
encPanic(errArrayNilElement)
}
}
firstType := tomlTypeOfGo(rv.Index(0)) firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil { if firstType == nil {
encPanic(errArrayNilElement) encPanic(errArrayNilElement)
} }
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType return firstType
} }
@ -511,14 +600,20 @@ func (enc *Encoder) newline() {
} }
} }
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { // Write a key/value pair:
//
// key = <any value>
//
// If inline is true it won't add a newline at the end.
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 { if len(key) == 0 {
encPanic(errNoKey) encPanic(errNoKey)
} }
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val) enc.eElement(val)
enc.newline() if !inline {
enc.newline()
}
} }
func (enc *Encoder) wf(format string, v ...interface{}) { func (enc *Encoder) wf(format string, v ...interface{}) {
@ -553,16 +648,3 @@ func isNil(rv reflect.Value) bool {
return false return false
} }
} }
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

View file

@ -1,19 +0,0 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

View file

@ -1,18 +0,0 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

3
vendor/github.com/BurntSushi/toml/go.mod generated vendored Normal file
View file

@ -0,0 +1,3 @@
module github.com/BurntSushi/toml
go 1.16

0
vendor/github.com/BurntSushi/toml/go.sum generated vendored Normal file
View file

36
vendor/github.com/BurntSushi/toml/internal/tz.go generated vendored Normal file
View file

@ -0,0 +1,36 @@
package internal
import "time"
// Timezones used for local datetime, date, and time TOML types.
//
// The exact way times and dates without a timezone should be interpreted is not
// well-defined in the TOML specification and left to the implementation. These
// defaults to current local timezone offset of the computer, but this can be
// changed by changing these variables before decoding.
//
// TODO:
// Ideally we'd like to offer people the ability to configure the used timezone
// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
// tricky: the reason we use three different variables for this is to support
// round-tripping – without these specific TZ names we wouldn't know which
// format to use.
//
// There isn't a good way to encode this right now though, and passing this sort
// of information also ties in to various related issues such as string format
// encoding, encoding of comments, etc.
//
// So, for the time being, just put this in internal until we can write a good
// comprehensive API for doing all of this.
//
// The reason they're exported is because they're referred from in e.g.
// internal/tag.
//
// Note that this behaviour is valid according to the TOML spec as the exact
// behaviour is left up to implementations.
var (
localOffset = func() int { _, o := time.Now().Zone(); return o }()
LocalDatetime = time.FixedZone("datetime-local", localOffset)
LocalDate = time.FixedZone("date-local", localOffset)
LocalTime = time.FixedZone("time-local", localOffset)
)

View file

@ -2,6 +2,8 @@ package toml
import ( import (
"fmt" "fmt"
"reflect"
"runtime"
"strings" "strings"
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
@ -29,6 +31,7 @@ const (
itemArrayTableStart itemArrayTableStart
itemArrayTableEnd itemArrayTableEnd
itemKeyStart itemKeyStart
itemKeyEnd
itemCommentStart itemCommentStart
itemInlineTableStart itemInlineTableStart
itemInlineTableEnd itemInlineTableEnd
@ -64,9 +67,9 @@ type lexer struct {
state stateFn state stateFn
items chan item items chan item
// Allow for backing up up to three runes. // Allow for backing up up to four runes.
// This is necessary because TOML contains 3-rune tokens (""" and '''). // This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int prevWidths [4]int
nprev int // how many of prevWidths are in use nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call // If we emit an eof, we can still back up, but it is not OK to call
// next again. // next again.
@ -93,6 +96,7 @@ func (lx *lexer) nextItem() item {
return item return item
default: default:
lx.state = lx.state(lx) lx.state = lx.state(lx)
//fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
} }
} }
} }
@ -137,7 +141,7 @@ func (lx *lexer) emitTrim(typ itemType) {
func (lx *lexer) next() (r rune) { func (lx *lexer) next() (r rune) {
if lx.atEOF { if lx.atEOF {
panic("next called after EOF") panic("BUG in lexer: next called after EOF")
} }
if lx.pos >= len(lx.input) { if lx.pos >= len(lx.input) {
lx.atEOF = true lx.atEOF = true
@ -147,12 +151,19 @@ func (lx *lexer) next() (r rune) {
if lx.input[lx.pos] == '\n' { if lx.input[lx.pos] == '\n' {
lx.line++ lx.line++
} }
lx.prevWidths[3] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[1] lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0] lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 { if lx.nprev < 4 {
lx.nprev++ lx.nprev++
} }
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
if r == utf8.RuneError {
lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
return utf8.RuneError
}
lx.prevWidths[0] = w lx.prevWidths[0] = w
lx.pos += w lx.pos += w
return r return r
@ -163,18 +174,19 @@ func (lx *lexer) ignore() {
lx.start = lx.pos lx.start = lx.pos
} }
// backup steps back one rune. Can be called only twice between calls to next. // backup steps back one rune. Can be called 4 times between calls to next.
func (lx *lexer) backup() { func (lx *lexer) backup() {
if lx.atEOF { if lx.atEOF {
lx.atEOF = false lx.atEOF = false
return return
} }
if lx.nprev < 1 { if lx.nprev < 1 {
panic("backed up too far") panic("BUG in lexer: backed up too far")
} }
w := lx.prevWidths[0] w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1] lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2] lx.prevWidths[1] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[3]
lx.nprev-- lx.nprev--
lx.pos -= w lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
@ -269,8 +281,9 @@ func lexTopEnd(lx *lexer) stateFn {
lx.emit(itemEOF) lx.emit(itemEOF)
return nil return nil
} }
return lx.errorf("expected a top-level item to end with a newline, "+ return lx.errorf(
"comment, or EOF, but got %q instead", r) "expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
r)
} }
// lexTable lexes the beginning of a table. Namely, it makes sure that // lexTable lexes the beginning of a table. Namely, it makes sure that
@ -297,8 +310,9 @@ func lexTableEnd(lx *lexer) stateFn {
func lexArrayTableEnd(lx *lexer) stateFn { func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd { if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+ return lx.errorf(
"but got %q instead", arrayTableEnd, r) "expected end of table array name delimiter %q, but got %q instead",
arrayTableEnd, r)
} }
lx.emit(itemArrayTableEnd) lx.emit(itemArrayTableEnd)
return lexTopEnd return lexTopEnd
@ -308,32 +322,19 @@ func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace) lx.skip(isWhitespace)
switch r := lx.peek(); { switch r := lx.peek(); {
case r == tableEnd || r == eof: case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " + return lx.errorf("unexpected end of table name (table names cannot be empty)")
"(table names cannot be empty)")
case r == tableSep: case r == tableSep:
return lx.errorf("unexpected table separator " + return lx.errorf("unexpected table separator (table names cannot be empty)")
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart: case r == stringStart || r == rawStringStart:
lx.ignore() lx.ignore()
lx.push(lexTableNameEnd) lx.push(lexTableNameEnd)
return lexValue // reuse string lexing return lexQuotedName
default: default:
return lexBareTableName lx.push(lexTableNameEnd)
return lexBareName
} }
} }
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally // lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace. // consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn { func lexTableNameEnd(lx *lexer) stateFn {
@ -347,63 +348,101 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd: case r == tableEnd:
return lx.pop() return lx.pop()
default: default:
return lx.errorf("expected '.' or ']' to end table name, "+ return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
"but got %q instead", r)
} }
} }
// lexKeyStart consumes a key name up until the first non-whitespace character. // lexBareName lexes one part of a key or table.
// lexKeyStart will ignore whitespace. //
func lexKeyStart(lx *lexer) stateFn { // It assumes that at least one valid character for the table has already been
r := lx.peek() // read.
//
// Lexes only one part, e.g. only 'a' inside 'a.b'.
func lexBareName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareName
}
lx.backup()
lx.emit(itemText)
return lx.pop()
}
// lexBareName lexes one part of a key or table.
//
// It assumes that at least one valid character for the table has already been
// read.
//
// Lexes only one part, e.g. only '"a"' inside '"a".b'.
func lexQuotedName(lx *lexer) stateFn {
r := lx.next()
switch { switch {
case r == keySep: case isWhitespace(r):
return lx.errorf("unexpected key separator %q", keySep) return lexSkip(lx, lexValue)
case isWhitespace(r) || isNL(r): case r == stringStart:
lx.next() lx.ignore() // ignore the '"'
return lexSkip(lx, lexKeyStart) return lexString
case r == rawStringStart:
lx.ignore() // ignore the "'"
return lexRawString
case r == eof:
return lx.errorf("unexpected EOF; expected value")
default:
return lx.errorf("expected value but found %q instead", r)
}
}
// lexKeyStart consumes all key parts until a '='.
func lexKeyStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == '=' || r == eof:
return lx.errorf("unexpected '=': key name appears blank")
case r == '.':
return lx.errorf("unexpected '.': keys cannot start with a '.'")
case r == stringStart || r == rawStringStart: case r == stringStart || r == rawStringStart:
lx.ignore() lx.ignore()
fallthrough
default: // Bare key
lx.emit(itemKeyStart) lx.emit(itemKeyStart)
lx.push(lexKeyEnd) return lexKeyNameStart
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
} }
} }
// lexBareKey consumes the text of a bare key. Assumes that the first character func lexKeyNameStart(lx *lexer) stateFn {
// (which is not whitespace) has not yet been consumed. lx.skip(isWhitespace)
func lexBareKey(lx *lexer) stateFn { switch r := lx.peek(); {
switch r := lx.next(); { case r == '=' || r == eof:
case isBareKeyChar(r): return lx.errorf("unexpected '='")
return lexBareKey case r == '.':
case isWhitespace(r): return lx.errorf("unexpected '.'")
lx.backup() case r == stringStart || r == rawStringStart:
lx.emit(itemText) lx.ignore()
return lexKeyEnd lx.push(lexKeyEnd)
case r == keySep: return lexQuotedName
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default: default:
return lx.errorf("bare keys cannot contain %q", r) lx.push(lexKeyEnd)
return lexBareName
} }
} }
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key // lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator). // separator).
func lexKeyEnd(lx *lexer) stateFn { func lexKeyEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); { switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r): case isWhitespace(r):
return lexSkip(lx, lexKeyEnd) return lexSkip(lx, lexKeyEnd)
case r == eof:
return lx.errorf("unexpected EOF; expected key separator %q", keySep)
case r == '.':
lx.ignore()
return lexKeyNameStart
case r == '=':
lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue)
default: default:
return lx.errorf("expected key separator %q, but got %q instead", return lx.errorf("expected '.' or '=', but got %q instead", r)
keySep, r)
} }
} }
@ -450,10 +489,15 @@ func lexValue(lx *lexer) stateFn {
} }
lx.ignore() // ignore the "'" lx.ignore() // ignore the "'"
return lexRawString return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'") return lx.errorf("floats must start with a digit, not '.'")
case 'i', 'n':
if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
lx.emit(itemFloat)
return lx.pop()
}
case '-', '+':
return lexDecimalNumberStart
} }
if unicode.IsLetter(r) { if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the // Be permissive here; lexBool will give a nice error if the
@ -463,6 +507,9 @@ func lexValue(lx *lexer) stateFn {
lx.backup() lx.backup()
return lexBool return lexBool
} }
if r == eof {
return lx.errorf("unexpected EOF; expected value")
}
return lx.errorf("expected value but found %q instead", r) return lx.errorf("expected value but found %q instead", r)
} }
@ -507,9 +554,8 @@ func lexArrayValueEnd(lx *lexer) stateFn {
return lexArrayEnd return lexArrayEnd
} }
return lx.errorf( return lx.errorf(
"expected a comma or array terminator %q, but got %q instead", "expected a comma or array terminator %q, but got %s instead",
arrayEnd, r, arrayEnd, runeOrEOF(r))
)
} }
// lexArrayEnd finishes the lexing of an array. // lexArrayEnd finishes the lexing of an array.
@ -546,8 +592,7 @@ func lexInlineTableValue(lx *lexer) stateFn {
// key/value pair and the next pair (or the end of the table): // key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'. // it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn { func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next() switch r := lx.next(); {
switch {
case isWhitespace(r): case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd) return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r): case isNL(r):
@ -557,12 +602,25 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
return lexCommentStart return lexCommentStart
case r == comma: case r == comma:
lx.ignore() lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
return lx.errorf("trailing comma not allowed in inline tables")
}
return lexInlineTableValue return lexInlineTableValue
case r == inlineTableEnd: case r == inlineTableEnd:
return lexInlineTableEnd return lexInlineTableEnd
default:
return lx.errorf(
"expected a comma or an inline table terminator %q, but got %s instead",
inlineTableEnd, runeOrEOF(r))
} }
return lx.errorf("expected a comma or an inline table terminator %q, "+ }
"but got %q instead", inlineTableEnd, r)
func runeOrEOF(r rune) string {
if r == eof {
return "end of file"
}
return "'" + string(r) + "'"
} }
// lexInlineTableEnd finishes the lexing of an inline table. // lexInlineTableEnd finishes the lexing of an inline table.
@ -579,7 +637,9 @@ func lexString(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
case r == eof: case r == eof:
return lx.errorf("unexpected EOF") return lx.errorf(`unexpected EOF; expected '"'`)
case isControl(r) || r == '\r':
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r): case isNL(r):
return lx.errorf("strings cannot contain newlines") return lx.errorf("strings cannot contain newlines")
case r == '\\': case r == '\\':
@ -598,19 +658,40 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that // lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored. // the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn { func lexMultilineString(lx *lexer) stateFn {
switch lx.next() { r := lx.next()
switch r {
case eof: case eof:
return lx.errorf("unexpected EOF") return lx.errorf(`unexpected EOF; expected '"""'`)
case '\r':
if lx.peek() != '\n' {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineString
case '\\': case '\\':
return lexMultilineStringEscape return lexMultilineStringEscape
case stringEnd: case stringEnd:
/// Found " → try to read two more "".
if lx.accept(stringEnd) { if lx.accept(stringEnd) {
if lx.accept(stringEnd) { if lx.accept(stringEnd) {
lx.backup() /// Peek ahead: the string can contain " and "", including at the
/// end: """str"""""
/// 6 or more at the end, however, is an error.
if lx.peek() == stringEnd {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
if strings.HasSuffix(lx.current(), `"""""`) {
return lx.errorf(`unexpected '""""""'`)
}
lx.backup()
lx.backup()
return lexMultilineString
}
lx.backup() /// backup: don't include the """ in the item.
lx.backup() lx.backup()
lx.backup() lx.backup()
lx.emit(itemMultilineString) lx.emit(itemMultilineString)
lx.next() lx.next() /// Read over ''' again and discard it.
lx.next() lx.next()
lx.next() lx.next()
lx.ignore() lx.ignore()
@ -619,6 +700,10 @@ func lexMultilineString(lx *lexer) stateFn {
lx.backup() lx.backup()
} }
} }
if isControl(r) {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineString return lexMultilineString
} }
@ -628,7 +713,9 @@ func lexRawString(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
case r == eof: case r == eof:
return lx.errorf("unexpected EOF") return lx.errorf(`unexpected EOF; expected "'"`)
case isControl(r) || r == '\r':
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r): case isNL(r):
return lx.errorf("strings cannot contain newlines") return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd: case r == rawStringEnd:
@ -645,17 +732,38 @@ func lexRawString(lx *lexer) stateFn {
// a string. It assumes that the beginning "'''" has already been consumed and // a string. It assumes that the beginning "'''" has already been consumed and
// ignored. // ignored.
func lexMultilineRawString(lx *lexer) stateFn { func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() { r := lx.next()
switch r {
case eof: case eof:
return lx.errorf("unexpected EOF") return lx.errorf(`unexpected EOF; expected "'''"`)
case '\r':
if lx.peek() != '\n' {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineRawString
case rawStringEnd: case rawStringEnd:
/// Found ' → try to read two more ''.
if lx.accept(rawStringEnd) { if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) { if lx.accept(rawStringEnd) {
lx.backup() /// Peek ahead: the string can contain ' and '', including at the
/// end: '''str'''''
/// 6 or more at the end, however, is an error.
if lx.peek() == rawStringEnd {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
if strings.HasSuffix(lx.current(), "'''''") {
return lx.errorf(`unexpected "''''''"`)
}
lx.backup()
lx.backup()
return lexMultilineRawString
}
lx.backup() /// backup: don't include the ''' in the item.
lx.backup() lx.backup()
lx.backup() lx.backup()
lx.emit(itemRawMultilineString) lx.emit(itemRawMultilineString)
lx.next() lx.next() /// Read over ''' again and discard it.
lx.next() lx.next()
lx.next() lx.next()
lx.ignore() lx.ignore()
@ -664,6 +772,10 @@ func lexMultilineRawString(lx *lexer) stateFn {
lx.backup() lx.backup()
} }
} }
if isControl(r) {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineRawString return lexMultilineRawString
} }
@ -694,6 +806,10 @@ func lexStringEscape(lx *lexer) stateFn {
fallthrough fallthrough
case '"': case '"':
fallthrough fallthrough
case ' ', '\t':
// Inside """ .. """ strings you can use \ to escape newlines, and any
// amount of whitespace can be between the \ and \n.
fallthrough
case '\\': case '\\':
return lx.pop() return lx.pop()
case 'u': case 'u':
@ -701,8 +817,7 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U': case 'U':
return lexLongUnicodeEscape return lexLongUnicodeEscape
} }
return lx.errorf("invalid escape character %q; only the following "+ return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
} }
@ -711,8 +826,9 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
r = lx.next() r = lx.next()
if !isHexadecimal(r) { if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+ return lx.errorf(
"but got %q instead", lx.current()) `expected four hexadecimal digits after '\u', but got %q instead`,
lx.current())
} }
} }
return lx.pop() return lx.pop()
@ -723,28 +839,33 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ { for i := 0; i < 8; i++ {
r = lx.next() r = lx.next()
if !isHexadecimal(r) { if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+ return lx.errorf(
"but got %q instead", lx.current()) `expected eight hexadecimal digits after '\U', but got %q instead`,
lx.current())
} }
} }
return lx.pop() return lx.pop()
} }
// lexNumberOrDateStart consumes either an integer, a float, or datetime. // lexNumberOrDateStart processes the first character of a value which begins
// with a digit. It exists to catch values starting with '0', so that
// lexBaseNumberOrDate can differentiate base prefixed integers from other
// types.
func lexNumberOrDateStart(lx *lexer) stateFn { func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next() r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r { switch r {
case '_': case '0':
return lexNumber return lexBaseNumberOrDate
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
} }
return lx.errorf("expected a digit but got %q", r)
if !isDigit(r) {
// The only way to reach this state is if the value starts
// with a digit, so specifically treat anything else as an
// error.
return lx.errorf("expected a digit but got %q", r)
}
return lexNumberOrDate
} }
// lexNumberOrDate consumes either an integer, float or datetime. // lexNumberOrDate consumes either an integer, float or datetime.
@ -754,10 +875,10 @@ func lexNumberOrDate(lx *lexer) stateFn {
return lexNumberOrDate return lexNumberOrDate
} }
switch r { switch r {
case '-': case '-', ':':
return lexDatetime return lexDatetime
case '_': case '_':
return lexNumber return lexDecimalNumber
case '.', 'e', 'E': case '.', 'e', 'E':
return lexFloat return lexFloat
} }
@ -775,41 +896,156 @@ func lexDatetime(lx *lexer) stateFn {
return lexDatetime return lexDatetime
} }
switch r { switch r {
case '-', 'T', ':', '.', 'Z', '+': case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
return lexDatetime return lexDatetime
} }
lx.backup() lx.backup()
lx.emit(itemDatetime) lx.emitTrim(itemDatetime)
return lx.pop() return lx.pop()
} }
// lexNumberStart consumes either an integer or a float. It assumes that a sign // lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
// has already been read, but that *no* digits have been consumed. func lexHexInteger(lx *lexer) stateFn {
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next() r := lx.next()
if !isDigit(r) { if isHexadecimal(r) {
if r == '.' { return lexHexInteger
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
} }
switch r { switch r {
case '_': case '_':
return lexNumber return lexHexInteger
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
func lexOctalInteger(lx *lexer) stateFn {
r := lx.next()
if isOctal(r) {
return lexOctalInteger
}
switch r {
case '_':
return lexOctalInteger
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
func lexBinaryInteger(lx *lexer) stateFn {
r := lx.next()
if isBinary(r) {
return lexBinaryInteger
}
switch r {
case '_':
return lexBinaryInteger
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDecimalNumber consumes a decimal float or integer.
func lexDecimalNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDecimalNumber
}
switch r {
case '.', 'e', 'E': case '.', 'e', 'E':
return lexFloat return lexFloat
case '_':
return lexDecimalNumber
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDecimalNumber consumes the first digit of a number beginning with a sign.
// It assumes the sign has already been consumed. Values which start with a sign
// are only allowed to be decimal integers or floats.
//
// The special "nan" and "inf" values are also recognized.
func lexDecimalNumberStart(lx *lexer) stateFn {
r := lx.next()
// Special error cases to give users better error messages
switch r {
case 'i':
if !lx.accept('n') || !lx.accept('f') {
return lx.errorf("invalid float: '%s'", lx.current())
}
lx.emit(itemFloat)
return lx.pop()
case 'n':
if !lx.accept('a') || !lx.accept('n') {
return lx.errorf("invalid float: '%s'", lx.current())
}
lx.emit(itemFloat)
return lx.pop()
case '0':
p := lx.peek()
switch p {
case 'b', 'o', 'x':
return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
}
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
if isDigit(r) {
return lexDecimalNumber
}
return lx.errorf("expected a digit but got %q", r)
}
// lexBaseNumberOrDate differentiates between the possible values which
// start with '0'. It assumes that before reaching this state, the initial '0'
// has been consumed.
func lexBaseNumberOrDate(lx *lexer) stateFn {
r := lx.next()
// Note: All datetimes start with at least two digits, so we don't
// handle date characters (':', '-', etc.) here.
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
// Can only be decimal, because there can't be an underscore
// between the '0' and the base designator, and dates can't
// contain underscores.
return lexDecimalNumber
case '.', 'e', 'E':
return lexFloat
case 'b':
r = lx.peek()
if !isBinary(r) {
lx.errorf("not a binary number: '%s%c'", lx.current(), r)
}
return lexBinaryInteger
case 'o':
r = lx.peek()
if !isOctal(r) {
lx.errorf("not an octal number: '%s%c'", lx.current(), r)
}
return lexOctalInteger
case 'x':
r = lx.peek()
if !isHexadecimal(r) {
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
} }
lx.backup() lx.backup()
@ -867,21 +1103,22 @@ func lexCommentStart(lx *lexer) stateFn {
// It will consume *up to* the first newline character, and pass control // It will consume *up to* the first newline character, and pass control
// back to the last state on the stack. // back to the last state on the stack.
func lexComment(lx *lexer) stateFn { func lexComment(lx *lexer) stateFn {
r := lx.peek() switch r := lx.next(); {
if isNL(r) || r == eof { case isNL(r) || r == eof:
lx.backup()
lx.emit(itemText) lx.emit(itemText)
return lx.pop() return lx.pop()
case isControl(r):
return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
default:
return lexComment
} }
lx.next()
return lexComment
} }
// lexSkip ignores all slurped input and moves on to the next state. // lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn { func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn { lx.ignore()
lx.ignore() return nextState
return nextState
}
} }
// isWhitespace returns true if `r` is a whitespace character according // isWhitespace returns true if `r` is a whitespace character according
@ -894,6 +1131,16 @@ func isNL(r rune) bool {
return r == '\n' || r == '\r' return r == '\n' || r == '\r'
} }
// Control characters except \n, \t
func isControl(r rune) bool {
switch r {
case '\t', '\r', '\n':
return false
default:
return (r >= 0x00 && r <= 0x1f) || r == 0x7f
}
}
func isDigit(r rune) bool { func isDigit(r rune) bool {
return r >= '0' && r <= '9' return r >= '0' && r <= '9'
} }
@ -904,6 +1151,14 @@ func isHexadecimal(r rune) bool {
(r >= 'A' && r <= 'F') (r >= 'A' && r <= 'F')
} }
func isOctal(r rune) bool {
return r >= '0' && r <= '7'
}
func isBinary(r rune) bool {
return r == '0' || r == '1'
}
func isBareKeyChar(r rune) bool { func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') || return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') || (r >= 'a' && r <= 'z') ||
@ -912,6 +1167,17 @@ func isBareKeyChar(r rune) bool {
r == '-' r == '-'
} }
func (s stateFn) String() string {
name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
if i := strings.LastIndexByte(name, '.'); i > -1 {
name = name[i+1:]
}
if s == nil {
name = "<nil>"
}
return name + "()"
}
func (itype itemType) String() string { func (itype itemType) String() string {
switch itype { switch itype {
case itemError: case itemError:
@ -938,12 +1204,18 @@ func (itype itemType) String() string {
return "TableEnd" return "TableEnd"
case itemKeyStart: case itemKeyStart:
return "KeyStart" return "KeyStart"
case itemKeyEnd:
return "KeyEnd"
case itemArray: case itemArray:
return "Array" return "Array"
case itemArrayEnd: case itemArrayEnd:
return "ArrayEnd" return "ArrayEnd"
case itemCommentStart: case itemCommentStart:
return "CommentStart" return "CommentStart"
case itemInlineTableStart:
return "InlineTableStart"
case itemInlineTableEnd:
return "InlineTableEnd"
} }
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
} }

View file

@ -1,12 +1,14 @@
package toml package toml
import ( import (
"errors"
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode"
"unicode/utf8" "unicode/utf8"
"github.com/BurntSushi/toml/internal"
) )
type parser struct { type parser struct {
@ -14,39 +16,54 @@ type parser struct {
types map[string]tomlType types map[string]tomlType
lx *lexer lx *lexer
// A list of keys in the order that they appear in the TOML data. ordered []Key // List of keys in the order that they appear in the TOML data.
ordered []Key context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
// the full key for the current hash in scope approxLine int // Rough approximation of line number
context Key implicits map[string]bool // Record implied keys (e.g. 'key.group.names').
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
} }
type parseError string // ParseError is used when a file can't be parsed: for example invalid integer
// literals, duplicate keys, etc.
type ParseError struct {
Message string
Line int
LastKey string
}
func (pe parseError) Error() string { func (pe ParseError) Error() string {
return string(pe) return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
pe.Line, pe.LastKey, pe.Message)
} }
func parse(data string) (p *parser, err error) { func parse(data string) (p *parser, err error) {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
var ok bool var ok bool
if err, ok = r.(parseError); ok { if err, ok = r.(ParseError); ok {
return return
} }
panic(r) panic(r)
} }
}() }()
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
// which mangles stuff.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
data = data[2:]
}
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
// file (second byte in surrogate pair being NULL). Again, do this here to
// avoid having to deal with UTF-8/16 stuff in the lexer.
ex := 6
if len(data) < 6 {
ex = len(data)
}
if strings.ContainsRune(data[:ex], 0) {
return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
}
p = &parser{ p = &parser{
mapping: make(map[string]interface{}), mapping: make(map[string]interface{}),
types: make(map[string]tomlType), types: make(map[string]tomlType),
@ -66,13 +83,17 @@ func parse(data string) (p *parser, err error) {
} }
func (p *parser) panicf(format string, v ...interface{}) { func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", msg := fmt.Sprintf(format, v...)
p.approxLine, p.current(), fmt.Sprintf(format, v...)) panic(ParseError{
panic(parseError(msg)) Message: msg,
Line: p.approxLine,
LastKey: p.current(),
})
} }
func (p *parser) next() item { func (p *parser) next() item {
it := p.lx.nextItem() it := p.lx.nextItem()
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
if it.typ == itemError { if it.typ == itemError {
p.panicf("%s", it.val) p.panicf("%s", it.val)
} }
@ -97,44 +118,63 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) { func (p *parser) topLevel(item item) {
switch item.typ { switch item.typ {
case itemCommentStart: case itemCommentStart: // # ..
p.approxLine = item.line p.approxLine = item.line
p.expect(itemText) p.expect(itemText)
case itemTableStart: case itemTableStart: // [ .. ]
kg := p.next() name := p.next()
p.approxLine = kg.line p.approxLine = name.line
var key Key var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
key = append(key, p.keyString(kg)) key = append(key, p.keyString(name))
} }
p.assertEqual(itemTableEnd, kg.typ) p.assertEqual(itemTableEnd, name.typ)
p.establishContext(key, false) p.addContext(key, false)
p.setType("", tomlHash) p.setType("", tomlHash)
p.ordered = append(p.ordered, key) p.ordered = append(p.ordered, key)
case itemArrayTableStart: case itemArrayTableStart: // [[ .. ]]
kg := p.next() name := p.next()
p.approxLine = kg.line p.approxLine = name.line
var key Key var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
key = append(key, p.keyString(kg)) key = append(key, p.keyString(name))
} }
p.assertEqual(itemArrayTableEnd, kg.typ) p.assertEqual(itemArrayTableEnd, name.typ)
p.establishContext(key, true) p.addContext(key, true)
p.setType("", tomlArrayHash) p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key) p.ordered = append(p.ordered, key)
case itemKeyStart: case itemKeyStart: // key = ..
kname := p.next() outerContext := p.context
p.approxLine = kname.line /// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
p.currentKey = p.keyString(kname) k := p.next()
p.approxLine = k.line
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
}
p.assertEqual(itemKeyEnd, k.typ)
val, typ := p.value(p.next()) /// The current key is the last part.
p.setValue(p.currentKey, val) p.currentKey = key[len(key)-1]
p.setType(p.currentKey, typ)
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
context := key[:len(key)-1]
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
/// Set value.
val, typ := p.value(p.next(), false)
p.set(p.currentKey, val, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey)) p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Remove the context we added (preserving any context from [tbl] lines).
p.context = outerContext
p.currentKey = "" p.currentKey = ""
default: default:
p.bug("Unexpected type at top level: %s", item.typ) p.bug("Unexpected type at top level: %s", item.typ)
@ -148,180 +188,253 @@ func (p *parser) keyString(it item) string {
return it.val return it.val
case itemString, itemMultilineString, case itemString, itemMultilineString,
itemRawString, itemRawMultilineString: itemRawString, itemRawMultilineString:
s, _ := p.value(it) s, _ := p.value(it, false)
return s.(string) return s.(string)
default: default:
p.bug("Unexpected key type: %s", it.typ) p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
} }
panic("unreachable")
} }
var datetimeRepl = strings.NewReplacer(
"z", "Z",
"t", "T",
" ", "T")
// value translates an expected value from the lexer into a Go value wrapped // value translates an expected value from the lexer into a Go value wrapped
// as an empty interface. // as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) { func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
switch it.typ { switch it.typ {
case itemString: case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it) return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString: case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString: case itemRawString:
return it.val, p.typeOfPrimitive(it) return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString: case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it) return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemInteger:
return p.valueInteger(it)
case itemFloat:
return p.valueFloat(it)
case itemBool: case itemBool:
switch it.val { switch it.val {
case "true": case "true":
return true, p.typeOfPrimitive(it) return true, p.typeOfPrimitive(it)
case "false": case "false":
return false, p.typeOfPrimitive(it) return false, p.typeOfPrimitive(it)
default:
p.bug("Expected boolean value, but got '%s'.", it.val)
} }
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime: case itemDatetime:
var t time.Time return p.valueDatetime(it)
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray: case itemArray:
array := make([]interface{}, 0) return p.valueArray(it)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart: case itemInlineTableStart:
var ( return p.valueInlineTable(it, parentIsArray)
hash = make(map[string]interface{}) default:
outerContext = p.context p.bug("Unexpected value type: %s", it.typ)
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
} }
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable") panic("unreachable")
} }
// valueInteger converts an integer item to its Go representation (int64)
// and its TOML type, validating underscore placement and leading zeroes.
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
	if !numUnderscoresOK(it.val) {
		p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
	}
	if numHasLeadingZero(it.val) {
		p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
	}
	num, err := strconv.ParseInt(it.val, 0, 64)
	if err == nil {
		return num, p.typeOfPrimitive(it)
	}
	// The lexer should only ever hand us syntactically valid integers, so
	// a parse failure normally indicates a bug. The one legitimate user
	// error the lexer cannot detect is an out-of-range value, which we
	// report separately.
	if e, isNumErr := err.(*strconv.NumError); isNumErr && e.Err == strconv.ErrRange {
		p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
	} else {
		p.bug("Expected integer value, but got '%s'.", it.val)
	}
	return num, p.typeOfPrimitive(it)
}
// valueFloat converts a float item to its Go representation (float64) and
// its TOML type, enforcing TOML's rules on underscores, leading zeroes,
// and fractional parts.
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
	// Split on the characters that separate digit groups so each group's
	// underscores can be validated independently.
	chunks := strings.FieldsFunc(it.val, func(r rune) bool {
		return r == '.' || r == 'e' || r == 'E'
	})
	for _, chunk := range chunks {
		if !numUnderscoresOK(chunk) {
			p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
		}
	}
	if len(chunks) > 0 && numHasLeadingZero(chunks[0]) {
		p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
	}
	if !numPeriodsOK(it.val) {
		// Numbers like '123.' or '1.e2' are acceptable to Go/strconv but
		// not to TOML, which requires a fractional part to be '.'
		// followed by one or more digits.
		p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
	}
	cleaned := strings.Replace(it.val, "_", "", -1)
	// Go doesn't support signed NaN, but the TOML spec does.
	if cleaned == "+nan" || cleaned == "-nan" {
		cleaned = "nan"
	}
	num, err := strconv.ParseFloat(cleaned, 64)
	if err != nil {
		if e, isNumErr := err.(*strconv.NumError); isNumErr && e.Err == strconv.ErrRange {
			p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
		} else {
			p.panicf("Invalid float value: %q", it.val)
		}
	}
	return num, p.typeOfPrimitive(it)
}
// dtTypes lists the datetime layouts accepted by TOML, paired with the
// location each layout is interpreted in. Order matters: the most specific
// layout (offset datetime) is tried first, falling through to local
// datetime, local date, and finally local time.
var dtTypes = []struct {
	fmt  string
	zone *time.Location
}{
	{time.RFC3339Nano, time.Local},
	{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
	{"2006-01-02", internal.LocalDate},
	{"15:04:05.999999999", internal.LocalTime},
}
// valueDatetime converts a datetime item to a time.Time, trying each of
// the layouts in dtTypes in order and reporting a parse error if none
// match.
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
	it.val = datetimeRepl.Replace(it.val)

	var (
		parsed  time.Time
		err     error
		matched bool
	)
	for _, dt := range dtTypes {
		if parsed, err = time.ParseInLocation(dt.fmt, it.val, dt.zone); err == nil {
			matched = true
			break
		}
	}
	if !matched {
		p.panicf("Invalid TOML Datetime: %q.", it.val)
	}
	return parsed, p.typeOfPrimitive(it)
}
// valueArray parses an array value, consuming elements until the closing
// bracket (itemArrayEnd) and skipping any interior comments.
func (p *parser) valueArray(it item) (interface{}, tomlType) {
	p.setType(p.currentKey, tomlArray)

	var (
		elems     []interface{}
		elemTypes []tomlType
	)
	for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
		// Comments inside arrays are consumed and discarded.
		if it.typ == itemCommentStart {
			p.expect(itemText)
			continue
		}
		elemVal, elemType := p.value(it, true)
		elems = append(elems, elemVal)
		elemTypes = append(elemTypes, elemType)
	}
	return elems, tomlArray
}
// valueInlineTable parses an inline table ("{ k = v, ... }") and returns it
// as a map together with tomlHash. parentIsArray tells addContext whether
// the surrounding context is an array of tables. The parser's context and
// current key are saved on entry and restored before returning.
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
	var (
		hash         = make(map[string]interface{})
		outerContext = p.context
		outerKey     = p.currentKey
	)
	// Descend into a fresh context for this table; prevContext is the
	// baseline restored after each key/value pair, since dotted keys
	// inside the table may extend p.context temporarily.
	p.context = append(p.context, p.currentKey)
	prevContext := p.context
	p.currentKey = ""
	p.addImplicit(p.context)
	p.addContext(p.context, parentIsArray)
	/// Loop over all table key/value pairs.
	for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
		if it.typ == itemCommentStart {
			p.expect(itemText)
			continue
		}
		/// Read all key parts.
		k := p.next()
		p.approxLine = k.line
		var key Key
		for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
			key = append(key, p.keyString(k))
		}
		p.assertEqual(itemKeyEnd, k.typ)
		/// The current key is the last part.
		p.currentKey = key[len(key)-1]
		/// All the other parts (if any) are the context; need to set each part
		/// as implicit.
		context := key[:len(key)-1]
		for i := range context {
			p.addImplicitContext(append(p.context, context[i:i+1]...))
		}
		/// Set the value.
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		hash[p.currentKey] = val
		/// Restore context.
		p.context = prevContext
	}
	p.context = outerContext
	p.currentKey = outerKey
	return hash, tomlHash
}
// numHasLeadingZero reports whether this number has leading zeroes, while
// still allowing a bare '0', +/- signs, and base prefixes such as 0x.
func numHasLeadingZero(s string) bool {
	switch {
	case len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])):
		// len > 1 permits plain "0"; isDigit permits prefixes like 0x.
		return true
	case len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0':
		return true
	default:
		return false
	}
}
// numUnderscoresOK checks whether each underscore in s is surrounded by // numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores. // characters that are not underscores.
func numUnderscoresOK(s string) bool { func numUnderscoresOK(s string) bool {
switch s {
case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
return true
}
accept := false accept := false
for _, r := range s { for _, r := range s {
if r == '_' { if r == '_' {
if !accept { if !accept {
return false return false
} }
accept = false
continue
} }
accept = true
// isHexadecimal is a superset of all the permissable characters
// surrounding an underscore.
accept = isHexadecimal(r)
} }
return accept return accept
} }
@ -338,13 +451,12 @@ func numPeriodsOK(s string) bool {
return !period return !period
} }
// establishContext sets the current context of the parser, // Set the current context of the parser, where the context is either a hash or
// where the context is either a hash or an array of hashes. Which one is // an array of hashes, depending on the value of the `array` parameter.
// set depends on the value of the `array` parameter.
// //
// Establishing the context also makes sure that the key isn't a duplicate, and // Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically. // will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) { func (p *parser) addContext(key Key, array bool) {
var ok bool var ok bool
// Always start at the top level and drill down for our context. // Always start at the top level and drill down for our context.
@ -383,7 +495,7 @@ func (p *parser) establishContext(key Key, array bool) {
// list of tables for it. // list of tables for it.
k := key[len(key)-1] k := key[len(key)-1]
if _, ok := hashContext[k]; !ok { if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5) hashContext[k] = make([]map[string]interface{}, 0, 4)
} }
// Add a new table. But make sure the key hasn't already been used // Add a new table. But make sure the key hasn't already been used
@ -391,8 +503,7 @@ func (p *parser) establishContext(key Key, array bool) {
if hash, ok := hashContext[k].([]map[string]interface{}); ok { if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{})) hashContext[k] = append(hash, make(map[string]interface{}))
} else { } else {
p.panicf("Key '%s' was already created and cannot be used as "+ p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
"an array.", keyContext)
} }
} else { } else {
p.setValue(key[len(key)-1], make(map[string]interface{})) p.setValue(key[len(key)-1], make(map[string]interface{}))
@ -400,15 +511,22 @@ func (p *parser) establishContext(key Key, array bool) {
p.context = append(p.context, key[len(key)-1]) p.context = append(p.context, key[len(key)-1])
} }
// set calls setValue and setType.
func (p *parser) set(key string, val interface{}, typ tomlType) {
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
}
// setValue sets the given key to the given value in the current context. // setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for // It will make sure that the key hasn't already been defined, account for
// implicit key groups. // implicit key groups.
func (p *parser) setValue(key string, value interface{}) { func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{} var (
var ok bool tmpHash interface{}
ok bool
hash := p.mapping hash = p.mapping
keyContext := make(Key, 0) keyContext Key
)
for _, k := range p.context { for _, k := range p.context {
keyContext = append(keyContext, k) keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok { if tmpHash, ok = hash[k]; !ok {
@ -422,24 +540,26 @@ func (p *parser) setValue(key string, value interface{}) {
case map[string]interface{}: case map[string]interface{}:
hash = t hash = t
default: default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+ p.panicf("Key '%s' has already been defined.", keyContext)
"it has '%T' instead.", tmpHash)
} }
} }
keyContext = append(keyContext, key) keyContext = append(keyContext, key)
if _, ok := hash[key]; ok { if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have // Normally redefining keys isn't allowed, but the key could have been
// to raise an error since duplicate keys are disallowed. However, // defined implicitly and it's allowed to be redefined concretely. (See
// it's possible that a key was previously defined implicitly. In this // the `valid/implicit-and-explicit-after.toml` in toml-test)
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
// //
// But we have to make sure to stop marking it as an implicit. (So that // But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.) // another redefinition provokes an error.)
// //
// Note that since it has already been defined (as a hash), we don't // Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done. // want to overwrite it. So our business is done.
if p.isArray(keyContext) {
p.removeImplicit(keyContext)
hash[key] = value
return
}
if p.isImplicit(keyContext) { if p.isImplicit(keyContext) {
p.removeImplicit(keyContext) p.removeImplicit(keyContext)
return return
@ -449,6 +569,7 @@ func (p *parser) setValue(key string, value interface{}) {
// key, which is *always* wrong. // key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext) p.panicf("Key '%s' has already been defined.", keyContext)
} }
hash[key] = value hash[key] = value
} }
@ -468,21 +589,15 @@ func (p *parser) setType(key string, typ tomlType) {
p.types[keyContext.String()] = typ p.types[keyContext.String()] = typ
} }
// addImplicit sets the given Key as having been created implicitly. // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
func (p *parser) addImplicit(key Key) { // "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
p.implicits[key.String()] = true func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true }
} func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false }
func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
// removeImplicit stops tagging the given key as having been implicitly func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
// created. func (p *parser) addImplicitContext(key Key) {
func (p *parser) removeImplicit(key Key) { p.addImplicit(key)
p.implicits[key.String()] = false p.addContext(key, false)
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
} }
// current returns the full key name of the current context. // current returns the full key name of the current context.
@ -497,20 +612,54 @@ func (p *parser) current() string {
} }
func stripFirstNewline(s string) string { func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' { if len(s) > 0 && s[0] == '\n' {
return s return s[1:]
} }
return s[1:] if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
return s[2:]
}
return s
} }
func stripEscapedWhitespace(s string) string { // Remove newlines inside triple-quoted strings if a line ends with "\".
esc := strings.Split(s, "\\\n") func stripEscapedNewlines(s string) string {
if len(esc) > 1 { split := strings.Split(s, "\n")
for i := 1; i < len(esc); i++ { if len(split) < 1 {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) return s
}
escNL := false // Keep track of the last non-blank line was escaped.
for i, line := range split {
line = strings.TrimRight(line, " \t\r")
if len(line) == 0 || line[len(line)-1] != '\\' {
split[i] = strings.TrimRight(split[i], "\r")
if !escNL && i != len(split)-1 {
split[i] += "\n"
}
continue
}
escBS := true
for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
escBS = !escBS
}
if escNL {
line = strings.TrimLeft(line, " \t\r")
}
escNL = !escBS
if escBS {
split[i] += "\n"
continue
}
split[i] = line[:len(line)-1] // Remove \
if len(split)-1 > i {
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
} }
} }
return strings.Join(esc, "") return strings.Join(split, "")
} }
func (p *parser) replaceEscapes(str string) string { func (p *parser) replaceEscapes(str string) string {
@ -533,6 +682,9 @@ func (p *parser) replaceEscapes(str string) string {
default: default:
p.bug("Expected valid escape code after \\, but got %q.", s[r]) p.bug("Expected valid escape code after \\, but got %q.", s[r])
return "" return ""
case ' ', '\t':
p.panicf("invalid escape: '\\%c'", s[r])
return ""
case 'b': case 'b':
replaced = append(replaced, rune(0x0008)) replaced = append(replaced, rune(0x0008))
r += 1 r += 1
@ -585,8 +737,3 @@ func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
} }
return rune(hex) return rune(hex)
} }
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}

View file

@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

View file

@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable") panic("unreachable")
} }
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}

View file

@ -0,0 +1,26 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package socket
import (
"syscall"
)
// ioComplete checks the flags and result of a syscall, to be used as return
// value in a syscall.RawConn.Read or Write callback.
func ioComplete(flags int, operr error) bool {
if flags&syscall.MSG_DONTWAIT != 0 {
// Caller explicitly said don't wait, so always return immediately.
return true
}
if operr == syscall.EAGAIN || operr == syscall.EWOULDBLOCK {
// No data available, block for I/O and try again.
return false
}
return true
}

View file

@ -0,0 +1,22 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || windows || zos
// +build aix windows zos
package socket
import (
"syscall"
)
// ioComplete checks the flags and result of a syscall, to be used as return
// value in a syscall.RawConn.Read or Write callback.
func ioComplete(flags int, operr error) bool {
if operr == syscall.EAGAIN || operr == syscall.EWOULDBLOCK {
// No data available, block for I/O and try again.
return false
}
return true
}

View file

@ -10,7 +10,6 @@ package socket
import ( import (
"net" "net"
"os" "os"
"syscall"
) )
func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) {
@ -28,10 +27,7 @@ func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) {
var n int var n int
fn := func(s uintptr) bool { fn := func(s uintptr) bool {
n, operr = recvmmsg(s, hs, flags) n, operr = recvmmsg(s, hs, flags)
if operr == syscall.EAGAIN { return ioComplete(flags, operr)
return false
}
return true
} }
if err := c.c.Read(fn); err != nil { if err := c.c.Read(fn); err != nil {
return n, err return n, err
@ -60,10 +56,7 @@ func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) {
var n int var n int
fn := func(s uintptr) bool { fn := func(s uintptr) bool {
n, operr = sendmmsg(s, hs, flags) n, operr = sendmmsg(s, hs, flags)
if operr == syscall.EAGAIN { return ioComplete(flags, operr)
return false
}
return true
} }
if err := c.c.Write(fn); err != nil { if err := c.c.Write(fn); err != nil {
return n, err return n, err

View file

@ -9,7 +9,6 @@ package socket
import ( import (
"os" "os"
"syscall"
) )
func (c *Conn) recvMsg(m *Message, flags int) error { func (c *Conn) recvMsg(m *Message, flags int) error {
@ -25,10 +24,7 @@ func (c *Conn) recvMsg(m *Message, flags int) error {
var n int var n int
fn := func(s uintptr) bool { fn := func(s uintptr) bool {
n, operr = recvmsg(s, &h, flags) n, operr = recvmsg(s, &h, flags)
if operr == syscall.EAGAIN || operr == syscall.EWOULDBLOCK { return ioComplete(flags, operr)
return false
}
return true
} }
if err := c.c.Read(fn); err != nil { if err := c.c.Read(fn); err != nil {
return err return err
@ -64,10 +60,7 @@ func (c *Conn) sendMsg(m *Message, flags int) error {
var n int var n int
fn := func(s uintptr) bool { fn := func(s uintptr) bool {
n, operr = sendmsg(s, &h, flags) n, operr = sendmsg(s, &h, flags)
if operr == syscall.EAGAIN || operr == syscall.EWOULDBLOCK { return ioComplete(flags, operr)
return false
}
return true
} }
if err := c.c.Write(fn); err != nil { if err := c.c.Write(fn); err != nil {
return err return err

48
vendor/golang.org/x/sys/unix/ifreq_linux.go generated vendored Normal file
View file

@ -0,0 +1,48 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
package unix
import "unsafe"
// Helpers for dealing with ifreq since it contains a union and thus requires a
// lot of unsafe.Pointer casts to use properly.
// newIfreq creates an ifreq with the input network interface name after
// validating the name does not exceed IFNAMSIZ-1 (trailing NULL required)
// bytes.
func newIfreq(name string) (*ifreq, error) {
	// Reject names that leave no room for the terminating NULL byte.
	if len(name) >= IFNAMSIZ {
		return nil, EINVAL
	}
	ifr := &ifreq{}
	copy(ifr.Ifrn[:], name)
	return ifr, nil
}
// An ifreqData is an ifreq but with a typed unsafe.Pointer field for data in
// the union. This is required in order to comply with the unsafe.Pointer rules
// since the "pointer-ness" of data would not be preserved if it were cast into
// the byte array of a raw ifreq.
type ifreqData struct {
	name [IFNAMSIZ]byte // network interface name, same layout as ifreq.Ifrn
	data unsafe.Pointer // typed pointer occupying the start of the union

	// Pad to the same size as ifreq.
	_ [len(ifreq{}.Ifru) - SizeofPtr]byte
}
// SetData produces an ifreqData with the pointer p set for ioctls which require
// arbitrary pointer data.
func (ifr ifreq) SetData(p unsafe.Pointer) ifreqData {
	d := ifreqData{name: ifr.Ifrn}
	d.data = p
	return d
}

View file

@ -50,28 +50,19 @@ func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
return err return err
} }
type ifreqEthtool struct {
name [IFNAMSIZ]byte
data unsafe.Pointer
}
// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network // IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network
// device specified by ifname. // device specified by ifname.
func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
// Leave room for terminating NULL byte. ifr, err := newIfreq(ifname)
if len(ifname) >= IFNAMSIZ { if err != nil {
return nil, EINVAL return nil, err
} }
value := EthtoolDrvinfo{ value := EthtoolDrvinfo{Cmd: ETHTOOL_GDRVINFO}
Cmd: ETHTOOL_GDRVINFO, ifrd := ifr.SetData(unsafe.Pointer(&value))
}
ifreq := ifreqEthtool{ err = ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifrd)))
data: unsafe.Pointer(&value), runtime.KeepAlive(ifrd)
}
copy(ifreq.name[:], ifname)
err := ioctl(fd, SIOCETHTOOL, uintptr(unsafe.Pointer(&ifreq)))
runtime.KeepAlive(ifreq)
return &value, err return &value, err
} }

View file

@ -630,3 +630,8 @@ const (
PPS_GETCAP = 0x800470a3 PPS_GETCAP = 0x800470a3
PPS_FETCH = 0xc00470a4 PPS_FETCH = 0xc00470a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [16]byte
}

View file

@ -648,3 +648,8 @@ const (
PPS_GETCAP = 0x800870a3 PPS_GETCAP = 0x800870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

View file

@ -625,3 +625,8 @@ const (
PPS_GETCAP = 0x800470a3 PPS_GETCAP = 0x800470a3
PPS_FETCH = 0xc00470a4 PPS_FETCH = 0xc00470a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [16]byte
}

View file

@ -627,3 +627,8 @@ const (
PPS_GETCAP = 0x800870a3 PPS_GETCAP = 0x800870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

View file

@ -631,3 +631,8 @@ const (
PPS_GETCAP = 0x400470a3 PPS_GETCAP = 0x400470a3
PPS_FETCH = 0xc00470a4 PPS_FETCH = 0xc00470a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [16]byte
}

View file

@ -630,3 +630,8 @@ const (
PPS_GETCAP = 0x400870a3 PPS_GETCAP = 0x400870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

View file

@ -630,3 +630,8 @@ const (
PPS_GETCAP = 0x400870a3 PPS_GETCAP = 0x400870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

View file

@ -631,3 +631,8 @@ const (
PPS_GETCAP = 0x400470a3 PPS_GETCAP = 0x400470a3
PPS_FETCH = 0xc00470a4 PPS_FETCH = 0xc00470a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [16]byte
}

View file

@ -637,3 +637,8 @@ const (
PPS_GETCAP = 0x400470a3 PPS_GETCAP = 0x400470a3
PPS_FETCH = 0xc00470a4 PPS_FETCH = 0xc00470a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [16]byte
}

View file

@ -637,3 +637,8 @@ const (
PPS_GETCAP = 0x400870a3 PPS_GETCAP = 0x400870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

View file

@ -637,3 +637,8 @@ const (
PPS_GETCAP = 0x400870a3 PPS_GETCAP = 0x400870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

View file

@ -655,3 +655,8 @@ const (
PPS_GETCAP = 0x800870a3 PPS_GETCAP = 0x800870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

View file

@ -651,3 +651,8 @@ const (
PPS_GETCAP = 0x800870a3 PPS_GETCAP = 0x800870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

View file

@ -632,3 +632,8 @@ const (
PPS_GETCAP = 0x400870a3 PPS_GETCAP = 0x400870a3
PPS_FETCH = 0xc00870a4 PPS_FETCH = 0xc00870a4
) )
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}

7
vendor/modules.txt vendored
View file

@ -1,8 +1,9 @@
# 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a # 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a
4d63.com/gochecknoglobals/checknoglobals 4d63.com/gochecknoglobals/checknoglobals
# github.com/BurntSushi/toml v0.3.1 # github.com/BurntSushi/toml v0.4.1
## explicit ## explicit
github.com/BurntSushi/toml github.com/BurntSushi/toml
github.com/BurntSushi/toml/internal
# github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 # github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24
github.com/Djarvur/go-err113 github.com/Djarvur/go-err113
# github.com/Masterminds/semver v1.5.0 # github.com/Masterminds/semver v1.5.0
@ -416,7 +417,7 @@ golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile golang.org/x/mod/modfile
golang.org/x/mod/module golang.org/x/mod/module
golang.org/x/mod/semver golang.org/x/mod/semver
# golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 # golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d
## explicit ## explicit
golang.org/x/net/bpf golang.org/x/net/bpf
golang.org/x/net/http/httpguts golang.org/x/net/http/httpguts
@ -429,7 +430,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/ipv4 golang.org/x/net/ipv4
golang.org/x/net/ipv6 golang.org/x/net/ipv6
golang.org/x/net/proxy golang.org/x/net/proxy
# golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c # golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069
## explicit ## explicit
golang.org/x/sys/cpu golang.org/x/sys/cpu
golang.org/x/sys/execabs golang.org/x/sys/execabs