Mirror of https://github.com/SagerNet/sing.git (synced 2025-04-05 04:47:40 +03:00)

Compare commits: 9 commits, dev ... v0.5.0-alp

Commits (SHA1):
- 37bee34b73
- c86c25365c
- 3e02be0e9c
- 89c9d91019
- dd8cd39ef5
- 1482471859
- cb5ca6e926
- db031f7aef
- 1c6ec119f1

33 changed files with 1611 additions and 4609 deletions
.github/workflows/debug.yml (vendored, 32 changed lines)

```diff
@@ -30,38 +30,6 @@ jobs:
       - name: Build
         run: |
           make test
-  build_go118:
-    name: Linux Debug build (Go 1.18)
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Setup Go
-        uses: actions/setup-go@v4
-        with:
-          go-version: ~1.18
-        continue-on-error: true
-      - name: Build
-        run: |
-          make test
-  build_go119:
-    name: Linux Debug build (Go 1.19)
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Setup Go
-        uses: actions/setup-go@v4
-        with:
-          go-version: ~1.19
-        continue-on-error: true
-      - name: Build
-        run: |
-          make test
   build_go120:
     name: Linux Debug build (Go 1.20)
     runs-on: ubuntu-latest
```
common/binary/README.md (new file, 3 lines)

```markdown
# binary

mod from go 1.22.3
```
common/binary/binary.go (new file, 817 lines)
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package binary implements simple translation between numbers and byte
|
||||
// sequences and encoding and decoding of varints.
|
||||
//
|
||||
// Numbers are translated by reading and writing fixed-size values.
|
||||
// A fixed-size value is either a fixed-size arithmetic
|
||||
// type (bool, int8, uint8, int16, float32, complex64, ...)
|
||||
// or an array or struct containing only fixed-size values.
|
||||
//
|
||||
// The varint functions encode and decode single integer values using
|
||||
// a variable-length encoding; smaller values require fewer bytes.
|
||||
// For a specification, see
|
||||
// https://developers.google.com/protocol-buffers/docs/encoding.
|
||||
//
|
||||
// This package favors simplicity over efficiency. Clients that require
|
||||
// high-performance serialization, especially for large data structures,
|
||||
// should look at more advanced solutions such as the [encoding/gob]
|
||||
// package or [google.golang.org/protobuf] for protocol buffers.
|
||||
package binary
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A ByteOrder specifies how to convert byte slices into
|
||||
// 16-, 32-, or 64-bit unsigned integers.
|
||||
//
|
||||
// It is implemented by [LittleEndian], [BigEndian], and [NativeEndian].
|
||||
type ByteOrder interface {
|
||||
Uint16([]byte) uint16
|
||||
Uint32([]byte) uint32
|
||||
Uint64([]byte) uint64
|
||||
PutUint16([]byte, uint16)
|
||||
PutUint32([]byte, uint32)
|
||||
PutUint64([]byte, uint64)
|
||||
String() string
|
||||
}
|
||||
|
||||
// AppendByteOrder specifies how to append 16-, 32-, or 64-bit unsigned integers
|
||||
// into a byte slice.
|
||||
//
|
||||
// It is implemented by [LittleEndian], [BigEndian], and [NativeEndian].
|
||||
type AppendByteOrder interface {
|
||||
AppendUint16([]byte, uint16) []byte
|
||||
AppendUint32([]byte, uint32) []byte
|
||||
AppendUint64([]byte, uint64) []byte
|
||||
String() string
|
||||
}
|
||||
|
||||
// LittleEndian is the little-endian implementation of [ByteOrder] and [AppendByteOrder].
|
||||
var LittleEndian littleEndian
|
||||
|
||||
// BigEndian is the big-endian implementation of [ByteOrder] and [AppendByteOrder].
|
||||
var BigEndian bigEndian
|
||||
|
||||
type littleEndian struct{}
|
||||
|
||||
func (littleEndian) Uint16(b []byte) uint16 {
|
||||
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint16(b[0]) | uint16(b[1])<<8
|
||||
}
|
||||
|
||||
func (littleEndian) PutUint16(b []byte, v uint16) {
|
||||
_ = b[1] // early bounds check to guarantee safety of writes below
|
||||
b[0] = byte(v)
|
||||
b[1] = byte(v >> 8)
|
||||
}
|
||||
|
||||
func (littleEndian) AppendUint16(b []byte, v uint16) []byte {
|
||||
return append(b,
|
||||
byte(v),
|
||||
byte(v>>8),
|
||||
)
|
||||
}
|
||||
|
||||
func (littleEndian) Uint32(b []byte) uint32 {
|
||||
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
|
||||
}
|
||||
|
||||
func (littleEndian) PutUint32(b []byte, v uint32) {
|
||||
_ = b[3] // early bounds check to guarantee safety of writes below
|
||||
b[0] = byte(v)
|
||||
b[1] = byte(v >> 8)
|
||||
b[2] = byte(v >> 16)
|
||||
b[3] = byte(v >> 24)
|
||||
}
|
||||
|
||||
func (littleEndian) AppendUint32(b []byte, v uint32) []byte {
|
||||
return append(b,
|
||||
byte(v),
|
||||
byte(v>>8),
|
||||
byte(v>>16),
|
||||
byte(v>>24),
|
||||
)
|
||||
}
|
||||
|
||||
func (littleEndian) Uint64(b []byte) uint64 {
|
||||
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
|
||||
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
|
||||
}
|
||||
|
||||
func (littleEndian) PutUint64(b []byte, v uint64) {
|
||||
_ = b[7] // early bounds check to guarantee safety of writes below
|
||||
b[0] = byte(v)
|
||||
b[1] = byte(v >> 8)
|
||||
b[2] = byte(v >> 16)
|
||||
b[3] = byte(v >> 24)
|
||||
b[4] = byte(v >> 32)
|
||||
b[5] = byte(v >> 40)
|
||||
b[6] = byte(v >> 48)
|
||||
b[7] = byte(v >> 56)
|
||||
}
|
||||
|
||||
func (littleEndian) AppendUint64(b []byte, v uint64) []byte {
|
||||
return append(b,
|
||||
byte(v),
|
||||
byte(v>>8),
|
||||
byte(v>>16),
|
||||
byte(v>>24),
|
||||
byte(v>>32),
|
||||
byte(v>>40),
|
||||
byte(v>>48),
|
||||
byte(v>>56),
|
||||
)
|
||||
}
|
||||
|
||||
func (littleEndian) String() string { return "LittleEndian" }
|
||||
|
||||
func (littleEndian) GoString() string { return "binary.LittleEndian" }
|
||||
|
||||
type bigEndian struct{}
|
||||
|
||||
func (bigEndian) Uint16(b []byte) uint16 {
|
||||
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint16(b[1]) | uint16(b[0])<<8
|
||||
}
|
||||
|
||||
func (bigEndian) PutUint16(b []byte, v uint16) {
|
||||
_ = b[1] // early bounds check to guarantee safety of writes below
|
||||
b[0] = byte(v >> 8)
|
||||
b[1] = byte(v)
|
||||
}
|
||||
|
||||
func (bigEndian) AppendUint16(b []byte, v uint16) []byte {
|
||||
return append(b,
|
||||
byte(v>>8),
|
||||
byte(v),
|
||||
)
|
||||
}
|
||||
|
||||
func (bigEndian) Uint32(b []byte) uint32 {
|
||||
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
|
||||
}
|
||||
|
||||
func (bigEndian) PutUint32(b []byte, v uint32) {
|
||||
_ = b[3] // early bounds check to guarantee safety of writes below
|
||||
b[0] = byte(v >> 24)
|
||||
b[1] = byte(v >> 16)
|
||||
b[2] = byte(v >> 8)
|
||||
b[3] = byte(v)
|
||||
}
|
||||
|
||||
func (bigEndian) AppendUint32(b []byte, v uint32) []byte {
|
||||
return append(b,
|
||||
byte(v>>24),
|
||||
byte(v>>16),
|
||||
byte(v>>8),
|
||||
byte(v),
|
||||
)
|
||||
}
|
||||
|
||||
func (bigEndian) Uint64(b []byte) uint64 {
|
||||
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
|
||||
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
|
||||
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
|
||||
}
|
||||
|
||||
func (bigEndian) PutUint64(b []byte, v uint64) {
|
||||
_ = b[7] // early bounds check to guarantee safety of writes below
|
||||
b[0] = byte(v >> 56)
|
||||
b[1] = byte(v >> 48)
|
||||
b[2] = byte(v >> 40)
|
||||
b[3] = byte(v >> 32)
|
||||
b[4] = byte(v >> 24)
|
||||
b[5] = byte(v >> 16)
|
||||
b[6] = byte(v >> 8)
|
||||
b[7] = byte(v)
|
||||
}
|
||||
|
||||
func (bigEndian) AppendUint64(b []byte, v uint64) []byte {
|
||||
return append(b,
|
||||
byte(v>>56),
|
||||
byte(v>>48),
|
||||
byte(v>>40),
|
||||
byte(v>>32),
|
||||
byte(v>>24),
|
||||
byte(v>>16),
|
||||
byte(v>>8),
|
||||
byte(v),
|
||||
)
|
||||
}
|
||||
|
||||
func (bigEndian) String() string { return "BigEndian" }
|
||||
|
||||
func (bigEndian) GoString() string { return "binary.BigEndian" }
|
||||
|
||||
func (nativeEndian) String() string { return "NativeEndian" }
|
||||
|
||||
func (nativeEndian) GoString() string { return "binary.NativeEndian" }
|
||||
|
||||
// Read reads structured binary data from r into data.
|
||||
// Data must be a pointer to a fixed-size value or a slice
|
||||
// of fixed-size values.
|
||||
// Bytes read from r are decoded using the specified byte order
|
||||
// and written to successive fields of the data.
|
||||
// When decoding boolean values, a zero byte is decoded as false, and
|
||||
// any other non-zero byte is decoded as true.
|
||||
// When reading into structs, the field data for fields with
|
||||
// blank (_) field names is skipped; i.e., blank field names
|
||||
// may be used for padding.
|
||||
// When reading into a struct, all non-blank fields must be exported
|
||||
// or Read may panic.
|
||||
//
|
||||
// The error is [io.EOF] only if no bytes were read.
|
||||
// If an [io.EOF] happens after reading some but not all the bytes,
|
||||
// Read returns [io.ErrUnexpectedEOF].
|
||||
func Read(r io.Reader, order ByteOrder, data any) error {
|
||||
// Fast path for basic types and slices.
|
||||
if n := intDataSize(data); n != 0 {
|
||||
bs := make([]byte, n)
|
||||
if _, err := io.ReadFull(r, bs); err != nil {
|
||||
return err
|
||||
}
|
||||
switch data := data.(type) {
|
||||
case *bool:
|
||||
*data = bs[0] != 0
|
||||
case *int8:
|
||||
*data = int8(bs[0])
|
||||
case *uint8:
|
||||
*data = bs[0]
|
||||
case *int16:
|
||||
*data = int16(order.Uint16(bs))
|
||||
case *uint16:
|
||||
*data = order.Uint16(bs)
|
||||
case *int32:
|
||||
*data = int32(order.Uint32(bs))
|
||||
case *uint32:
|
||||
*data = order.Uint32(bs)
|
||||
case *int64:
|
||||
*data = int64(order.Uint64(bs))
|
||||
case *uint64:
|
||||
*data = order.Uint64(bs)
|
||||
case *float32:
|
||||
*data = math.Float32frombits(order.Uint32(bs))
|
||||
case *float64:
|
||||
*data = math.Float64frombits(order.Uint64(bs))
|
||||
case []bool:
|
||||
for i, x := range bs { // Easier to loop over the input for 8-bit values.
|
||||
data[i] = x != 0
|
||||
}
|
||||
case []int8:
|
||||
for i, x := range bs {
|
||||
data[i] = int8(x)
|
||||
}
|
||||
case []uint8:
|
||||
copy(data, bs)
|
||||
case []int16:
|
||||
for i := range data {
|
||||
data[i] = int16(order.Uint16(bs[2*i:]))
|
||||
}
|
||||
case []uint16:
|
||||
for i := range data {
|
||||
data[i] = order.Uint16(bs[2*i:])
|
||||
}
|
||||
case []int32:
|
||||
for i := range data {
|
||||
data[i] = int32(order.Uint32(bs[4*i:]))
|
||||
}
|
||||
case []uint32:
|
||||
for i := range data {
|
||||
data[i] = order.Uint32(bs[4*i:])
|
||||
}
|
||||
case []int64:
|
||||
for i := range data {
|
||||
data[i] = int64(order.Uint64(bs[8*i:]))
|
||||
}
|
||||
case []uint64:
|
||||
for i := range data {
|
||||
data[i] = order.Uint64(bs[8*i:])
|
||||
}
|
||||
case []float32:
|
||||
for i := range data {
|
||||
data[i] = math.Float32frombits(order.Uint32(bs[4*i:]))
|
||||
}
|
||||
case []float64:
|
||||
for i := range data {
|
||||
data[i] = math.Float64frombits(order.Uint64(bs[8*i:]))
|
||||
}
|
||||
default:
|
||||
n = 0 // fast path doesn't apply
|
||||
}
|
||||
if n != 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to reflect-based decoding.
|
||||
v := reflect.ValueOf(data)
|
||||
size := -1
|
||||
switch v.Kind() {
|
||||
case reflect.Pointer:
|
||||
v = v.Elem()
|
||||
size = dataSize(v)
|
||||
case reflect.Slice:
|
||||
size = dataSize(v)
|
||||
}
|
||||
if size < 0 {
|
||||
return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String())
|
||||
}
|
||||
d := &decoder{order: order, buf: make([]byte, size)}
|
||||
if _, err := io.ReadFull(r, d.buf); err != nil {
|
||||
return err
|
||||
}
|
||||
d.value(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes the binary representation of data into w.
|
||||
// Data must be a fixed-size value or a slice of fixed-size
|
||||
// values, or a pointer to such data.
|
||||
// Boolean values encode as one byte: 1 for true, and 0 for false.
|
||||
// Bytes written to w are encoded using the specified byte order
|
||||
// and read from successive fields of the data.
|
||||
// When writing structs, zero values are written for fields
|
||||
// with blank (_) field names.
|
||||
func Write(w io.Writer, order ByteOrder, data any) error {
|
||||
// Fast path for basic types and slices.
|
||||
if n := intDataSize(data); n != 0 {
|
||||
bs := make([]byte, n)
|
||||
switch v := data.(type) {
|
||||
case *bool:
|
||||
if *v {
|
||||
bs[0] = 1
|
||||
} else {
|
||||
bs[0] = 0
|
||||
}
|
||||
case bool:
|
||||
if v {
|
||||
bs[0] = 1
|
||||
} else {
|
||||
bs[0] = 0
|
||||
}
|
||||
case []bool:
|
||||
for i, x := range v {
|
||||
if x {
|
||||
bs[i] = 1
|
||||
} else {
|
||||
bs[i] = 0
|
||||
}
|
||||
}
|
||||
case *int8:
|
||||
bs[0] = byte(*v)
|
||||
case int8:
|
||||
bs[0] = byte(v)
|
||||
case []int8:
|
||||
for i, x := range v {
|
||||
bs[i] = byte(x)
|
||||
}
|
||||
case *uint8:
|
||||
bs[0] = *v
|
||||
case uint8:
|
||||
bs[0] = v
|
||||
case []uint8:
|
||||
bs = v
|
||||
case *int16:
|
||||
order.PutUint16(bs, uint16(*v))
|
||||
case int16:
|
||||
order.PutUint16(bs, uint16(v))
|
||||
case []int16:
|
||||
for i, x := range v {
|
||||
order.PutUint16(bs[2*i:], uint16(x))
|
||||
}
|
||||
case *uint16:
|
||||
order.PutUint16(bs, *v)
|
||||
case uint16:
|
||||
order.PutUint16(bs, v)
|
||||
case []uint16:
|
||||
for i, x := range v {
|
||||
order.PutUint16(bs[2*i:], x)
|
||||
}
|
||||
case *int32:
|
||||
order.PutUint32(bs, uint32(*v))
|
||||
case int32:
|
||||
order.PutUint32(bs, uint32(v))
|
||||
case []int32:
|
||||
for i, x := range v {
|
||||
order.PutUint32(bs[4*i:], uint32(x))
|
||||
}
|
||||
case *uint32:
|
||||
order.PutUint32(bs, *v)
|
||||
case uint32:
|
||||
order.PutUint32(bs, v)
|
||||
case []uint32:
|
||||
for i, x := range v {
|
||||
order.PutUint32(bs[4*i:], x)
|
||||
}
|
||||
case *int64:
|
||||
order.PutUint64(bs, uint64(*v))
|
||||
case int64:
|
||||
order.PutUint64(bs, uint64(v))
|
||||
case []int64:
|
||||
for i, x := range v {
|
||||
order.PutUint64(bs[8*i:], uint64(x))
|
||||
}
|
||||
case *uint64:
|
||||
order.PutUint64(bs, *v)
|
||||
case uint64:
|
||||
order.PutUint64(bs, v)
|
||||
case []uint64:
|
||||
for i, x := range v {
|
||||
order.PutUint64(bs[8*i:], x)
|
||||
}
|
||||
case *float32:
|
||||
order.PutUint32(bs, math.Float32bits(*v))
|
||||
case float32:
|
||||
order.PutUint32(bs, math.Float32bits(v))
|
||||
case []float32:
|
||||
for i, x := range v {
|
||||
order.PutUint32(bs[4*i:], math.Float32bits(x))
|
||||
}
|
||||
case *float64:
|
||||
order.PutUint64(bs, math.Float64bits(*v))
|
||||
case float64:
|
||||
order.PutUint64(bs, math.Float64bits(v))
|
||||
case []float64:
|
||||
for i, x := range v {
|
||||
order.PutUint64(bs[8*i:], math.Float64bits(x))
|
||||
}
|
||||
}
|
||||
_, err := w.Write(bs)
|
||||
return err
|
||||
}
|
||||
|
||||
// Fallback to reflect-based encoding.
|
||||
v := reflect.Indirect(reflect.ValueOf(data))
|
||||
size := dataSize(v)
|
||||
if size < 0 {
|
||||
return errors.New("binary.Write: some values are not fixed-sized in type " + reflect.TypeOf(data).String())
|
||||
}
|
||||
buf := make([]byte, size)
|
||||
e := &encoder{order: order, buf: buf}
|
||||
e.value(v)
|
||||
_, err := w.Write(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// Size returns how many bytes [Write] would generate to encode the value v, which
|
||||
// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
|
||||
// If v is neither of these, Size returns -1.
|
||||
func Size(v any) int {
|
||||
return dataSize(reflect.Indirect(reflect.ValueOf(v)))
|
||||
}
|
||||
|
||||
var structSize sync.Map // map[reflect.Type]int
|
||||
|
||||
// dataSize returns the number of bytes the actual data represented by v occupies in memory.
|
||||
// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
|
||||
// it returns the length of the slice times the element size and does not count the memory
|
||||
// occupied by the header. If the type of v is not acceptable, dataSize returns -1.
|
||||
func dataSize(v reflect.Value) int {
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
if s := sizeof(v.Type().Elem()); s >= 0 {
|
||||
return s * v.Len()
|
||||
}
|
||||
|
||||
case reflect.Struct:
|
||||
t := v.Type()
|
||||
if size, ok := structSize.Load(t); ok {
|
||||
return size.(int)
|
||||
}
|
||||
size := sizeof(t)
|
||||
structSize.Store(t, size)
|
||||
return size
|
||||
|
||||
default:
|
||||
if v.IsValid() {
|
||||
return sizeof(v.Type())
|
||||
}
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable.
|
||||
func sizeof(t reflect.Type) int {
|
||||
switch t.Kind() {
|
||||
case reflect.Array:
|
||||
if s := sizeof(t.Elem()); s >= 0 {
|
||||
return s * t.Len()
|
||||
}
|
||||
|
||||
case reflect.Struct:
|
||||
sum := 0
|
||||
for i, n := 0, t.NumField(); i < n; i++ {
|
||||
s := sizeof(t.Field(i).Type)
|
||||
if s < 0 {
|
||||
return -1
|
||||
}
|
||||
sum += s
|
||||
}
|
||||
return sum
|
||||
|
||||
case reflect.Bool,
|
||||
reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
|
||||
reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
return int(t.Size())
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
type coder struct {
|
||||
order ByteOrder
|
||||
buf []byte
|
||||
offset int
|
||||
}
|
||||
|
||||
type (
|
||||
decoder coder
|
||||
encoder coder
|
||||
)
|
||||
|
||||
func (d *decoder) bool() bool {
|
||||
x := d.buf[d.offset]
|
||||
d.offset++
|
||||
return x != 0
|
||||
}
|
||||
|
||||
func (e *encoder) bool(x bool) {
|
||||
if x {
|
||||
e.buf[e.offset] = 1
|
||||
} else {
|
||||
e.buf[e.offset] = 0
|
||||
}
|
||||
e.offset++
|
||||
}
|
||||
|
||||
func (d *decoder) uint8() uint8 {
|
||||
x := d.buf[d.offset]
|
||||
d.offset++
|
||||
return x
|
||||
}
|
||||
|
||||
func (e *encoder) uint8(x uint8) {
|
||||
e.buf[e.offset] = x
|
||||
e.offset++
|
||||
}
|
||||
|
||||
func (d *decoder) uint16() uint16 {
|
||||
x := d.order.Uint16(d.buf[d.offset : d.offset+2])
|
||||
d.offset += 2
|
||||
return x
|
||||
}
|
||||
|
||||
func (e *encoder) uint16(x uint16) {
|
||||
e.order.PutUint16(e.buf[e.offset:e.offset+2], x)
|
||||
e.offset += 2
|
||||
}
|
||||
|
||||
func (d *decoder) uint32() uint32 {
|
||||
x := d.order.Uint32(d.buf[d.offset : d.offset+4])
|
||||
d.offset += 4
|
||||
return x
|
||||
}
|
||||
|
||||
func (e *encoder) uint32(x uint32) {
|
||||
e.order.PutUint32(e.buf[e.offset:e.offset+4], x)
|
||||
e.offset += 4
|
||||
}
|
||||
|
||||
func (d *decoder) uint64() uint64 {
|
||||
x := d.order.Uint64(d.buf[d.offset : d.offset+8])
|
||||
d.offset += 8
|
||||
return x
|
||||
}
|
||||
|
||||
func (e *encoder) uint64(x uint64) {
|
||||
e.order.PutUint64(e.buf[e.offset:e.offset+8], x)
|
||||
e.offset += 8
|
||||
}
|
||||
|
||||
func (d *decoder) int8() int8 { return int8(d.uint8()) }
|
||||
|
||||
func (e *encoder) int8(x int8) { e.uint8(uint8(x)) }
|
||||
|
||||
func (d *decoder) int16() int16 { return int16(d.uint16()) }
|
||||
|
||||
func (e *encoder) int16(x int16) { e.uint16(uint16(x)) }
|
||||
|
||||
func (d *decoder) int32() int32 { return int32(d.uint32()) }
|
||||
|
||||
func (e *encoder) int32(x int32) { e.uint32(uint32(x)) }
|
||||
|
||||
func (d *decoder) int64() int64 { return int64(d.uint64()) }
|
||||
|
||||
func (e *encoder) int64(x int64) { e.uint64(uint64(x)) }
|
||||
|
||||
func (d *decoder) value(v reflect.Value) {
|
||||
switch v.Kind() {
|
||||
case reflect.Array:
|
||||
l := v.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
d.value(v.Index(i))
|
||||
}
|
||||
|
||||
case reflect.Struct:
|
||||
t := v.Type()
|
||||
l := v.NumField()
|
||||
for i := 0; i < l; i++ {
|
||||
// Note: Calling v.CanSet() below is an optimization.
|
||||
// It would be sufficient to check the field name,
|
||||
// but creating the StructField info for each field is
|
||||
// costly (run "go test -bench=ReadStruct" and compare
|
||||
// results when making changes to this code).
|
||||
if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
|
||||
d.value(v)
|
||||
} else {
|
||||
d.skip(v)
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
l := v.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
d.value(v.Index(i))
|
||||
}
|
||||
|
||||
case reflect.Bool:
|
||||
v.SetBool(d.bool())
|
||||
|
||||
case reflect.Int8:
|
||||
v.SetInt(int64(d.int8()))
|
||||
case reflect.Int16:
|
||||
v.SetInt(int64(d.int16()))
|
||||
case reflect.Int32:
|
||||
v.SetInt(int64(d.int32()))
|
||||
case reflect.Int64:
|
||||
v.SetInt(d.int64())
|
||||
|
||||
case reflect.Uint8:
|
||||
v.SetUint(uint64(d.uint8()))
|
||||
case reflect.Uint16:
|
||||
v.SetUint(uint64(d.uint16()))
|
||||
case reflect.Uint32:
|
||||
v.SetUint(uint64(d.uint32()))
|
||||
case reflect.Uint64:
|
||||
v.SetUint(d.uint64())
|
||||
|
||||
case reflect.Float32:
|
||||
v.SetFloat(float64(math.Float32frombits(d.uint32())))
|
||||
case reflect.Float64:
|
||||
v.SetFloat(math.Float64frombits(d.uint64()))
|
||||
|
||||
case reflect.Complex64:
|
||||
v.SetComplex(complex(
|
||||
float64(math.Float32frombits(d.uint32())),
|
||||
float64(math.Float32frombits(d.uint32())),
|
||||
))
|
||||
case reflect.Complex128:
|
||||
v.SetComplex(complex(
|
||||
math.Float64frombits(d.uint64()),
|
||||
math.Float64frombits(d.uint64()),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) value(v reflect.Value) {
|
||||
switch v.Kind() {
|
||||
case reflect.Array:
|
||||
l := v.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
e.value(v.Index(i))
|
||||
}
|
||||
|
||||
case reflect.Struct:
|
||||
t := v.Type()
|
||||
l := v.NumField()
|
||||
for i := 0; i < l; i++ {
|
||||
// see comment for corresponding code in decoder.value()
|
||||
if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
|
||||
e.value(v)
|
||||
} else {
|
||||
e.skip(v)
|
||||
}
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
l := v.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
e.value(v.Index(i))
|
||||
}
|
||||
|
||||
case reflect.Bool:
|
||||
e.bool(v.Bool())
|
||||
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
switch v.Type().Kind() {
|
||||
case reflect.Int8:
|
||||
e.int8(int8(v.Int()))
|
||||
case reflect.Int16:
|
||||
e.int16(int16(v.Int()))
|
||||
case reflect.Int32:
|
||||
e.int32(int32(v.Int()))
|
||||
case reflect.Int64:
|
||||
e.int64(v.Int())
|
||||
}
|
||||
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
switch v.Type().Kind() {
|
||||
case reflect.Uint8:
|
||||
e.uint8(uint8(v.Uint()))
|
||||
case reflect.Uint16:
|
||||
e.uint16(uint16(v.Uint()))
|
||||
case reflect.Uint32:
|
||||
e.uint32(uint32(v.Uint()))
|
||||
case reflect.Uint64:
|
||||
e.uint64(v.Uint())
|
||||
}
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch v.Type().Kind() {
|
||||
case reflect.Float32:
|
||||
e.uint32(math.Float32bits(float32(v.Float())))
|
||||
case reflect.Float64:
|
||||
e.uint64(math.Float64bits(v.Float()))
|
||||
}
|
||||
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
switch v.Type().Kind() {
|
||||
case reflect.Complex64:
|
||||
x := v.Complex()
|
||||
e.uint32(math.Float32bits(float32(real(x))))
|
||||
e.uint32(math.Float32bits(float32(imag(x))))
|
||||
case reflect.Complex128:
|
||||
x := v.Complex()
|
||||
e.uint64(math.Float64bits(real(x)))
|
||||
e.uint64(math.Float64bits(imag(x)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) skip(v reflect.Value) {
|
||||
d.offset += dataSize(v)
|
||||
}
|
||||
|
||||
func (e *encoder) skip(v reflect.Value) {
|
||||
n := dataSize(v)
|
||||
zero := e.buf[e.offset : e.offset+n]
|
||||
for i := range zero {
|
||||
zero[i] = 0
|
||||
}
|
||||
e.offset += n
|
||||
}
|
||||
|
||||
// intDataSize returns the size of the data required to represent the data when encoded.
|
||||
// It returns zero if the type cannot be implemented by the fast path in Read or Write.
|
||||
func intDataSize(data any) int {
|
||||
switch data := data.(type) {
|
||||
case bool, int8, uint8, *bool, *int8, *uint8:
|
||||
return 1
|
||||
case []bool:
|
||||
return len(data)
|
||||
case []int8:
|
||||
return len(data)
|
||||
case []uint8:
|
||||
return len(data)
|
||||
case int16, uint16, *int16, *uint16:
|
||||
return 2
|
||||
case []int16:
|
||||
return 2 * len(data)
|
||||
case []uint16:
|
||||
return 2 * len(data)
|
||||
case int32, uint32, *int32, *uint32:
|
||||
return 4
|
||||
case []int32:
|
||||
return 4 * len(data)
|
||||
case []uint32:
|
||||
return 4 * len(data)
|
||||
case int64, uint64, *int64, *uint64:
|
||||
return 8
|
||||
case []int64:
|
||||
return 8 * len(data)
|
||||
case []uint64:
|
||||
return 8 * len(data)
|
||||
case float32, *float32:
|
||||
return 4
|
||||
case float64, *float64:
|
||||
return 8
|
||||
case []float32:
|
||||
return 4 * len(data)
|
||||
case []float64:
|
||||
return 8 * len(data)
|
||||
}
|
||||
return 0
|
||||
}
|
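The vendored package keeps the standard library's encoding/binary API, so fixed-size values and structs round-trip through Read and Write exactly as the package comment above describes. A minimal sketch of that usage; the sample `header` struct and values are illustrative, not part of the diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/sagernet/sing/common/binary"
)

// header is an example fixed-size value: every field has a fixed size,
// so it is accepted by both Write and Read.
type header struct {
	Version uint8
	Flags   uint8
	Length  uint16
}

func main() {
	var buf bytes.Buffer
	// Encode in big-endian (network) byte order.
	if err := binary.Write(&buf, binary.BigEndian, header{Version: 1, Flags: 2, Length: 512}); err != nil {
		panic(err)
	}
	// Decode back into a struct; Read fills successive exported fields.
	var decoded header
	if err := binary.Read(&buf, binary.BigEndian, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", decoded) // {Version:1 Flags:2 Length:512}
}
```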
common/binary/native_endian_big.go (new file, 14 lines)

```go
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64

package binary

type nativeEndian struct {
	bigEndian
}

// NativeEndian is the native-endian implementation of [ByteOrder] and [AppendByteOrder].
var NativeEndian nativeEndian
```
common/binary/native_endian_little.go (new file, 14 lines)

```go
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm

package binary

type nativeEndian struct {
	littleEndian
}

// NativeEndian is the native-endian implementation of [ByteOrder] and [AppendByteOrder].
var NativeEndian nativeEndian
```
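NativeEndian is selected purely by the build tags in these two files, so the same call resolves to the host byte order at compile time. A small illustration; the printed output assumes a little-endian host, which is not stated in the diff:

```go
package main

import (
	"fmt"

	"github.com/sagernet/sing/common/binary"
)

func main() {
	var buf [2]byte
	binary.NativeEndian.PutUint16(buf[:], 0x0102)
	// On little-endian hosts (amd64, arm64, ...) this prints [2 1];
	// on the big-endian targets listed above it prints [1 2].
	fmt.Println(buf)
}
```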
common/binary/variant_data.go (new file, 305 lines)
package binary
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
E "github.com/sagernet/sing/common/exceptions"
|
||||
)
|
||||
|
||||
func ReadDataSlice(r *bufio.Reader, order ByteOrder, data ...any) error {
|
||||
for index, item := range data {
|
||||
err := ReadData(r, order, item)
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", index, "]")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReadData(r *bufio.Reader, order ByteOrder, data any) error {
|
||||
switch dataPtr := data.(type) {
|
||||
case *[]uint8:
|
||||
bytesLen, err := ReadUvarint(r)
|
||||
if err != nil {
|
||||
return E.Cause(err, "bytes length")
|
||||
}
|
||||
newBytes := make([]uint8, bytesLen)
|
||||
_, err = io.ReadFull(r, newBytes)
|
||||
if err != nil {
|
||||
return E.Cause(err, "bytes value")
|
||||
}
|
||||
*dataPtr = newBytes
|
||||
default:
|
||||
if intBaseDataSize(data) != 0 {
|
||||
return Read(r, order, data)
|
||||
}
|
||||
}
|
||||
dataValue := reflect.ValueOf(data)
|
||||
if dataValue.Kind() == reflect.Pointer {
|
||||
dataValue = dataValue.Elem()
|
||||
}
|
||||
return readData(r, order, dataValue)
|
||||
}
|
||||
|
||||
func readData(r *bufio.Reader, order ByteOrder, data reflect.Value) error {
|
||||
switch data.Kind() {
|
||||
case reflect.Pointer:
|
||||
pointerValue, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pointerValue == 0 {
|
||||
data.SetZero()
|
||||
return nil
|
||||
}
|
||||
if data.IsNil() {
|
||||
data.Set(reflect.New(data.Type().Elem()))
|
||||
}
|
||||
return readData(r, order, data.Elem())
|
||||
case reflect.String:
|
||||
stringLength, err := ReadUvarint(r)
|
||||
if err != nil {
|
||||
return E.Cause(err, "string length")
|
||||
}
|
||||
if stringLength == 0 {
|
||||
data.SetZero()
|
||||
} else {
|
||||
stringData := make([]byte, stringLength)
|
||||
_, err = io.ReadFull(r, stringData)
|
||||
if err != nil {
|
||||
return E.Cause(err, "string value")
|
||||
}
|
||||
data.SetString(string(stringData))
|
||||
}
|
||||
case reflect.Array:
|
||||
arrayLen := data.Len()
|
||||
for i := 0; i < arrayLen; i++ {
|
||||
err := readData(r, order, data.Index(i))
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", i, "]")
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
sliceLength, err := ReadUvarint(r)
|
||||
if err != nil {
|
||||
return E.Cause(err, "slice length")
|
||||
}
|
||||
if !data.IsNil() && data.Cap() >= int(sliceLength) {
|
||||
data.SetLen(int(sliceLength))
|
||||
} else if sliceLength > 0 {
|
||||
data.Set(reflect.MakeSlice(data.Type(), int(sliceLength), int(sliceLength)))
|
||||
}
|
||||
if sliceLength > 0 {
|
||||
if data.Type().Elem().Kind() == reflect.Uint8 {
|
||||
_, err = io.ReadFull(r, data.Bytes())
|
||||
if err != nil {
|
||||
return E.Cause(err, "bytes value")
|
||||
}
|
||||
} else {
|
||||
for index := 0; index < int(sliceLength); index++ {
|
||||
err = readData(r, order, data.Index(index))
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", index, "]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
mapLength, err := ReadUvarint(r)
|
||||
if err != nil {
|
||||
return E.Cause(err, "map length")
|
||||
}
|
||||
data.Set(reflect.MakeMap(data.Type()))
|
||||
for index := 0; index < int(mapLength); index++ {
|
||||
key := reflect.New(data.Type().Key()).Elem()
|
||||
err = readData(r, order, key)
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", index, "].key")
|
||||
}
|
||||
value := reflect.New(data.Type().Elem()).Elem()
|
||||
err = readData(r, order, value)
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", index, "].value")
|
||||
}
|
||||
data.SetMapIndex(key, value)
|
||||
}
|
||||
case reflect.Struct:
|
||||
fieldType := data.Type()
|
||||
fieldLen := data.NumField()
|
||||
for i := 0; i < fieldLen; i++ {
|
||||
field := data.Field(i)
|
||||
fieldName := fieldType.Field(i).Name
|
||||
if field.CanSet() || fieldName != "_" {
|
||||
err := readData(r, order, field)
|
||||
if err != nil {
|
||||
return E.Cause(err, fieldName)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
size := dataSize(data)
|
||||
if size < 0 {
|
||||
return errors.New("invalid type " + reflect.TypeOf(data).String())
|
||||
}
|
||||
d := &decoder{order: order, buf: make([]byte, size)}
|
||||
_, err := io.ReadFull(r, d.buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.value(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WriteDataSlice(writer *bufio.Writer, order ByteOrder, data ...any) error {
|
||||
for index, item := range data {
|
||||
err := WriteData(writer, order, item)
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", index, "]")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WriteData(writer *bufio.Writer, order ByteOrder, data any) error {
|
||||
switch dataPtr := data.(type) {
|
||||
case []uint8:
|
||||
_, err := writer.Write(AppendUvarint(writer.AvailableBuffer(), uint64(len(dataPtr))))
|
||||
if err != nil {
|
||||
return E.Cause(err, "bytes length")
|
||||
}
|
||||
_, err = writer.Write(dataPtr)
|
||||
if err != nil {
|
||||
return E.Cause(err, "bytes value")
|
||||
}
|
||||
default:
|
||||
if intBaseDataSize(data) != 0 {
|
||||
return Write(writer, order, data)
|
||||
}
|
||||
}
|
||||
return writeData(writer, order, reflect.Indirect(reflect.ValueOf(data)))
|
||||
}
|
||||
|
||||
func writeData(writer *bufio.Writer, order ByteOrder, data reflect.Value) error {
|
||||
switch data.Kind() {
|
||||
case reflect.Pointer:
|
||||
if data.IsNil() {
|
||||
err := writer.WriteByte(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err := writer.WriteByte(1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeData(writer, order, data.Elem())
|
||||
}
|
||||
case reflect.String:
|
||||
stringValue := data.String()
|
||||
_, err := writer.Write(AppendUvarint(writer.AvailableBuffer(), uint64(len(stringValue))))
|
||||
if err != nil {
|
||||
return E.Cause(err, "string length")
|
||||
}
|
||||
if stringValue != "" {
|
||||
_, err = writer.WriteString(stringValue)
|
||||
if err != nil {
|
||||
return E.Cause(err, "string value")
|
||||
}
|
||||
}
|
||||
case reflect.Array:
|
||||
dataLen := data.Len()
|
||||
for i := 0; i < dataLen; i++ {
|
||||
err := writeData(writer, order, data.Index(i))
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", i, "]")
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
dataLen := data.Len()
|
||||
_, err := writer.Write(AppendUvarint(writer.AvailableBuffer(), uint64(dataLen)))
|
||||
if err != nil {
|
||||
return E.Cause(err, "slice length")
|
||||
}
|
||||
if dataLen > 0 {
|
||||
if data.Type().Elem().Kind() == reflect.Uint8 {
|
||||
_, err = writer.Write(data.Bytes())
|
||||
if err != nil {
|
||||
return E.Cause(err, "bytes value")
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < dataLen; i++ {
|
||||
err = writeData(writer, order, data.Index(i))
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", i, "]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
dataLen := data.Len()
|
||||
_, err := writer.Write(AppendUvarint(writer.AvailableBuffer(), uint64(dataLen)))
|
||||
if err != nil {
|
||||
return E.Cause(err, "map length")
|
||||
}
|
||||
if dataLen > 0 {
|
||||
for index, key := range data.MapKeys() {
|
||||
err = writeData(writer, order, key)
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", index, "].key")
|
||||
}
|
||||
err = writeData(writer, order, data.MapIndex(key))
|
||||
if err != nil {
|
||||
return E.Cause(err, "[", index, "].value")
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
fieldType := data.Type()
|
||||
fieldLen := data.NumField()
|
||||
for i := 0; i < fieldLen; i++ {
|
||||
field := data.Field(i)
|
||||
fieldName := fieldType.Field(i).Name
|
||||
if field.CanSet() || fieldName != "_" {
|
||||
err := writeData(writer, order, field)
|
||||
if err != nil {
|
||||
return E.Cause(err, fieldName)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
size := dataSize(data)
|
||||
if size < 0 {
|
||||
return errors.New("binary.Write: some values are not fixed-sized in type " + data.Type().String())
|
||||
}
|
||||
buf := make([]byte, size)
|
||||
e := &encoder{order: order, buf: buf}
|
||||
e.value(data)
|
||||
_, err := writer.Write(buf)
|
||||
if err != nil {
|
||||
return E.Cause(err, reflect.TypeOf(data).String())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func intBaseDataSize(data any) int {
|
||||
switch data.(type) {
|
||||
case bool, int8, uint8:
|
||||
return 1
|
||||
case int16, uint16:
|
||||
return 2
|
||||
case int32, uint32:
|
||||
return 4
|
||||
case int64, uint64:
|
||||
return 8
|
||||
case float32:
|
||||
return 4
|
||||
case float64:
|
||||
return 8
|
||||
}
|
||||
return 0
|
||||
}
|
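ReadData and WriteData extend the fixed-size encoder with length-prefixed strings, byte slices, slices, maps, and nil-able pointers, using the uvarint helpers from varint.go below. A hedged round-trip sketch; the `record` struct is illustrative, and the bufio reader/writer are required by the signatures shown above:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/sagernet/sing/common/binary"
)

type record struct {
	Name    string
	Tags    []string
	Payload []byte
}

func main() {
	var buf bytes.Buffer
	writer := bufio.NewWriter(&buf)
	// Strings, slices and byte slices are written with a uvarint length prefix.
	err := binary.WriteData(writer, binary.BigEndian, record{
		Name:    "example",
		Tags:    []string{"a", "b"},
		Payload: []byte{1, 2, 3},
	})
	if err == nil {
		err = writer.Flush()
	}
	if err != nil {
		panic(err)
	}

	var decoded record
	if err := binary.ReadData(bufio.NewReader(&buf), binary.BigEndian, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", decoded)
}
```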
common/binary/varint.go (new file, 166 lines)
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package binary
|
||||
|
||||
// This file implements "varint" encoding of 64-bit integers.
|
||||
// The encoding is:
|
||||
// - unsigned integers are serialized 7 bits at a time, starting with the
|
||||
// least significant bits
|
||||
// - the most significant bit (msb) in each output byte indicates if there
|
||||
// is a continuation byte (msb = 1)
|
||||
// - signed integers are mapped to unsigned integers using "zig-zag"
|
||||
// encoding: Positive values x are written as 2*x + 0, negative values
|
||||
// are written as 2*(^x) + 1; that is, negative numbers are complemented
|
||||
// and whether to complement is encoded in bit 0.
|
||||
//
|
||||
// Design note:
|
||||
// At most 10 bytes are needed for 64-bit values. The encoding could
|
||||
// be more dense: a full 64-bit value needs an extra byte just to hold bit 63.
|
||||
// Instead, the msb of the previous byte could be used to hold bit 63 since we
|
||||
// know there can't be more than 64 bits. This is a trivial improvement and
|
||||
// would reduce the maximum encoding length to 9 bytes. However, it breaks the
|
||||
// invariant that the msb is always the "continuation bit" and thus makes the
|
||||
// format incompatible with a varint encoding for larger numbers (say 128-bit).
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// MaxVarintLenN is the maximum length of a varint-encoded N-bit integer.
|
||||
const (
|
||||
MaxVarintLen16 = 3
|
||||
MaxVarintLen32 = 5
|
||||
MaxVarintLen64 = 10
|
||||
)
|
||||
|
||||
// AppendUvarint appends the varint-encoded form of x,
|
||||
// as generated by [PutUvarint], to buf and returns the extended buffer.
|
||||
func AppendUvarint(buf []byte, x uint64) []byte {
|
||||
for x >= 0x80 {
|
||||
buf = append(buf, byte(x)|0x80)
|
||||
x >>= 7
|
||||
}
|
||||
return append(buf, byte(x))
|
||||
}
|
||||
|
||||
// PutUvarint encodes a uint64 into buf and returns the number of bytes written.
|
||||
// If the buffer is too small, PutUvarint will panic.
|
||||
func PutUvarint(buf []byte, x uint64) int {
|
||||
i := 0
|
||||
for x >= 0x80 {
|
||||
buf[i] = byte(x) | 0x80
|
||||
x >>= 7
|
||||
i++
|
||||
}
|
||||
buf[i] = byte(x)
|
||||
return i + 1
|
||||
}
|
||||
|
||||
// Uvarint decodes a uint64 from buf and returns that value and the
|
||||
// number of bytes read (> 0). If an error occurred, the value is 0
|
||||
// and the number of bytes n is <= 0 meaning:
|
||||
//
|
||||
// n == 0: buf too small
|
||||
// n < 0: value larger than 64 bits (overflow)
|
||||
// and -n is the number of bytes read
|
||||
func Uvarint(buf []byte) (uint64, int) {
|
||||
var x uint64
|
||||
var s uint
|
||||
for i, b := range buf {
|
||||
if i == MaxVarintLen64 {
|
||||
// Catch byte reads past MaxVarintLen64.
|
||||
// See issue https://golang.org/issues/41185
|
||||
return 0, -(i + 1) // overflow
|
||||
}
|
||||
if b < 0x80 {
|
||||
if i == MaxVarintLen64-1 && b > 1 {
|
||||
return 0, -(i + 1) // overflow
|
||||
}
|
||||
return x | uint64(b)<<s, i + 1
|
||||
}
|
||||
x |= uint64(b&0x7f) << s
|
||||
s += 7
|
||||
}
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// AppendVarint appends the varint-encoded form of x,
|
||||
// as generated by [PutVarint], to buf and returns the extended buffer.
|
||||
func AppendVarint(buf []byte, x int64) []byte {
|
||||
ux := uint64(x) << 1
|
||||
if x < 0 {
|
||||
ux = ^ux
|
||||
}
|
||||
return AppendUvarint(buf, ux)
|
||||
}
|
||||
|
||||
// PutVarint encodes an int64 into buf and returns the number of bytes written.
|
||||
// If the buffer is too small, PutVarint will panic.
|
||||
func PutVarint(buf []byte, x int64) int {
|
||||
ux := uint64(x) << 1
|
||||
if x < 0 {
|
||||
ux = ^ux
|
||||
}
|
||||
return PutUvarint(buf, ux)
|
||||
}
|
||||
|
||||
// Varint decodes an int64 from buf and returns that value and the
|
||||
// number of bytes read (> 0). If an error occurred, the value is 0
|
||||
// and the number of bytes n is <= 0 with the following meaning:
|
||||
//
|
||||
// n == 0: buf too small
|
||||
// n < 0: value larger than 64 bits (overflow)
|
||||
// and -n is the number of bytes read
|
||||
func Varint(buf []byte) (int64, int) {
|
||||
ux, n := Uvarint(buf) // ok to continue in presence of error
|
||||
x := int64(ux >> 1)
|
||||
if ux&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x, n
|
||||
}
|
||||
|
||||
var errOverflow = errors.New("binary: varint overflows a 64-bit integer")
|
||||
|
||||
// ReadUvarint reads an encoded unsigned integer from r and returns it as a uint64.
|
||||
// The error is [io.EOF] only if no bytes were read.
|
||||
// If an [io.EOF] happens after reading some but not all the bytes,
|
||||
// ReadUvarint returns [io.ErrUnexpectedEOF].
|
||||
func ReadUvarint(r io.ByteReader) (uint64, error) {
|
||||
var x uint64
|
||||
var s uint
|
||||
for i := 0; i < MaxVarintLen64; i++ {
|
||||
b, err := r.ReadByte()
|
||||
if err != nil {
|
||||
if i > 0 && err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return x, err
|
||||
}
|
||||
if b < 0x80 {
|
||||
if i == MaxVarintLen64-1 && b > 1 {
|
||||
return x, errOverflow
|
||||
}
|
||||
return x | uint64(b)<<s, nil
|
||||
}
|
||||
x |= uint64(b&0x7f) << s
|
||||
s += 7
|
||||
}
|
||||
return x, errOverflow
|
||||
}
|
||||
|
||||
// ReadVarint reads an encoded signed integer from r and returns it as an int64.
|
||||
// The error is [io.EOF] only if no bytes were read.
|
||||
// If an [io.EOF] happens after reading some but not all the bytes,
|
||||
// ReadVarint returns [io.ErrUnexpectedEOF].
|
||||
func ReadVarint(r io.ByteReader) (int64, error) {
|
||||
ux, err := ReadUvarint(r) // ok to continue in presence of error
|
||||
x := int64(ux >> 1)
|
||||
if ux&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x, err
|
||||
}
|
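The varint helpers follow the protocol-buffers encoding described in the file comment: unsigned values are emitted 7 bits at a time, and signed values are zig-zag mapped first. A quick sketch of the append/decode pair:

```go
package main

import (
	"fmt"

	"github.com/sagernet/sing/common/binary"
)

func main() {
	// Unsigned: 300 needs two bytes (0xAC 0x02).
	buf := binary.AppendUvarint(nil, 300)
	fmt.Printf("% x\n", buf) // ac 02

	value, n := binary.Uvarint(buf)
	fmt.Println(value, n) // 300 2

	// Signed values are zig-zag encoded: -1 maps to 1, so it stays one byte.
	fmt.Printf("% x\n", binary.AppendVarint(nil, -1)) // 01
}
```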
|
@ -1,18 +1,21 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/netip"
|
||||
)
|
||||
|
||||
type InterfaceFinder interface {
|
||||
Interfaces() []Interface
|
||||
InterfaceIndexByName(name string) (int, error)
|
||||
InterfaceNameByIndex(index int) (string, error)
|
||||
InterfaceByAddr(addr netip.Addr) (*Interface, error)
|
||||
}
|
||||
|
||||
type Interface struct {
|
||||
Index int
|
||||
MTU int
|
||||
Name string
|
||||
Addresses []netip.Prefix
|
||||
Index int
|
||||
MTU int
|
||||
Name string
|
||||
Addresses []netip.Prefix
|
||||
HardwareAddr net.HardwareAddr
|
||||
}
|
||||
|
|
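The hunk above shows the InterfaceFinder interface and the Interface struct gaining a HardwareAddr field. A hedged sketch of how a caller might use the default implementation that follows; constructing the zero value and the loopback lookup are assumptions, only the method signatures come from this diff:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/sagernet/sing/common/control"
)

func main() {
	finder := &control.DefaultInterfaceFinder{}
	if err := finder.Update(); err != nil { // populate the interface list from the OS
		panic(err)
	}
	iface, err := finder.InterfaceByAddr(netip.MustParseAddr("127.0.0.1"))
	if err != nil {
		panic(err)
	}
	// HardwareAddr is the field added in this change set.
	fmt.Println(iface.Name, iface.MTU, iface.HardwareAddr)
}
```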
|
@ -9,6 +9,8 @@ import (
|
|||
M "github.com/sagernet/sing/common/metadata"
|
||||
)
|
||||
|
||||
var _ InterfaceFinder = (*DefaultInterfaceFinder)(nil)
|
||||
|
||||
type DefaultInterfaceFinder struct {
|
||||
interfaces []Interface
|
||||
}
|
||||
|
@ -29,10 +31,11 @@ func (f *DefaultInterfaceFinder) Update() error {
|
|||
return err
|
||||
}
|
||||
interfaces = append(interfaces, Interface{
|
||||
Index: netIf.Index,
|
||||
MTU: netIf.MTU,
|
||||
Name: netIf.Name,
|
||||
Addresses: common.Map(ifAddrs, M.PrefixFromNet),
|
||||
Index: netIf.Index,
|
||||
MTU: netIf.MTU,
|
||||
Name: netIf.Name,
|
||||
Addresses: common.Map(ifAddrs, M.PrefixFromNet),
|
||||
HardwareAddr: netIf.HardwareAddr,
|
||||
})
|
||||
}
|
||||
f.interfaces = interfaces
|
||||
|
@ -43,6 +46,10 @@ func (f *DefaultInterfaceFinder) UpdateInterfaces(interfaces []Interface) {
|
|||
f.interfaces = interfaces
|
||||
}
|
||||
|
||||
func (f *DefaultInterfaceFinder) Interfaces() []Interface {
|
||||
return f.interfaces
|
||||
}
|
||||
|
||||
func (f *DefaultInterfaceFinder) InterfaceIndexByName(name string) (int, error) {
|
||||
for _, netInterface := range f.interfaces {
|
||||
if netInterface.Name == name {
|
||||
|
|
|
@ -3,6 +3,7 @@ package control
|
|||
import (
|
||||
"syscall"
|
||||
|
||||
"github.com/sagernet/sing/common"
|
||||
E "github.com/sagernet/sing/common/exceptions"
|
||||
)
|
||||
|
||||
|
@ -30,6 +31,14 @@ func Conn(conn syscall.Conn, block func(fd uintptr) error) error {
|
|||
return Raw(rawConn, block)
|
||||
}
|
||||
|
||||
func Conn0[T any](conn syscall.Conn, block func(fd uintptr) (T, error)) (T, error) {
|
||||
rawConn, err := conn.SyscallConn()
|
||||
if err != nil {
|
||||
return common.DefaultValue[T](), err
|
||||
}
|
||||
return Raw0[T](rawConn, block)
|
||||
}
|
||||
|
||||
func Raw(rawConn syscall.RawConn, block func(fd uintptr) error) error {
|
||||
var innerErr error
|
||||
err := rawConn.Control(func(fd uintptr) {
|
||||
|
@ -37,3 +46,14 @@ func Raw(rawConn syscall.RawConn, block func(fd uintptr) error) error {
|
|||
})
|
||||
return E.Errors(innerErr, err)
|
||||
}
|
||||
|
||||
func Raw0[T any](rawConn syscall.RawConn, block func(fd uintptr) (T, error)) (T, error) {
|
||||
var (
|
||||
value T
|
||||
innerErr error
|
||||
)
|
||||
err := rawConn.Control(func(fd uintptr) {
|
||||
value, innerErr = block(fd)
|
||||
})
|
||||
return value, E.Errors(innerErr, err)
|
||||
}
|
||||
|
|
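Conn0 and Raw0 are generic variants of Conn and Raw that let the callback return a value alongside its error. For instance, reading a socket option off a *net.TCPConn could look like this; a sketch only, and the SO_RCVBUF lookup is an illustrative, Linux-oriented choice rather than part of the diff:

```go
package main

import (
	"fmt"
	"net"
	"syscall"

	"github.com/sagernet/sing/common/control"
)

func receiveBufferSize(conn *net.TCPConn) (int, error) {
	// *net.TCPConn satisfies syscall.Conn, so Conn0 can hand us the raw fd.
	return control.Conn0[int](conn, func(fd uintptr) (int, error) {
		return syscall.GetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF)
	})
}

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	size, err := receiveBufferSize(conn.(*net.TCPConn))
	fmt.Println(size, err)
}
```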
common/control/redirect_darwin.go (new file, 58 lines)

```go
package control

import (
	"encoding/binary"
	"net"
	"net/netip"
	"syscall"
	"unsafe"

	M "github.com/sagernet/sing/common/metadata"

	"golang.org/x/sys/unix"
)

const (
	PF_OUT      = 0x2
	DIOCNATLOOK = 0xc0544417
)

func GetOriginalDestination(conn net.Conn) (netip.AddrPort, error) {
	pfFd, err := syscall.Open("/dev/pf", 0, syscall.O_RDONLY)
	if err != nil {
		return netip.AddrPort{}, err
	}
	defer syscall.Close(pfFd)
	nl := struct {
		saddr, daddr, rsaddr, rdaddr       [16]byte
		sxport, dxport, rsxport, rdxport   [4]byte
		af, proto, protoVariant, direction uint8
	}{
		af:        syscall.AF_INET,
		proto:     syscall.IPPROTO_TCP,
		direction: PF_OUT,
	}
	localAddr := M.SocksaddrFromNet(conn.LocalAddr()).Unwrap()
	removeAddr := M.SocksaddrFromNet(conn.RemoteAddr()).Unwrap()
	if localAddr.IsIPv4() {
		copy(nl.saddr[:net.IPv4len], removeAddr.Addr.AsSlice())
		copy(nl.daddr[:net.IPv4len], localAddr.Addr.AsSlice())
		nl.af = syscall.AF_INET
	} else {
		copy(nl.saddr[:], removeAddr.Addr.AsSlice())
		copy(nl.daddr[:], localAddr.Addr.AsSlice())
		nl.af = syscall.AF_INET6
	}
	binary.BigEndian.PutUint16(nl.sxport[:], removeAddr.Port)
	binary.BigEndian.PutUint16(nl.dxport[:], localAddr.Port)
	if _, _, errno := unix.Syscall(syscall.SYS_IOCTL, uintptr(pfFd), DIOCNATLOOK, uintptr(unsafe.Pointer(&nl))); errno != 0 {
		return netip.AddrPort{}, errno
	}
	var address netip.Addr
	if nl.af == unix.AF_INET {
		address = M.AddrFromIP(nl.rdaddr[:net.IPv4len])
	} else {
		address = netip.AddrFrom16(nl.rdaddr)
	}
	return netip.AddrPortFrom(address, binary.BigEndian.Uint16(nl.rdxport[:])), nil
}
```
common/control/redirect_linux.go (new file, 38 lines)

```go
package control

import (
	"encoding/binary"
	"net"
	"net/netip"
	"os"
	"syscall"

	"github.com/sagernet/sing/common"
	M "github.com/sagernet/sing/common/metadata"

	"golang.org/x/sys/unix"
)

func GetOriginalDestination(conn net.Conn) (netip.AddrPort, error) {
	syscallConn, loaded := common.Cast[syscall.Conn](conn)
	if !loaded {
		return netip.AddrPort{}, os.ErrInvalid
	}
	return Conn0[netip.AddrPort](syscallConn, func(fd uintptr) (netip.AddrPort, error) {
		if M.SocksaddrFromNet(conn.RemoteAddr()).Unwrap().IsIPv4() {
			raw, err := unix.GetsockoptIPv6Mreq(int(fd), unix.IPPROTO_IP, unix.SO_ORIGINAL_DST)
			if err != nil {
				return netip.AddrPort{}, err
			}
			return netip.AddrPortFrom(M.AddrFromIP(raw.Multiaddr[4:8]), uint16(raw.Multiaddr[2])<<8+uint16(raw.Multiaddr[3])), nil
		} else {
			raw, err := unix.GetsockoptIPv6MTUInfo(int(fd), unix.IPPROTO_IPV6, unix.SO_ORIGINAL_DST)
			if err != nil {
				return netip.AddrPort{}, err
			}
			var port [2]byte
			binary.BigEndian.PutUint16(port[:], raw.Addr.Port)
			return netip.AddrPortFrom(M.AddrFromIP(raw.Addr.Addr[:]), binary.LittleEndian.Uint16(port[:])), nil
		}
	})
}
```
common/control/redirect_other.go (new file, 13 lines)

```go
//go:build !linux && !darwin

package control

import (
	"net"
	"net/netip"
	"os"
)

func GetOriginalDestination(conn net.Conn) (netip.AddrPort, error) {
	return netip.AddrPort{}, os.ErrInvalid
}
```
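All three platform variants expose the same GetOriginalDestination signature, so a transparent-proxy accept loop can stay platform-agnostic; on unsupported systems the call simply returns os.ErrInvalid. A hedged sketch of the call site; the listener setup around it is illustrative and assumes traffic is already redirected to it (for example by an iptables REDIRECT rule on Linux or a pf rdr rule on Darwin):

```go
package main

import (
	"log"
	"net"

	"github.com/sagernet/sing/common/control"
)

func main() {
	listener, err := net.Listen("tcp", "127.0.0.1:12345")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Fatal(err)
		}
		// Ask the kernel (SO_ORIGINAL_DST or pf NAT lookup) where this
		// connection was really headed before redirection.
		destination, err := control.GetOriginalDestination(conn)
		if err != nil {
			log.Println("lookup original destination:", err)
			conn.Close()
			continue
		}
		log.Println("redirected connection originally for", destination)
		conn.Close()
	}
}
```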
common/control/tproxy_linux.go (new file, 56 lines)

```go
package control

import (
	"encoding/binary"
	"net/netip"
	"syscall"

	E "github.com/sagernet/sing/common/exceptions"
	M "github.com/sagernet/sing/common/metadata"

	"golang.org/x/sys/unix"
)

func TProxy(fd uintptr, family int) error {
	err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
	if err == nil {
		err = syscall.SetsockoptInt(int(fd), syscall.SOL_IP, syscall.IP_TRANSPARENT, 1)
	}
	if err == nil && family == unix.AF_INET6 {
		err = syscall.SetsockoptInt(int(fd), syscall.SOL_IPV6, unix.IPV6_TRANSPARENT, 1)
	}
	if err == nil {
		err = syscall.SetsockoptInt(int(fd), syscall.SOL_IP, syscall.IP_RECVORIGDSTADDR, 1)
	}
	if err == nil && family == unix.AF_INET6 {
		err = syscall.SetsockoptInt(int(fd), syscall.SOL_IPV6, unix.IPV6_RECVORIGDSTADDR, 1)
	}
	return err
}

func TProxyWriteBack() Func {
	return func(network, address string, conn syscall.RawConn) error {
		return Raw(conn, func(fd uintptr) error {
			if M.ParseSocksaddr(address).Addr.Is6() {
				return syscall.SetsockoptInt(int(fd), syscall.SOL_IPV6, unix.IPV6_TRANSPARENT, 1)
			} else {
				return syscall.SetsockoptInt(int(fd), syscall.SOL_IP, syscall.IP_TRANSPARENT, 1)
			}
		})
	}
}

func GetOriginalDestinationFromOOB(oob []byte) (netip.AddrPort, error) {
	controlMessages, err := unix.ParseSocketControlMessage(oob)
	if err != nil {
		return netip.AddrPort{}, err
	}
	for _, message := range controlMessages {
		if message.Header.Level == unix.SOL_IP && message.Header.Type == unix.IP_RECVORIGDSTADDR {
			return netip.AddrPortFrom(M.AddrFromIP(message.Data[4:8]), binary.BigEndian.Uint16(message.Data[2:4])), nil
		} else if message.Header.Level == unix.SOL_IPV6 && message.Header.Type == unix.IPV6_RECVORIGDSTADDR {
			return netip.AddrPortFrom(M.AddrFromIP(message.Data[8:24]), binary.BigEndian.Uint16(message.Data[2:4])), nil
		}
	}
	return netip.AddrPort{}, E.New("not found")
}
```
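TProxy sets the socket options a Linux TPROXY listener needs (IP_TRANSPARENT plus the RECVORIGDSTADDR flags), and GetOriginalDestinationFromOOB recovers the real destination from UDP control messages. A Linux-only sketch of wiring it into a net.ListenConfig; pairing Raw with TProxy this way is an assumption based on the signatures above, the address and buffer sizes are illustrative, and IP_TRANSPARENT requires CAP_NET_ADMIN:

```go
package main

import (
	"context"
	"log"
	"net"
	"syscall"

	"github.com/sagernet/sing/common/control"

	"golang.org/x/sys/unix"
)

func main() {
	lc := net.ListenConfig{
		Control: func(network, address string, conn syscall.RawConn) error {
			// Apply the TPROXY socket options before bind.
			return control.Raw(conn, func(fd uintptr) error {
				return control.TProxy(fd, unix.AF_INET)
			})
		},
	}
	packetConn, err := lc.ListenPacket(context.Background(), "udp4", "0.0.0.0:7893")
	if err != nil {
		log.Fatal(err)
	}
	udpConn := packetConn.(*net.UDPConn)
	buffer := make([]byte, 65535)
	oob := make([]byte, 1024)
	n, oobN, _, source, err := udpConn.ReadMsgUDP(buffer, oob)
	if err != nil {
		log.Fatal(err)
	}
	// The original destination arrives as an IP_RECVORIGDSTADDR control message.
	destination, err := control.GetOriginalDestinationFromOOB(oob[:oobN])
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %v, originally for %v", n, source, destination)
}
```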
common/control/tproxy_other.go (new file, 20 lines)

```go
//go:build !linux

package control

import (
	"net/netip"
	"os"
)

func TProxy(fd uintptr, isIPv6 bool) error {
	return os.ErrInvalid
}

func TProxyWriteBack() Func {
	return nil
}

func GetOriginalDestinationFromOOB(oob []byte) (netip.AddrPort, error) {
	return netip.AddrPort{}, os.ErrInvalid
}
```
|
@ -1,4 +1,4 @@
|
|||
//go:build go1.21 && !without_contextjson
|
||||
//go:build go1.20 && !without_contextjson
|
||||
|
||||
package json
|
||||
|
||||
|
|
|
@ -1,23 +0,0 @@
|
|||
//go:build !go1.21 && go1.20 && !without_contextjson
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"github.com/sagernet/sing/common/json/internal/contextjson_120"
|
||||
)
|
||||
|
||||
var (
|
||||
Marshal = json.Marshal
|
||||
Unmarshal = json.Unmarshal
|
||||
NewEncoder = json.NewEncoder
|
||||
NewDecoder = json.NewDecoder
|
||||
)
|
||||
|
||||
type (
|
||||
Encoder = json.Encoder
|
||||
Decoder = json.Decoder
|
||||
Token = json.Token
|
||||
Delim = json.Delim
|
||||
SyntaxError = json.SyntaxError
|
||||
RawMessage = json.RawMessage
|
||||
)
|
|
@ -442,7 +442,7 @@ func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
|||
b, err := m.MarshalJSON()
|
||||
if err == nil {
|
||||
e.Grow(len(b))
|
||||
out := e.AvailableBuffer()
|
||||
out := availableBuffer(&e.Buffer)
|
||||
out, err = appendCompact(out, b, opts.escapeHTML)
|
||||
e.Buffer.Write(out)
|
||||
}
|
||||
|
@@ -461,7 +461,7 @@ func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
 	b, err := m.MarshalJSON()
 	if err == nil {
 		e.Grow(len(b))
-		out := e.AvailableBuffer()
+		out := availableBuffer(&e.Buffer)
 		out, err = appendCompact(out, b, opts.escapeHTML)
 		e.Buffer.Write(out)
 	}
@@ -484,7 +484,7 @@ func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
 	if err != nil {
 		e.error(&MarshalerError{v.Type(), err, "MarshalText"})
 	}
-	e.Write(appendString(e.AvailableBuffer(), b, opts.escapeHTML))
+	e.Write(appendString(availableBuffer(&e.Buffer), b, opts.escapeHTML))
 }
 
 func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
@@ -498,11 +498,11 @@ func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
 	if err != nil {
 		e.error(&MarshalerError{v.Type(), err, "MarshalText"})
 	}
-	e.Write(appendString(e.AvailableBuffer(), b, opts.escapeHTML))
+	e.Write(appendString(availableBuffer(&e.Buffer), b, opts.escapeHTML))
 }
 
 func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
-	b := e.AvailableBuffer()
+	b := availableBuffer(&e.Buffer)
 	b = mayAppendQuote(b, opts.quoted)
 	b = strconv.AppendBool(b, v.Bool())
 	b = mayAppendQuote(b, opts.quoted)
@@ -510,7 +510,7 @@ func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
 }
 
 func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
-	b := e.AvailableBuffer()
+	b := availableBuffer(&e.Buffer)
 	b = mayAppendQuote(b, opts.quoted)
 	b = strconv.AppendInt(b, v.Int(), 10)
 	b = mayAppendQuote(b, opts.quoted)
@@ -518,7 +518,7 @@ func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
 }
 
 func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
-	b := e.AvailableBuffer()
+	b := availableBuffer(&e.Buffer)
 	b = mayAppendQuote(b, opts.quoted)
 	b = strconv.AppendUint(b, v.Uint(), 10)
 	b = mayAppendQuote(b, opts.quoted)
@@ -538,7 +538,7 @@ func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
 	// See golang.org/issue/6384 and golang.org/issue/14135.
 	// Like fmt %g, but the exponent cutoffs are different
 	// and exponents themselves are not padded to two digits.
-	b := e.AvailableBuffer()
+	b := availableBuffer(&e.Buffer)
 	b = mayAppendQuote(b, opts.quoted)
 	abs := math.Abs(f)
 	fmt := byte('f')
@@ -577,7 +577,7 @@ func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
 		if !isValidNumber(numStr) {
 			e.error(fmt.Errorf("json: invalid number literal %q", numStr))
 		}
-		b := e.AvailableBuffer()
+		b := availableBuffer(&e.Buffer)
 		b = mayAppendQuote(b, opts.quoted)
 		b = append(b, numStr...)
 		b = mayAppendQuote(b, opts.quoted)
@@ -586,9 +586,9 @@ func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
 	}
 	if opts.quoted {
 		b := appendString(nil, v.String(), opts.escapeHTML)
-		e.Write(appendString(e.AvailableBuffer(), b, false)) // no need to escape again since it is already escaped
+		e.Write(appendString(availableBuffer(&e.Buffer), b, false)) // no need to escape again since it is already escaped
 	} else {
-		e.Write(appendString(e.AvailableBuffer(), v.String(), opts.escapeHTML))
+		e.Write(appendString(availableBuffer(&e.Buffer), v.String(), opts.escapeHTML))
 	}
 }
 
@@ -754,7 +754,7 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
 		if i > 0 {
 			e.WriteByte(',')
 		}
-		e.Write(appendString(e.AvailableBuffer(), kv.ks, opts.escapeHTML))
+		e.Write(appendString(availableBuffer(&e.Buffer), kv.ks, opts.escapeHTML))
 		e.WriteByte(':')
 		me.elemEnc(e, kv.v, opts)
 	}
@@ -786,7 +786,7 @@ func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
 	e.Grow(len(`"`) + encodedLen + len(`"`))
 
 	// TODO(https://go.dev/issue/53693): Use base64.Encoding.AppendEncode.
-	b := e.AvailableBuffer()
+	b := availableBuffer(&e.Buffer)
 	b = append(b, '"')
 	base64.StdEncoding.Encode(b[len(b):][:encodedLen], s)
 	b = b[:len(b)+encodedLen]
@@ -6,6 +6,11 @@ package json
 
 import "bytes"
 
+// TODO(https://go.dev/issue/53685): Use bytes.Buffer.AvailableBuffer instead.
+func availableBuffer(b *bytes.Buffer) []byte {
+	return b.Bytes()[b.Len():]
+}
+
 // HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
 // characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
 // so that the JSON will be safe to embed inside HTML <script> tags.
@@ -13,7 +18,7 @@ import "bytes"
 // escaping within <script> tags, so an alternative JSON encoding must be used.
 func HTMLEscape(dst *bytes.Buffer, src []byte) {
 	dst.Grow(len(src))
-	dst.Write(appendHTMLEscape(dst.AvailableBuffer(), src))
+	dst.Write(appendHTMLEscape(availableBuffer(dst), src))
 }
 
 func appendHTMLEscape(dst, src []byte) []byte {
@@ -40,7 +45,7 @@ func appendHTMLEscape(dst, src []byte) []byte {
 // insignificant space characters elided.
 func Compact(dst *bytes.Buffer, src []byte) error {
 	dst.Grow(len(src))
-	b := dst.AvailableBuffer()
+	b := availableBuffer(dst)
 	b, err := appendCompact(b, src, false)
 	dst.Write(b)
 	return err
@@ -109,7 +114,7 @@ const indentGrowthFactor = 2
 // if src ends in a trailing newline, so will dst.
 func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
 	dst.Grow(indentGrowthFactor * len(src))
-	b := dst.AvailableBuffer()
+	b := availableBuffer(dst)
 	b, err := appendIndent(b, src, prefix, indent)
 	dst.Write(b)
 	return err
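Note: bytes.Buffer.AvailableBuffer only exists since Go 1.21, while go.mod (further down in this diff) targets go 1.20, so these hunks and the encoder hunks above swap every e.AvailableBuffer() / dst.AvailableBuffer() call for the local availableBuffer helper. Below is a minimal, self-contained sketch of the append-into-spare-capacity pattern the helper preserves; everything outside availableBuffer itself is illustrative, not part of the patch.

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// Same shape as the helper added in the hunk above: a zero-length slice
// aliasing the buffer's unused capacity, ready to be appended into.
func availableBuffer(b *bytes.Buffer) []byte {
	return b.Bytes()[b.Len():]
}

func main() {
	var buf bytes.Buffer
	buf.Grow(32)                     // reserve capacity up front
	b := availableBuffer(&buf)       // len 0, cap >= 32, shares buf's memory
	b = strconv.AppendInt(b, 42, 10) // append without allocating
	buf.Write(b)                     // bytes are already in place; this just advances the length
	fmt.Println(buf.String())        // prints: 42
}
```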
@@ -300,7 +300,7 @@ func stateEndValue(s *scanner, c byte) int {
 	case parseObjectValue:
 		if c == ',' {
 			s.parseState[n-1] = parseObjectKey
-			s.step = stateBeginString
+			s.step = stateBeginStringOrEmpty
 			return scanObjectValue
 		}
 		if c == '}' {
@@ -310,7 +310,7 @@ func stateEndValue(s *scanner, c byte) int {
 		return s.error(c, "after object key:value pair")
 	case parseArrayValue:
 		if c == ',' {
-			s.step = stateBeginValue
+			s.step = stateBeginValueOrEmpty
 			return scanArrayValue
 		}
 		if c == ']' {
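These two hunks only change where the scanner goes after a comma: to stateBeginStringOrEmpty inside objects and stateBeginValueOrEmpty inside arrays, the same states used right after `{` and `[`. Since those states also accept a closing `}` / `]` (see the scanner source later in this diff), the effect appears to be that a trailing comma before the closing bracket is tolerated. For contrast, the stock library rejects such input:

```go
package main

import (
	"encoding/json" // stock scanner, shown only for contrast
	"fmt"
)

func main() {
	var v map[string]int
	err := json.Unmarshal([]byte(`{"port": 443,}`), &v)
	// stock Go: invalid character '}' looking for beginning of object key string
	fmt.Println(err)
}
```

With the patched scanner the same `}` reaches stateBeginStringOrEmpty, which hands it back to stateEndValue instead of erroring.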
@@ -153,6 +153,10 @@ func (dec *Decoder) refill() error {
 		dec.scanp = 0
 	}
 
+	return dec.refill0()
+}
+
+func (dec *Decoder) refill0() error {
 	// Grow buffer if not large enough.
 	const minRead = 512
 	if cap(dec.buf)-len(dec.buf) < minRead {
@@ -402,7 +406,7 @@ func (dec *Decoder) Token() (Token, error) {
 			return Delim('{'), nil
 
 		case '}':
-			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma && dec.tokenState != tokenObjectKey {
 				return dec.tokenError(c)
 			}
 			dec.scanp++
@@ -410,7 +414,6 @@ func (dec *Decoder) Token() (Token, error) {
 			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
 			dec.tokenValueEnd()
 			return Delim('}'), nil
-
 		case ':':
 			if dec.tokenState != tokenObjectColon {
 				return dec.tokenError(c)
@@ -483,7 +486,26 @@ func (dec *Decoder) tokenError(c byte) (Token, error) {
 // current array or object being parsed.
 func (dec *Decoder) More() bool {
 	c, err := dec.peek()
-	return err == nil && c != ']' && c != '}'
+	// return err == nil && c != ']' && c != '}'
+	if err != nil {
+		return false
+	}
+	if c == ']' || c == '}' {
+		return false
+	}
+	if c == ',' {
+		scanp := dec.scanp
+		dec.scanp++
+		c, err = dec.peekNoRefill()
+		dec.scanp = scanp
+		if err != nil {
+			return false
+		}
+		if c == ']' || c == '}' {
+			return false
+		}
+	}
+	return true
 }
 
 func (dec *Decoder) peek() (byte, error) {
@@ -505,6 +527,25 @@ func (dec *Decoder) peek() (byte, error) {
 	}
 }
 
+func (dec *Decoder) peekNoRefill() (byte, error) {
+	var err error
+	for {
+		for i := dec.scanp; i < len(dec.buf); i++ {
+			c := dec.buf[i]
+			if isSpace(c) {
+				continue
+			}
+			dec.scanp = i
+			return c, nil
+		}
+		// buffer has been scanned, now report any error
+		if err != nil {
+			return 0, err
+		}
+		err = dec.refill0()
+	}
+}
+
 // InputOffset returns the input stream byte offset of the current decoder position.
 // The offset gives the location of the end of the most recently returned token
 // and the beginning of the next token.
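The More rewrite pairs with the scanner change: after seeing a comma it peeks one byte further through the new peekNoRefill (which, unlike peek, refills via refill0 and so never slides already-buffered data), restores dec.scanp, and reports false if that next byte closes the array or object, so a trailing comma does not look like one more element. The loop it protects is the usual streaming idiom; the sketch below uses the standard encoding/json API purely for illustration.

```go
package main

import (
	"encoding/json" // standard decoder, used here only to show the More() loop shape
	"fmt"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`[1, 2, 3]`))
	if _, err := dec.Token(); err != nil { // consume '['
		panic(err)
	}
	for dec.More() { // the patched decoder is meant to end here cleanly even for `[1, 2, 3,]`
		var n int
		if err := dec.Decode(&n); err != nil {
			panic(err)
		}
		fmt.Println(n)
	}
	if _, err := dec.Token(); err != nil { // consume ']'
		panic(err)
	}
}
```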
@@ -1,3 +0,0 @@
-# contextjson
-
-mod from go1.20.11
File diff suppressed because it is too large
@@ -1,49 +0,0 @@
-package json
-
-import "strconv"
-
-type decodeContext struct {
-	parent *decodeContext
-	index  int
-	key    string
-}
-
-func (d *decodeState) formatContext() string {
-	var description string
-	context := d.context
-	var appendDot bool
-	for context != nil {
-		if appendDot {
-			description = "." + description
-		}
-		if context.key != "" {
-			description = context.key + description
-			appendDot = true
-		} else {
-			description = "[" + strconv.Itoa(context.index) + "]" + description
-			appendDot = false
-		}
-		context = context.parent
-	}
-	return description
-}
-
-type contextError struct {
-	parent  error
-	context string
-	index   bool
-}
-
-func (c *contextError) Unwrap() error {
-	return c.parent
-}
-
-func (c *contextError) Error() string {
-	//goland:noinspection GoTypeAssertionOnErrors
-	switch c.parent.(type) {
-	case *contextError:
-		return c.context + "." + c.parent.Error()
-	default:
-		return c.context + ": " + c.parent.Error()
-	}
-}
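The deleted context.go carried the per-field error context of the old go1.20.11 fork: formatContext walked the decodeContext chain into a dotted path with [index] segments, and contextError rendered nested errors the same way. A rough, self-contained illustration of how that chain printed (the field names here are made up, and the unused index flag is dropped):

```go
package main

import "fmt"

// Trimmed copy of the deleted type: each level prefixes its own context,
// "." between contextError levels, ": " before the final underlying error.
type contextError struct {
	parent  error
	context string
}

func (c *contextError) Error() string {
	if _, ok := c.parent.(*contextError); ok {
		return c.context + "." + c.parent.Error()
	}
	return c.context + ": " + c.parent.Error()
}

func main() {
	err := &contextError{
		context: "outbounds",
		parent: &contextError{
			context: "[0]",
			parent:  fmt.Errorf("unknown type"),
		},
	}
	fmt.Println(err) // outbounds.[0]: unknown type
}
```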
File diff suppressed because it is too large
@ -1,141 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
|
||||
kelvin = '\u212a'
|
||||
smallLongEss = '\u017f'
|
||||
)
|
||||
|
||||
// foldFunc returns one of four different case folding equivalence
|
||||
// functions, from most general (and slow) to fastest:
|
||||
//
|
||||
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
|
||||
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
|
||||
// 3) asciiEqualFold, no special, but includes non-letters (including _)
|
||||
// 4) simpleLetterEqualFold, no specials, no non-letters.
|
||||
//
|
||||
// The letters S and K are special because they map to 3 runes, not just 2:
|
||||
// - S maps to s and to U+017F 'ſ' Latin small letter long s
|
||||
// - k maps to K and to U+212A 'K' Kelvin sign
|
||||
//
|
||||
// See https://play.golang.org/p/tTxjOc0OGo
|
||||
//
|
||||
// The returned function is specialized for matching against s and
|
||||
// should only be given s. It's not curried for performance reasons.
|
||||
func foldFunc(s []byte) func(s, t []byte) bool {
|
||||
nonLetter := false
|
||||
special := false // special letter
|
||||
for _, b := range s {
|
||||
if b >= utf8.RuneSelf {
|
||||
return bytes.EqualFold
|
||||
}
|
||||
upper := b & caseMask
|
||||
if upper < 'A' || upper > 'Z' {
|
||||
nonLetter = true
|
||||
} else if upper == 'K' || upper == 'S' {
|
||||
// See above for why these letters are special.
|
||||
special = true
|
||||
}
|
||||
}
|
||||
if special {
|
||||
return equalFoldRight
|
||||
}
|
||||
if nonLetter {
|
||||
return asciiEqualFold
|
||||
}
|
||||
return simpleLetterEqualFold
|
||||
}
|
||||
|
||||
// equalFoldRight is a specialization of bytes.EqualFold when s is
|
||||
// known to be all ASCII (including punctuation), but contains an 's',
|
||||
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
|
||||
// See comments on foldFunc.
|
||||
func equalFoldRight(s, t []byte) bool {
|
||||
for _, sb := range s {
|
||||
if len(t) == 0 {
|
||||
return false
|
||||
}
|
||||
tb := t[0]
|
||||
if tb < utf8.RuneSelf {
|
||||
if sb != tb {
|
||||
sbUpper := sb & caseMask
|
||||
if 'A' <= sbUpper && sbUpper <= 'Z' {
|
||||
if sbUpper != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
t = t[1:]
|
||||
continue
|
||||
}
|
||||
// sb is ASCII and t is not. t must be either kelvin
|
||||
// sign or long s; sb must be s, S, k, or K.
|
||||
tr, size := utf8.DecodeRune(t)
|
||||
switch sb {
|
||||
case 's', 'S':
|
||||
if tr != smallLongEss {
|
||||
return false
|
||||
}
|
||||
case 'k', 'K':
|
||||
if tr != kelvin {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
t = t[size:]
|
||||
|
||||
}
|
||||
return len(t) == 0
|
||||
}
|
||||
|
||||
// asciiEqualFold is a specialization of bytes.EqualFold for use when
|
||||
// s is all ASCII (but may contain non-letters) and contains no
|
||||
// special-folding letters.
|
||||
// See comments on foldFunc.
|
||||
func asciiEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, sb := range s {
|
||||
tb := t[i]
|
||||
if sb == tb {
|
||||
continue
|
||||
}
|
||||
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
|
||||
if sb&caseMask != tb&caseMask {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
|
||||
// use when s is all ASCII letters (no underscores, etc) and also
|
||||
// doesn't contain 'k', 'K', 's', or 'S'.
|
||||
// See comments on foldFunc.
|
||||
func simpleLetterEqualFold(s, t []byte) bool {
|
||||
if len(s) != len(t) {
|
||||
return false
|
||||
}
|
||||
for i, b := range s {
|
||||
if b&caseMask != t[i]&caseMask {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
|
@@ -1,42 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gofuzz
-
-package json
-
-import (
-	"fmt"
-)
-
-func Fuzz(data []byte) (score int) {
-	for _, ctor := range []func() any{
-		func() any { return new(any) },
-		func() any { return new(map[string]any) },
-		func() any { return new([]any) },
-	} {
-		v := ctor()
-		err := Unmarshal(data, v)
-		if err != nil {
-			continue
-		}
-		score = 1
-
-		m, err := Marshal(v)
-		if err != nil {
-			fmt.Printf("v=%#v\n", v)
-			panic(err)
-		}
-
-		u := ctor()
-		err = Unmarshal(m, u)
-		if err != nil {
-			fmt.Printf("v=%#v\n", v)
-			fmt.Printf("m=%s\n", m)
-			panic(err)
-		}
-	}
-
-	return
-}
@ -1,143 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
// Compact appends to dst the JSON-encoded src with
|
||||
// insignificant space characters elided.
|
||||
func Compact(dst *bytes.Buffer, src []byte) error {
|
||||
return compact(dst, src, false)
|
||||
}
|
||||
|
||||
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
|
||||
origLen := dst.Len()
|
||||
scan := newScanner()
|
||||
defer freeScanner(scan)
|
||||
start := 0
|
||||
for i, c := range src {
|
||||
if escape && (c == '<' || c == '>' || c == '&') {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u00`)
|
||||
dst.WriteByte(hex[c>>4])
|
||||
dst.WriteByte(hex[c&0xF])
|
||||
start = i + 1
|
||||
}
|
||||
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
|
||||
if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u202`)
|
||||
dst.WriteByte(hex[src[i+2]&0xF])
|
||||
start = i + 3
|
||||
}
|
||||
v := scan.step(scan, c)
|
||||
if v >= scanSkipSpace {
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
if start < len(src) {
|
||||
dst.Write(src[start:])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
|
||||
dst.WriteByte('\n')
|
||||
dst.WriteString(prefix)
|
||||
for i := 0; i < depth; i++ {
|
||||
dst.WriteString(indent)
|
||||
}
|
||||
}
|
||||
|
||||
// Indent appends to dst an indented form of the JSON-encoded src.
|
||||
// Each element in a JSON object or array begins on a new,
|
||||
// indented line beginning with prefix followed by one or more
|
||||
// copies of indent according to the indentation nesting.
|
||||
// The data appended to dst does not begin with the prefix nor
|
||||
// any indentation, to make it easier to embed inside other formatted JSON data.
|
||||
// Although leading space characters (space, tab, carriage return, newline)
|
||||
// at the beginning of src are dropped, trailing space characters
|
||||
// at the end of src are preserved and copied to dst.
|
||||
// For example, if src has no trailing spaces, neither will dst;
|
||||
// if src ends in a trailing newline, so will dst.
|
||||
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
|
||||
origLen := dst.Len()
|
||||
scan := newScanner()
|
||||
defer freeScanner(scan)
|
||||
needIndent := false
|
||||
depth := 0
|
||||
for _, c := range src {
|
||||
scan.bytes++
|
||||
v := scan.step(scan, c)
|
||||
if v == scanSkipSpace {
|
||||
continue
|
||||
}
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if needIndent && v != scanEndObject && v != scanEndArray {
|
||||
needIndent = false
|
||||
depth++
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
|
||||
// Emit semantically uninteresting bytes
|
||||
// (in particular, punctuation in strings) unmodified.
|
||||
if v == scanContinue {
|
||||
dst.WriteByte(c)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add spacing around real punctuation.
|
||||
switch c {
|
||||
case '{', '[':
|
||||
// delay indent so that empty object and array are formatted as {} and [].
|
||||
needIndent = true
|
||||
dst.WriteByte(c)
|
||||
|
||||
case ',':
|
||||
dst.WriteByte(c)
|
||||
newline(dst, prefix, indent, depth)
|
||||
|
||||
case ':':
|
||||
dst.WriteByte(c)
|
||||
dst.WriteByte(' ')
|
||||
|
||||
case '}', ']':
|
||||
if needIndent {
|
||||
// suppress indent in empty object/array
|
||||
needIndent = false
|
||||
} else {
|
||||
depth--
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
dst.WriteByte(c)
|
||||
|
||||
default:
|
||||
dst.WriteByte(c)
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,610 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
// JSON value parser state machine.
|
||||
// Just about at the limit of what is reasonable to write by hand.
|
||||
// Some parts are a bit tedious, but overall it nicely factors out the
|
||||
// otherwise common code from the multiple scanning functions
|
||||
// in this package (Compact, Indent, checkValid, etc).
|
||||
//
|
||||
// This file starts with two simple examples using the scanner
|
||||
// before diving into the scanner itself.
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Valid reports whether data is a valid JSON encoding.
|
||||
func Valid(data []byte) bool {
|
||||
scan := newScanner()
|
||||
defer freeScanner(scan)
|
||||
return checkValid(data, scan) == nil
|
||||
}
|
||||
|
||||
// checkValid verifies that data is valid JSON-encoded data.
|
||||
// scan is passed in for use by checkValid to avoid an allocation.
|
||||
// checkValid returns nil or a SyntaxError.
|
||||
func checkValid(data []byte, scan *scanner) error {
|
||||
scan.reset()
|
||||
for _, c := range data {
|
||||
scan.bytes++
|
||||
if scan.step(scan, c) == scanError {
|
||||
return scan.err
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A SyntaxError is a description of a JSON syntax error.
|
||||
// Unmarshal will return a SyntaxError if the JSON can't be parsed.
|
||||
type SyntaxError struct {
|
||||
msg string // description of error
|
||||
Offset int64 // error occurred after reading Offset bytes
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string { return e.msg }
|
||||
|
||||
// A scanner is a JSON scanning state machine.
|
||||
// Callers call scan.reset and then pass bytes in one at a time
|
||||
// by calling scan.step(&scan, c) for each byte.
|
||||
// The return value, referred to as an opcode, tells the
|
||||
// caller about significant parsing events like beginning
|
||||
// and ending literals, objects, and arrays, so that the
|
||||
// caller can follow along if it wishes.
|
||||
// The return value scanEnd indicates that a single top-level
|
||||
// JSON value has been completed, *before* the byte that
|
||||
// just got passed in. (The indication must be delayed in order
|
||||
// to recognize the end of numbers: is 123 a whole value or
|
||||
// the beginning of 12345e+6?).
|
||||
type scanner struct {
|
||||
// The step is a func to be called to execute the next transition.
|
||||
// Also tried using an integer constant and a single func
|
||||
// with a switch, but using the func directly was 10% faster
|
||||
// on a 64-bit Mac Mini, and it's nicer to read.
|
||||
step func(*scanner, byte) int
|
||||
|
||||
// Reached end of top-level value.
|
||||
endTop bool
|
||||
|
||||
// Stack of what we're in the middle of - array values, object keys, object values.
|
||||
parseState []int
|
||||
|
||||
// Error that happened, if any.
|
||||
err error
|
||||
|
||||
// total bytes consumed, updated by decoder.Decode (and deliberately
|
||||
// not set to zero by scan.reset)
|
||||
bytes int64
|
||||
}
|
||||
|
||||
var scannerPool = sync.Pool{
|
||||
New: func() any {
|
||||
return &scanner{}
|
||||
},
|
||||
}
|
||||
|
||||
func newScanner() *scanner {
|
||||
scan := scannerPool.Get().(*scanner)
|
||||
// scan.reset by design doesn't set bytes to zero
|
||||
scan.bytes = 0
|
||||
scan.reset()
|
||||
return scan
|
||||
}
|
||||
|
||||
func freeScanner(scan *scanner) {
|
||||
// Avoid hanging on to too much memory in extreme cases.
|
||||
if len(scan.parseState) > 1024 {
|
||||
scan.parseState = nil
|
||||
}
|
||||
scannerPool.Put(scan)
|
||||
}
|
||||
|
||||
// These values are returned by the state transition functions
|
||||
// assigned to scanner.state and the method scanner.eof.
|
||||
// They give details about the current state of the scan that
|
||||
// callers might be interested to know about.
|
||||
// It is okay to ignore the return value of any particular
|
||||
// call to scanner.state: if one call returns scanError,
|
||||
// every subsequent call will return scanError too.
|
||||
const (
|
||||
// Continue.
|
||||
scanContinue = iota // uninteresting byte
|
||||
scanBeginLiteral // end implied by next result != scanContinue
|
||||
scanBeginObject // begin object
|
||||
scanObjectKey // just finished object key (string)
|
||||
scanObjectValue // just finished non-last object value
|
||||
scanEndObject // end object (implies scanObjectValue if possible)
|
||||
scanBeginArray // begin array
|
||||
scanArrayValue // just finished array value
|
||||
scanEndArray // end array (implies scanArrayValue if possible)
|
||||
scanSkipSpace // space byte; can skip; known to be last "continue" result
|
||||
|
||||
// Stop.
|
||||
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
|
||||
scanError // hit an error, scanner.err.
|
||||
)
|
||||
|
||||
// These values are stored in the parseState stack.
|
||||
// They give the current state of a composite value
|
||||
// being scanned. If the parser is inside a nested value
|
||||
// the parseState describes the nested state, outermost at entry 0.
|
||||
const (
|
||||
parseObjectKey = iota // parsing object key (before colon)
|
||||
parseObjectValue // parsing object value (after colon)
|
||||
parseArrayValue // parsing array value
|
||||
)
|
||||
|
||||
// This limits the max nesting depth to prevent stack overflow.
|
||||
// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
|
||||
const maxNestingDepth = 10000
|
||||
|
||||
// reset prepares the scanner for use.
|
||||
// It must be called before calling s.step.
|
||||
func (s *scanner) reset() {
|
||||
s.step = stateBeginValue
|
||||
s.parseState = s.parseState[0:0]
|
||||
s.err = nil
|
||||
s.endTop = false
|
||||
}
|
||||
|
||||
// eof tells the scanner that the end of input has been reached.
|
||||
// It returns a scan status just as s.step does.
|
||||
func (s *scanner) eof() int {
|
||||
if s.err != nil {
|
||||
return scanError
|
||||
}
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
s.step(s, ' ')
|
||||
if s.endTop {
|
||||
return scanEnd
|
||||
}
|
||||
if s.err == nil {
|
||||
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
|
||||
}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// pushParseState pushes a new parse state p onto the parse stack.
|
||||
// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
|
||||
func (s *scanner) pushParseState(c byte, newParseState int, successState int) int {
|
||||
s.parseState = append(s.parseState, newParseState)
|
||||
if len(s.parseState) <= maxNestingDepth {
|
||||
return successState
|
||||
}
|
||||
return s.error(c, "exceeded max depth")
|
||||
}
|
||||
|
||||
// popParseState pops a parse state (already obtained) off the stack
|
||||
// and updates s.step accordingly.
|
||||
func (s *scanner) popParseState() {
|
||||
n := len(s.parseState) - 1
|
||||
s.parseState = s.parseState[0:n]
|
||||
if n == 0 {
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
} else {
|
||||
s.step = stateEndValue
|
||||
}
|
||||
}
|
||||
|
||||
func isSpace(c byte) bool {
|
||||
return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
|
||||
}
|
||||
|
||||
// stateBeginValueOrEmpty is the state after reading `[`.
|
||||
func stateBeginValueOrEmpty(s *scanner, c byte) int {
|
||||
if isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == ']' {
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginValue(s, c)
|
||||
}
|
||||
|
||||
// stateBeginValue is the state at the beginning of the input.
|
||||
func stateBeginValue(s *scanner, c byte) int {
|
||||
if isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
switch c {
|
||||
case '{':
|
||||
s.step = stateBeginStringOrEmpty
|
||||
return s.pushParseState(c, parseObjectKey, scanBeginObject)
|
||||
case '[':
|
||||
s.step = stateBeginValueOrEmpty
|
||||
return s.pushParseState(c, parseArrayValue, scanBeginArray)
|
||||
case '"':
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
case '-':
|
||||
s.step = stateNeg
|
||||
return scanBeginLiteral
|
||||
case '0': // beginning of 0.123
|
||||
s.step = state0
|
||||
return scanBeginLiteral
|
||||
case 't': // beginning of true
|
||||
s.step = stateT
|
||||
return scanBeginLiteral
|
||||
case 'f': // beginning of false
|
||||
s.step = stateF
|
||||
return scanBeginLiteral
|
||||
case 'n': // beginning of null
|
||||
s.step = stateN
|
||||
return scanBeginLiteral
|
||||
}
|
||||
if '1' <= c && c <= '9' { // beginning of 1234.5
|
||||
s.step = state1
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of value")
|
||||
}
|
||||
|
||||
// stateBeginStringOrEmpty is the state after reading `{`.
|
||||
func stateBeginStringOrEmpty(s *scanner, c byte) int {
|
||||
if isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '}' {
|
||||
n := len(s.parseState)
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginString(s, c)
|
||||
}
|
||||
|
||||
// stateBeginString is the state after reading `{"key": value,`.
|
||||
func stateBeginString(s *scanner, c byte) int {
|
||||
if isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '"' {
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of object key string")
|
||||
}
|
||||
|
||||
// stateEndValue is the state after completing a value,
|
||||
// such as after reading `{}` or `true` or `["x"`.
|
||||
func stateEndValue(s *scanner, c byte) int {
|
||||
n := len(s.parseState)
|
||||
if n == 0 {
|
||||
// Completed top-level before the current byte.
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
return stateEndTop(s, c)
|
||||
}
|
||||
if isSpace(c) {
|
||||
s.step = stateEndValue
|
||||
return scanSkipSpace
|
||||
}
|
||||
ps := s.parseState[n-1]
|
||||
switch ps {
|
||||
case parseObjectKey:
|
||||
if c == ':' {
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
s.step = stateBeginValue
|
||||
return scanObjectKey
|
||||
}
|
||||
return s.error(c, "after object key")
|
||||
case parseObjectValue:
|
||||
if c == ',' {
|
||||
s.parseState[n-1] = parseObjectKey
|
||||
s.step = stateBeginString
|
||||
return scanObjectValue
|
||||
}
|
||||
if c == '}' {
|
||||
s.popParseState()
|
||||
return scanEndObject
|
||||
}
|
||||
return s.error(c, "after object key:value pair")
|
||||
case parseArrayValue:
|
||||
if c == ',' {
|
||||
s.step = stateBeginValue
|
||||
return scanArrayValue
|
||||
}
|
||||
if c == ']' {
|
||||
s.popParseState()
|
||||
return scanEndArray
|
||||
}
|
||||
return s.error(c, "after array element")
|
||||
}
|
||||
return s.error(c, "")
|
||||
}
|
||||
|
||||
// stateEndTop is the state after finishing the top-level value,
|
||||
// such as after reading `{}` or `[1,2,3]`.
|
||||
// Only space characters should be seen now.
|
||||
func stateEndTop(s *scanner, c byte) int {
|
||||
if !isSpace(c) {
|
||||
// Complain about non-space byte on next call.
|
||||
s.error(c, "after top-level value")
|
||||
}
|
||||
return scanEnd
|
||||
}
|
||||
|
||||
// stateInString is the state after reading `"`.
|
||||
func stateInString(s *scanner, c byte) int {
|
||||
if c == '"' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
if c == '\\' {
|
||||
s.step = stateInStringEsc
|
||||
return scanContinue
|
||||
}
|
||||
if c < 0x20 {
|
||||
return s.error(c, "in string literal")
|
||||
}
|
||||
return scanContinue
|
||||
}
|
||||
|
||||
// stateInStringEsc is the state after reading `"\` during a quoted string.
|
||||
func stateInStringEsc(s *scanner, c byte) int {
|
||||
switch c {
|
||||
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
case 'u':
|
||||
s.step = stateInStringEscU
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in string escape code")
|
||||
}
|
||||
|
||||
// stateInStringEscU is the state after reading `"\u` during a quoted string.
|
||||
func stateInStringEscU(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU1
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
|
||||
func stateInStringEscU1(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU12
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
|
||||
func stateInStringEscU12(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU123
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
|
||||
func stateInStringEscU123(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateNeg is the state after reading `-` during a number.
|
||||
func stateNeg(s *scanner, c byte) int {
|
||||
if c == '0' {
|
||||
s.step = state0
|
||||
return scanContinue
|
||||
}
|
||||
if '1' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in numeric literal")
|
||||
}
|
||||
|
||||
// state1 is the state after reading a non-zero integer during a number,
|
||||
// such as after reading `1` or `100` but not `0`.
|
||||
func state1(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return state0(s, c)
|
||||
}
|
||||
|
||||
// state0 is the state after reading `0` during a number.
|
||||
func state0(s *scanner, c byte) int {
|
||||
if c == '.' {
|
||||
s.step = stateDot
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateDot is the state after reading the integer and decimal point in a number,
|
||||
// such as after reading `1.`.
|
||||
func stateDot(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateDot0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "after decimal point in numeric literal")
|
||||
}
|
||||
|
||||
// stateDot0 is the state after reading the integer, decimal point, and subsequent
|
||||
// digits of a number, such as after reading `3.14`.
|
||||
func stateDot0(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateE is the state after reading the mantissa and e in a number,
|
||||
// such as after reading `314e` or `0.314e`.
|
||||
func stateE(s *scanner, c byte) int {
|
||||
if c == '+' || c == '-' {
|
||||
s.step = stateESign
|
||||
return scanContinue
|
||||
}
|
||||
return stateESign(s, c)
|
||||
}
|
||||
|
||||
// stateESign is the state after reading the mantissa, e, and sign in a number,
|
||||
// such as after reading `314e-` or `0.314e+`.
|
||||
func stateESign(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateE0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in exponent of numeric literal")
|
||||
}
|
||||
|
||||
// stateE0 is the state after reading the mantissa, e, optional sign,
|
||||
// and at least one digit of the exponent in a number,
|
||||
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
|
||||
func stateE0(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateT is the state after reading `t`.
|
||||
func stateT(s *scanner, c byte) int {
|
||||
if c == 'r' {
|
||||
s.step = stateTr
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'r')")
|
||||
}
|
||||
|
||||
// stateTr is the state after reading `tr`.
|
||||
func stateTr(s *scanner, c byte) int {
|
||||
if c == 'u' {
|
||||
s.step = stateTru
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateTru is the state after reading `tru`.
|
||||
func stateTru(s *scanner, c byte) int {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateF is the state after reading `f`.
|
||||
func stateF(s *scanner, c byte) int {
|
||||
if c == 'a' {
|
||||
s.step = stateFa
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'a')")
|
||||
}
|
||||
|
||||
// stateFa is the state after reading `fa`.
|
||||
func stateFa(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateFal
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateFal is the state after reading `fal`.
|
||||
func stateFal(s *scanner, c byte) int {
|
||||
if c == 's' {
|
||||
s.step = stateFals
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 's')")
|
||||
}
|
||||
|
||||
// stateFals is the state after reading `fals`.
|
||||
func stateFals(s *scanner, c byte) int {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateN is the state after reading `n`.
|
||||
func stateN(s *scanner, c byte) int {
|
||||
if c == 'u' {
|
||||
s.step = stateNu
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateNu is the state after reading `nu`.
|
||||
func stateNu(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateNul
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateNul is the state after reading `nul`.
|
||||
func stateNul(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateError is the state after reaching a syntax error,
|
||||
// such as after reading `[1}` or `5.1.2`.
|
||||
func stateError(s *scanner, c byte) int {
|
||||
return scanError
|
||||
}
|
||||
|
||||
// error records an error and switches to the error state.
|
||||
func (s *scanner) error(c byte, context string) int {
|
||||
s.step = stateError
|
||||
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// quoteChar formats c as a quoted character literal.
|
||||
func quoteChar(c byte) string {
|
||||
// special cases - different from quoted strings
|
||||
if c == '\'' {
|
||||
return `'\''`
|
||||
}
|
||||
if c == '"' {
|
||||
return `'"'`
|
||||
}
|
||||
|
||||
// use quoted string with different quotation marks
|
||||
s := strconv.Quote(string(c))
|
||||
return "'" + s[1:len(s)-1] + "'"
|
||||
}
|
|
@ -1,517 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// A Decoder reads and decodes JSON values from an input stream.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
buf []byte
|
||||
d decodeState
|
||||
scanp int // start of unread data in buf
|
||||
scanned int64 // amount of data already scanned
|
||||
scan scanner
|
||||
err error
|
||||
|
||||
tokenState int
|
||||
tokenStack []int
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
//
|
||||
// The decoder introduces its own buffering and may
|
||||
// read data from r beyond the JSON values requested.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{r: r}
|
||||
}
|
||||
|
||||
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
|
||||
// Number instead of as a float64.
|
||||
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
|
||||
|
||||
// DisallowUnknownFields causes the Decoder to return an error when the destination
|
||||
// is a struct and the input contains object keys which do not match any
|
||||
// non-ignored, exported fields in the destination.
|
||||
func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true }
|
||||
|
||||
// Decode reads the next JSON-encoded value from its
|
||||
// input and stores it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for Unmarshal for details about
|
||||
// the conversion of JSON into a Go value.
|
||||
func (dec *Decoder) Decode(v any) error {
|
||||
if dec.err != nil {
|
||||
return dec.err
|
||||
}
|
||||
|
||||
if err := dec.tokenPrepareForDecode(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !dec.tokenValueAllowed() {
|
||||
return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()}
|
||||
}
|
||||
|
||||
// Read whole value into buffer.
|
||||
n, err := dec.readValue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
|
||||
dec.scanp += n
|
||||
|
||||
// Don't save err from unmarshal into dec.err:
|
||||
// the connection is still usable since we read a complete JSON
|
||||
// object from it before the error happened.
|
||||
err = dec.d.unmarshal(v)
|
||||
|
||||
// fixup token streaming state
|
||||
dec.tokenValueEnd()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Buffered returns a reader of the data remaining in the Decoder's
|
||||
// buffer. The reader is valid until the next call to Decode.
|
||||
func (dec *Decoder) Buffered() io.Reader {
|
||||
return bytes.NewReader(dec.buf[dec.scanp:])
|
||||
}
|
||||
|
||||
// readValue reads a JSON value into dec.buf.
|
||||
// It returns the length of the encoding.
|
||||
func (dec *Decoder) readValue() (int, error) {
|
||||
dec.scan.reset()
|
||||
|
||||
scanp := dec.scanp
|
||||
var err error
|
||||
Input:
|
||||
// help the compiler see that scanp is never negative, so it can remove
|
||||
// some bounds checks below.
|
||||
for scanp >= 0 {
|
||||
|
||||
// Look in the buffer for a new value.
|
||||
for ; scanp < len(dec.buf); scanp++ {
|
||||
c := dec.buf[scanp]
|
||||
dec.scan.bytes++
|
||||
switch dec.scan.step(&dec.scan, c) {
|
||||
case scanEnd:
|
||||
// scanEnd is delayed one byte so we decrement
|
||||
// the scanner bytes count by 1 to ensure that
|
||||
// this value is correct in the next call of Decode.
|
||||
dec.scan.bytes--
|
||||
break Input
|
||||
case scanEndObject, scanEndArray:
|
||||
// scanEnd is delayed one byte.
|
||||
// We might block trying to get that byte from src,
|
||||
// so instead invent a space byte.
|
||||
if stateEndValue(&dec.scan, ' ') == scanEnd {
|
||||
scanp++
|
||||
break Input
|
||||
}
|
||||
case scanError:
|
||||
dec.err = dec.scan.err
|
||||
return 0, dec.scan.err
|
||||
}
|
||||
}
|
||||
|
||||
// Did the last read have an error?
|
||||
// Delayed until now to allow buffer scan.
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
if dec.scan.step(&dec.scan, ' ') == scanEnd {
|
||||
break Input
|
||||
}
|
||||
if nonSpace(dec.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}
|
||||
dec.err = err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n := scanp - dec.scanp
|
||||
err = dec.refill()
|
||||
scanp = dec.scanp + n
|
||||
}
|
||||
return scanp - dec.scanp, nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) refill() error {
|
||||
// Make room to read more into the buffer.
|
||||
// First slide down data already consumed.
|
||||
if dec.scanp > 0 {
|
||||
dec.scanned += int64(dec.scanp)
|
||||
n := copy(dec.buf, dec.buf[dec.scanp:])
|
||||
dec.buf = dec.buf[:n]
|
||||
dec.scanp = 0
|
||||
}
|
||||
|
||||
// Grow buffer if not large enough.
|
||||
const minRead = 512
|
||||
if cap(dec.buf)-len(dec.buf) < minRead {
|
||||
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
|
||||
copy(newBuf, dec.buf)
|
||||
dec.buf = newBuf
|
||||
}
|
||||
|
||||
// Read. Delay error for next iteration (after scan).
|
||||
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
||||
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func nonSpace(b []byte) bool {
|
||||
for _, c := range b {
|
||||
if !isSpace(c) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// An Encoder writes JSON values to an output stream.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
err error
|
||||
escapeHTML bool
|
||||
|
||||
indentBuf *bytes.Buffer
|
||||
indentPrefix string
|
||||
indentValue string
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: w, escapeHTML: true}
|
||||
}
|
||||
|
||||
// Encode writes the JSON encoding of v to the stream,
|
||||
// followed by a newline character.
|
||||
//
|
||||
// See the documentation for Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (enc *Encoder) Encode(v any) error {
|
||||
if enc.err != nil {
|
||||
return enc.err
|
||||
}
|
||||
|
||||
e := newEncodeState()
|
||||
defer encodeStatePool.Put(e)
|
||||
|
||||
err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Terminate each value with a newline.
|
||||
// This makes the output look a little nicer
|
||||
// when debugging, and some kind of space
|
||||
// is required if the encoded value was a number,
|
||||
// so that the reader knows there aren't more
|
||||
// digits coming.
|
||||
e.WriteByte('\n')
|
||||
|
||||
b := e.Bytes()
|
||||
if enc.indentPrefix != "" || enc.indentValue != "" {
|
||||
if enc.indentBuf == nil {
|
||||
enc.indentBuf = new(bytes.Buffer)
|
||||
}
|
||||
enc.indentBuf.Reset()
|
||||
err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b = enc.indentBuf.Bytes()
|
||||
}
|
||||
if _, err = enc.w.Write(b); err != nil {
|
||||
enc.err = err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// SetIndent instructs the encoder to format each subsequent encoded
|
||||
// value as if indented by the package-level function Indent(dst, src, prefix, indent).
|
||||
// Calling SetIndent("", "") disables indentation.
|
||||
func (enc *Encoder) SetIndent(prefix, indent string) {
|
||||
enc.indentPrefix = prefix
|
||||
enc.indentValue = indent
|
||||
}
|
||||
|
||||
// SetEscapeHTML specifies whether problematic HTML characters
|
||||
// should be escaped inside JSON quoted strings.
|
||||
// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e
|
||||
// to avoid certain safety problems that can arise when embedding JSON in HTML.
|
||||
//
|
||||
// In non-HTML settings where the escaping interferes with the readability
|
||||
// of the output, SetEscapeHTML(false) disables this behavior.
|
||||
func (enc *Encoder) SetEscapeHTML(on bool) {
|
||||
enc.escapeHTML = on
|
||||
}
|
||||
|
||||
// RawMessage is a raw encoded JSON value.
|
||||
// It implements Marshaler and Unmarshaler and can
|
||||
// be used to delay JSON decoding or precompute a JSON encoding.
|
||||
type RawMessage []byte
|
||||
|
||||
// MarshalJSON returns m as the JSON encoding of m.
|
||||
func (m RawMessage) MarshalJSON() ([]byte, error) {
|
||||
if m == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON sets *m to a copy of data.
|
||||
func (m *RawMessage) UnmarshalJSON(data []byte) error {
|
||||
if m == nil {
|
||||
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
|
||||
}
|
||||
*m = append((*m)[0:0], data...)
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
_ Marshaler = (*RawMessage)(nil)
|
||||
_ Unmarshaler = (*RawMessage)(nil)
|
||||
)
|
||||
|
||||
// A Token holds a value of one of these types:
|
||||
//
|
||||
// Delim, for the four JSON delimiters [ ] { }
|
||||
// bool, for JSON booleans
|
||||
// float64, for JSON numbers
|
||||
// Number, for JSON numbers
|
||||
// string, for JSON string literals
|
||||
// nil, for JSON null
|
||||
type Token any
|
||||
|
||||
const (
|
||||
tokenTopValue = iota
|
||||
tokenArrayStart
|
||||
tokenArrayValue
|
||||
tokenArrayComma
|
||||
tokenObjectStart
|
||||
tokenObjectKey
|
||||
tokenObjectColon
|
||||
tokenObjectValue
|
||||
tokenObjectComma
|
||||
)
|
||||
|
||||
// advance tokenstate from a separator state to a value state
|
||||
func (dec *Decoder) tokenPrepareForDecode() error {
|
||||
// Note: Not calling peek before switch, to avoid
|
||||
// putting peek into the standard Decode path.
|
||||
// peek is only called when using the Token API.
|
||||
switch dec.tokenState {
|
||||
case tokenArrayComma:
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c != ',' {
|
||||
return &SyntaxError{"expected comma after array element", dec.InputOffset()}
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenArrayValue
|
||||
case tokenObjectColon:
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c != ':' {
|
||||
return &SyntaxError{"expected colon after object key", dec.InputOffset()}
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenValueAllowed() bool {
|
||||
switch dec.tokenState {
|
||||
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenValueEnd() {
|
||||
switch dec.tokenState {
|
||||
case tokenArrayStart, tokenArrayValue:
|
||||
dec.tokenState = tokenArrayComma
|
||||
case tokenObjectValue:
|
||||
dec.tokenState = tokenObjectComma
|
||||
}
|
||||
}
|
||||
|
||||
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
|
||||
type Delim rune
|
||||
|
||||
func (d Delim) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
// Token returns the next JSON token in the input stream.
|
||||
// At the end of the input stream, Token returns nil, io.EOF.
|
||||
//
|
||||
// Token guarantees that the delimiters [ ] { } it returns are
|
||||
// properly nested and matched: if Token encounters an unexpected
|
||||
// delimiter in the input, it will return an error.
|
||||
//
|
||||
// The input stream consists of basic JSON values—bool, string,
|
||||
// number, and null—along with delimiters [ ] { } of type Delim
|
||||
// to mark the start and end of arrays and objects.
|
||||
// Commas and colons are elided.
|
||||
func (dec *Decoder) Token() (Token, error) {
|
||||
for {
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch c {
|
||||
case '[':
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||
dec.tokenState = tokenArrayStart
|
||||
return Delim('['), nil
|
||||
|
||||
case ']':
|
||||
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||
dec.tokenValueEnd()
|
||||
return Delim(']'), nil
|
||||
|
||||
case '{':
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||
dec.tokenState = tokenObjectStart
|
||||
return Delim('{'), nil
|
||||
|
||||
case '}':
|
||||
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||
dec.tokenValueEnd()
|
||||
return Delim('}'), nil
|
||||
|
||||
case ':':
|
||||
if dec.tokenState != tokenObjectColon {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectValue
|
||||
continue
|
||||
|
||||
case ',':
|
||||
if dec.tokenState == tokenArrayComma {
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenArrayValue
|
||||
continue
|
||||
}
|
||||
if dec.tokenState == tokenObjectComma {
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectKey
|
||||
continue
|
||||
}
|
||||
return dec.tokenError(c)
|
||||
|
||||
case '"':
|
||||
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
|
||||
var x string
|
||||
old := dec.tokenState
|
||||
dec.tokenState = tokenTopValue
|
||||
err := dec.Decode(&x)
|
||||
dec.tokenState = old
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dec.tokenState = tokenObjectColon
|
||||
return x, nil
|
||||
}
|
||||
fallthrough
|
||||
|
||||
default:
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
var x any
|
||||
if err := dec.Decode(&x); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenError(c byte) (Token, error) {
|
||||
var context string
|
||||
switch dec.tokenState {
|
||||
case tokenTopValue:
|
||||
context = " looking for beginning of value"
|
||||
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||
context = " looking for beginning of value"
|
||||
case tokenArrayComma:
|
||||
context = " after array element"
|
||||
case tokenObjectKey:
|
||||
context = " looking for beginning of object key string"
|
||||
case tokenObjectColon:
|
||||
context = " after object key"
|
||||
case tokenObjectComma:
|
||||
context = " after object key:value pair"
|
||||
}
|
||||
return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()}
|
||||
}
|
||||
|
||||
// More reports whether there is another element in the
|
||||
// current array or object being parsed.
|
||||
func (dec *Decoder) More() bool {
|
||||
c, err := dec.peek()
|
||||
return err == nil && c != ']' && c != '}'
|
||||
}
|
||||
|
||||
func (dec *Decoder) peek() (byte, error) {
|
||||
var err error
|
||||
for {
|
||||
for i := dec.scanp; i < len(dec.buf); i++ {
|
||||
c := dec.buf[i]
|
||||
if isSpace(c) {
|
||||
continue
|
||||
}
|
||||
dec.scanp = i
|
||||
return c, nil
|
||||
}
|
||||
// buffer has been scanned, now report any error
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = dec.refill()
|
||||
}
|
||||
}
|
||||
|
||||
// InputOffset returns the input stream byte offset of the current decoder position.
|
||||
// The offset gives the location of the end of the most recently returned token
|
||||
// and the beginning of the next token.
|
||||
func (dec *Decoder) InputOffset() int64 {
|
||||
return dec.scanned + int64(dec.scanp)
|
||||
}
|
|
@ -1,218 +0,0 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// safeSet holds the value true if the ASCII character with the given array
|
||||
// position can be represented inside a JSON string without any further
|
||||
// escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), and the backslash character ("\").
|
||||
var safeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': true,
|
||||
'=': true,
|
||||
'>': true,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
||||
|
||||
// htmlSafeSet holds the value true if the ASCII character with the given
|
||||
// array position can be safely represented inside a JSON string, embedded
|
||||
// inside of HTML <script> tags, without any additional escaping.
|
||||
//
|
||||
// All values are true except for the ASCII control characters (0-31), the
|
||||
// double quote ("), the backslash character ("\"), HTML opening and closing
|
||||
// tags ("<" and ">"), and the ampersand ("&").
|
||||
var htmlSafeSet = [utf8.RuneSelf]bool{
|
||||
' ': true,
|
||||
'!': true,
|
||||
'"': false,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': false,
|
||||
'\'': true,
|
||||
'(': true,
|
||||
')': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
',': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'/': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
':': true,
|
||||
';': true,
|
||||
'<': false,
|
||||
'=': true,
|
||||
'>': false,
|
||||
'?': true,
|
||||
'@': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'[': true,
|
||||
'\\': false,
|
||||
']': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'{': true,
|
||||
'|': true,
|
||||
'}': true,
|
||||
'~': true,
|
||||
'\u007f': true,
|
||||
}
|
|
@@ -1,38 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package json
-
-import (
-	"strings"
-)
-
-// tagOptions is the string following a comma in a struct field's "json"
-// tag, or the empty string. It does not include the leading comma.
-type tagOptions string
-
-// parseTag splits a struct field's json tag into its name and
-// comma-separated options.
-func parseTag(tag string) (string, tagOptions) {
-	tag, opt, _ := strings.Cut(tag, ",")
-	return tag, tagOptions(opt)
-}
-
-// Contains reports whether a comma-separated list of options
-// contains a particular substr flag. substr must be surrounded by a
-// string boundary or commas.
-func (o tagOptions) Contains(optionName string) bool {
-	if len(o) == 0 {
-		return false
-	}
-	s := string(o)
-	for s != "" {
-		var name string
-		name, s, _ = strings.Cut(s, ",")
-		if name == optionName {
-			return true
-		}
-	}
-	return false
-}
2
go.mod
@@ -1,6 +1,6 @@
 module github.com/sagernet/sing
 
-go 1.18
+go 1.20
 
 require (
 	github.com/stretchr/testify v1.9.0