go get -u ./...; go mod tidy

Frank Denis 2025-01-06 17:30:45 +01:00
parent efe8b7824c
commit 699a6a1ebc
224 changed files with 12493 additions and 4330 deletions


@ -0,0 +1,128 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"strconv"
"sync/atomic"
"unsafe"
"google.golang.org/protobuf/reflect/protoreflect"
)
func (Export) UnmarshalField(msg any, fieldNum int32) {
UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
}
// Present checks the presence set for a certain field number (zero
// based, ordered by appearance in original proto file). part is
// a pointer to the correct element in the bitmask array, num is the
// field number unaltered. Example (field number 70 -> part =
// &m.XXX_presence[2], num = 70)
func (Export) Present(part *uint32, num uint32) bool {
// This hook will read an unprotected shadow presence set if
// we're running under the race detector
raceDetectHookPresent(part, num)
return atomic.LoadUint32(part)&(1<<(num%32)) > 0
}
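// Worked example for Present: field number 70 lives in element 70/32 = 2 of
// the presence array, so the caller passes part = &m.XXX_presence[2] and the
// mask tested here is 1<<(70%32) = 1<<6.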
// SetPresent adds a field to the presence set. part is a pointer to
// the relevant element in the array and num is the field number
// unaltered. size is the number of fields in the protocol
// buffer.
func (Export) SetPresent(part *uint32, num uint32, size uint32) {
// This hook will mutate an unprotected shadow presence set if
// we're running under the race detector
raceDetectHookSetPresent(part, num, presenceSize(size))
for {
old := atomic.LoadUint32(part)
if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
return
}
}
}
// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
// It is meant for use by builder methods, where the message is known not
// to be accessible yet by other goroutines.
func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
// This hook will mutate an unprotected shadow presence set if
// we're running under the race detector
raceDetectHookSetPresent(part, num, presenceSize(size))
*part |= 1 << (num % 32)
}
// ClearPresent removes a field from the presence set. part is a
// pointer to the relevant element in the presence array and num is
// the field number unaltered.
func (Export) ClearPresent(part *uint32, num uint32) {
// This hook will mutate an unprotected shadow presence set if
// we're running under the race detector
raceDetectHookClearPresent(part, num)
for {
old := atomic.LoadUint32(part)
if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
return
}
}
}
// interfaceToPointer takes a pointer to an empty interface whose value is a
// pointer type, and converts it into a "pointer" that points to the same
// target
func interfaceToPointer(i *any) pointer {
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
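// interfaceToPointer relies on the Go runtime's two-word representation of an
// empty interface (type word, data word): index [1] extracts the data word,
// which is the stored pointer itself.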
func (p pointer) atomicGetPointer() pointer {
return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
}
func (p pointer) atomicSetPointer(q pointer) {
atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
}
// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
// pointer) and returns true if the pointed-to pointer is nil (using an
// atomic load). This function is inlineable and, on x86, just becomes a
// simple load and compare.
func (Export) AtomicCheckPointerIsNil(ptr any) bool {
return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
}
// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
// second is a pointer) and atomically sets the second pointer into location
// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline
// (even on x86), so this does not become a simple store on x86.
func (Export) AtomicSetPointer(dstPtr, valPtr any) {
interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
}
// AtomicLoadPointer loads the pointer at the location pointed at by src,
// and stores that pointer value into the location pointed at by dst.
func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
}
// AtomicInitializePointer makes ptr and dst point to the same value.
//
// If *ptr is a nil pointer, it sets *ptr = *dst.
//
// If *ptr is a non-nil pointer, it sets *dst = *ptr.
func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
}
}
// MessageFieldStringOf returns the field formatted as a string,
// either as the field name if resolvable otherwise as a decimal string.
func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
fd := md.Fields().ByNumber(n)
if fd != nil {
return string(fd.Name())
}
return strconv.Itoa(int(n))
}
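The compare-and-swap pattern behind AtomicInitializePointer can be sketched in isolation. A minimal, self-contained analogue (hypothetical names, not part of this diff):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// initOnce mirrors the AtomicInitializePointer contract: publish *dst into
// *ptr if *ptr is still nil; if another goroutine won the race, copy the
// winning value back into *dst so both locations agree afterwards.
func initOnce(ptr, dst *unsafe.Pointer) {
	if !atomic.CompareAndSwapPointer(ptr, nil, *dst) {
		*dst = atomic.LoadPointer(ptr)
	}
}

func main() {
	v := 42
	var slot unsafe.Pointer
	candidate := unsafe.Pointer(&v)
	initOnce(&slot, &candidate)
	fmt.Println(*(*int)(slot)) // 42
}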


@ -0,0 +1,34 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !race
package impl
// There is no additional data as we're not running under the race detector.
type RaceDetectHookData struct{}
// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
func (presence) raceDetectHookPresent(num uint32) {}
func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
func (presence) raceDetectHookClearPresent(num uint32) {}
func (presence) raceDetectHookAllocAndCopy(src presence) {}
// raceDetectHookPresent is called by the generated file interface
// (*proto.internalFuncs) Present to optionally read an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookPresent(field *uint32, num uint32) {}
// raceDetectHookSetPresent is called by the generated file interface
// (*proto.internalFuncs) SetPresent to optionally write an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
// raceDetectHookClearPresent is called by the generated file interface
// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookClearPresent(field *uint32, num uint32) {}


@ -0,0 +1,126 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build race
package impl
// When running under the race detector, we add a presence map of bytes that we can access
// in the hook functions so that we trigger race detection whenever we have concurrent
// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent
// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations.
type RaceDetectHookData struct {
shadowPresence *[]byte
}
// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
// using non-atomic operations.
func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
sp := make([]byte, size)
atomicStoreShadowPresence(&data.shadowPresence, &sp)
}
func (p presence) raceDetectHookPresent(num uint32) {
data := p.toRaceDetectData()
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp != nil {
_ = (*sp)[num]
}
}
func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
data := p.toRaceDetectData()
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp == nil {
data.raceDetectHookAlloc(size)
sp = atomicLoadShadowPresence(&data.shadowPresence)
}
(*sp)[num] = 1
}
func (p presence) raceDetectHookClearPresent(num uint32) {
data := p.toRaceDetectData()
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp != nil {
(*sp)[num] = 0
}
}
// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at p and copies
// shadowPresence bytes from q to p.
func (p presence) raceDetectHookAllocAndCopy(q presence) {
sData := q.toRaceDetectData()
dData := p.toRaceDetectData()
if sData == nil {
return
}
srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
if srcSp == nil {
atomicStoreShadowPresence(&dData.shadowPresence, nil)
return
}
n := len(*srcSp)
dSlice := make([]byte, n)
atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
for i := 0; i < n; i++ {
dSlice[i] = (*srcSp)[i]
}
}
// raceDetectHookPresent is called by the generated file interface
// (*proto.internalFuncs) Present to optionally read an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookPresent(field *uint32, num uint32) {
data := findPointerToRaceDetectData(field, num)
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp != nil {
_ = (*sp)[num]
}
}
// raceDetectHookSetPresent is called by the generated file interface
// (*proto.internalFuncs) SetPresent to optionally write an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
data := findPointerToRaceDetectData(field, num)
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp == nil {
data.raceDetectHookAlloc(size)
sp = atomicLoadShadowPresence(&data.shadowPresence)
}
(*sp)[num] = 1
}
// raceDetectHookClearPresent is called by the generated file interface
// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
// shadow bitmap when race detection is enabled. In regular code it is
// a noop.
func raceDetectHookClearPresent(field *uint32, num uint32) {
data := findPointerToRaceDetectData(field, num)
if data == nil {
return
}
sp := atomicLoadShadowPresence(&data.shadowPresence)
if sp != nil {
(*sp)[num] = 0
}
}
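The design note at the top of this file can be demonstrated in isolation: the race detector does not flag concurrent atomic updates, so each hook performs a plain write to a shadow byte purely to give the detector something to observe. A standalone sketch (hypothetical, run with go run -race):

package main

import (
	"sync"
	"sync/atomic"
)

func main() {
	var bits uint32           // presence word, updated atomically
	shadow := make([]byte, 1) // shadow byte, updated non-atomically

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// The atomic update alone is never reported by -race.
			for {
				old := atomic.LoadUint32(&bits)
				if atomic.CompareAndSwapUint32(&bits, old, old|1) {
					break
				}
			}
			// The unsynchronized shadow write is reported as a
			// write-write race, exposing the concurrent mutation.
			shadow[0] = 1
		}()
	}
	wg.Wait()
}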


@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
}
return nil
}
var presence presence
if mi.presenceOffset.IsValid() {
presence = p.Apply(mi.presenceOffset).PresenceInfo()
}
if mi.extensionOffset.IsValid() {
e := p.Apply(mi.extensionOffset).Extensions()
if err := mi.isInitExtensions(e); err != nil {
@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
if !f.isRequired && f.funcs.isInit == nil {
continue
}
if f.presenceIndex != noPresence {
if !presence.Present(f.presenceIndex) {
if f.isRequired {
return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
}
continue
}
if f.funcs.isInit != nil {
f.mi.init()
if f.mi.needsInitCheck {
if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
if !lazy.AllowedPartial() {
// Nothing to see here, it was checked on unmarshal
continue
}
mi.lazyUnmarshal(p, f.num)
}
if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
return err
}
}
}
continue
}
fptr := p.Apply(f.offset)
if f.isPointer && fptr.Elem().IsNil() {
if f.isRequired {


@ -67,7 +67,6 @@ type lazyExtensionValue struct {
xi *extensionFieldInfo
value protoreflect.Value
b []byte
fn func() protoreflect.Value
}
type ExtensionField struct {
@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
}
f.lazy.value = val
} else {
f.lazy.value = f.lazy.fn()
panic("No support for lazy fns for ExtensionField")
}
f.lazy.xi = nil
f.lazy.fn = nil
f.lazy.b = nil
atomic.StoreUint32(&f.lazy.atomicOnce, 1)
}
@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
f.lazy = nil
}
// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
// This must not be called concurrently.
func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
f.typ = t
f.lazy = &lazyExtensionValue{fn: fn}
}
// Value returns the value of the extension field.
// This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value {


@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
if err != nil {
return out, err
}
if cf.funcs.isInit == nil {
out.initialized = true
}
vi.Set(vw)
return out, nil
}


@ -0,0 +1,264 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"reflect"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/reflect/protoreflect"
)
func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
mi := getMessageInfo(ft)
if mi == nil {
panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
}
switch fd.Kind() {
case protoreflect.MessageKind:
return mi, pointerCoderFuncs{
size: sizeOpaqueMessage,
marshal: appendOpaqueMessage,
unmarshal: consumeOpaqueMessage,
isInit: isInitOpaqueMessage,
merge: mergeOpaqueMessage,
}
case protoreflect.GroupKind:
return mi, pointerCoderFuncs{
size: sizeOpaqueGroup,
marshal: appendOpaqueGroup,
unmarshal: consumeOpaqueGroup,
isInit: isInitOpaqueMessage,
merge: mergeOpaqueMessage,
}
}
panic("unexpected field kind")
}
func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
}
func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
mp := p.AtomicGetPointer()
calculatedSize := f.mi.sizePointer(mp, opts)
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(calculatedSize))
before := len(b)
b, err := f.mi.marshalAppendPointer(b, mp, opts)
if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
}
return b, err
}
func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
mp := p.AtomicGetPointer()
if mp.IsNil() {
mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
}
o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
if err != nil {
return out, err
}
out.n = n
out.initialized = o.initialized
return out, nil
}
func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
mp := p.AtomicGetPointer()
if mp.IsNil() {
return nil
}
return f.mi.checkInitializedPointer(mp)
}
func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
dstmp := dst.AtomicGetPointer()
if dstmp.IsNil() {
dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
}
f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
}
func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
}
func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
b = protowire.AppendVarint(b, f.wiretag) // start group
b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
b = protowire.AppendVarint(b, f.wiretag+1) // end group
return b, err
}
func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.StartGroupType {
return out, errUnknown
}
mp := p.AtomicGetPointer()
if mp.IsNil() {
mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
}
o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
return o, e
}
func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
}
mt := ft.Elem().Elem() // *[]*T -> *T
mi := getMessageInfo(mt)
if mi == nil {
panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
}
switch fd.Kind() {
case protoreflect.MessageKind:
return mi, pointerCoderFuncs{
size: sizeOpaqueMessageSlice,
marshal: appendOpaqueMessageSlice,
unmarshal: consumeOpaqueMessageSlice,
isInit: isInitOpaqueMessageSlice,
merge: mergeOpaqueMessageSlice,
}
case protoreflect.GroupKind:
return mi, pointerCoderFuncs{
size: sizeOpaqueGroupSlice,
marshal: appendOpaqueGroupSlice,
unmarshal: consumeOpaqueGroupSlice,
isInit: isInitOpaqueMessageSlice,
merge: mergeOpaqueMessageSlice,
}
}
panic("unexpected field kind")
}
func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.AtomicGetPointer().PointerSlice()
n := 0
for _, v := range s {
n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
}
return n
}
func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.AtomicGetPointer().PointerSlice()
var err error
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag)
siz := f.mi.sizePointer(v, opts)
b = protowire.AppendVarint(b, uint64(siz))
before := len(b)
b, err = f.mi.marshalAppendPointer(b, v, opts)
if err != nil {
return b, err
}
if measuredSize := len(b) - before; siz != measuredSize {
return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
}
}
return b, nil
}
func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.BytesType {
return out, errUnknown
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
if err != nil {
return out, err
}
sp := p.AtomicGetPointer()
if sp.IsNil() {
sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
}
sp.AppendPointerSlice(mp)
out.n = n
out.initialized = o.initialized
return out, nil
}
func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
sp := p.AtomicGetPointer()
if sp.IsNil() {
return nil
}
s := sp.PointerSlice()
for _, v := range s {
if err := f.mi.checkInitializedPointer(v); err != nil {
return err
}
}
return nil
}
func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
ds := dst.AtomicGetPointer()
if ds.IsNil() {
ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
}
for _, sp := range src.AtomicGetPointer().PointerSlice() {
dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
f.mi.mergePointer(dm, sp, opts)
ds.AppendPointerSlice(dm)
}
}
func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.AtomicGetPointer().PointerSlice()
n := 0
for _, v := range s {
n += 2*f.tagsize + f.mi.sizePointer(v, opts)
}
return n
}
func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.AtomicGetPointer().PointerSlice()
var err error
for _, v := range s {
b = protowire.AppendVarint(b, f.wiretag) // start group
b, err = f.mi.marshalAppendPointer(b, v, opts)
if err != nil {
return b, err
}
b = protowire.AppendVarint(b, f.wiretag+1) // end group
}
return b, nil
}
func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.StartGroupType {
return out, errUnknown
}
mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
if err != nil {
return out, err
}
sp := p.AtomicGetPointer()
if sp.IsNil() {
sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
}
sp.AppendPointerSlice(mp)
return out, err
}


@ -32,6 +32,10 @@ type coderMessageInfo struct {
needsInitCheck bool
isMessageSet bool
numRequiredFields uint8
lazyOffset offset
presenceOffset offset
presenceSize presenceSize
}
type coderFieldInfo struct {
@ -45,12 +49,19 @@ type coderFieldInfo struct {
tagsize int // size of the varint-encoded tag
isPointer bool // true if IsNil may be called on the struct field
isRequired bool // true if field is required
isLazy bool
presenceIndex uint32
}
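// noPresence is the sentinel presenceIndex value for fields that do not
// participate in the presence bitmap.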
const noPresence = 0xffffffff
func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
mi.sizecacheOffset = invalidOffset
mi.unknownOffset = invalidOffset
mi.extensionOffset = invalidOffset
mi.lazyOffset = invalidOffset
mi.presenceOffset = si.presenceOffset
if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
mi.sizecacheOffset = si.sizecacheOffset
@ -127,6 +138,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
validation: newFieldValidationInfo(mi, si, fd, ft),
isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
isRequired: fd.Cardinality() == protoreflect.Required,
presenceIndex: noPresence,
}
mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
mi.coderFields[cf.num] = cf
@ -189,6 +202,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
if mi.methods.Equal == nil {
mi.methods.Equal = equal
}
}
// getUnknownBytes returns a *[]byte for the unknown fields.


@ -0,0 +1,156 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"reflect"
"sort"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/reflect/protoreflect"
piface "google.golang.org/protobuf/runtime/protoiface"
)
func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
mi.sizecacheOffset = si.sizecacheOffset
mi.unknownOffset = si.unknownOffset
mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
mi.extensionOffset = si.extensionOffset
mi.lazyOffset = si.lazyOffset
mi.presenceOffset = si.presenceOffset
mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
fields := mi.Desc.Fields()
for i := 0; i < fields.Len(); i++ {
fd := fields.Get(i)
fs := si.fieldsByNumber[fd.Number()]
if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
fs = si.oneofsByName[fd.ContainingOneof().Name()]
}
ft := fs.Type
var wiretag uint64
if !fd.IsPacked() {
wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
} else {
wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
}
var fieldOffset offset
var funcs pointerCoderFuncs
var childMessage *MessageInfo
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
fieldOffset = offsetOf(fs, mi.Exporter)
case fd.IsWeak():
fieldOffset = si.weakOffset
funcs = makeWeakMessageFieldCoder(fd)
case fd.Message() != nil && !fd.IsMap():
fieldOffset = offsetOf(fs, mi.Exporter)
if fd.IsList() {
childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
} else {
childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
}
default:
fieldOffset = offsetOf(fs, mi.Exporter)
childMessage, funcs = fieldCoder(fd, ft)
}
cf := &coderFieldInfo{
num: fd.Number(),
offset: fieldOffset,
wiretag: wiretag,
ft: ft,
tagsize: protowire.SizeVarint(wiretag),
funcs: funcs,
mi: childMessage,
validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
isPointer: (fd.Cardinality() == protoreflect.Repeated ||
fd.Kind() == protoreflect.MessageKind ||
fd.Kind() == protoreflect.GroupKind),
isRequired: fd.Cardinality() == protoreflect.Required,
presenceIndex: noPresence,
}
// TODO: Use presence for all fields.
//
// In some cases, such as maps, presence means only "might be set" rather
// than "is definitely set", but every field should have a presence bit to
// permit us to skip over definitely-unset fields at marshal time.
var hasPresence bool
hasPresence, cf.isLazy = usePresenceForField(si, fd)
if hasPresence {
cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
}
mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
mi.coderFields[cf.num] = cf
}
for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
if od := oneofs.Get(i); !od.IsSynthetic() {
mi.initOneofFieldCoders(od, si.structInfo)
}
}
if messageset.IsMessageSet(mi.Desc) {
if !mi.extensionOffset.IsValid() {
panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
}
if !mi.unknownOffset.IsValid() {
panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
}
mi.isMessageSet = true
}
sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
})
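// Decide how many low field numbers get dense (slice-indexed) lookup: keep
// extending the range while numbers stay below 16 or below twice the largest
// number included so far, i.e. stop once the numbering turns sparse.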
var maxDense protoreflect.FieldNumber
for _, cf := range mi.orderedCoderFields {
if cf.num >= 16 && cf.num >= 2*maxDense {
break
}
maxDense = cf.num
}
mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
for _, cf := range mi.orderedCoderFields {
if int(cf.num) >= len(mi.denseCoderFields) {
break
}
mi.denseCoderFields[cf.num] = cf
}
// To preserve compatibility with historic wire output, marshal oneofs last.
if mi.Desc.Oneofs().Len() > 0 {
sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
fi := fields.ByNumber(mi.orderedCoderFields[i].num)
fj := fields.ByNumber(mi.orderedCoderFields[j].num)
return order.LegacyFieldOrder(fi, fj)
})
}
mi.needsInitCheck = needsInitCheck(mi.Desc)
if mi.methods.Marshal == nil && mi.methods.Size == nil {
mi.methods.Flags |= piface.SupportMarshalDeterministic
mi.methods.Marshal = mi.marshal
mi.methods.Size = mi.size
}
if mi.methods.Unmarshal == nil {
mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
mi.methods.Unmarshal = mi.unmarshal
}
if mi.methods.CheckInitialized == nil {
mi.methods.CheckInitialized = mi.checkInitialized
}
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
if mi.methods.Equal == nil {
mi.methods.Equal = equal
}
}


@ -1,210 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego || appengine
// +build purego appengine
package impl
import (
"reflect"
"google.golang.org/protobuf/encoding/protowire"
)
func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
v := p.v.Elem().Int()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := p.v.Elem().Int()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
return b, nil
}
func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
p.v.Elem().SetInt(int64(v))
out.n = n
return out, nil
}
func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
dst.v.Elem().Set(src.v.Elem())
}
var coderEnum = pointerCoderFuncs{
size: sizeEnum,
marshal: appendEnum,
unmarshal: consumeEnum,
merge: mergeEnum,
}
func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
if p.v.Elem().Int() == 0 {
return 0
}
return sizeEnum(p, f, opts)
}
func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
if p.v.Elem().Int() == 0 {
return b, nil
}
return appendEnum(b, p, f, opts)
}
func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
if src.v.Elem().Int() != 0 {
dst.v.Elem().Set(src.v.Elem())
}
}
var coderEnumNoZero = pointerCoderFuncs{
size: sizeEnumNoZero,
marshal: appendEnumNoZero,
unmarshal: consumeEnum,
merge: mergeEnumNoZero,
}
func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return sizeEnum(pointer{p.v.Elem()}, f, opts)
}
func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
return appendEnum(b, pointer{p.v.Elem()}, f, opts)
}
func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
if p.v.Elem().IsNil() {
p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
}
return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
}
func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
if !src.v.Elem().IsNil() {
v := reflect.New(dst.v.Type().Elem().Elem())
v.Elem().Set(src.v.Elem().Elem())
dst.v.Elem().Set(v)
}
}
var coderEnumPtr = pointerCoderFuncs{
size: sizeEnumPtr,
marshal: appendEnumPtr,
unmarshal: consumeEnumPtr,
merge: mergeEnumPtr,
}
func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.v.Elem()
for i, llen := 0, s.Len(); i < llen; i++ {
size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
}
return size
}
func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.v.Elem()
for i, llen := 0, s.Len(); i < llen; i++ {
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
}
return b, nil
}
func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
s := p.v.Elem()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
rv := reflect.New(s.Type().Elem()).Elem()
rv.SetInt(int64(v))
s.Set(reflect.Append(s, rv))
b = b[n:]
}
out.n = n
return out, nil
}
if wtyp != protowire.VarintType {
return out, errUnknown
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
rv := reflect.New(s.Type().Elem()).Elem()
rv.SetInt(int64(v))
s.Set(reflect.Append(s, rv))
out.n = n
return out, nil
}
func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
}
var coderEnumSlice = pointerCoderFuncs{
size: sizeEnumSlice,
marshal: appendEnumSlice,
unmarshal: consumeEnumSlice,
merge: mergeEnumSlice,
}
func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.v.Elem()
llen := s.Len()
if llen == 0 {
return 0
}
n := 0
for i := 0; i < llen; i++ {
n += protowire.SizeVarint(uint64(s.Index(i).Int()))
}
return f.tagsize + protowire.SizeBytes(n)
}
func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.v.Elem()
llen := s.Len()
if llen == 0 {
return b, nil
}
b = protowire.AppendVarint(b, f.wiretag)
n := 0
for i := 0; i < llen; i++ {
n += protowire.SizeVarint(uint64(s.Index(i).Int()))
}
b = protowire.AppendVarint(b, uint64(n))
for i := 0; i < llen; i++ {
b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
}
return b, nil
}
var coderEnumPackedSlice = pointerCoderFuncs{
size: sizeEnumPackedSlice,
marshal: appendEnumPackedSlice,
unmarshal: consumeEnumSlice,
merge: mergeEnumSlice,
}


@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego && !appengine
// +build !purego,!appengine
package impl
// When using unsafe pointers, we can just treat enum values as int32s.


@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
return protoreflect.ValueOfString(v.Convert(stringType).String())
}
func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
// pref.Value.String never panics, so we go through an interface
// protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
if c.goType.Kind() == reflect.Slice && s == "" {


@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
AllowPartial: true,
DiscardUnknown: o.DiscardUnknown(),
Resolver: o.resolver,
NoLazyDecoding: o.NoLazyDecoding(),
}
}
@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool {
return o.flags&protoiface.UnmarshalDiscardUnknown != 0
}
func (o unmarshalOptions) IsDefault() bool {
return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 }
func (o unmarshalOptions) NoLazyDecoding() bool {
return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
}
func (o unmarshalOptions) CanBeLazy() bool {
if o.resolver != protoregistry.GlobalTypes {
return false
}
// We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set
return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
}
var lazyUnmarshalOptions = unmarshalOptions{
resolver: protoregistry.GlobalTypes,
depth: protowire.DefaultRecursionLimit,
flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
depth: protowire.DefaultRecursionLimit,
}
type unmarshalOutput struct {
@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
if flags.ProtoLegacy && mi.isMessageSet {
return unmarshalMessageSet(mi, b, p, opts)
}
lazyDecoding := LazyEnabled() // default
if opts.NoLazyDecoding() {
lazyDecoding = false // explicitly disabled
}
if mi.lazyOffset.IsValid() && lazyDecoding {
return mi.unmarshalPointerLazy(b, p, groupTag, opts)
}
return mi.unmarshalPointerEager(b, p, groupTag, opts)
}
// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy.
// The corresponding function for lazy messages is unmarshalPointerLazy in lazy.go.
func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
initialized := true
var requiredMask uint64
var exts *map[int32]ExtensionField
var presence presence
if mi.presenceOffset.IsValid() {
presence = p.Apply(mi.presenceOffset).PresenceInfo()
}
start := len(b)
for len(b) > 0 {
// Parse the tag (field number and wire type).
@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
if f.funcs.isInit != nil && !o.initialized {
initialized = false
}
if f.presenceIndex != noPresence {
presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
}
default:
// Possible extension.
if exts == nil && mi.extensionOffset.IsValid() {
@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
return out, errUnknown
}
if flags.LazyUnmarshalExtensions {
if opts.IsDefault() && x.canLazy(xt) {
if opts.CanBeLazy() && x.canLazy(xt) {
out, valid := skipExtension(b, xi, num, wtyp, opts)
switch valid {
case ValidationValid:
@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp
if n < 0 {
return out, ValidationUnknown
}
if opts.Validated() {
out.initialized = true
out.n = n
return out, ValidationValid
}
out, st := xi.validation.mi.validate(v, 0, opts)
out.n = n
return out, st


@ -10,7 +10,8 @@ import (
"sync/atomic"
"google.golang.org/protobuf/internal/flags"
proto "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/internal/protolazy"
"google.golang.org/protobuf/proto"
piface "google.golang.org/protobuf/runtime/protoiface"
)
@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
e := p.Apply(mi.extensionOffset).Extensions()
size += mi.sizeExtensions(e, opts)
}
var lazy **protolazy.XXX_lazyUnmarshalInfo
var presence presence
if mi.presenceOffset.IsValid() {
presence = p.Apply(mi.presenceOffset).PresenceInfo()
if mi.lazyOffset.IsValid() {
lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
}
}
for _, f := range mi.orderedCoderFields {
if f.funcs.size == nil {
continue
}
fptr := p.Apply(f.offset)
if f.presenceIndex != noPresence {
if !presence.Present(f.presenceIndex) {
continue
}
if f.isLazy && fptr.AtomicGetPointer().IsNil() {
if lazyFields(opts) {
size += (*lazy).SizeField(uint32(f.num))
continue
} else {
mi.lazyUnmarshal(p, f.num)
}
}
size += f.funcs.size(fptr, f, opts)
continue
}
if f.isPointer && fptr.Elem().IsNil() {
continue
}
@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
return b, err
}
}
var lazy **protolazy.XXX_lazyUnmarshalInfo
var presence presence
if mi.presenceOffset.IsValid() {
presence = p.Apply(mi.presenceOffset).PresenceInfo()
if mi.lazyOffset.IsValid() {
lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
}
}
for _, f := range mi.orderedCoderFields {
if f.funcs.marshal == nil {
continue
}
fptr := p.Apply(f.offset)
if f.presenceIndex != noPresence {
if !presence.Present(f.presenceIndex) {
continue
}
if f.isLazy {
// Be careful, this field needs to be read atomically, like for a get
if f.isPointer && fptr.AtomicGetPointer().IsNil() {
if lazyFields(opts) {
b, _ = (*lazy).AppendField(b, uint32(f.num))
continue
} else {
mi.lazyUnmarshal(p, f.num)
}
}
b, err = f.funcs.marshal(b, fptr, f, opts)
if err != nil {
return b, err
}
continue
} else if f.isPointer && fptr.Elem().IsNil() {
continue
}
b, err = f.funcs.marshal(b, fptr, f, opts)
if err != nil {
return b, err
}
continue
}
if f.isPointer && fptr.Elem().IsNil() {
continue
}
@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool {
return opts.flags&piface.MarshalDeterministic == 0
}
// lazyFields returns true if we should attempt to keep fields lazy over size and marshal.
func lazyFields(opts marshalOptions) bool {
// When deterministic marshaling is requested, force an unmarshal for lazy
// fields to produce a deterministic result, instead of passing through
// bytes lazily that may or may not match what Go Protobuf would produce.
return opts.flags&piface.MarshalDeterministic == 0
}
func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
if ext == nil {
return 0
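As the lazyFields comment above explains, deterministic marshaling bypasses the lazy passthrough. From the caller's side this is just the standard option; a sketch (assuming m is any proto.Message):

import "google.golang.org/protobuf/proto"

// Deterministic output forces lazy fields to be unmarshaled and re-encoded
// rather than having their original bytes copied through verbatim.
func marshalDeterministic(m proto.Message) ([]byte, error) {
	return proto.MarshalOptions{Deterministic: true}.Marshal(m)
}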


@ -0,0 +1,224 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"bytes"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
)
func equal(in protoiface.EqualInput) protoiface.EqualOutput {
return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
}
// equalMessage is a fast-path variant of protoreflect.equalMessage.
// It takes advantage of the internal messageState type to avoid
// unnecessary allocations and type assertions.
func equalMessage(mx, my protoreflect.Message) bool {
if mx == nil || my == nil {
return mx == my
}
if mx.Descriptor() != my.Descriptor() {
return false
}
msx, ok := mx.(*messageState)
if !ok {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
msy, ok := my.(*messageState)
if !ok {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
mi := msx.messageInfo()
miy := msy.messageInfo()
if mi != miy {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
mi.init()
// Compare regular fields: modified Message.Range code that compares two
// messages of the same type while going over the fields.
for _, ri := range mi.rangeInfos {
var fd protoreflect.FieldDescriptor
var vx, vy protoreflect.Value
switch ri := ri.(type) {
case *fieldInfo:
hx := ri.has(msx.pointer())
hy := ri.has(msy.pointer())
if hx != hy {
return false
}
if !hx {
continue
}
fd = ri.fieldDesc
vx = ri.get(msx.pointer())
vy = ri.get(msy.pointer())
case *oneofInfo:
fnx := ri.which(msx.pointer())
fny := ri.which(msy.pointer())
if fnx != fny {
return false
}
if fnx <= 0 {
continue
}
fi := mi.fields[fnx]
fd = fi.fieldDesc
vx = fi.get(msx.pointer())
vy = fi.get(msy.pointer())
}
if !equalValue(fd, vx, vy) {
return false
}
}
// Compare extensions.
// This is more complicated because mx or my could have empty/nil extension maps,
// and some populated extension map values (such as empty lists) are equal to nil extension maps.
emx := mi.extensionMap(msx.pointer())
emy := mi.extensionMap(msy.pointer())
if emx != nil {
for k, x := range *emx {
xd := x.Type().TypeDescriptor()
xv := x.Value()
var y ExtensionField
ok := false
if emy != nil {
y, ok = (*emy)[k]
}
// We need to treat empty lists as equal to nil values
if emy == nil || !ok {
if xd.IsList() && xv.List().Len() == 0 {
continue
}
return false
}
if !equalValue(xd, xv, y.Value()) {
return false
}
}
}
if emy != nil {
// emy may have extensions emx does not have, need to check them as well
for k, y := range *emy {
if emx != nil {
// emx has the field, so we already checked it
if _, ok := (*emx)[k]; ok {
continue
}
}
// Empty lists are equal to nil
if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
continue
}
// Can't be equal if the extension is populated
return false
}
}
return equalUnknown(mx.GetUnknown(), my.GetUnknown())
}
func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
// slow path
if fd.Kind() != protoreflect.MessageKind {
return vx.Equal(vy)
}
// fast path special cases
if fd.IsMap() {
if fd.MapValue().Kind() == protoreflect.MessageKind {
return equalMessageMap(vx.Map(), vy.Map())
}
return vx.Equal(vy)
}
if fd.IsList() {
return equalMessageList(vx.List(), vy.List())
}
return equalMessage(vx.Message(), vy.Message())
}
// Mostly copied from protoreflect.equalMap.
// This variant only works for maps whose values are messages.
// All other map types should be handled via Value.Equal.
func equalMessageMap(mx, my protoreflect.Map) bool {
if mx.Len() != my.Len() {
return false
}
equal := true
mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
if !my.Has(k) {
equal = false
return false
}
vy := my.Get(k)
equal = equalMessage(vx.Message(), vy.Message())
return equal
})
return equal
}
// Mostly copied from protoreflect.equalList.
// The only change is the use of equalMessage instead of protoreflect.equalValue.
func equalMessageList(lx, ly protoreflect.List) bool {
if lx.Len() != ly.Len() {
return false
}
for i := 0; i < lx.Len(); i++ {
// We only operate on messages here since equalValue will not call us in any other case.
if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
return false
}
}
return true
}
// equalUnknown compares unknown fields by direct comparison on the raw bytes
// of each individual field number.
// Copied from protoreflect.equalUnknown.
func equalUnknown(x, y protoreflect.RawFields) bool {
if len(x) != len(y) {
return false
}
if bytes.Equal([]byte(x), []byte(y)) {
return true
}
mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
for len(x) > 0 {
fnum, _, n := protowire.ConsumeField(x)
mx[fnum] = append(mx[fnum], x[:n]...)
x = x[n:]
}
for len(y) > 0 {
fnum, _, n := protowire.ConsumeField(y)
my[fnum] = append(my[fnum], y[:n]...)
y = y[n:]
}
if len(mx) != len(my) {
return false
}
for k, v1 := range mx {
if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
return false
}
}
return true
}
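The equal entry point at the top of this file is what the makeCoderMethods hunks above install as the message's protoiface Equal method; proto.Equal can then dispatch to this fast path instead of the generic reflection-based comparison. Callers keep using the public API unchanged (sketch):

import "google.golang.org/protobuf/proto"

// sameMessage uses the ordinary public entry point; when both arguments are
// messageState-backed messages of the same type, the comparison runs through
// the fast-path equalMessage above.
func sameMessage(a, b proto.Message) bool {
	return proto.Equal(a, b)
}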

vendor/google.golang.org/protobuf/internal/impl/lazy.go generated vendored Normal file

@ -0,0 +1,433 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"math/bits"
"os"
"reflect"
"sort"
"sync/atomic"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/protolazy"
"google.golang.org/protobuf/reflect/protoreflect"
preg "google.golang.org/protobuf/reflect/protoregistry"
piface "google.golang.org/protobuf/runtime/protoiface"
)
var enableLazy int32 = func() int32 {
if os.Getenv("GOPROTODEBUG") == "nolazy" {
return 0
}
return 1
}()
// EnableLazyUnmarshal enables or disables lazy unmarshaling.
func EnableLazyUnmarshal(enable bool) {
if enable {
atomic.StoreInt32(&enableLazy, 1)
return
}
atomic.StoreInt32(&enableLazy, 0)
}
// LazyEnabled reports whether lazy unmarshalling is currently enabled.
func LazyEnabled() bool {
return atomic.LoadInt32(&enableLazy) != 0
}
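// Lazy unmarshaling can thus be disabled either at run time via
// EnableLazyUnmarshal(false) or at process start by setting
// GOPROTODEBUG=nolazy in the environment.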
// UnmarshalField unmarshals a field in a message.
func UnmarshalField(m interface{}, num protowire.Number) {
switch m := m.(type) {
case *messageState:
m.messageInfo().lazyUnmarshal(m.pointer(), num)
case *messageReflectWrapper:
m.messageInfo().lazyUnmarshal(m.pointer(), num)
default:
panic(fmt.Sprintf("unsupported wrapper type %T", m))
}
}
func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
var f *coderFieldInfo
if int(num) < len(mi.denseCoderFields) {
f = mi.denseCoderFields[num]
} else {
f = mi.coderFields[num]
}
if f == nil {
panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
}
lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
if !found && multipleEntries == nil {
panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
}
// The actual pointer in the message cannot be set until the whole struct is filled in, otherwise we will have races.
// Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil.
fp := pointerOfValue(reflect.New(f.ft))
if multipleEntries != nil {
for _, entry := range multipleEntries {
mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
}
} else {
mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
}
p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
}
func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
opts := lazyUnmarshalOptions
opts.flags |= flags
for len(b) > 0 {
// Parse the tag (field number and wire type).
var tag uint64
if b[0] < 0x80 {
tag = uint64(b[0])
b = b[1:]
} else if len(b) >= 2 && b[1] < 128 {
tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
b = b[2:]
} else {
var n int
tag, n = protowire.ConsumeVarint(b)
if n < 0 {
return errors.New("invalid wire data")
}
b = b[n:]
}
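// Fast-path example: field 70 with wire type 2 has tag 70<<3|2 = 562,
// encoded as the varint bytes 0xb2 0x04; the two-byte branch above computes
// uint64(0xb2&0x7f) + uint64(0x04)<<7 = 50 + 512 = 562.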
var num protowire.Number
if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
return errors.New("invalid wire data")
} else {
num = protowire.Number(n)
}
wtyp := protowire.Type(tag & 7)
if num == f.num {
o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
if err == nil {
b = b[o.n:]
continue
}
if err != errUnknown {
return err
}
}
n := protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
return errors.New("invalid wire data")
}
b = b[n:]
}
return nil
}
func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
fmi := f.validation.mi
if fmi == nil {
fd := mi.Desc.Fields().ByNumber(f.num)
if fd == nil || !fd.IsWeak() {
return out, ValidationUnknown
}
messageName := fd.Message().FullName()
messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
if err != nil {
return out, ValidationUnknown
}
var ok bool
fmi, ok = messageType.(*MessageInfo)
if !ok {
return out, ValidationUnknown
}
}
fmi.init()
switch f.validation.typ {
case validationTypeMessage:
if wtyp != protowire.BytesType {
return out, ValidationWrongWireType
}
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, ValidationInvalid
}
out, st := fmi.validate(v, 0, opts)
out.n = n
return out, st
case validationTypeGroup:
if wtyp != protowire.StartGroupType {
return out, ValidationWrongWireType
}
out, st := fmi.validate(b, f.num, opts)
return out, st
default:
return out, ValidationUnknown
}
}
// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
// specifically handles lazy unmarshalling. It expects lazyOffset and
// presenceOffset to both be valid.
func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
initialized := true
var requiredMask uint64
var lazy **protolazy.XXX_lazyUnmarshalInfo
var presence presence
var lazyIndex []protolazy.IndexEntry
var lastNum protowire.Number
outOfOrder := false
lazyDecode := false
presence = p.Apply(mi.presenceOffset).PresenceInfo()
lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
if !presence.AnyPresent(mi.presenceSize) {
if opts.CanBeLazy() {
// If the message contains existing data, we need to merge into it.
// Lazy unmarshaling doesn't merge, so only enable it when the
// message is empty (no presence bits are set).
lazyDecode = true
if *lazy == nil {
*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
}
(*lazy).SetUnmarshalFlags(opts.flags)
if !opts.AliasBuffer() {
// Make a copy of the buffer for lazy unmarshaling.
// Set the AliasBuffer flag so recursive unmarshal
// operations reuse the copy.
b = append([]byte{}, b...)
opts.flags |= piface.UnmarshalAliasBuffer
}
(*lazy).SetBuffer(b)
}
}
// Track special handling of lazy fields.
//
// In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
// In the event that validation for a field fails, this map tracks handling of the field.
type lazyAction uint8
const (
lazyValidateOnly lazyAction = iota // validate the field only
lazyUnmarshalNow // eagerly unmarshal the field
lazyUnmarshalLater // unmarshal the field after the message is fully processed
)
var lazyFields map[*coderFieldInfo]lazyAction
var exts *map[int32]ExtensionField
start := len(b)
pos := 0
for len(b) > 0 {
// Parse the tag (field number and wire type).
var tag uint64
if b[0] < 0x80 {
tag = uint64(b[0])
b = b[1:]
} else if len(b) >= 2 && b[1] < 128 {
tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
b = b[2:]
} else {
var n int
tag, n = protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
b = b[n:]
}
var num protowire.Number
if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
return out, errors.New("invalid field number")
} else {
num = protowire.Number(n)
}
wtyp := protowire.Type(tag & 7)
if wtyp == protowire.EndGroupType {
if num != groupTag {
return out, errors.New("mismatching end group marker")
}
groupTag = 0
break
}
var f *coderFieldInfo
if int(num) < len(mi.denseCoderFields) {
f = mi.denseCoderFields[num]
} else {
f = mi.coderFields[num]
}
var n int
err := errUnknown
discardUnknown := false
Field:
switch {
case f != nil:
if f.funcs.unmarshal == nil {
break
}
if f.isLazy && lazyDecode {
switch {
case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
// Attempt to validate this field and leave it for later lazy unmarshaling.
o, valid := mi.skipField(b, f, wtyp, opts)
switch valid {
case ValidationValid:
// Skip over the valid field and continue.
err = nil
presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
requiredMask |= f.validation.requiredBit
if !o.initialized {
initialized = false
}
n = o.n
break Field
case ValidationInvalid:
return out, errors.New("invalid proto wire format")
case ValidationWrongWireType:
break Field
case ValidationUnknown:
if lazyFields == nil {
lazyFields = make(map[*coderFieldInfo]lazyAction)
}
if presence.Present(f.presenceIndex) {
// We were unable to determine if the field is valid or not,
// and we've already skipped over at least one instance of this
// field. Clear the presence bit (so if we stop decoding early,
// we don't leave a partially-initialized field around) and flag
// the field for unmarshaling before we return.
presence.ClearPresent(f.presenceIndex)
lazyFields[f] = lazyUnmarshalLater
discardUnknown = true
break Field
} else {
// We were unable to determine if the field is valid or not,
// but this is the first time we've seen it. Flag it as needing
// eager unmarshaling and fall through to the eager unmarshal case below.
lazyFields[f] = lazyUnmarshalNow
}
}
case lazyFields[f] == lazyUnmarshalLater:
// This field will be unmarshaled in a separate pass below.
// Skip over it here.
discardUnknown = true
break Field
default:
// Eagerly unmarshal the field.
}
}
if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
if p.Apply(f.offset).AtomicGetPointer().IsNil() {
mi.lazyUnmarshal(p, f.num)
}
}
var o unmarshalOutput
o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
n = o.n
if err != nil {
break
}
requiredMask |= f.validation.requiredBit
if f.funcs.isInit != nil && !o.initialized {
initialized = false
}
if f.presenceIndex != noPresence {
presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
}
default:
// Possible extension.
if exts == nil && mi.extensionOffset.IsValid() {
exts = p.Apply(mi.extensionOffset).Extensions()
if *exts == nil {
*exts = make(map[int32]ExtensionField)
}
}
if exts == nil {
break
}
var o unmarshalOutput
o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
if err != nil {
break
}
n = o.n
if !o.initialized {
initialized = false
}
}
if err != nil {
if err != errUnknown {
return out, err
}
n = protowire.ConsumeFieldValue(num, wtyp, b)
if n < 0 {
return out, errDecode
}
if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
u := mi.mutableUnknownBytes(p)
*u = protowire.AppendTag(*u, num, wtyp)
*u = append(*u, b[:n]...)
}
}
b = b[n:]
end := start - len(b)
if lazyDecode && f != nil && f.isLazy {
if num != lastNum {
lazyIndex = append(lazyIndex, protolazy.IndexEntry{
FieldNum: uint32(num),
Start: uint32(pos),
End: uint32(end),
})
} else {
i := len(lazyIndex) - 1
lazyIndex[i].End = uint32(end)
lazyIndex[i].MultipleContiguous = true
}
}
if num < lastNum {
outOfOrder = true
}
pos = end
lastNum = num
}
if groupTag != 0 {
return out, errors.New("missing end group marker")
}
if lazyFields != nil {
// Some fields failed validation, and now need to be unmarshaled.
for f, action := range lazyFields {
if action != lazyUnmarshalLater {
continue
}
initialized = false
if *lazy == nil {
*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
}
if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
return out, err
}
presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
}
}
if lazyDecode {
if outOfOrder {
sort.Slice(lazyIndex, func(i, j int) bool {
return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
(lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
lazyIndex[i].Start < lazyIndex[j].Start)
})
}
if *lazy == nil {
*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
}
(*lazy).SetIndex(lazyIndex)
}
if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
initialized = false
}
if initialized {
out.initialized = true
}
out.n = start - len(b)
return out, nil
}


@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
func (x placeholderExtension) HasOptionalKeyword() bool { return false }
func (x placeholderExtension) IsExtension() bool { return true }
func (x placeholderExtension) IsWeak() bool { return false }
func (x placeholderExtension) IsLazy() bool { return false }
func (x placeholderExtension) IsPacked() bool { return false }
func (x placeholderExtension) IsList() bool { return false }
func (x placeholderExtension) IsMap() bool { return false }


@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
if src.IsNil() {
return
}
var presenceSrc presence
var presenceDst presence
if mi.presenceOffset.IsValid() {
presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
}
for _, f := range mi.orderedCoderFields {
if f.funcs.merge == nil {
continue
}
sfptr := src.Apply(f.offset)
if f.presenceIndex != noPresence {
if !presenceSrc.Present(f.presenceIndex) {
continue
}
dfptr := dst.Apply(f.offset)
if f.isLazy {
if sfptr.AtomicGetPointer().IsNil() {
mi.lazyUnmarshal(src, f.num)
}
if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
mi.lazyUnmarshal(dst, f.num)
}
}
f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
continue
}
if f.isPointer && sfptr.Elem().IsNil() {
continue
}


@ -30,8 +30,8 @@ type MessageInfo struct {
// Desc is the underlying message descriptor type and must be populated.
Desc protoreflect.MessageDescriptor
// Exporter must be provided in a purego environment in order to provide
// access to unexported fields.
// Deprecated: Exporter will be removed the next time we bump
// protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
Exporter exporter
// OneofWrappers is list of pointers to oneof wrapper struct types.
@ -79,6 +79,9 @@ func (mi *MessageInfo) initOnce() {
if mi.initDone == 1 {
return
}
if opaqueInitHook(mi) {
return
}
t := mi.GoReflectType
if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
@ -133,6 +136,9 @@ type structInfo struct {
extensionOffset offset
extensionType reflect.Type
lazyOffset offset
presenceOffset offset
fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField
oneofsByName map[protoreflect.Name]reflect.StructField
oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber
@ -145,6 +151,8 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
weakOffset: invalidOffset,
unknownOffset: invalidOffset,
extensionOffset: invalidOffset,
lazyOffset: invalidOffset,
presenceOffset: invalidOffset,
fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{},
oneofsByName: map[protoreflect.Name]reflect.StructField{},
@ -175,6 +183,10 @@ fieldLoop:
si.extensionOffset = offsetOf(f, mi.Exporter)
si.extensionType = f.Type
}
case "lazyFields", "XXX_lazyUnmarshalInfo":
si.lazyOffset = offsetOf(f, mi.Exporter)
case "XXX_presence":
si.presenceOffset = offsetOf(f, mi.Exporter)
default:
for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
if len(s) > 0 && strings.Trim(s, "0123456789") == "" {

View file

@ -0,0 +1,614 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"fmt"
"math"
"reflect"
"strings"
"sync/atomic"
"google.golang.org/protobuf/reflect/protoreflect"
)
type opaqueStructInfo struct {
structInfo
}
// isOpaque determines whether a protobuf message type is on the Opaque API. It
// checks whether the type is a Go struct that protoc-gen-go would generate.
//
// This function only detects newly generated messages from the v2
// implementation of protoc-gen-go. It is unable to classify generated messages
// that are too old or those that are generated by a different generator
// such as protoc-gen-gogo.
func isOpaque(t reflect.Type) bool {
// The current detection mechanism is to simply check the first field
// for a struct tag with the "protogen" key.
if t.Kind() == reflect.Struct && t.NumField() > 0 {
pgt := t.Field(0).Tag.Get("protogen")
return strings.HasPrefix(pgt, "opaque.")
}
return false
}
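
A minimal, self-contained sketch of the tag probe above. The struct and its tag value "opaque.v1" are hypothetical stand-ins; only the "opaque." prefix matters to isOpaque:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// exampleOpaque is a hypothetical stand-in for a generated opaque struct.
type exampleOpaque struct {
	XXX_presence [1]uint32 `protogen:"opaque.v1"`
}

func main() {
	t := reflect.TypeOf(exampleOpaque{})
	pgt := t.Field(0).Tag.Get("protogen")
	fmt.Println(strings.HasPrefix(pgt, "opaque.")) // true
}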
func opaqueInitHook(mi *MessageInfo) bool {
mt := mi.GoReflectType.Elem()
si := opaqueStructInfo{
structInfo: mi.makeStructInfo(mt),
}
if !isOpaque(mt) {
return false
}
defer atomic.StoreUint32(&mi.initDone, 1)
mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
fds := mi.Desc.Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
fs := si.fieldsByNumber[fd.Number()]
var fi fieldInfo
usePresence, _ := usePresenceForField(si, fd)
switch {
case fd.IsWeak():
// Weak fields are no different for opaque.
fi = fieldInfoForWeakMessage(fd, si.weakOffset)
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
// Oneofs are no different for opaque.
fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
case fd.IsMap():
fi = mi.fieldInfoForMapOpaque(si, fd, fs)
case fd.IsList() && fd.Message() == nil && usePresence:
fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
case fd.IsList() && fd.Message() == nil:
// Proto3 lists without presence can use the same access methods as open structs.
fi = fieldInfoForList(fd, fs, mi.Exporter)
case fd.IsList() && usePresence:
fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
case fd.IsList():
// Proto3 opaque message lists that do not need a presence bitmap.
// Different representation than the open struct API, but the same logic.
fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
case fd.Message() != nil && usePresence:
fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
case fd.Message() != nil:
// Proto3 messages without presence can use the same access methods as open structs.
fi = fieldInfoForMessage(fd, fs, mi.Exporter)
default:
fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
}
mi.fields[fd.Number()] = &fi
}
mi.oneofs = map[protoreflect.Name]*oneofInfo{}
for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
od := mi.Desc.Oneofs().Get(i)
if !od.IsSynthetic() {
mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter)
}
}
mi.denseFields = make([]*fieldInfo, fds.Len()*2)
for i := 0; i < fds.Len(); i++ {
if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
}
}
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
i += od.Fields().Len()
} else {
mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
i++
}
}
mi.makeExtensionFieldsFunc(mt, si.structInfo)
mi.makeUnknownFieldsFunc(mt, si.structInfo)
mi.makeOpaqueCoderMethods(mt, si)
mi.makeFieldTypes(si.structInfo)
return true
}
func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Map {
panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
}
fieldOffset := offsetOf(fs, mi.Exporter)
conv := NewConverter(ft, fd)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
// Don't bother checking presence bits, since we need to
// look at the map length even if the presence bit is set.
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return rv.Len() > 0
},
clear: func(p pointer) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if rv.Len() == 0 {
return conv.Zero()
}
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
pv := conv.GoValueOf(v)
if pv.IsNil() {
panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(pv)
},
mutable: func(p pointer) protoreflect.Value {
v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if v.IsNil() {
v.Set(reflect.MakeMap(fs.Type))
}
return conv.PBValueOf(v)
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Slice {
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(reflect.PtrTo(ft), fd)
fieldOffset := offsetOf(fs, mi.Exporter)
index, _ := presenceIndex(mi.Desc, fd)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return rv.Len() > 0
},
clear: func(p pointer) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
if rv.Elem().Len() == 0 {
return conv.Zero()
}
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
pv := conv.GoValueOf(v)
if pv.IsNil() {
panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
}
mi.setPresent(p, index)
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(pv.Elem())
},
mutable: func(p pointer) protoreflect.Value {
mi.setPresent(p, index)
return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs, mi.Exporter)
index, _ := presenceIndex(mi.Desc, fd)
fieldNumber := fd.Number()
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
if !mi.present(p, index) {
return false
}
sp := p.Apply(fieldOffset).AtomicGetPointer()
if sp.IsNil() {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
sp = p.Apply(fieldOffset).AtomicGetPointer()
}
rv := sp.AsValueOf(fs.Type.Elem())
return rv.Elem().Len() > 0
},
clear: func(p pointer) {
fp := p.Apply(fieldOffset)
sp := fp.AtomicGetPointer()
if sp.IsNil() {
sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
mi.setPresent(p, index)
}
rv := sp.AsValueOf(fs.Type.Elem())
rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
if !mi.present(p, index) {
return conv.Zero()
}
sp := p.Apply(fieldOffset).AtomicGetPointer()
if sp.IsNil() {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
sp = p.Apply(fieldOffset).AtomicGetPointer()
}
rv := sp.AsValueOf(fs.Type.Elem())
if rv.Elem().Len() == 0 {
return conv.Zero()
}
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
fp := p.Apply(fieldOffset)
sp := fp.AtomicGetPointer()
if sp.IsNil() {
sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
mi.setPresent(p, index)
}
rv := sp.AsValueOf(fs.Type.Elem())
val := conv.GoValueOf(v)
if val.IsNil() {
panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
} else {
rv.Elem().Set(val.Elem())
}
},
mutable: func(p pointer) protoreflect.Value {
fp := p.Apply(fieldOffset)
sp := fp.AtomicGetPointer()
if sp.IsNil() {
if mi.present(p, index) {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
sp = p.Apply(fieldOffset).AtomicGetPointer()
} else {
sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
mi.setPresent(p, index)
}
}
rv := sp.AsValueOf(fs.Type.Elem())
return conv.PBValueOf(rv)
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs, mi.Exporter)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
sp := p.Apply(fieldOffset).AtomicGetPointer()
if sp.IsNil() {
return false
}
rv := sp.AsValueOf(fs.Type.Elem())
return rv.Elem().Len() > 0
},
clear: func(p pointer) {
sp := p.Apply(fieldOffset).AtomicGetPointer()
if !sp.IsNil() {
rv := sp.AsValueOf(fs.Type.Elem())
rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
}
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
sp := p.Apply(fieldOffset).AtomicGetPointer()
if sp.IsNil() {
return conv.Zero()
}
rv := sp.AsValueOf(fs.Type.Elem())
if rv.Elem().Len() == 0 {
return conv.Zero()
}
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if rv.IsNil() {
rv.Set(reflect.New(fs.Type.Elem()))
}
val := conv.GoValueOf(v)
if val.IsNil() {
panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
} else {
rv.Elem().Set(val.Elem())
}
},
mutable: func(p pointer) protoreflect.Value {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if rv.IsNil() {
rv.Set(reflect.New(fs.Type.Elem()))
}
return conv.PBValueOf(rv)
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
nullable := fd.HasPresence()
if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
nullable = true
}
deref := false
if nullable && ft.Kind() == reflect.Ptr {
ft = ft.Elem()
deref = true
}
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs, mi.Exporter)
index, _ := presenceIndex(mi.Desc, fd)
var getter func(p pointer) protoreflect.Value
if !nullable {
getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
} else {
getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
}
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
if nullable {
return mi.present(p, index)
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
switch rv.Kind() {
case reflect.Bool:
return rv.Bool()
case reflect.Int32, reflect.Int64:
return rv.Int() != 0
case reflect.Uint32, reflect.Uint64:
return rv.Uint() != 0
case reflect.Float32, reflect.Float64:
return rv.Float() != 0 || math.Signbit(rv.Float())
case reflect.String, reflect.Slice:
return rv.Len() > 0
default:
panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
}
},
clear: func(p pointer) {
if nullable {
mi.clearPresent(p, index)
}
// This is only valuable for bytes and strings, but we do it unconditionally.
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
get: getter,
// TODO: Implement unsafe fast path for set?
set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if deref {
if rv.IsNil() {
rv.Set(reflect.New(ft))
}
rv = rv.Elem()
}
rv.Set(conv.GoValueOf(v))
if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
rv.Set(emptyBytes)
}
if nullable {
mi.setPresent(p, index)
}
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
fieldOffset := offsetOf(fs, mi.Exporter)
index, _ := presenceIndex(mi.Desc, fd)
fieldNumber := fd.Number()
elemType := fs.Type.Elem()
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
return mi.present(p, index)
},
clear: func(p pointer) {
mi.clearPresent(p, index)
p.Apply(fieldOffset).AtomicSetNilPointer()
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
fp := p.Apply(fieldOffset)
mp := fp.AtomicGetPointer()
if mp.IsNil() {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
mp = fp.AtomicGetPointer()
}
rv := mp.AsValueOf(elemType)
return conv.PBValueOf(rv)
},
set: func(p pointer, v protoreflect.Value) {
val := pointerOfValue(conv.GoValueOf(v))
if val.IsNil() {
panic("invalid nil pointer")
}
p.Apply(fieldOffset).AtomicSetPointer(val)
mi.setPresent(p, index)
},
mutable: func(p pointer) protoreflect.Value {
fp := p.Apply(fieldOffset)
mp := fp.AtomicGetPointer()
if mp.IsNil() {
if mi.present(p, index) {
// Lazily unmarshal this field.
mi.lazyUnmarshal(p, fieldNumber)
mp = fp.AtomicGetPointer()
} else {
mp = pointerOfValue(conv.GoValueOf(conv.New()))
fp.AtomicSetPointer(mp)
mi.setPresent(p, index)
}
}
return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
},
newMessage: func() protoreflect.Message {
return conv.New().Message()
},
newField: func() protoreflect.Value {
return conv.New()
},
}
}
// A presenceList wraps a List, updating presence bits as necessary when the
// list contents change.
type presenceList struct {
pvalueList
setPresence func(bool)
}
type pvalueList interface {
protoreflect.List
//Unwrapper
}
func (list presenceList) Append(v protoreflect.Value) {
list.pvalueList.Append(v)
list.setPresence(true)
}
func (list presenceList) Truncate(i int) {
list.pvalueList.Truncate(i)
list.setPresence(i > 0)
}
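
A toy analogue of presenceList using a plain slice instead of a protoreflect.List, showing how the presence flag tracks the empty/non-empty transitions driven by Append and Truncate (names here are invented):

package main

import "fmt"

type toyList struct {
	items   []int
	present *bool
}

func (l *toyList) Append(v int)   { l.items = append(l.items, v); *l.present = true }
func (l *toyList) Truncate(n int) { l.items = l.items[:n]; *l.present = n > 0 }

func main() {
	p := false
	l := &toyList{present: &p}
	l.Append(1)
	fmt.Println(p) // true
	l.Truncate(0)
	fmt.Println(p) // false
}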
// presenceIndex returns the index to pass to presence functions.
//
// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
found := false
var index, numIndices uint32
for i := 0; i < md.Fields().Len(); i++ {
f := md.Fields().Get(i)
if f == fd {
found = true
index = numIndices
}
if f.ContainingOneof() == nil || isLastOneofField(f) {
numIndices++
}
}
if !found {
panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
}
return index, presenceSize(numIndices)
}
func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
fields := fd.ContainingOneof().Fields()
return fields.Get(fields.Len()-1) == fd
}
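
A toy restatement of the indexing rule above, with plain data in place of protoreflect descriptors: every field advances the counter, except that all members of one (non-synthetic) oneof share a single slot, counted at the oneof's last member:

package main

import "fmt"

type toyField struct {
	name        string
	oneof       string // "" when the field is not in a oneof
	lastInOneof bool
}

// toyPresenceIndex mirrors the counting rule of presenceIndex above.
func toyPresenceIndex(fields []toyField, target string) (index, size uint32) {
	var num uint32
	for _, f := range fields {
		if f.name == target {
			index = num
		}
		if f.oneof == "" || f.lastInOneof {
			num++
		}
	}
	return index, num
}

func main() {
	fields := []toyField{
		{name: "a"},
		{name: "b", oneof: "o"},
		{name: "c", oneof: "o", lastInOneof: true},
		{name: "d"},
	}
	fmt.Println(toyPresenceIndex(fields, "b")) // 1 3 (shares the oneof's slot with "c")
	fmt.Println(toyPresenceIndex(fields, "d")) // 2 3
}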
func (mi *MessageInfo) setPresent(p pointer, index uint32) {
p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
}
func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
}
func (mi *MessageInfo) present(p pointer, index uint32) bool {
return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
}
// usePresenceForField implements the somewhat intricate logic of when
// the presence bitmap is used for a field. The main rule is that a
// field that is optional or that can be lazy will use the presence
// bit; in proto2, maps also have a presence bit. It also reports
// whether the field can ever be lazy, which is true if we have a
// lazyOffset and the field is a message or a slice of messages. A
// field that is lazy always needs a presence bit. Oneofs are not
// lazy and do not use presence, unless they are a synthetic oneof,
// which is a proto3 optional field. For proto3 optionals, we use the
// presence bit, and they can also be lazy when applicable (a message).
func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
// Non-oneof scalar fields with explicit field presence use the presence array.
usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
return false, false
case fd.IsWeak():
return false, false
case fd.IsMap():
return false, false
case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
return hasLazyField, hasLazyField
default:
return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
}
}

View file

@ -0,0 +1,132 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-types. DO NOT EDIT.
package impl
import (
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
)
func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
ft := fs.Type
if ft.Kind() == reflect.Ptr {
ft = ft.Elem()
}
if fd.Kind() == protoreflect.EnumKind {
// Enums for nullable opaque types.
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return conv.PBValueOf(rv)
}
}
switch ft.Kind() {
case reflect.Bool:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bool()
return protoreflect.ValueOfBool(*x)
}
case reflect.Int32:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int32()
return protoreflect.ValueOfInt32(*x)
}
case reflect.Uint32:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint32()
return protoreflect.ValueOfUint32(*x)
}
case reflect.Int64:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int64()
return protoreflect.ValueOfInt64(*x)
}
case reflect.Uint64:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint64()
return protoreflect.ValueOfUint64(*x)
}
case reflect.Float32:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float32()
return protoreflect.ValueOfFloat32(*x)
}
case reflect.Float64:
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float64()
return protoreflect.ValueOfFloat64(*x)
}
case reflect.String:
if fd.Kind() == protoreflect.BytesKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).StringPtr()
if *x == nil {
return conv.Zero()
}
if len(**x) == 0 {
return protoreflect.ValueOfBytes(nil)
}
return protoreflect.ValueOfBytes([]byte(**x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).StringPtr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfString(**x)
}
case reflect.Slice:
if fd.Kind() == protoreflect.StringKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
return protoreflect.ValueOfString(string(*x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() || !mi.present(p, index) {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
return protoreflect.ValueOfBytes(*x)
}
}
panic("unexpected protobuf kind: " + ft.Kind().String())
}
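
The pattern behind these generated getters, reduced to a toy: resolve the field offset once at setup time and build a typed accessor, so the per-call path is a single typed load with no reflect.Value construction. The struct below is invented for illustration:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct{ x int32 }

func main() {
	// Resolve the field offset once, at setup time...
	off := reflect.TypeOf(msg{}).Field(0).Offset
	// ...then every get is one typed load, keeping reflection off the hot path.
	get := func(p unsafe.Pointer) int32 {
		return *(*int32)(unsafe.Pointer(uintptr(p) + off))
	}
	m := msg{x: 7}
	fmt.Println(get(unsafe.Pointer(&m))) // 7
}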

View file

@ -205,6 +205,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
case fd.IsList():
if fd.Enum() != nil || fd.Message() != nil {
ft = fs.Type.Elem()
if ft.Kind() == reflect.Slice {
ft = ft.Elem()
}
}
isMessage = fd.Message() != nil
case fd.Enum() != nil:

View file

@ -256,6 +256,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
ft := fs.Type
nullable := fd.HasPresence()
isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
var getter func(p pointer) protoreflect.Value
if nullable {
if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
// This never occurs for generated message types.
@ -268,19 +269,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
}
}
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
fieldOffset := offsetOf(fs, x)
// Generate specialized getter functions to avoid going through reflect.Value
if nullable {
getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
} else {
getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
}
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable {
return !rv.IsNil()
return !p.Apply(fieldOffset).Elem().IsNil()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
switch rv.Kind() {
case reflect.Bool:
return rv.Bool()
@ -300,21 +307,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
get: func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable {
if rv.IsNil() {
return conv.Zero()
}
if rv.Kind() == reflect.Ptr {
rv = rv.Elem()
}
}
return conv.PBValueOf(rv)
},
get: getter,
// TODO: Implement unsafe fast path for set?
set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable && rv.Kind() == reflect.Ptr {

View file

@ -0,0 +1,273 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by generate-types. DO NOT EDIT.
package impl
import (
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
)
func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
ft := fs.Type
if ft.Kind() == reflect.Ptr {
ft = ft.Elem()
}
if fd.Kind() == protoreflect.EnumKind {
elemType := fs.Type.Elem()
// Enums for nullable types.
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
if rv.IsNil() {
return conv.Zero()
}
return conv.PBValueOf(rv.Elem())
}
}
switch ft.Kind() {
case reflect.Bool:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).BoolPtr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfBool(**x)
}
case reflect.Int32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int32Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfInt32(**x)
}
case reflect.Uint32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint32Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfUint32(**x)
}
case reflect.Int64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int64Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfInt64(**x)
}
case reflect.Uint64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint64Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfUint64(**x)
}
case reflect.Float32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float32Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfFloat32(**x)
}
case reflect.Float64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float64Ptr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfFloat64(**x)
}
case reflect.String:
if fd.Kind() == protoreflect.BytesKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).StringPtr()
if *x == nil {
return conv.Zero()
}
if len(**x) == 0 {
return protoreflect.ValueOfBytes(nil)
}
return protoreflect.ValueOfBytes([]byte(**x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).StringPtr()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfString(**x)
}
case reflect.Slice:
if fd.Kind() == protoreflect.StringKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
if len(*x) == 0 {
return conv.Zero()
}
return protoreflect.ValueOfString(string(*x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
if *x == nil {
return conv.Zero()
}
return protoreflect.ValueOfBytes(*x)
}
}
panic("unexpected protobuf kind: " + ft.Kind().String())
}
func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
ft := fs.Type
if fd.Kind() == protoreflect.EnumKind {
// Enums for non-nullable types.
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
return conv.PBValueOf(rv)
}
}
switch ft.Kind() {
case reflect.Bool:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bool()
return protoreflect.ValueOfBool(*x)
}
case reflect.Int32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int32()
return protoreflect.ValueOfInt32(*x)
}
case reflect.Uint32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint32()
return protoreflect.ValueOfUint32(*x)
}
case reflect.Int64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Int64()
return protoreflect.ValueOfInt64(*x)
}
case reflect.Uint64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Uint64()
return protoreflect.ValueOfUint64(*x)
}
case reflect.Float32:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float32()
return protoreflect.ValueOfFloat32(*x)
}
case reflect.Float64:
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Float64()
return protoreflect.ValueOfFloat64(*x)
}
case reflect.String:
if fd.Kind() == protoreflect.BytesKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).String()
if len(*x) == 0 {
return protoreflect.ValueOfBytes(nil)
}
return protoreflect.ValueOfBytes([]byte(*x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).String()
return protoreflect.ValueOfString(*x)
}
case reflect.Slice:
if fd.Kind() == protoreflect.StringKind {
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
return protoreflect.ValueOfString(string(*x))
}
}
return func(p pointer) protoreflect.Value {
if p.IsNil() {
return conv.Zero()
}
x := p.Apply(fieldOffset).Bytes()
return protoreflect.ValueOfBytes(*x)
}
}
panic("unexpected protobuf kind: " + ft.Kind().String())
}

View file

@ -1,215 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego || appengine
// +build purego appengine
package impl
import (
"fmt"
"reflect"
"sync"
)
const UnsafeEnabled = false
// Pointer is an opaque pointer type.
type Pointer any
// offset represents the offset to a struct field, accessible from a pointer.
// The offset is the field index into a struct.
type offset struct {
index int
export exporter
}
// offsetOf returns a field offset for the struct field.
func offsetOf(f reflect.StructField, x exporter) offset {
if len(f.Index) != 1 {
panic("embedded structs are not supported")
}
if f.PkgPath == "" {
return offset{index: f.Index[0]} // field is already exported
}
if x == nil {
panic("exporter must be provided for unexported field")
}
return offset{index: f.Index[0], export: x}
}
// IsValid reports whether the offset is valid.
func (f offset) IsValid() bool { return f.index >= 0 }
// invalidOffset is an invalid field offset.
var invalidOffset = offset{index: -1}
// zeroOffset is a noop when calling pointer.Apply.
var zeroOffset = offset{index: 0}
// pointer is an abstract representation of a pointer to a struct or field.
type pointer struct{ v reflect.Value }
// pointerOf returns p as a pointer.
func pointerOf(p Pointer) pointer {
return pointerOfIface(p)
}
// pointerOfValue returns v as a pointer.
func pointerOfValue(v reflect.Value) pointer {
return pointer{v: v}
}
// pointerOfIface returns the pointer portion of an interface.
func pointerOfIface(v any) pointer {
return pointer{v: reflect.ValueOf(v)}
}
// IsNil reports whether the pointer is nil.
func (p pointer) IsNil() bool {
return p.v.IsNil()
}
// Apply adds an offset to the pointer to derive a new pointer
// to a specified field. The current pointer must be pointing at a struct.
func (p pointer) Apply(f offset) pointer {
if f.export != nil {
if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
return pointer{v: v}
}
}
return pointer{v: p.v.Elem().Field(f.index).Addr()}
}
// AsValueOf treats p as a pointer to an object of type t and returns the value.
// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
if got := p.v.Type().Elem(); got != t {
panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
}
return p.v
}
// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
// It is equivalent to p.AsValueOf(t).Interface()
func (p pointer) AsIfaceOf(t reflect.Type) any {
return p.AsValueOf(t).Interface()
}
func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
func (p pointer) String() *string { return p.v.Interface().(*string) }
func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
func (p pointer) Extensions() *map[int32]ExtensionField {
return p.v.Interface().(*map[int32]ExtensionField)
}
func (p pointer) Elem() pointer {
return pointer{v: p.v.Elem()}
}
// PointerSlice copies []*T from p as a new []pointer.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) PointerSlice() []pointer {
// TODO: reconsider this
if p.v.IsNil() {
return nil
}
n := p.v.Elem().Len()
s := make([]pointer, n)
for i := 0; i < n; i++ {
s[i] = pointer{v: p.v.Elem().Index(i)}
}
return s
}
// AppendPointerSlice appends v to p, which must be a []*T.
func (p pointer) AppendPointerSlice(v pointer) {
sp := p.v.Elem()
sp.Set(reflect.Append(sp, v.v))
}
// SetPointer sets *p to v.
func (p pointer) SetPointer(v pointer) {
p.v.Elem().Set(v.v)
}
func growSlice(p pointer, addCap int) {
// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
in := p.v.Elem()
out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
reflect.Copy(out, in)
p.v.Elem().Set(out)
}
func (p pointer) growBoolSlice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growInt32Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growUint32Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growInt64Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growUint64Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growFloat64Slice(addCap int) {
growSlice(p, addCap)
}
func (p pointer) growFloat32Slice(addCap int) {
growSlice(p, addCap)
}
func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
func (ms *messageState) pointer() pointer { panic("not supported") }
func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
type atomicNilMessage struct {
once sync.Once
m messageReflectWrapper
}
func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
m.once.Do(func() {
m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
m.m.mi = mi
})
return &m.m
}

View file

@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego && !appengine
// +build !purego,!appengine
package impl
import (
"reflect"
"sync/atomic"
"unsafe"
"google.golang.org/protobuf/internal/protolazy"
)
const UnsafeEnabled = true
@ -114,6 +113,13 @@ func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p)
func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
}
func (p pointer) PresenceInfo() presence {
return presence{P: p.p}
}
func (p pointer) Elem() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}

View file

@ -0,0 +1,42 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"sync/atomic"
"unsafe"
)
func (p pointer) AtomicGetPointer() pointer {
return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
}
func (p pointer) AtomicSetPointer(v pointer) {
atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
}
func (p pointer) AtomicSetNilPointer() {
atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
}
func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
return v
}
return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
}
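
A minimal illustration of the compare-and-swap-if-nil idiom used by AtomicSetPointerIfNil: the first writer wins the race, and every caller ends up observing the same pointer afterwards:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

func setIfNil(slot *unsafe.Pointer, v unsafe.Pointer) unsafe.Pointer {
	if atomic.CompareAndSwapPointer(slot, nil, v) {
		return v // we installed our pointer
	}
	return atomic.LoadPointer(slot) // someone else won; use theirs
}

func main() {
	var slot unsafe.Pointer
	a, b := new(int), new(int)
	first := setIfNil(&slot, unsafe.Pointer(a))
	second := setIfNil(&slot, unsafe.Pointer(b)) // loses the race; observes a
	fmt.Println(first == second)                 // true
}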
type atomicV1MessageInfo struct{ p Pointer }
func (mi *atomicV1MessageInfo) Get() Pointer {
return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
}
func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
return p
}
return mi.Get()
}

View file

@ -0,0 +1,142 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"sync/atomic"
"unsafe"
)
// presenceSize represents the size of a presence set, which should be the largest index of the set plus 1.
type presenceSize uint32
// presence is the internal representation of the bitmap array in a generated protobuf
type presence struct {
// This is a pointer to the beginning of an array of uint32
P unsafe.Pointer
}
func (p presence) toElem(num uint32) (ret *uint32) {
const (
bitsPerByte = 8
siz = unsafe.Sizeof(*ret)
)
// p.P points to an array of uint32, num is the bit in this array that the
// caller wants to check/manipulate. Calculate the index in the array that
// contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
offset := uintptr(num) / (siz * bitsPerByte) * siz
return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
}
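
A worked example of the word/bit arithmetic in toElem and the %32 masks used by Present and friends: field index 76 lands in word 2, bit 12:

package main

import "fmt"

func main() {
	const num = 76
	word := num / 32                // index into the presence []uint32, as in toElem
	mask := uint32(1) << (num % 32) // bit within that word, as in Present/SetPresent
	fmt.Printf("word=%d mask=%#x\n", word, mask) // word=2 mask=0x1000
}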
// Present checks for the presence of a specific field number in a presence set.
func (p presence) Present(num uint32) bool {
if p.P == nil {
return false
}
return Export{}.Present(p.toElem(num), num)
}
// SetPresent adds presence for a specific field number in a presence set.
func (p presence) SetPresent(num uint32, size presenceSize) {
Export{}.SetPresent(p.toElem(num), num, uint32(size))
}
// SetPresentUnatomic adds presence for a specific field number in a presence set without using
// atomic operations. Only to be called during unmarshaling.
func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
}
// ClearPresent removes presence for a specific field number in a presence set.
func (p presence) ClearPresent(num uint32) {
Export{}.ClearPresent(p.toElem(num), num)
}
// LoadPresenceCache (together with PresentInCache) allows for a
// cached version of checking for presence without re-reading the word
// for every field. It is optimized for efficiency and assumes no
// simultaneous mutation of the presence set (or at least does not have
// a problem with simultaneous mutation giving inconsistent results).
func (p presence) LoadPresenceCache() (current uint32) {
if p.P == nil {
return 0
}
return atomic.LoadUint32((*uint32)(p.P))
}
// PresentInCache reads presence from a cached word in the presence
// bitmap. It loads a fresh word into the cache if the requested bit
// falls outside the cached word. This is for very fast iteration
// through bitmaps in cases where we either know that the bitmap will
// not be altered, or we don't care about inconsistencies caused by
// simultaneous writes.
func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
if num/32 != *cachedElement {
o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
*current = atomic.LoadUint32(q)
*cachedElement = num / 32
}
return (*current & (1 << (num % 32))) > 0
}
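
A sketch of the cached-word scan that PresentInCache enables, run here on a plain local bitmap; this single-goroutine toy needs no atomics, and the word is reloaded only when the scan crosses a 32-bit boundary:

package main

import "fmt"

func main() {
	bitmap := []uint32{0, 1 << 5} // field 37 (= 32+5) is present
	cachedElem, current := uint32(0), bitmap[0]
	for num := uint32(0); num < 64; num++ {
		if num/32 != cachedElem { // crossed a word boundary: reload the cache
			cachedElem = num / 32
			current = bitmap[cachedElem]
		}
		if current&(1<<(num%32)) != 0 {
			fmt.Println("present:", num) // present: 37
		}
	}
}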
// AnyPresent checks if any field is marked as present in the bitmap.
func (p presence) AnyPresent(size presenceSize) bool {
n := uintptr((size + 31) / 32)
for j := uintptr(0); j < n; j++ {
o := j * unsafe.Sizeof(uint32(0))
q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
b := atomic.LoadUint32(q)
if b > 0 {
return true
}
}
return false
}
// toRaceDetectData finds the preceding RaceDetectHookData in a
// message by using pointer arithmetic. As the type of the presence
// set (bitmap) varies with the number of fields in the protobuf, we
// cannot have a single struct type containing the array and the
// RaceDetectHookData. Instead, the RaceDetectHookData is placed
// immediately before the bitmap array, and we find it by walking
// backwards in the struct.
//
// This method is only called from the race-detect version of the code,
// so RaceDetectHookData is never an empty struct.
func (p presence) toRaceDetectData() *RaceDetectHookData {
var template struct {
d RaceDetectHookData
a [1]uint32
}
o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
}
func atomicLoadShadowPresence(p **[]byte) *[]byte {
return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
}
// findPointerToRaceDetectData finds the preceding RaceDetectHookData
// in a message by using pointer arithmetic. For the methods called
// directly from generated code, we don't have a pointer to the
// beginning of the presence set, but a pointer inside the array. As
// we know the index of the bit we're manipulating (num), we can
// calculate which element of the array ptr is pointing to. With that
// information we find the preceding RaceDetectHookData and can
// manipulate the shadow bitmap.
//
// This method is only called from the race-detect version of the
// code, so RaceDetectHookData is never an empty struct.
func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
var template struct {
d RaceDetectHookData
a [1]uint32
}
o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
}
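
The template-struct trick above, reduced to a runnable sketch with a toy header type standing in for RaceDetectHookData: measure the header-to-array gap once on a throwaway layout, then step back by that gap from a pointer to the start of a real array:

package main

import (
	"fmt"
	"unsafe"
)

// toyHeader stands in for RaceDetectHookData.
type toyHeader struct{ shadow *[]byte }

func main() {
	// Measure the header-to-array gap on a throwaway template layout...
	var template struct {
		d toyHeader
		a [1]uint32
	}
	o := uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))
	// ...then recover the header from a pointer to a real array's start.
	var actual struct {
		d toyHeader
		a [4]uint32
	}
	p := &actual.a[0]
	h := (*toyHeader)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - o))
	fmt.Println(h == &actual.d) // true
}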

View file

@ -37,6 +37,10 @@ const (
// ValidationValid indicates that unmarshaling the message will succeed.
ValidationValid
// ValidationWrongWireType indicates that a validated field does not have
// the expected wire type.
ValidationWrongWireType
)
func (v ValidationStatus) String() string {
@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
if ft.Kind() == reflect.Ptr {
// Repeated opaque message fields are *[]*T.
ft = ft.Elem()
}
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
if ft.Kind() == reflect.Ptr {
// Repeated opaque message fields are *[]*T.
ft = ft.Elem()
}
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
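
The unwrapping added here handles repeated opaque message fields, whose Go representation is a pointer to a slice of message pointers. A small sketch of the double Elem walk, using a placeholder message type:

package main

import (
	"fmt"
	"reflect"
)

type m struct{}

func main() {
	ft := reflect.TypeOf((*[]*m)(nil)) // *[]*m, the shape of an opaque repeated message field
	if ft.Kind() == reflect.Ptr {
		ft = ft.Elem() // []*m
	}
	if ft.Kind() == reflect.Slice {
		fmt.Println(ft.Elem()) // *main.m, the per-element message pointer type
	}
}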