package boltease

import (
	"encoding"
	"fmt"
	"math"
	"reflect"
	"slices"
	"sort"
	"strconv"
	"strings"
	"sync"
)
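
// Save marshals v and stores it under key k at the bucket path.
// Scalars become single key/value pairs; structs, slices, arrays, and maps
// become nested buckets named by k.
//
// A minimal usage sketch (the path, key, and User type here are hypothetical):
//
//	// err := db.Save([]string{"app", "users"}, "alice", User{Name: "Alice"})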
func (db *DB) Save(path []string, k string, v any) error {
	e := newWriterState(db, path)
	defer writerStatePool.Put(e)

	return e.marshal(db, path, k, v)
}

// Marshaler is the interface implemented by types that
// can marshal themselves into a db.
type Marshaler interface {
	MarshalBoltease(db *DB, path []string, key string) error
}
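
// As an illustration, a hypothetical type (not part of this package) could
// satisfy Marshaler by writing its fields into a nested bucket via
// db.SetBBytes, the same primitive writerState.Write uses below:
//
//	type point struct{ X, Y int }
//
//	func (p point) MarshalBoltease(db *DB, path []string, key string) error {
//		sub := append(path, key)
//		if err := db.SetBBytes(sub, []byte("x"), []byte(strconv.Itoa(p.X))); err != nil {
//			return err
//		}
//		return db.SetBBytes(sub, []byte("y"), []byte(strconv.Itoa(p.Y)))
//	}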

// An UnsupportedTypeError is returned by [Marshal] when attempting
// to write an unsupported value type.
type UnsupportedTypeError struct {
	Type reflect.Type
}

func (e *UnsupportedTypeError) Error() string {
	return "boltease: unsupported type: " + e.Type.String()
}

// An UnsupportedValueError is returned by [Marshal] when attempting
// to encode an unsupported value.
type UnsupportedValueError struct {
	Value reflect.Value
	Str   string
}

func (e *UnsupportedValueError) Error() string {
	return "boltease: unsupported value: " + e.Str
}

// A MarshalerError represents an error from calling a
// [Marshaler.MarshalBoltease] or [encoding.TextMarshaler.MarshalText] method.
type MarshalerError struct {
	Type       reflect.Type
	Err        error
	sourceFunc string
}

func (e *MarshalerError) Error() string {
	srcFunc := e.sourceFunc
	if srcFunc == "" {
		srcFunc = "MarshalBoltease"
	}
	return "boltease: error calling " + srcFunc +
		" for type " + e.Type.String() +
		": " + e.Err.Error()
}

// Unwrap returns the underlying error.
func (e *MarshalerError) Unwrap() error { return e.Err }

// writerState holds the destination db and the current bucket path while a
// value is being written.
type writerState struct {
	db       *DB
	path     []string
	ptrLevel uint
	ptrSeen  map[any]struct{}
}

func (es *writerState) WriteString(key, val string) error {
	return es.Write([]byte(key), []byte(val))
}

func (es *writerState) Write(key []byte, val []byte) error {
	return es.db.SetBBytes(es.path, key, val)
}
const startDetectingCyclesAfter = 1000

var writerStatePool sync.Pool

func newWriterState(db *DB, path []string) *writerState {
	if v := writerStatePool.Get(); v != nil {
		e := v.(*writerState)
		if len(e.ptrSeen) > 0 {
			panic("ptrWriter.write should have emptied ptrSeen via defers")
		}
		e.ptrLevel = 0
		// Reset the destination: a pooled state still holds the db and
		// path from its previous use.
		e.db = db
		e.path = path
		return e
	}
	return &writerState{
		db:      db,
		path:    path,
		ptrSeen: make(map[any]struct{}),
	}
}

// bolteaseError is an error wrapper type for internal use only.
// Panics with errors are wrapped in bolteaseError so that the top-level
// recover can distinguish intentional panics from this package.
type bolteaseError struct{ error }

func (e *writerState) marshal(db *DB, path []string, k string, v any) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if be, ok := r.(bolteaseError); ok {
				err = be.error
			} else {
				panic(r)
			}
		}
	}()
	e.reflectValue(k, reflect.ValueOf(v))
	return nil
}

// error aborts the encoding by panicking with err wrapped in bolteaseError.
func (e *writerState) error(err error) {
	panic(bolteaseError{err})
}
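
// isEmptyValue mirrors encoding/json's omitempty test: empty strings, zero
// numbers, false bools, nil pointers and interfaces, and zero-length arrays,
// maps, and slices all count as empty.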
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
		reflect.Float32, reflect.Float64,
		reflect.Interface, reflect.Pointer:
		return v.IsZero()
	}
	return false
}

func (e *writerState) reflectValue(k string, v reflect.Value) {
	valueWriter(v)(e, k, v)
}

type writerFunc func(e *writerState, k string, v reflect.Value)

var writerCache sync.Map // map[reflect.Type]writerFunc

func valueWriter(v reflect.Value) writerFunc {
	if !v.IsValid() {
		return invalidValueWriter
	}
	return typeWriter(v.Type())
}

func typeWriter(t reflect.Type) writerFunc {
	if fi, ok := writerCache.Load(t); ok {
		return fi.(writerFunc)
	}

	// To deal with recursive types, populate the map with an
	// indirect func before we build it. This type waits on the
	// real func (f) to be ready and then calls it. This indirect
	// func is only used for recursive types.
	var (
		wg sync.WaitGroup
		f  writerFunc
	)
	wg.Add(1)
	fi, loaded := writerCache.LoadOrStore(t, writerFunc(func(e *writerState, k string, v reflect.Value) {
		wg.Wait()
		f(e, k, v)
	}))
	if loaded {
		return fi.(writerFunc)
	}

	// Compute the real writer and replace the indirect func with it.
	f = newTypeWriter(t, true)
	wg.Done()
	writerCache.Store(t, f)
	return f
}

var (
	marshalerType     = reflect.TypeFor[Marshaler]()
	textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]()
)

// newTypeWriter constructs a writerFunc for a type.
// The returned writer only checks CanAddr when allowAddr is true.
func newTypeWriter(t reflect.Type, allowAddr bool) writerFunc {
	// If we have a non-pointer value whose type implements
	// Marshaler with a value receiver, then we're better off taking
	// the address of the value - otherwise we end up with an
	// allocation as we cast the value to an interface.
	if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(marshalerType) {
		return newCondAddrWriter(addrMarshalerWriter, newTypeWriter(t, false))
	}
	if t.Implements(marshalerType) {
		return marshalerWriter
	}
	if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(textMarshalerType) {
		return newCondAddrWriter(addrTextMarshalerWriter, newTypeWriter(t, false))
	}
	if t.Implements(textMarshalerType) {
		return textMarshalerWriter
	}

	switch t.Kind() {
	case reflect.Bool:
		return boolWriter
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return intWriter
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return uintWriter
	case reflect.Float32:
		return float32Writer
	case reflect.Float64:
		return float64Writer
	case reflect.String:
		return stringWriter
	case reflect.Interface:
		return interfaceWriter
	case reflect.Struct:
		return newStructWriter(t)
	case reflect.Map:
		return newMapWriter(t)
	case reflect.Slice:
		return newSliceWriter(t)
	case reflect.Array:
		return newArrayWriter(t)
	case reflect.Pointer:
		return newPtrWriter(t)
	default:
		return unsupportedTypeWriter
	}
}

func invalidValueWriter(e *writerState, k string, v reflect.Value) {
	e.WriteString(k, "null")
}

func marshalerWriter(e *writerState, k string, v reflect.Value) {
	if v.Kind() == reflect.Pointer && v.IsNil() {
		e.WriteString(k, "null")
		return
	}
	m, ok := v.Interface().(Marshaler)
	if !ok {
		e.WriteString(k, "null")
		return
	}
	err := m.MarshalBoltease(e.db, e.path, k)
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalBoltease"})
	}
}

func addrMarshalerWriter(e *writerState, k string, v reflect.Value) {
	va := v.Addr()
	if va.IsNil() {
		e.WriteString(k, "null")
		return
	}
	m := va.Interface().(Marshaler)
	err := m.MarshalBoltease(e.db, e.path, k)
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalBoltease"})
	}
}

func textMarshalerWriter(e *writerState, k string, v reflect.Value) {
	if v.Kind() == reflect.Pointer && v.IsNil() {
		e.WriteString(k, "null")
		return
	}
	m, ok := v.Interface().(encoding.TextMarshaler)
	if !ok {
		e.WriteString(k, "null")
		return
	}
	b, err := m.MarshalText()
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalText"})
	}
	e.Write([]byte(k), b)
}

func addrTextMarshalerWriter(e *writerState, k string, v reflect.Value) {
	va := v.Addr()
	if va.IsNil() {
		e.WriteString(k, "null")
		return
	}
	m := va.Interface().(encoding.TextMarshaler)
	b, err := m.MarshalText()
	if err != nil {
		e.error(&MarshalerError{v.Type(), err, "MarshalText"})
	}
	e.Write([]byte(k), b)
}

func boolWriter(e *writerState, k string, v reflect.Value) {
	e.WriteString(k, strconv.FormatBool(v.Bool()))
}

func intWriter(e *writerState, k string, v reflect.Value) {
	e.WriteString(k, strconv.FormatInt(v.Int(), 10))
}

func uintWriter(e *writerState, k string, v reflect.Value) {
	e.WriteString(k, strconv.FormatUint(v.Uint(), 10))
}

type floatWriter int // number of bits

func (bits floatWriter) write(e *writerState, k string, v reflect.Value) {
	f := v.Float()
	if math.IsInf(f, 0) || math.IsNaN(f) {
		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
	}

	// Convert as if by ES6 number to string conversion.
	// This matches most other JSON generators.
	// See golang.org/issue/6384 and golang.org/issue/14135.
	// Like fmt %g, but the exponent cutoffs are different
	// and exponents themselves are not padded to two digits.
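	// For instance, 1e21 is written as "1e+21" and 0.0000005 as "5e-7",
	// while 1000000 stays in fixed notation as "1000000".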
	b := []byte{}
	abs := math.Abs(f)
	fmt := byte('f')
	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
	if abs != 0 {
		if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
			fmt = 'e'
		}
	}
	b = strconv.AppendFloat(b, f, fmt, -1, int(bits))
	if fmt == 'e' {
		// clean up e-09 to e-9
		n := len(b)
		if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
			b[n-2] = b[n-1]
			b = b[:n-1]
		}
	}
	e.Write([]byte(k), b)
}

var (
	float32Writer = (floatWriter(32)).write
	float64Writer = (floatWriter(64)).write
)

func stringWriter(e *writerState, k string, v reflect.Value) {
	if v.Type() == numberType {
		numStr := v.String()
		// In Go 1.5 the empty string encoded to "0". While that is not a
		// valid number literal, we keep compatibility, so check validity
		// after this.
		if numStr == "" {
			numStr = "0" // Number's zero-val
		}
		if !isValidNumber(numStr) {
			e.error(fmt.Errorf("boltease: invalid number literal %q", numStr))
		}
		e.Write([]byte(k), []byte(numStr))
		return
	}
	e.Write([]byte(k), []byte(v.String()))
}

func isValidNumber(s string) bool {
	// This function implements the JSON numbers grammar.
	// See https://tools.ietf.org/html/rfc7159#section-6
	// and https://www.json.org/img/number.png
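	//
	// For example, "0", "-12", "3.25", and "1e-9" are valid, while
	// "", "-", "01", "1.", and "0x1F" are not.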

	if s == "" {
		return false
	}

	// Optional -
	if s[0] == '-' {
		s = s[1:]
		if s == "" {
			return false
		}
	}

	// Digits
	switch {
	default:
		return false

	case s[0] == '0':
		s = s[1:]

	case '1' <= s[0] && s[0] <= '9':
		s = s[1:]
		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
			s = s[1:]
		}
	}

	// . followed by 1 or more digits.
	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
		s = s[2:]
		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
			s = s[1:]
		}
	}

	// e or E followed by an optional - or + and
	// 1 or more digits.
	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
		s = s[1:]
		if s[0] == '+' || s[0] == '-' {
			s = s[1:]
			if s == "" {
				return false
			}
		}
		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
			s = s[1:]
		}
	}

	// Make sure we are at the end.
	return s == ""
}

func interfaceWriter(e *writerState, k string, v reflect.Value) {
	if v.IsNil() {
		e.WriteString(k, "null")
		return
	}
	e.reflectValue(k, v.Elem())
}

func unsupportedTypeWriter(e *writerState, k string, v reflect.Value) {
	e.error(&UnsupportedTypeError{v.Type()})
}

type structWriter struct {
	fields structFields
}

// structFields holds the computed fields of a struct type, indexed by exact
// and case-folded name.
type structFields struct {
	list         []field
	byExactName  map[string]*field
	byFoldedName map[string]*field
}

// write writes a struct as a nested bucket named k at e.path.
func (se structWriter) write(e *writerState, k string, v reflect.Value) {
	// Add the key for this struct to the writerState path.
	e.path = append(e.path, k)
	// Pop it when we're done.
	defer func() { e.path = e.path[:len(e.path)-1] }()

FieldLoop:
	for i := range se.fields.list {
		f := &se.fields.list[i]
		// Find the nested struct field by following f.index.
		fv := v
		for _, i := range f.index {
			if fv.Kind() == reflect.Pointer {
				if fv.IsNil() {
					continue FieldLoop
				}
				fv = fv.Elem()
			}
			fv = fv.Field(i)
		}
		if f.omitEmpty && isEmptyValue(fv) {
			continue
		}
		f.writer(e, f.name, fv)
	}
}

func newStructWriter(t reflect.Type) writerFunc {
	se := structWriter{fields: cachedTypeFields(t)}
	return se.write
}

type mapWriter struct {
	elemEnc writerFunc
}

func (me mapWriter) write(e *writerState, k string, v reflect.Value) {
	if v.IsNil() {
		e.WriteString(k, "null")
		return
	}
	// Write the map as a nested bucket named k, mirroring structWriter;
	// otherwise k would be dropped and the entries would land at the
	// parent path.
	e.path = append(e.path, k)
	defer func() { e.path = e.path[:len(e.path)-1] }()
	if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
		// We're a large number of nested ptrWriter.write calls deep;
		// start checking if we've run into a pointer cycle.
		ptr := v.UnsafePointer()
		if _, ok := e.ptrSeen[ptr]; ok {
			e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
		}
		e.ptrSeen[ptr] = struct{}{}
		defer delete(e.ptrSeen, ptr)
	}

	// Extract and sort the keys.
	var (
		sv  = make([]reflectWithString, v.Len())
		mi  = v.MapRange()
		err error
	)
	for i := 0; mi.Next(); i++ {
		if sv[i].ks, err = resolveKeyName(mi.Key()); err != nil {
			e.error(fmt.Errorf("boltease: encoding error for type %q: %q", v.Type().String(), err.Error()))
		}
		sv[i].v = mi.Value()
	}
	slices.SortFunc(sv, func(i, j reflectWithString) int {
		return strings.Compare(i.ks, j.ks)
	})

	for _, kv := range sv {
		me.elemEnc(e, kv.ks, kv.v)
	}
	e.ptrLevel--
}

func newMapWriter(t reflect.Type) writerFunc {
	switch t.Key().Kind() {
	case reflect.String,
		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
	default:
		if !t.Key().Implements(textMarshalerType) {
			return unsupportedTypeWriter
		}
	}
	me := mapWriter{typeWriter(t.Elem())}
	return me.write
}
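
// Consequently, map keys must be strings, integer kinds, or types
// implementing encoding.TextMarshaler: map[string]int and map[int]string are
// accepted, while something like map[[2]int]string is rejected as an
// unsupported type.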

func writeByteSlice(e *writerState, k string, v reflect.Value) {
	if v.IsNil() {
		e.WriteString(k, "null")
		return
	}
	e.Write([]byte(k), v.Bytes())
}

// sliceWriter just wraps an arrayWriter, checking to make sure the value isn't nil.
type sliceWriter struct {
	arrayWriter writerFunc
}

func (se sliceWriter) write(e *writerState, k string, v reflect.Value) {
	if v.IsNil() {
		e.WriteString(k, "null")
		return
	}
	if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
		// We're a large number of nested ptrWriter.write calls deep;
		// start checking if we've run into a pointer cycle.
		// Here we use a struct to memorize the pointer to the first element
		// of the slice and its length.
		ptr := struct {
			ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe
			len int
		}{v.UnsafePointer(), v.Len()}
		if _, ok := e.ptrSeen[ptr]; ok {
			e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
		}
		e.ptrSeen[ptr] = struct{}{}
		defer delete(e.ptrSeen, ptr)
	}
	se.arrayWriter(e, k, v)
	e.ptrLevel--
}

func newSliceWriter(t reflect.Type) writerFunc {
	// Byte slices get special treatment; arrays don't.
	if t.Elem().Kind() == reflect.Uint8 {
		p := reflect.PointerTo(t.Elem())
		if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) {
			return writeByteSlice
		}
	}
	enc := sliceWriter{newArrayWriter(t)}
	return enc.write
}

type arrayWriter struct {
	elemWrite writerFunc
}

func (ae arrayWriter) write(e *writerState, k string, v reflect.Value) {
	// Write the array as a nested bucket named k, one entry per element.
	e.path = append(e.path, k)
	defer func() { e.path = e.path[:len(e.path)-1] }()
	n := v.Len()
	for i := 0; i < n; i++ {
		ae.elemWrite(e, strconv.Itoa(i), v.Index(i))
	}
}
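
// A slice or array saved under key "nums" thus becomes a nested bucket
// "nums" whose element keys are the decimal indices "0", "1", ...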

func newArrayWriter(t reflect.Type) writerFunc {
	w := arrayWriter{typeWriter(t.Elem())}
	return w.write
}

type ptrWriter struct {
	elemWrite writerFunc
}
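
// Past startDetectingCyclesAfter levels of pointer nesting, write records
// visited pointers so that a self-referential value such as
//
//	type node struct{ next *node }
//	n := &node{}
//	n.next = n
//
// is reported as an UnsupportedValueError instead of recursing forever.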

func (pe ptrWriter) write(e *writerState, k string, v reflect.Value) {
	if v.IsNil() {
		e.WriteString(k, "null")
		return
	}
	if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
		// We're a large number of nested ptrWriter.write calls deep;
		// start checking if we've run into a pointer cycle.
		ptr := v.Interface()
		if _, ok := e.ptrSeen[ptr]; ok {
			e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
		}
		e.ptrSeen[ptr] = struct{}{}
		defer delete(e.ptrSeen, ptr)
	}
	pe.elemWrite(e, k, v.Elem())
	e.ptrLevel--
}

func newPtrWriter(t reflect.Type) writerFunc {
	w := ptrWriter{typeWriter(t.Elem())}
	return w.write
}

type condAddrWriter struct {
	canAddrWrite, elseWrite writerFunc
}

func (ce condAddrWriter) write(e *writerState, k string, v reflect.Value) {
	if v.CanAddr() {
		ce.canAddrWrite(e, k, v)
	} else {
		ce.elseWrite(e, k, v)
	}
}

// newCondAddrWriter returns a writer that checks whether its value
// CanAddr and delegates to canAddrWrite if so, else to elseWrite.
func newCondAddrWriter(canAddrWrite, elseWrite writerFunc) writerFunc {
	w := condAddrWriter{canAddrWrite: canAddrWrite, elseWrite: elseWrite}
	return w.write
}

func typeByIndex(t reflect.Type, index []int) reflect.Type {
	for _, i := range index {
		if t.Kind() == reflect.Pointer {
			t = t.Elem()
		}
		t = t.Field(i).Type
	}
	return t
}

type reflectWithString struct {
	v  reflect.Value
	ks string
}

func resolveKeyName(k reflect.Value) (string, error) {
	if k.Kind() == reflect.String {
		return k.String(), nil
	}
	if tm, ok := k.Interface().(encoding.TextMarshaler); ok {
		if k.Kind() == reflect.Pointer && k.IsNil() {
			return "", nil
		}
		buf, err := tm.MarshalText()
		return string(buf), err
	}
	switch k.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(k.Int(), 10), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(k.Uint(), 10), nil
	}
	panic("unexpected map key type")
}

// A field represents a single field found in a struct.
type field struct {
	name      string
	nameBytes []byte

	tag       bool
	index     []int
	typ       reflect.Type
	omitEmpty bool

	writer writerFunc
}
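
// Field names and options come from `boltease` struct tags. Assuming the
// json-style syntax implemented by parseTag, a tagged struct looks like:
//
//	type User struct {
//		Name  string `boltease:"name"`
//		Email string `boltease:"email,omitempty"`
//		Age   int    `boltease:"-"` // skipped entirely
//	}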

// byIndex sorts fields by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that boltease should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
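//
// For example, with
//
//	type Inner struct{ Name string }
//	type Outer struct {
//		Inner
//		Name string
//	}
//
// only Outer.Name is recognized: the shallower field hides Inner.Name under
// Go's embedding rules (see dominantField below).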
func typeFields(t reflect.Type) structFields {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	var count, nextCount map[reflect.Type]int

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true
			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.Anonymous {
					t := sf.Type
					if t.Kind() == reflect.Pointer {
						t = t.Elem()
					}
					if !sf.IsExported() && t.Kind() != reflect.Struct {
						// Ignore embedded fields of unexported non-struct types.
						continue
					}
					// Do not ignore embedded fields of unexported struct types
					// since they may have exported fields.
				} else if !sf.IsExported() {
					// Ignore unexported non-embedded fields.
					continue
				}
				tag := sf.Tag.Get("boltease")
				if tag == "-" {
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Pointer {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					field := field{
						name:      name,
						tag:       tagged,
						index:     index,
						typ:       ft,
						omitEmpty: opts.Contains("omitempty"),
					}
					field.nameBytes = []byte(field.name)
					fields = append(fields, field)
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}
				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, field{name: ft.Name(), index: index, typ: ft})
				}
			}
		}
	}

	sort.Slice(fields, func(i, j int) bool {
		x := fields
		// Sort fields by name, breaking ties with depth, then
		// breaking ties with "name came from boltease tag", then
		// breaking ties with index sequence.
		if x[i].name != x[j].name {
			return x[i].name < x[j].name
		}
		if len(x[i].index) != len(x[j].index) {
			return len(x[i].index) < len(x[j].index)
		}
		if x[i].tag != x[j].tag {
			return x[i].tag
		}
		return byIndex(x).Less(i, j)
	})

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with boltease tags are promoted.
	//
	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name.
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	exactNameIndex := make(map[string]*field, len(fields))
	foldedNameIndex := make(map[string]*field, len(fields))
	for i, field := range fields {
		exactNameIndex[field.name] = &fields[i]
		// For historical reasons, first folded match takes precedence.
		foldedName := string(foldName(field.nameBytes))
		if _, ok := foldedNameIndex[foldedName]; !ok {
			foldedNameIndex[foldedName] = &fields[i]
		}
	}
	return structFields{fields, exactNameIndex, foldedNameIndex}
}

// dominantField looks through the fields, all of which are known to have the
// same name, to find the single field that dominates the others using Go's
// embedding rules, modified by the presence of boltease tags. If there are
// multiple top-level fields, the boolean will be false: this condition is an
// error in Go and we skip all fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order, then by presence of tag.
	// That means that the first field is the dominant one. We need only check
	// for error cases: two fields at top level, either both tagged or neither tagged.
	if len(fields) > 1 && len(fields[0].index) == len(fields[1].index) && fields[0].tag == fields[1].tag {
		return field{}, false
	}
	return fields[0], true
}

var fieldCache sync.Map // map[reflect.Type]structFields

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) structFields {
	if f, ok := fieldCache.Load(t); ok {
		return f.(structFields)
	}
	f, _ := fieldCache.LoadOrStore(t, typeFields(t))
	return f.(structFields)
}