Working on decode now

parent 741eac472a
commit 5a85c195a7

be_decode.go (new file, 12 lines)
@@ -0,0 +1,12 @@
package boltease

import "reflect"

func (db *DB) Load(path []string, dest any) error {
	return nil
}

// A Number represents a Boltease number literal.
type Number string

var numberType = reflect.TypeFor[Number]()
be_encode.go (585 changed lines)
@@ -6,16 +6,17 @@ import (
|
||||
"math"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func (db *DB) Save(path []string, v any) error {
|
||||
func (db *DB) Save(path []string, k string, v any) error {
|
||||
e := newWriterState(db, path)
|
||||
defer writerStatePool.Put(e)
|
||||
|
||||
err := e.marshal(db, path, v, encOpts{})
|
||||
err := e.marshal(db, path, k, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
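For orientation, the updated example program later in this commit calls the new (path, key, value) signature like this:

// Call shape taken from the updated example program in this commit.
err = db.Save(
	[]string{"examples"},
	"example1",
	ExampleType{
		Name: "Example 1",
		Age:  5,
	},
)
if err != nil {
	// handle the error
}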
@@ -25,11 +26,11 @@ func (db *DB) Save(path []string, v any) error {
|
||||
// Marshaler is the interface implemented by types that
|
||||
// can marshal themselves into a db
|
||||
type Marshaler interface {
|
||||
MarshalBoltease(db *DB, path []string) error
|
||||
MarshalBoltease(db *DB, path []string, key string) error
|
||||
}
|
||||
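As a sketch of the key-aware interface, a hypothetical type (not part of this commit) could marshal itself by delegating back to Save with a custom representation:

// Coordinates is a hypothetical example type; it is not defined anywhere in
// this commit. It stores itself as a single "lat,lon" string under key.
type Coordinates struct {
	Lat, Lon float64
}

func (c Coordinates) MarshalBoltease(db *DB, path []string, key string) error {
	// Delegating to db.Save keeps this sketch within the API shown above.
	return db.Save(path, key, fmt.Sprintf("%f,%f", c.Lat, c.Lon))
}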
|
||||
// An UnsupportedTypeError is returned by [Marshal] when attempting
|
||||
// to encode an unsupported value type.
|
||||
// to write an unsupported value type.
|
||||
type UnsupportedTypeError struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
@@ -38,6 +39,17 @@ func (e *UnsupportedTypeError) Error() string {
|
||||
return "boltease: unsupported type: " + e.Type.String()
|
||||
}
|
||||
|
||||
// An UnsupportedValueError is returned by [Marshal] when attempting
|
||||
// to encode an unsupported value.
|
||||
type UnsupportedValueError struct {
|
||||
Value reflect.Value
|
||||
Str string
|
||||
}
|
||||
|
||||
func (e *UnsupportedValueError) Error() string {
|
||||
return "boltease: unsupported value: " + e.Str
|
||||
}
|
||||
|
||||
// A MarshalerError represents an error from calling a
|
||||
// [Marshaler.MarshalBoltease] or [encoding.TextMarshaler.MarshalText] method.
|
||||
type MarshalerError struct {
|
||||
@@ -66,8 +78,8 @@ type writerState struct {
|
||||
ptrSeen map[any]struct{}
|
||||
}
|
||||
|
||||
func (es *writerState) WriteString(val string) error {
|
||||
return es.Write(key, []byte(val))
|
||||
func (es *writerState) WriteString(key, val string) error {
|
||||
return es.Write([]byte(key), []byte(val))
|
||||
}
|
||||
|
||||
func (es *writerState) Write(key []byte, val []byte) error {
|
||||
@@ -82,7 +94,7 @@ func newWriterState(db *DB, path []string) *writerState {
|
||||
if v := writerStatePool.Get(); v != nil {
|
||||
e := v.(*writerState)
|
||||
if len(e.ptrSeen) > 0 {
|
||||
panic("ptrEncoder.encode should have emptied ptrSeen via defers")
|
||||
panic("ptrWriter.write should have emptied ptrSeen via defers")
|
||||
}
|
||||
e.ptrLevel = 0
|
||||
return e
|
||||
@@ -99,7 +111,7 @@ func newWriterState(db *DB, path []string) *writerState {
|
||||
// can distinguish intentional panics from this package.
|
||||
type bolteaseError struct{ error }
|
||||
|
||||
func (e *writerState) marshal(db *DB, path []string, v any, opts writerOpts) (err error) {
|
||||
func (e *writerState) marshal(db *DB, path []string, k string, v any) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if be, ok := r.(bolteaseError); ok {
|
||||
@@ -109,7 +121,7 @@ func (e *writerState) marshal(db *DB, path []string, v any, opts writerOpts) (er
|
||||
}
|
||||
}
|
||||
}()
|
||||
e.reflectValue(reflect.ValueOf(v), opts)
|
||||
e.reflectValue(k, reflect.ValueOf(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -132,16 +144,11 @@ func isEmptyValue(v reflect.Value) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *writerState) reflectValue(v reflect.Value, opts writerOpts) {
|
||||
valueWriter(v)(e, v, opts)
|
||||
func (e *writerState) reflectValue(k string, v reflect.Value) {
|
||||
valueWriter(v)(e, k, v)
|
||||
}
|
||||
|
||||
type writerOpts struct {
|
||||
// quoted causes primitive fields to be encoded inside Boltease strings.
|
||||
quoted bool
|
||||
}
|
||||
|
||||
type writerFunc func(e *writerState, v reflect.Value, opts writerOpts)
|
||||
type writerFunc func(e *writerState, k string, v reflect.Value)
|
||||
|
||||
var writerCache sync.Map // map[reflect.Type]writerFunc
|
||||
|
||||
@@ -166,15 +173,15 @@ func typeWriter(t reflect.Type) writerFunc {
|
||||
f writerFunc
|
||||
)
|
||||
wg.Add(1)
|
||||
fi, loaded := writerCache.LoadOrStore(t, writerFunc(func(e *writerState, v reflect.Value, opts writerOpts) {
|
||||
fi, loaded := writerCache.LoadOrStore(t, writerFunc(func(e *writerState, k string, v reflect.Value) {
|
||||
wg.Wait()
|
||||
f(e, v, opts)
|
||||
f(e, k, v)
|
||||
}))
|
||||
if loaded {
|
||||
return fi.(writerFunc)
|
||||
}
|
||||
|
||||
// Compute the real encoder and replace the indirect func with it.
|
||||
// Compute the real writer and replace the indirect func with it.
|
||||
f = newTypeWriter(t, true)
|
||||
wg.Done()
|
||||
writerCache.Store(t, f)
|
||||
@@ -186,133 +193,135 @@ var (
|
||||
textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]()
|
||||
)
|
||||
|
||||
// newTypeEncoder constructs an encoderFunc for a type.
|
||||
// The returned encoder only checks CanAddr when allowAddr is true.
|
||||
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
|
||||
// newTypeWriter constructs a writerFunc for a type.
|
||||
// The returned writer only checks CanAddr when allowAddr is true.
|
||||
func newTypeWriter(t reflect.Type, allowAddr bool) writerFunc {
|
||||
// if we have a non-pointer value whose type implements
|
||||
// Marshaler with a value receiver, then we're better off taking
|
||||
// the address of the value - otherwise we end up with an
|
||||
// allocation as we cast the value to an interface.
|
||||
if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(marshalerType) {
|
||||
return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
|
||||
return newCondAddrWriter(addrMarshalerWriter, newTypeWriter(t, false))
|
||||
}
|
||||
if t.Implements(marshalerType) {
|
||||
return marshalerEncoder
|
||||
return marshalerWriter
|
||||
}
|
||||
if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(textMarshalerType) {
|
||||
return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
|
||||
return newCondAddrWriter(addrTextMarshalerWriter, newTypeWriter(t, false))
|
||||
}
|
||||
if t.Implements(textMarshalerType) {
|
||||
return textMarshalerEncoder
|
||||
return textMarshalerWriter
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return boolEncoder
|
||||
return boolWriter
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return intEncoder
|
||||
return intWriter
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return uintEncoder
|
||||
return uintWriter
|
||||
case reflect.Float32:
|
||||
return float32Encoder
|
||||
return float32Writer
|
||||
case reflect.Float64:
|
||||
return float64Encoder
|
||||
return float64Writer
|
||||
case reflect.String:
|
||||
return stringEncoder
|
||||
return stringWriter
|
||||
case reflect.Interface:
|
||||
return interfaceEncoder
|
||||
return interfaceWriter
|
||||
case reflect.Struct:
|
||||
return newStructEncoder(t)
|
||||
return newStructWriter(t)
|
||||
case reflect.Map:
|
||||
return newMapEncoder(t)
|
||||
return newMapWriter(t)
|
||||
case reflect.Slice:
|
||||
return newSliceEncoder(t)
|
||||
return newSliceWriter(t)
|
||||
case reflect.Array:
|
||||
return newArrayEncoder(t)
|
||||
return newArrayWriter(t)
|
||||
case reflect.Pointer:
|
||||
return newPtrEncoder(t)
|
||||
return newPtrWriter(t)
|
||||
default:
|
||||
return unsupportedTypeEncoder
|
||||
return unsupportedTypeWriter
|
||||
}
|
||||
}
|
||||
|
||||
func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) {
|
||||
e.WriteString("null")
|
||||
func invalidValueWriter(e *writerState, k string, v reflect.Value) {
|
||||
e.WriteString(k, "null")
|
||||
}
|
||||
|
||||
func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
func marshalerWriter(e *writerState, k string, v reflect.Value) {
|
||||
if v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
e.WriteString("null")
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
m, ok := v.Interface().(Marshaler)
|
||||
if !ok {
|
||||
e.WriteString("null")
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
err := m.MarshalBoltease(e.db, e.path)
|
||||
err := m.MarshalBoltease(e.db, e.path, k)
|
||||
if err != nil {
|
||||
e.error(&MarshalerError{v.Type(), err, "MarshalBoltease"})
|
||||
}
|
||||
}
|
||||
|
||||
func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
func addrMarshalerWriter(e *writerState, k string, v reflect.Value) {
|
||||
va := v.Addr()
|
||||
if va.IsNil() {
|
||||
e.WriteString("null")
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
m := va.Interface().(Marshaler)
|
||||
err := m.MarshalBoltease()
|
||||
err := m.MarshalBoltease(e.db, e.path, k)
|
||||
if err != nil {
|
||||
e.error(&MarshalerError{v.Type(), err, "MarshalBoltease"})
|
||||
}
|
||||
}
|
||||
|
||||
func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
func textMarshalerWriter(e *writerState, k string, v reflect.Value) {
|
||||
if v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
e.WriteString("null")
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
m, ok := v.Interface().(encoding.TextMarshaler)
|
||||
if !ok {
|
||||
e.WriteString("null")
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
b, err := m.MarshalText()
|
||||
if err != nil {
|
||||
e.error(&MarshalerError{v.Type(), err, "MarshalText"})
|
||||
}
|
||||
e.Write(b)
|
||||
e.Write([]byte(k), b)
|
||||
}
|
||||
|
||||
func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
b := []byte{}
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
b = strconv.AppendBool(b, v.Bool())
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
e.Write(b)
|
||||
func addrTextMarshalerWriter(e *writerState, k string, v reflect.Value) {
|
||||
va := v.Addr()
|
||||
if va.IsNil() {
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
m := va.Interface().(encoding.TextMarshaler)
|
||||
b, err := m.MarshalText()
|
||||
if err != nil {
|
||||
e.error(&MarshalerError{v.Type(), err, "MarshalText"})
|
||||
}
|
||||
e.Write([]byte(k), b)
|
||||
}
|
||||
|
||||
func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
b := []byte{}
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
b = strconv.AppendInt(b, v.Int(), 10)
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
e.Write(b)
|
||||
func boolWriter(e *writerState, k string, v reflect.Value) {
|
||||
e.WriteString(k, strconv.FormatBool(v.Bool()))
|
||||
}
|
||||
|
||||
func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
b := []byte{}
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
b = strconv.AppendUint(b, v.Uint(), 10)
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
e.Write(b)
|
||||
func intWriter(e *writerState, k string, v reflect.Value) {
|
||||
e.WriteString(k, strconv.FormatInt(v.Int(), 10))
|
||||
}
|
||||
|
||||
type floatEncoder int // number of bits
|
||||
func uintWriter(e *writerState, k string, v reflect.Value) {
|
||||
e.WriteString(k, strconv.FormatUint(v.Uint(), 10))
|
||||
}
|
||||
|
||||
func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
type floatWriter int // number of bits
|
||||
|
||||
func (bits floatWriter) write(e *writerState, k string, v reflect.Value) {
|
||||
f := v.Float()
|
||||
if math.IsInf(f, 0) || math.IsNaN(f) {
|
||||
e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
|
||||
@@ -324,7 +333,6 @@ func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
// Like fmt %g, but the exponent cutoffs are different
|
||||
// and exponents themselves are not padded to two digits.
|
||||
b := []byte{}
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
abs := math.Abs(f)
|
||||
fmt := byte('f')
|
||||
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
|
||||
@@ -342,39 +350,31 @@ func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
b = b[:n-1]
|
||||
}
|
||||
}
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
e.Write(b)
|
||||
e.Write([]byte(k), b)
|
||||
}
|
||||
|
||||
var (
|
||||
float32Encoder = (floatEncoder(32)).encode
|
||||
float64Encoder = (floatEncoder(64)).encode
|
||||
float32Writer = (floatWriter(32)).write
|
||||
float64Writer = (floatWriter(64)).write
|
||||
)
|
||||
|
||||
func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
func stringWriter(e *writerState, k string, v reflect.Value) {
|
||||
if v.Type() == numberType {
|
||||
numStr := v.String()
|
||||
// In Go1.5 the empty string encodes to "0", while this is not a valid number literal
|
||||
// In Go1.5 the empty string writes to "0", while this is not a valid number literal
|
||||
// we keep compatibility so check validity after this.
|
||||
if numStr == "" {
|
||||
numStr = "0" // Number's zero-val
|
||||
}
|
||||
if !isValidNumber(numStr) {
|
||||
e.error(fmt.Errorf("json: invalid number literal %q", numStr))
|
||||
e.error(fmt.Errorf("boltease: invalid number literal %q", numStr))
|
||||
}
|
||||
b := []byte{}
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
b = append(b, numStr...)
|
||||
b = mayAppendQuote(b, opts.quoted)
|
||||
e.Write(b)
|
||||
e.Write([]byte(k), b)
|
||||
return
|
||||
}
|
||||
if opts.quoted {
|
||||
b := appendString(nil, v.String(), opts.escapeHTML)
|
||||
e.Write(b) // no need to escape again since it is already escaped
|
||||
} else {
|
||||
e.Write([]byte(v.String()))
|
||||
}
|
||||
e.Write([]byte(k), []byte(v.String()))
|
||||
}
|
||||
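A hypothetical use of the Number type declared in be_decode.go: a caller can carry a numeric literal as a string, and the branch above still validates it with isValidNumber before writing it verbatim.

// Account is a hypothetical struct; Balance defers numeric parsing.
// An invalid literal makes Save fail with "boltease: invalid number literal".
type Account struct {
	Balance Number `boltease:"balance"`
}

// err := db.Save([]string{"accounts"}, "acct1", Account{Balance: "12.50"})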
|
||||
func isValidNumber(s string) bool {
|
||||
@@ -436,19 +436,19 @@ func isValidNumber(s string) bool {
|
||||
return s == ""
|
||||
}
|
||||
|
||||
func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
func interfaceWriter(e *writerState, k string, v reflect.Value) {
|
||||
if v.IsNil() {
|
||||
e.WriteString("null")
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
e.reflectValue(v.Elem(), opts)
|
||||
e.reflectValue(k, v.Elem())
|
||||
}
|
||||
|
||||
func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) {
|
||||
func unsupportedTypeWriter(e *writerState, k string, v reflect.Value) {
|
||||
e.error(&UnsupportedTypeError{v.Type()})
|
||||
}
|
||||
|
||||
type structEncoder struct {
|
||||
type structWriter struct {
|
||||
fields structFields
|
||||
}
|
||||
|
||||
@@ -458,7 +458,13 @@ type structFields struct {
|
||||
byFoldedName map[string]*field
|
||||
}
|
||||
|
||||
func (se structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
// Write a struct at e.path
|
||||
func (se structWriter) write(e *writerState, k string, v reflect.Value) {
|
||||
// Add the key for this struct to the writerState
|
||||
e.path = append(e.path, k)
|
||||
// Pop it when we're done.
|
||||
defer func() { e.path = e.path[:len(e.path)-1] }()
|
||||
|
||||
FieldLoop:
|
||||
for i := range se.fields.list {
|
||||
f := &se.fields.list[i]
|
||||
@@ -473,31 +479,34 @@ FieldLoop:
|
||||
}
|
||||
fv = fv.Field(i)
|
||||
}
|
||||
|
||||
if f.omitEmpty && isEmptyValue(fv) {
|
||||
continue
|
||||
}
|
||||
opts.quoted = f.quoted
|
||||
f.encoder(e, fv, opts)
|
||||
f.writer(e, f.name, fv)
|
||||
}
|
||||
}
|
||||
|
||||
func newStrucEncoder(t reflect.Type) encoderFunc {
|
||||
se := structEncoder{fields: cachedTypeFields(t)}
|
||||
return se.encode
|
||||
func newStrucWriter(t reflect.Type) writerFunc {
|
||||
se := structWriter{fields: cachedTypeFields(t)}
|
||||
return se.write
|
||||
}
|
||||
|
||||
type mapEncoder struct {
|
||||
elemEnc encoderFunc
|
||||
func newStructWriter(t reflect.Type) writerFunc {
|
||||
se := structWriter{fields: cachedTypeFields(t)}
|
||||
return se.write
|
||||
}
|
||||
|
||||
func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
type mapWriter struct {
|
||||
elemEnc writerFunc
|
||||
}
|
||||
|
||||
func (me mapWriter) write(e *writerState, k string, v reflect.Value) {
|
||||
if v.IsNil() {
|
||||
e.WriteString("null")
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
|
||||
// We're a large number of nested ptrEncoder.encode calls deep;
|
||||
// We're a large number of nested ptrWriter.write calls deep;
|
||||
// start checking if we've run into a pointer cycle.
|
||||
ptr := v.UnsafePointer()
|
||||
if _, ok := e.ptrSeen[ptr]; ok {
|
||||
@@ -524,26 +533,173 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
|
||||
})
|
||||
|
||||
for i, kv := range sv {
|
||||
kv.ks, me.elemEnc(e, kv.v, opts))
|
||||
me.elemEnc(e, sv[i].ks, kv.v)
|
||||
}
|
||||
e.ptrLevel--
|
||||
}
|
||||
|
||||
func newMapEncoder(t reflect.Type) encoderFunc {
|
||||
func newMapWriter(t reflect.Type) writerFunc {
|
||||
switch t.Key().Kind() {
|
||||
case reflect.String,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
default:
|
||||
if !t.Key().Implements(textMarshalerType) {
|
||||
return unsupportedTypeEncoder
|
||||
return unsupportedTypeWriter
|
||||
}
|
||||
}
|
||||
me := mapEncoder{typeEncoder(t.Elem())}
|
||||
return me.encode
|
||||
me := mapWriter{typeWriter(t.Elem())}
|
||||
return me.write
|
||||
}
|
||||
|
||||
// TODO: HERE: json/encode.go:793
|
||||
func writeByteSlice(e *writerState, k string, v reflect.Value) {
|
||||
if v.IsNil() {
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
e.Write([]byte(k), v.Bytes())
|
||||
}
|
||||
|
||||
// sliceWriter just wraps an arrayWriter, checking to make sure the value isn't nil.
|
||||
type sliceWriter struct {
|
||||
arrayWriter writerFunc
|
||||
}
|
||||
|
||||
func (se sliceWriter) write(e *writerState, k string, v reflect.Value) {
|
||||
if v.IsNil() {
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
|
||||
// We're a large number of nested ptrWriter.write calls deep;
|
||||
// start checking if we've run into a pointer cycle.
|
||||
// Here we use a struct to memorize the pointer to the first element of the slice
|
||||
// and its length.
|
||||
ptr := struct {
|
||||
ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe
|
||||
len int
|
||||
}{v.UnsafePointer(), v.Len()}
|
||||
if _, ok := e.ptrSeen[ptr]; ok {
|
||||
e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
|
||||
}
|
||||
e.ptrSeen[ptr] = struct{}{}
|
||||
defer delete(e.ptrSeen, ptr)
|
||||
}
|
||||
se.arrayWriter(e, k, v)
|
||||
e.ptrLevel--
|
||||
}
|
||||
|
||||
func newSliceWriter(t reflect.Type) writerFunc {
|
||||
// Byte slices get special treatment; arrays don't.
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
p := reflect.PointerTo(t.Elem())
|
||||
if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) {
|
||||
return writeByteSlice
|
||||
}
|
||||
}
|
||||
enc := sliceWriter{newArrayWriter(t)}
|
||||
return enc.write
|
||||
}
|
||||
|
||||
type arrayWriter struct {
|
||||
elemWrite writerFunc
|
||||
}
|
||||
|
||||
func (ae arrayWriter) write(e *writerState, k string, v reflect.Value) {
|
||||
e.path = append(e.path, k)
|
||||
defer func() { e.path = e.path[:len(e.path)-1] }()
|
||||
n := v.Len()
|
||||
for i := 0; i < n; i++ {
|
||||
ae.elemWrite(e, strconv.Itoa(i), v.Index(i))
|
||||
}
|
||||
}
|
||||
|
||||
func newArrayWriter(t reflect.Type) writerFunc {
|
||||
w := arrayWriter{typeWriter(t.Elem())}
|
||||
return w.write
|
||||
}
|
||||
|
||||
type ptrWriter struct {
|
||||
elemWrite writerFunc
|
||||
}
|
||||
|
||||
func (pe ptrWriter) write(e *writerState, k string, v reflect.Value) {
|
||||
if v.IsNil() {
|
||||
e.WriteString(k, "null")
|
||||
return
|
||||
}
|
||||
if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
|
||||
// We're a large number of nested ptrWriter.write calls deep;
|
||||
// start checking if we've run into a pointer cycle.
|
||||
ptr := v.Interface()
|
||||
if _, ok := e.ptrSeen[ptr]; ok {
|
||||
e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
|
||||
}
|
||||
e.ptrSeen[ptr] = struct{}{}
|
||||
defer delete(e.ptrSeen, ptr)
|
||||
}
|
||||
pe.elemWrite(e, k, v.Elem())
|
||||
e.ptrLevel--
|
||||
}
|
||||
|
||||
func newPtrWriter(t reflect.Type) writerFunc {
|
||||
w := ptrWriter{typeWriter(t.Elem())}
|
||||
return w.write
|
||||
}
|
||||
|
||||
type condAddrWriter struct {
|
||||
canAddrWrite, elseWrite writerFunc
|
||||
}
|
||||
|
||||
func (ce condAddrWriter) write(e *writerState, k string, v reflect.Value) {
|
||||
if v.CanAddr() {
|
||||
ce.canAddrWrite(e, k, v)
|
||||
} else {
|
||||
ce.elseWrite(e, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// newCondAddrWriter returns a writer that checks whether its value
|
||||
// CanAddr and delegates to canAddrWrite if so, else to elseWrite.
|
||||
func newCondAddrWriter(canAddrWrite, elseWrite writerFunc) writerFunc {
|
||||
w := condAddrWriter{canAddrWrite: canAddrWrite, elseWrite: elseWrite}
|
||||
return w.write
|
||||
}
|
||||
|
||||
func typeByIndex(t reflect.Type, index []int) reflect.Type {
|
||||
for _, i := range index {
|
||||
if t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
t = t.Field(i).Type
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
type reflectWithString struct {
|
||||
v reflect.Value
|
||||
ks string
|
||||
}
|
||||
|
||||
func resolveKeyName(k reflect.Value) (string, error) {
|
||||
if k.Kind() == reflect.String {
|
||||
return k.String(), nil
|
||||
}
|
||||
if tm, ok := k.Interface().(encoding.TextMarshaler); ok {
|
||||
if k.Kind() == reflect.Pointer && k.IsNil() {
|
||||
return "", nil
|
||||
}
|
||||
buf, err := tm.MarshalText()
|
||||
return string(buf), err
|
||||
}
|
||||
switch k.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(k.Int(), 10), nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return strconv.FormatUint(k.Uint(), 10), nil
|
||||
}
|
||||
panic("unexpected map key type")
|
||||
}
|
||||
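A quick illustration of the key resolution above, with hypothetical values:

// String keys pass through, integer keys are formatted in base 10, and
// encoding.TextMarshaler keys use their MarshalText output.
k1, _ := resolveKeyName(reflect.ValueOf("name"))   // "name"
k2, _ := resolveKeyName(reflect.ValueOf(42))       // "42"
k3, _ := resolveKeyName(reflect.ValueOf(uint8(7))) // "7"
fmt.Println(k1, k2, k3)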
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
@@ -554,7 +710,206 @@ type field struct {
|
||||
index []int
|
||||
typ reflect.Type
|
||||
omitEmpty bool
|
||||
quoted bool
|
||||
|
||||
encoder encoderFunc
|
||||
writer writerFunc
|
||||
}
|
||||
|
||||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that boltease should recognize for the given type.
|
||||
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
||||
// and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) structFields {
|
||||
// Anonymous fields to explore at the current level and the next
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
var count, nextCount map[reflect.Type]int
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.Anonymous {
|
||||
t := sf.Type
|
||||
if t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
if !sf.IsExported() && t.Kind() != reflect.Struct {
|
||||
// Ignore embedded fields of unexported non-struct types.
|
||||
continue
|
||||
}
|
||||
// Do not ignore embedded fields of unexported struct types
|
||||
// since they may have exported fields.
|
||||
} else if !sf.IsExported() {
|
||||
// Ignore unexported non-embedded fields.
|
||||
continue
|
||||
}
|
||||
tag := sf.Tag.Get("boltease")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
name, opts := parseTag(tag)
|
||||
if !isValidTag(name) {
|
||||
name = ""
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Pointer {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := name != ""
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
field := field{
|
||||
name: name,
|
||||
tag: tagged,
|
||||
index: index,
|
||||
typ: ft,
|
||||
omitEmpty: opts.Contains("omitempty"),
|
||||
}
|
||||
field.nameBytes = []byte(field.name)
|
||||
fields = append(fields, field)
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// it only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
next = append(next, field{name: ft.Name(), index: index, typ: ft})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(fields, func(i, j int) bool {
|
||||
x := fields
|
||||
// sort field by name, breaking ties with depth, then
|
||||
// breaking ties with "name came from boltease tag", then
|
||||
// breaking ties with index sequence
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
})
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with boltease tags are promoted.
|
||||
//
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
exactNameIndex := make(map[string]*field, len(fields))
|
||||
foldedNameIndex := make(map[string]*field, len(fields))
|
||||
for i, field := range fields {
|
||||
exactNameIndex[field.name] = &fields[i]
|
||||
// For historical reasons, first folded match takes precedence.
|
||||
if _, ok := foldedNameIndex[string(foldName(field.nameBytes))]; !ok {
|
||||
foldedNameIndex[string(foldName(field.nameBytes))] = &fields[i]
|
||||
}
|
||||
}
|
||||
return structFields{fields, exactNameIndex, foldedNameIndex}
|
||||
}
|
||||
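To make the selection rules concrete, here is a hypothetical struct (not from this commit) annotated with how typeFields treats each member:

type Meta struct {
	Created string `boltease:"created"` // promoted through the embedded field below
}

type User struct {
	Meta                                      // embedded struct: its exported fields are promoted
	Name    string `boltease:"name"`          // stored under the tag name "name"
	Age     int    `boltease:"age,omitempty"` // omitEmpty set; structWriter skips it when zero
	hidden  string                            // unexported, not embedded: ignored
	Skip    string `boltease:"-"`             // explicitly excluded
}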
|
||||
// dominantField looks through the fields, all of which are known to have the
|
||||
// same name, to find the single field that dominates the others using Go's
|
||||
// embedding rules, modified by the presence of boltease tags. If there are
|
||||
// multiple top-level fields, the boolean will be false: This condition is an
|
||||
// error in Go and we skip all fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order, then by presence of tag.
|
||||
// That means that the first field is the dominant one. We need only check
|
||||
// for error cases: two fields at top level, either both tagged or neither tagged.
|
||||
if len(fields) > 1 && len(fields[0].index) == len(fields[1].index) && fields[0].tag == fields[1].tag {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
var fieldCache sync.Map // map[reflect.Type]structFields
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) structFields {
|
||||
if f, ok := fieldCache.Load(t); ok {
|
||||
return f.(structFields)
|
||||
}
|
||||
f, _ := fieldCache.LoadOrStore(t, typeFields(t))
|
||||
return f.(structFields)
|
||||
}
|
||||
|
bolteasable.go (222 changed lines)
@@ -4,20 +4,19 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type any = interface{}
|
||||
|
||||
func (b *DB) SaveOld(path []string, src any) error {
|
||||
/*
|
||||
func (b *DB) Save(path []string, key string, src any) error {
|
||||
t := reflect.TypeOf(src)
|
||||
if t.Kind() == reflect.Pointer {
|
||||
// Save the actual struct
|
||||
elem := reflect.ValueOf(src).Elem()
|
||||
return b.Save(path, elem.Interface())
|
||||
return b.Save(path, key, elem.Interface())
|
||||
}
|
||||
|
||||
if t.Kind() == reflect.Struct {
|
||||
@@ -48,6 +47,7 @@ func (b *DB) SaveOld(path []string, src any) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
*/
|
||||
|
||||
func (b *DB) LoadOld(path []string, dest any) error {
|
||||
destValue := reflect.ValueOf(dest)
|
||||
@@ -212,220 +212,6 @@ func ReflectValueToInterface(val reflect.Value) interface{} {
|
||||
}
|
||||
}
|
||||
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
name string
|
||||
nameBytes []byte // []byte(name)
|
||||
|
||||
tag bool
|
||||
index []int
|
||||
typ reflect.Type
|
||||
omitEmpty bool
|
||||
}
|
||||
|
||||
// byIndex sorts fields by index sequence
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
type structFields struct {
|
||||
list []field
|
||||
byExactName map[string]*field
|
||||
byFoldedName map[string]*field
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that JSON should recognize for the given type.
|
||||
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
||||
// and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) structFields {
|
||||
// Anonymous fields to explore at the current level and the next
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
var count, nextCount map[reflect.Type]int
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.Anonymous {
|
||||
t := sf.Type
|
||||
if t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
if !sf.IsExported() && t.Kind() != reflect.Struct {
|
||||
// Ignore embedded fields of unexported non-struct types.
|
||||
continue
|
||||
}
|
||||
// Do not ignore embedded fields of unexported struct types
|
||||
// /since they may have exported fields.
|
||||
} else if !sf.IsExported() {
|
||||
// Ignore unexported non-embedded fields.
|
||||
continue
|
||||
}
|
||||
tag := sf.Tag.Get("boltease")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
name, opts := parseTag(tag)
|
||||
if !isValidTag(name) {
|
||||
name = ""
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Pointer {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := name != ""
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
field := field{
|
||||
name: name,
|
||||
tag: tagged,
|
||||
index: index,
|
||||
typ: ft,
|
||||
omitEmpty: opts.Contains("omitempty"),
|
||||
}
|
||||
field.nameBytes = []byte(field.name)
|
||||
fields = append(fields, field)
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so thta the annihilation code will see a duplicate.
|
||||
// it only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
next = append(next, field{name: ft.Name(), index: index, typ: ft})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(fields, func(i, j int) bool {
|
||||
x := fields
|
||||
// sort field by name, breaking ties with depth, then
|
||||
// breaking ties with "name came from boltease tag", then
|
||||
// breaking ties with index sequence
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
})
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with boltease tags are promoted.
|
||||
//
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with th ename of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
exactNameIndex := make(map[string]*field, len(fields))
|
||||
foldedNameIndex := make(map[string]*field, len(fields))
|
||||
for i, field := range fields {
|
||||
exactNameIndex[field.name] = &fields[i]
|
||||
// For historical reasons, first folded match takes precedence.
|
||||
if _, ok := foldedNameIndex[string(foldName(field.nameBytes))]; !ok {
|
||||
foldedNameIndex[string(foldName(field.nameBytes))] = &fields[i]
|
||||
}
|
||||
}
|
||||
return structFields{fields, exactNameIndex, foldedNameIndex}
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to have the
|
||||
// same name, to find the single field that dominates the others using Go's
|
||||
// embedding rules, modified by the presence of boltease tags. if there are
|
||||
// multiple top-level fields, the boolean will be false: This condition is an
|
||||
// error in Go and we skip all fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order, then by presence of tag.
|
||||
// That means that the first field is the dominant one. We need only check
|
||||
// for error cases: two fields at top level, either both tagged or neither tagged.
|
||||
if len(fields) > 1 && len(fields[0].index) == len(fields[1].index) && fields[0].tag == fields[1].tag {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
var fieldCache sync.Map // map[reflect.Type]structFields
|
||||
|
||||
func cachedTypeFields(t reflect.Type) structFields {
|
||||
if f, ok := fieldCache.Load(t); ok {
|
||||
return f.(structFields)
|
||||
}
|
||||
f, _ := fieldCache.LoadOrStore(t, typeFields(t))
|
||||
return f.(structFields)
|
||||
}
|
||||
|
||||
// foldName returns a folded string such that foldName(x) == foldName(y)
|
||||
// is identical to bytes.EqualFold(x, y).
|
||||
func foldName(in []byte) []byte {
|
||||
|
@@ -9,14 +9,14 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
//example1()
|
||||
//fmt.Println()
|
||||
example2()
|
||||
example1()
|
||||
// fmt.Println()
|
||||
// example2()
|
||||
}
|
||||
|
||||
func example1() {
|
||||
fmt.Println("# Example 1")
|
||||
db, err := boltease.Create("example.db", 0600, nil)
|
||||
db, err := boltease.Create("example.db", 0o600, nil)
|
||||
if err != nil {
|
||||
fmt.Printf("Error Opening File: %s\n", err.Error())
|
||||
os.Exit(1)
|
||||
@@ -24,7 +24,8 @@ func example1() {
|
||||
|
||||
fmt.Println("## Saving Struct")
|
||||
err = db.Save(
|
||||
[]string{"examples", "example1"},
|
||||
[]string{"examples"},
|
||||
"example1",
|
||||
ExampleType{
|
||||
Name: "Example 1",
|
||||
Age: 5,
|
||||
@@ -34,46 +35,48 @@ func example1() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println("## Example 1-1: Simple")
|
||||
var v string
|
||||
err = db.GetForInterface([]string{"examples", "example1"}, "name", &v)
|
||||
if err != nil {
|
||||
fmt.Println("Error:", err.Error())
|
||||
}
|
||||
fmt.Println("Name:", v)
|
||||
var age int
|
||||
err = db.GetForInterface([]string{"examples", "example1"}, "age", &age)
|
||||
if err != nil {
|
||||
fmt.Println("Error:", err.Error())
|
||||
}
|
||||
fmt.Println("Age:", age)
|
||||
fmt.Println("")
|
||||
/*
|
||||
fmt.Println("## Example 1-1: Simple")
|
||||
var v string
|
||||
err = db.GetForInterface([]string{"examples", "example1"}, "name", &v)
|
||||
if err != nil {
|
||||
fmt.Println("Error:", err.Error())
|
||||
}
|
||||
fmt.Println("Name:", v)
|
||||
var age int
|
||||
err = db.GetForInterface([]string{"examples", "example1"}, "age", &age)
|
||||
if err != nil {
|
||||
fmt.Println("Error:", err.Error())
|
||||
}
|
||||
fmt.Println("Age:", age)
|
||||
fmt.Println("")
|
||||
|
||||
fmt.Println("## Example 1-2: LoadStruct, simple")
|
||||
var name string
|
||||
err = db.Load(
|
||||
[]string{"examples", "example1", "name"},
|
||||
&name,
|
||||
)
|
||||
fmt.Println("Name:", name)
|
||||
if err != nil {
|
||||
fmt.Println("Err:", err)
|
||||
}
|
||||
fmt.Println("")
|
||||
fmt.Println("## Example 1-2: LoadStruct, simple")
|
||||
var name string
|
||||
err = db.Load(
|
||||
[]string{"examples", "example1", "name"},
|
||||
&name,
|
||||
)
|
||||
fmt.Println("Name:", name)
|
||||
if err != nil {
|
||||
fmt.Println("Err:", err)
|
||||
}
|
||||
fmt.Println("")
|
||||
|
||||
fmt.Println("## Example 1-3: Struct")
|
||||
fmt.Println("Loading into Struct")
|
||||
newStruct := ExampleType{}
|
||||
err = db.Load(
|
||||
[]string{"examples", "example1"},
|
||||
&newStruct,
|
||||
)
|
||||
fmt.Println(newStruct)
|
||||
fmt.Println("## Example 1-3: Struct")
|
||||
fmt.Println("Loading into Struct")
|
||||
newStruct := ExampleType{}
|
||||
err = db.Load(
|
||||
[]string{"examples", "example1"},
|
||||
&newStruct,
|
||||
)
|
||||
fmt.Println(newStruct)
|
||||
*/
|
||||
}
|
||||
|
||||
func example2() {
|
||||
fmt.Println("# Example 2")
|
||||
db, err := boltease.Create("example.db", 0600, nil)
|
||||
db, err := boltease.Create("example.db", 0o600, nil)
|
||||
if err != nil {
|
||||
fmt.Printf("Error Opening File: %s\n", err.Error())
|
||||
os.Exit(1)
|
||||
@@ -81,7 +84,8 @@ func example2() {
|
||||
fmt.Println("## Saving Struct")
|
||||
num := 12345
|
||||
err = db.Save(
|
||||
[]string{"examples", "example2"},
|
||||
[]string{"examples"},
|
||||
"example2",
|
||||
&ExampleType2{
|
||||
Name: "Example 2",
|
||||
Age: 20,
|
||||
|
go.mod (2 changed lines)
@@ -1,6 +1,6 @@
module git.bullercodeworks.com/brian/boltease

go 1.16
go 1.22

require (
	github.com/boltdb/bolt v1.3.1
tags.go (2 changed lines)
@@ -26,7 +26,7 @@ func isValidTag(s string) bool {
// tag, or the empty string. It does not include the leading comma
type tagOptions string

// parseTag splits a struct field's boltease tag into it sname and
// parseTag splits a struct field's boltease tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	tag, opt, _ := strings.Cut(tag, ",")
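Behavior sketch for parseTag, inferred from the strings.Cut call above (the remainder after the first comma becomes the tagOptions):

// parseTag("age,omitempty") -> "age", tagOptions("omitempty")
// parseTag("age")           -> "age", tagOptions("")
// parseTag(",omitempty")    -> "",    tagOptions("omitempty")
name, opts := parseTag("age,omitempty")
_ = name                       // "age"
_ = opts.Contains("omitempty") // true, as consumed by typeFields in be_encode.go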