Apply goext-specific patches to gojson
Some checks failed
Build Docker and Deploy / Run goext test-suite (push) Has been cancelled

This commit is contained in:
Mike Schwörer 2025-01-10 14:01:13 +01:00
parent c8e9c34706
commit ff821390f7
Signed by: Mikescher
GPG Key ID: D3C7172E0A70F8CF
10 changed files with 269 additions and 638 deletions

2
go.mod
View File

@ -38,7 +38,7 @@ require (
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect

2
go.sum
View File

@ -95,6 +95,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=

View File

@ -1,5 +1,5 @@
package goext
const GoextVersion = "0.0.556"
const GoextVersion = "0.0.557"
const GoextVersionTimestamp = "2025-01-09T10:41:00+0100"
const GoextVersionTimestamp = "2025-01-10T14:06:27+0100"

View File

@ -4,9 +4,12 @@ JSON serializer which serializes nil-Arrays as `[]` and nil-maps as `{}`.
Idea from: https://github.com/homelight/json
Forked from https://github.com/golang/go/tree/547e8e22fe565d65d1fd4d6e71436a5a855447b0/src/encoding/json ( tag go1.20.2 )
Forked from https://github.com/golang/go/tree/194de8fbfaf4c3ed54e1a3c1b14fc67a830b8d95/src/encoding/json ( tag go1.23.4 )
-> https://github.com/golang/go/tree/go1.23.4/src/encoding/json
Added:
- `MarshalSafeCollections()` method
- `Encoder.nilSafeSlices` and `Encoder.nilSafeMaps` fields
- `tagkey` setting to use a different struct-tag key than `json` (set on the Decoder struct)
- `jsonfilter` struct tag to filter the marshalled fields (set via MarshalSafeCollections)
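A minimal usage sketch of the additions above (illustrative, not part of this commit; the import path, the structs and the filter values are made up):

package main

import (
	"fmt"
	"strings"

	json "gogs.mikescher.com/BlackForestBytes/goext/gojson" // assumed import path
)

type User struct {
	Name    string   `json:"name"`
	Friends []string `json:"friends"`
	Secret  string   `json:"secret" jsonfilter:"internal"`
}

type LegacyUser struct {
	Name string `bson:"name"`
}

func main() {
	filter := "public"
	// Friends is a nil slice and becomes [], Secret is dropped because its
	// jsonfilter tag ("internal") does not contain the active filter ("public").
	b, _ := json.MarshalSafeCollections(User{Name: "anna"}, true, true, nil, &filter)
	fmt.Println(string(b)) // expected: {"name":"anna","friends":[]}

	// Decode using the `bson` struct tag instead of `json`.
	dec := json.NewDecoder(strings.NewReader(`{"name":"bob"}`))
	dec.TagKey("bson")
	var u LegacyUser
	if err := dec.Decode(&u); err == nil {
		fmt.Println(u.Name) // bob
	}
}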

View File

@ -1,584 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Large data benchmark.
// The JSON data is a summary of agl's changes in the
// go, webkit, and chromium open source projects.
// We benchmark converting between the JSON form
// and in-memory data structures.
package json
import (
"bytes"
"compress/gzip"
"fmt"
"internal/testenv"
"io"
"os"
"reflect"
"regexp"
"runtime"
"strings"
"sync"
"testing"
)
type codeResponse struct {
Tree *codeNode `json:"tree"`
Username string `json:"username"`
}
type codeNode struct {
Name string `json:"name"`
Kids []*codeNode `json:"kids"`
CLWeight float64 `json:"cl_weight"`
Touches int `json:"touches"`
MinT int64 `json:"min_t"`
MaxT int64 `json:"max_t"`
MeanT int64 `json:"mean_t"`
}
var codeJSON []byte
var codeStruct codeResponse
func codeInit() {
f, err := os.Open("testdata/code.json.gz")
if err != nil {
panic(err)
}
defer f.Close()
gz, err := gzip.NewReader(f)
if err != nil {
panic(err)
}
data, err := io.ReadAll(gz)
if err != nil {
panic(err)
}
codeJSON = data
if err := Unmarshal(codeJSON, &codeStruct); err != nil {
panic("unmarshal code.json: " + err.Error())
}
if data, err = Marshal(&codeStruct); err != nil {
panic("marshal code.json: " + err.Error())
}
if !bytes.Equal(data, codeJSON) {
println("different lengths", len(data), len(codeJSON))
for i := 0; i < len(data) && i < len(codeJSON); i++ {
if data[i] != codeJSON[i] {
println("re-marshal: changed at byte", i)
println("orig: ", string(codeJSON[i-10:i+10]))
println("new: ", string(data[i-10:i+10]))
break
}
}
panic("re-marshal code.json: different result")
}
}
func BenchmarkCodeEncoder(b *testing.B) {
b.ReportAllocs()
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
b.RunParallel(func(pb *testing.PB) {
enc := NewEncoder(io.Discard)
for pb.Next() {
if err := enc.Encode(&codeStruct); err != nil {
b.Fatalf("Encode error: %v", err)
}
}
})
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkCodeEncoderError(b *testing.B) {
b.ReportAllocs()
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
// Trigger an error in Marshal with cyclic data.
type Dummy struct {
Name string
Next *Dummy
}
dummy := Dummy{Name: "Dummy"}
dummy.Next = &dummy
b.RunParallel(func(pb *testing.PB) {
enc := NewEncoder(io.Discard)
for pb.Next() {
if err := enc.Encode(&codeStruct); err != nil {
b.Fatalf("Encode error: %v", err)
}
if _, err := Marshal(dummy); err == nil {
b.Fatal("Marshal error: got nil, want non-nil")
}
}
})
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkCodeMarshal(b *testing.B) {
b.ReportAllocs()
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if _, err := Marshal(&codeStruct); err != nil {
b.Fatalf("Marshal error: %v", err)
}
}
})
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkCodeMarshalError(b *testing.B) {
b.ReportAllocs()
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
// Trigger an error in Marshal with cyclic data.
type Dummy struct {
Name string
Next *Dummy
}
dummy := Dummy{Name: "Dummy"}
dummy.Next = &dummy
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if _, err := Marshal(&codeStruct); err != nil {
b.Fatalf("Marshal error: %v", err)
}
if _, err := Marshal(dummy); err == nil {
b.Fatal("Marshal error: got nil, want non-nil")
}
}
})
b.SetBytes(int64(len(codeJSON)))
}
func benchMarshalBytes(n int) func(*testing.B) {
sample := []byte("hello world")
// Use a struct pointer, to avoid an allocation when passing it as an
// interface parameter to Marshal.
v := &struct {
Bytes []byte
}{
bytes.Repeat(sample, (n/len(sample))+1)[:n],
}
return func(b *testing.B) {
for i := 0; i < b.N; i++ {
if _, err := Marshal(v); err != nil {
b.Fatalf("Marshal error: %v", err)
}
}
}
}
func benchMarshalBytesError(n int) func(*testing.B) {
sample := []byte("hello world")
// Use a struct pointer, to avoid an allocation when passing it as an
// interface parameter to Marshal.
v := &struct {
Bytes []byte
}{
bytes.Repeat(sample, (n/len(sample))+1)[:n],
}
// Trigger an error in Marshal with cyclic data.
type Dummy struct {
Name string
Next *Dummy
}
dummy := Dummy{Name: "Dummy"}
dummy.Next = &dummy
return func(b *testing.B) {
for i := 0; i < b.N; i++ {
if _, err := Marshal(v); err != nil {
b.Fatalf("Marshal error: %v", err)
}
if _, err := Marshal(dummy); err == nil {
b.Fatal("Marshal error: got nil, want non-nil")
}
}
}
}
func BenchmarkMarshalBytes(b *testing.B) {
b.ReportAllocs()
// 32 fits within encodeState.scratch.
b.Run("32", benchMarshalBytes(32))
// 256 doesn't fit in encodeState.scratch, but is small enough to
// allocate and avoid the slower base64.NewEncoder.
b.Run("256", benchMarshalBytes(256))
// 4096 is large enough that we want to avoid allocating for it.
b.Run("4096", benchMarshalBytes(4096))
}
func BenchmarkMarshalBytesError(b *testing.B) {
b.ReportAllocs()
// 32 fits within encodeState.scratch.
b.Run("32", benchMarshalBytesError(32))
// 256 doesn't fit in encodeState.scratch, but is small enough to
// allocate and avoid the slower base64.NewEncoder.
b.Run("256", benchMarshalBytesError(256))
// 4096 is large enough that we want to avoid allocating for it.
b.Run("4096", benchMarshalBytesError(4096))
}
func BenchmarkMarshalMap(b *testing.B) {
b.ReportAllocs()
m := map[string]int{
"key3": 3,
"key2": 2,
"key1": 1,
}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if _, err := Marshal(m); err != nil {
b.Fatal("Marshal:", err)
}
}
})
}
func BenchmarkCodeDecoder(b *testing.B) {
b.ReportAllocs()
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
b.RunParallel(func(pb *testing.PB) {
var buf bytes.Buffer
dec := NewDecoder(&buf)
var r codeResponse
for pb.Next() {
buf.Write(codeJSON)
// hide EOF
buf.WriteByte('\n')
buf.WriteByte('\n')
buf.WriteByte('\n')
if err := dec.Decode(&r); err != nil {
b.Fatalf("Decode error: %v", err)
}
}
})
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkUnicodeDecoder(b *testing.B) {
b.ReportAllocs()
j := []byte(`"\uD83D\uDE01"`)
b.SetBytes(int64(len(j)))
r := bytes.NewReader(j)
dec := NewDecoder(r)
var out string
b.ResetTimer()
for i := 0; i < b.N; i++ {
if err := dec.Decode(&out); err != nil {
b.Fatalf("Decode error: %v", err)
}
r.Seek(0, 0)
}
}
func BenchmarkDecoderStream(b *testing.B) {
b.ReportAllocs()
b.StopTimer()
var buf bytes.Buffer
dec := NewDecoder(&buf)
buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
var x any
if err := dec.Decode(&x); err != nil {
b.Fatalf("Decode error: %v", err)
}
ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
b.StartTimer()
for i := 0; i < b.N; i++ {
if i%300000 == 0 {
buf.WriteString(ones)
}
x = nil
switch err := dec.Decode(&x); {
case err != nil:
b.Fatalf("Decode error: %v", err)
case x != 1.0:
b.Fatalf("Decode: got %v want 1.0", i)
}
}
}
func BenchmarkCodeUnmarshal(b *testing.B) {
b.ReportAllocs()
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
var r codeResponse
if err := Unmarshal(codeJSON, &r); err != nil {
b.Fatalf("Unmarshal error: %v", err)
}
}
})
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkCodeUnmarshalReuse(b *testing.B) {
b.ReportAllocs()
if codeJSON == nil {
b.StopTimer()
codeInit()
b.StartTimer()
}
b.RunParallel(func(pb *testing.PB) {
var r codeResponse
for pb.Next() {
if err := Unmarshal(codeJSON, &r); err != nil {
b.Fatalf("Unmarshal error: %v", err)
}
}
})
b.SetBytes(int64(len(codeJSON)))
}
func BenchmarkUnmarshalString(b *testing.B) {
b.ReportAllocs()
data := []byte(`"hello, world"`)
b.RunParallel(func(pb *testing.PB) {
var s string
for pb.Next() {
if err := Unmarshal(data, &s); err != nil {
b.Fatalf("Unmarshal error: %v", err)
}
}
})
}
func BenchmarkUnmarshalFloat64(b *testing.B) {
b.ReportAllocs()
data := []byte(`3.14`)
b.RunParallel(func(pb *testing.PB) {
var f float64
for pb.Next() {
if err := Unmarshal(data, &f); err != nil {
b.Fatalf("Unmarshal error: %v", err)
}
}
})
}
func BenchmarkUnmarshalInt64(b *testing.B) {
b.ReportAllocs()
data := []byte(`3`)
b.RunParallel(func(pb *testing.PB) {
var x int64
for pb.Next() {
if err := Unmarshal(data, &x); err != nil {
b.Fatalf("Unmarshal error: %v", err)
}
}
})
}
func BenchmarkUnmarshalMap(b *testing.B) {
b.ReportAllocs()
data := []byte(`{"key1":"value1","key2":"value2","key3":"value3"}`)
b.RunParallel(func(pb *testing.PB) {
x := make(map[string]string, 3)
for pb.Next() {
if err := Unmarshal(data, &x); err != nil {
b.Fatalf("Unmarshal error: %v", err)
}
}
})
}
func BenchmarkIssue10335(b *testing.B) {
b.ReportAllocs()
j := []byte(`{"a":{ }}`)
b.RunParallel(func(pb *testing.PB) {
var s struct{}
for pb.Next() {
if err := Unmarshal(j, &s); err != nil {
b.Fatalf("Unmarshal error: %v", err)
}
}
})
}
func BenchmarkIssue34127(b *testing.B) {
b.ReportAllocs()
j := struct {
Bar string `json:"bar,string"`
}{
Bar: `foobar`,
}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if _, err := Marshal(&j); err != nil {
b.Fatalf("Marshal error: %v", err)
}
}
})
}
func BenchmarkUnmapped(b *testing.B) {
b.ReportAllocs()
j := []byte(`{"s": "hello", "y": 2, "o": {"x": 0}, "a": [1, 99, {"x": 1}]}`)
b.RunParallel(func(pb *testing.PB) {
var s struct{}
for pb.Next() {
if err := Unmarshal(j, &s); err != nil {
b.Fatalf("Unmarshal error: %v", err)
}
}
})
}
func BenchmarkTypeFieldsCache(b *testing.B) {
b.ReportAllocs()
var maxTypes int = 1e6
if testenv.Builder() != "" {
maxTypes = 1e3 // restrict cache sizes on builders
}
// Dynamically generate many new types.
types := make([]reflect.Type, maxTypes)
fs := []reflect.StructField{{
Type: reflect.TypeFor[string](),
Index: []int{0},
}}
for i := range types {
fs[0].Name = fmt.Sprintf("TypeFieldsCache%d", i)
types[i] = reflect.StructOf(fs)
}
// clearClear clears the cache. Other JSON operations, must not be running.
clearCache := func() {
fieldCache = sync.Map{}
}
// MissTypes tests the performance of repeated cache misses.
// This measures the time to rebuild a cache of size nt.
for nt := 1; nt <= maxTypes; nt *= 10 {
ts := types[:nt]
b.Run(fmt.Sprintf("MissTypes%d", nt), func(b *testing.B) {
nc := runtime.GOMAXPROCS(0)
for i := 0; i < b.N; i++ {
clearCache()
var wg sync.WaitGroup
for j := 0; j < nc; j++ {
wg.Add(1)
go func(j int) {
for _, t := range ts[(j*len(ts))/nc : ((j+1)*len(ts))/nc] {
cachedTypeFields(t)
}
wg.Done()
}(j)
}
wg.Wait()
}
})
}
// HitTypes tests the performance of repeated cache hits.
// This measures the average time of each cache lookup.
for nt := 1; nt <= maxTypes; nt *= 10 {
// Pre-warm a cache of size nt.
clearCache()
for _, t := range types[:nt] {
cachedTypeFields(t)
}
b.Run(fmt.Sprintf("HitTypes%d", nt), func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
cachedTypeFields(types[0])
}
})
})
}
}
func BenchmarkEncodeMarshaler(b *testing.B) {
b.ReportAllocs()
m := struct {
A int
B RawMessage
}{}
b.RunParallel(func(pb *testing.PB) {
enc := NewEncoder(io.Discard)
for pb.Next() {
if err := enc.Encode(&m); err != nil {
b.Fatalf("Encode error: %v", err)
}
}
})
}
func BenchmarkEncoderEncode(b *testing.B) {
b.ReportAllocs()
type T struct {
X, Y string
}
v := &T{"foo", "bar"}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if err := NewEncoder(io.Discard).Encode(v); err != nil {
b.Fatalf("Encode error: %v", err)
}
}
})
}
func BenchmarkNumberIsValid(b *testing.B) {
s := "-61657.61667E+61673"
for i := 0; i < b.N; i++ {
isValidNumber(s)
}
}
func BenchmarkNumberIsValidRegexp(b *testing.B) {
var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`)
s := "-61657.61667E+61673"
for i := 0; i < b.N; i++ {
jsonNumberRegexp.MatchString(s)
}
}
func BenchmarkUnmarshalNumber(b *testing.B) {
b.ReportAllocs()
data := []byte(`"-61657.61667E+61673"`)
var number Number
for i := 0; i < b.N; i++ {
if err := Unmarshal(data, &number); err != nil {
b.Fatal("Unmarshal:", err)
}
}
}

View File

@ -217,6 +217,7 @@ type decodeState struct {
savedError error
useNumber bool
disallowUnknownFields bool
tagkey *string
}
// readIndex returns the position of the last byte read.
@ -643,7 +644,11 @@ func (d *decodeState) object(v reflect.Value) error {
v.Set(reflect.MakeMap(t))
}
case reflect.Struct:
fields = cachedTypeFields(t)
tagkey := "json"
if d.tagkey != nil {
tagkey = *d.tagkey
}
fields = cachedTypeFields(t, tagkey)
// ok
default:
d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})

View File

@ -170,6 +170,32 @@ func Marshal(v any) ([]byte, error) {
return buf, nil
}
type IndentOpt struct {
Prefix string
Indent string
}
// MarshalSafeCollections is like Marshal except it will marshal nil maps and
// slices as '{}' and '[]' respectively instead of 'null'
func MarshalSafeCollections(v interface{}, nilSafeSlices bool, nilSafeMaps bool, indent *IndentOpt, filter *string) ([]byte, error) {
e := &encodeState{}
err := e.marshal(v, encOpts{escapeHTML: true, nilSafeSlices: nilSafeSlices, nilSafeMaps: nilSafeMaps, filter: filter})
if err != nil {
return nil, err
}
b := e.Bytes()
if indent != nil {
var buf bytes.Buffer
err = Indent(&buf, b, indent.Prefix, indent.Indent)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
} else {
return e.Bytes(), nil
}
}
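Illustration (not part of the diff): for indented output one would presumably call
	b, err := MarshalSafeCollections(v, true, true, &IndentOpt{Prefix: "", Indent: "  "}, nil)
which pipes the compact result through Indent, as implemented above.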
// MarshalIndent is like [Marshal] but applies [Indent] to format the output.
// Each JSON element in the output will begin on a new line beginning with prefix
// followed by one or more copies of indent according to the indentation nesting.
@ -319,7 +345,11 @@ func isEmptyValue(v reflect.Value) bool {
}
func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) {
valueEncoder(v)(e, v, opts)
tagkey := "json"
if opts.tagkey != nil {
tagkey = *opts.tagkey
}
valueEncoder(v, tagkey)(e, v, opts)
}
type encOpts struct {
@ -327,21 +357,30 @@ type encOpts struct {
quoted bool
// escapeHTML causes '<', '>', and '&' to be escaped in JSON strings.
escapeHTML bool
// nilSafeSlices marshals nil slices as '[]' instead of 'null'
nilSafeSlices bool
// nilSafeMaps marshals nil maps as '{}' instead of 'null'
nilSafeMaps bool
// filter is matched against the 'jsonfilter' struct tag of each field:
// a field is marshalled if it has no jsonfilter tag, or if its jsonfilter tag contains the filter value
filter *string
// tagkey selects a different struct-tag key to use instead of "json"
tagkey *string
}
type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts)
var encoderCache sync.Map // map[reflect.Type]encoderFunc
func valueEncoder(v reflect.Value) encoderFunc {
func valueEncoder(v reflect.Value, tagkey string) encoderFunc {
if !v.IsValid() {
return invalidValueEncoder
}
return typeEncoder(v.Type())
return typeEncoder(v.Type(), tagkey)
}
func typeEncoder(t reflect.Type) encoderFunc {
if fi, ok := encoderCache.Load(t); ok {
func typeEncoder(t reflect.Type, tagkey string) encoderFunc {
if fi, ok := encoderCache.Load(TagKeyTypeKey{t, tagkey}); ok {
return fi.(encoderFunc)
}
@ -354,7 +393,7 @@ func typeEncoder(t reflect.Type) encoderFunc {
f encoderFunc
)
wg.Add(1)
fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value, opts encOpts) {
fi, loaded := encoderCache.LoadOrStore(TagKeyTypeKey{t, tagkey}, encoderFunc(func(e *encodeState, v reflect.Value, opts encOpts) {
wg.Wait()
f(e, v, opts)
}))
@ -363,9 +402,9 @@ func typeEncoder(t reflect.Type) encoderFunc {
}
// Compute the real encoder and replace the indirect func with it.
f = newTypeEncoder(t, true)
f = newTypeEncoder(t, true, tagkey)
wg.Done()
encoderCache.Store(t, f)
encoderCache.Store(TagKeyTypeKey{t, tagkey}, f)
return f
}
@ -376,19 +415,19 @@ var (
// newTypeEncoder constructs an encoderFunc for a type.
// The returned encoder only checks CanAddr when allowAddr is true.
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
func newTypeEncoder(t reflect.Type, allowAddr bool, tagkey string) encoderFunc {
// If we have a non-pointer value whose type implements
// Marshaler with a value receiver, then we're better off taking
// the address of the value - otherwise we end up with an
// allocation as we cast the value to an interface.
if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(marshalerType) {
return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false, tagkey))
}
if t.Implements(marshalerType) {
return marshalerEncoder
}
if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(textMarshalerType) {
return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false, tagkey))
}
if t.Implements(textMarshalerType) {
return textMarshalerEncoder
@ -410,15 +449,15 @@ func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
case reflect.Interface:
return interfaceEncoder
case reflect.Struct:
return newStructEncoder(t)
return newStructEncoder(t, tagkey)
case reflect.Map:
return newMapEncoder(t)
return newMapEncoder(t, tagkey)
case reflect.Slice:
return newSliceEncoder(t)
return newSliceEncoder(t, tagkey)
case reflect.Array:
return newArrayEncoder(t)
return newArrayEncoder(t, tagkey)
case reflect.Pointer:
return newPtrEncoder(t)
return newPtrEncoder(t, tagkey)
default:
return unsupportedTypeEncoder
}
@ -703,6 +742,8 @@ FieldLoop:
if f.omitEmpty && isEmptyValue(fv) {
continue
} else if !matchesJSONFilter(f.jsonfilter, opts.filter) {
continue
}
e.WriteByte(next)
next = ','
@ -721,8 +762,27 @@ FieldLoop:
}
}
func newStructEncoder(t reflect.Type) encoderFunc {
se := structEncoder{fields: cachedTypeFields(t)}
func matchesJSONFilter(filter jsonfilter, value *string) bool {
if len(filter) == 0 {
return true // no filter in struct
}
if value == nil || *value == "" {
return false // no filter set, but struct has filter, return false
}
if len(filter) == 1 && filter[0] == "-" {
return false
}
if filter.Contains(*value) {
return true
}
if filter.Contains("*") {
return true
}
return false
}
func newStructEncoder(t reflect.Type, tagkey string) encoderFunc {
se := structEncoder{fields: cachedTypeFields(t, tagkey)}
return se.encode
}
@ -732,7 +792,11 @@ type mapEncoder struct {
func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
if v.IsNil() {
if opts.nilSafeMaps {
e.WriteString("{}")
} else {
e.WriteString("null")
}
return
}
if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
@ -775,7 +839,7 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
e.ptrLevel--
}
func newMapEncoder(t reflect.Type) encoderFunc {
func newMapEncoder(t reflect.Type, tagkey string) encoderFunc {
switch t.Key().Kind() {
case reflect.String,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
@ -785,13 +849,17 @@ func newMapEncoder(t reflect.Type) encoderFunc {
return unsupportedTypeEncoder
}
}
me := mapEncoder{typeEncoder(t.Elem())}
me := mapEncoder{typeEncoder(t.Elem(), tagkey)}
return me.encode
}
func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
func encodeByteSlice(e *encodeState, v reflect.Value, opts encOpts) {
if v.IsNil() {
if opts.nilSafeSlices {
e.WriteString(`""`)
} else {
e.WriteString("null")
}
return
}
@ -810,7 +878,11 @@ type sliceEncoder struct {
func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
if v.IsNil() {
if opts.nilSafeSlices {
e.WriteString("[]")
} else {
e.WriteString("null")
}
return
}
if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
@ -832,7 +904,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
e.ptrLevel--
}
func newSliceEncoder(t reflect.Type) encoderFunc {
func newSliceEncoder(t reflect.Type, tagkey string) encoderFunc {
// Byte slices get special treatment; arrays don't.
if t.Elem().Kind() == reflect.Uint8 {
p := reflect.PointerTo(t.Elem())
@ -840,7 +912,7 @@ func newSliceEncoder(t reflect.Type) encoderFunc {
return encodeByteSlice
}
}
enc := sliceEncoder{newArrayEncoder(t)}
enc := sliceEncoder{newArrayEncoder(t, tagkey)}
return enc.encode
}
@ -860,8 +932,8 @@ func (ae arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
e.WriteByte(']')
}
func newArrayEncoder(t reflect.Type) encoderFunc {
enc := arrayEncoder{typeEncoder(t.Elem())}
func newArrayEncoder(t reflect.Type, tagkey string) encoderFunc {
enc := arrayEncoder{typeEncoder(t.Elem(), tagkey)}
return enc.encode
}
@ -888,8 +960,8 @@ func (pe ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
e.ptrLevel--
}
func newPtrEncoder(t reflect.Type) encoderFunc {
enc := ptrEncoder{typeEncoder(t.Elem())}
func newPtrEncoder(t reflect.Type, tagkey string) encoderFunc {
enc := ptrEncoder{typeEncoder(t.Elem(), tagkey)}
return enc.encode
}
@ -1048,11 +1120,24 @@ type field struct {
index []int
typ reflect.Type
omitEmpty bool
jsonfilter jsonfilter
quoted bool
encoder encoderFunc
}
// jsonfilter stores the value of the jsonfilter struct tag
type jsonfilter []string
func (j jsonfilter) Contains(t string) bool {
for _, tag := range j {
if t == tag {
return true
}
}
return false
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
@ -1066,7 +1151,7 @@ type field struct {
// See go.dev/issue/67401.
//
//go:linkname typeFields
func typeFields(t reflect.Type) structFields {
func typeFields(t reflect.Type, tagkey string) structFields {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
@ -1111,7 +1196,7 @@ func typeFields(t reflect.Type) structFields {
// Ignore unexported non-embedded fields.
continue
}
tag := sf.Tag.Get("json")
tag := sf.Tag.Get(tagkey)
if tag == "-" {
continue
}
@ -1119,6 +1204,13 @@ func typeFields(t reflect.Type) structFields {
if !isValidTag(name) {
name = ""
}
var jsonfilterVal []string
jsonfilterTag := sf.Tag.Get("jsonfilter")
if jsonfilterTag != "" {
jsonfilterVal = strings.Split(jsonfilterTag, ",")
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
@ -1154,6 +1246,7 @@ func typeFields(t reflect.Type) structFields {
index: index,
typ: ft,
omitEmpty: opts.Contains("omitempty"),
jsonfilter: jsonfilterVal,
quoted: quoted,
}
field.nameBytes = []byte(field.name)
@ -1237,7 +1330,7 @@ func typeFields(t reflect.Type) structFields {
for i := range fields {
f := &fields[i]
f.encoder = typeEncoder(typeByIndex(t, f.index))
f.encoder = typeEncoder(typeByIndex(t, f.index), tagkey)
}
exactNameIndex := make(map[string]*field, len(fields))
foldedNameIndex := make(map[string]*field, len(fields))
@ -1267,14 +1360,14 @@ func dominantField(fields []field) (field, bool) {
return fields[0], true
}
var fieldCache sync.Map // map[reflect.Type]structFields
var fieldCache sync.Map // map[reflect.Type + tagkey]structFields
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) structFields {
if f, ok := fieldCache.Load(t); ok {
func cachedTypeFields(t reflect.Type, tagkey string) structFields {
if f, ok := fieldCache.Load(TagKeyTypeKey{t, tagkey}); ok {
return f.(structFields)
}
f, _ := fieldCache.LoadOrStore(t, typeFields(t))
f, _ := fieldCache.LoadOrStore(TagKeyTypeKey{t, tagkey}, typeFields(t, tagkey))
return f.(structFields)
}
@ -1284,3 +1377,8 @@ func mayAppendQuote(b []byte, quoted bool) []byte {
}
return b
}
type TagKeyTypeKey struct {
Type reflect.Type
TagKey string
}
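For reference, a sketch (not part of the diff) of how the jsonfilter rules in matchesJSONFilter above play out; the struct and filter values are made up:

type Demo struct {
	A string `json:"a"`                        // no jsonfilter tag: always marshalled
	B string `json:"b" jsonfilter:"admin"`     // only when the active filter is "admin"
	C string `json:"c" jsonfilter:"admin,web"` // when the active filter is "admin" or "web"
	D string `json:"d" jsonfilter:"-"`         // never marshalled
	E string `json:"e" jsonfilter:"*"`         // marshalled for any non-empty filter
}

// filter := "web"
// MarshalSafeCollections(Demo{}, true, true, nil, &filter) should emit only "a", "c" and "e".
// With a nil or empty filter, every field that carries a jsonfilter tag is dropped.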

View File

@ -1219,3 +1219,55 @@ func TestIssue63379(t *testing.T) {
}
}
}
func TestMarshalSafeCollections(t *testing.T) {
var (
nilSlice []interface{}
pNilSlice *[]interface{}
nilMap map[string]interface{}
pNilMap *map[string]interface{}
)
type (
nilSliceStruct struct {
NilSlice []interface{} `json:"nil_slice"`
}
nilMapStruct struct {
NilMap map[string]interface{} `json:"nil_map"`
}
testWithFilter struct {
Test1 string `json:"test1" jsonfilter:"FILTERONE"`
Test2 string `json:"test2" jsonfilter:"FILTERTWO"`
}
)
tests := []struct {
in interface{}
want string
}{
{nilSlice, "[]"},
{[]interface{}{}, "[]"},
{make([]interface{}, 0), "[]"},
{[]int{1, 2, 3}, "[1,2,3]"},
{pNilSlice, "null"},
{nilSliceStruct{}, "{\"nil_slice\":[]}"},
{nilMap, "{}"},
{map[string]interface{}{}, "{}"},
{make(map[string]interface{}, 0), "{}"},
{map[string]interface{}{"1": 1, "2": 2, "3": 3}, "{\"1\":1,\"2\":2,\"3\":3}"},
{pNilMap, "null"},
{nilMapStruct{}, "{\"nil_map\":{}}"},
{testWithFilter{}, "{\"test1\":\"\"}"},
}
filter := "FILTERONE"
for i, tt := range tests {
b, err := MarshalSafeCollections(tt.in, true, true, nil, &filter)
if err != nil {
t.Errorf("test %d, unexpected failure: %v", i, err)
}
if got := string(b); got != tt.want {
t.Errorf("test %d, Marshal(%#v) = %q, want %q", i, tt.in, got, tt.want)
}
}
}

52
gojson/gionic.go Normal file
View File

@ -0,0 +1,52 @@
package json
import (
"net/http"
)
// Render interface is copied from github.com/gin-gonic/gin@v1.8.1/render/render.go
type Render interface {
// Render writes data with custom ContentType.
Render(http.ResponseWriter) error
// WriteContentType writes custom ContentType.
WriteContentType(w http.ResponseWriter)
}
type GoJsonRender struct {
Data any
NilSafeSlices bool
NilSafeMaps bool
Indent *IndentOpt
Filter *string
}
func (r GoJsonRender) Render(w http.ResponseWriter) error {
header := w.Header()
if val := header["Content-Type"]; len(val) == 0 {
header["Content-Type"] = []string{"application/json; charset=utf-8"}
}
jsonBytes, err := MarshalSafeCollections(r.Data, r.NilSafeSlices, r.NilSafeMaps, r.Indent, r.Filter)
if err != nil {
panic(err)
}
_, err = w.Write(jsonBytes)
if err != nil {
panic(err)
}
return nil
}
func (r GoJsonRender) RenderString() (string, error) {
jsonBytes, err := MarshalSafeCollections(r.Data, r.NilSafeSlices, r.NilSafeMaps, r.Indent, r.Filter)
if err != nil {
panic(err)
}
return string(jsonBytes), nil
}
func (r GoJsonRender) WriteContentType(w http.ResponseWriter) {
header := w.Header()
if val := header["Content-Type"]; len(val) == 0 {
header["Content-Type"] = []string{"application/json; charset=utf-8"}
}
}
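A hedged sketch of wiring GoJsonRender into a gin handler (gin itself is not part of this commit; the gojson import path is assumed):

package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	json "gogs.mikescher.com/BlackForestBytes/goext/gojson" // assumed import path
)

func main() {
	r := gin.New()
	r.GET("/users", func(c *gin.Context) {
		var users []string // nil slice, rendered as [] thanks to NilSafeSlices
		filter := "public"
		// gin.Context.Render accepts any value implementing gin's render.Render,
		// which GoJsonRender satisfies via Render and WriteContentType.
		c.Render(http.StatusOK, json.GoJsonRender{
			Data:          map[string]any{"users": users},
			NilSafeSlices: true,
			NilSafeMaps:   true,
			Filter:        &filter,
		})
	})
	_ = r.Run(":8080")
}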

View File

@ -41,6 +41,9 @@ func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// non-ignored, exported fields in the destination.
func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true }
// TagKey sets a different struct-tag key to be used instead of "json"
func (dec *Decoder) TagKey(v string) { dec.d.tagkey = &v }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//