
encoding/json: use sync.Map for field cache

The previous type cache is quadratic in time when new types are
continually encountered. Now that it is possible to dynamically
create new types with the reflect package, this can cause json to
perform very poorly.

Switch to sync.Map, which does well once the cache reaches steady state
and also handles occasional updates in better than quadratic time.
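
As a rough sketch of the difference (hypothetical names oldCache, newCache,
and cachedValue; the package's real code is in the diff below), the two
patterns look like this:

	// Sketch only: contrasts the copy-on-write cache with a sync.Map cache.
	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// oldCache mimics the previous copy-on-write cache: every miss copies the
	// entire existing map, so inserting N distinct keys does O(N^2) total work.
	type oldCache struct {
		value atomic.Value // holds a map[string]int
		mu    sync.Mutex   // used only by writers
	}

	func (c *oldCache) get(k string, compute func() int) int {
		m, _ := c.value.Load().(map[string]int)
		if v, ok := m[k]; ok {
			return v
		}
		v := compute()
		c.mu.Lock()
		m, _ = c.value.Load().(map[string]int)
		newM := make(map[string]int, len(m)+1)
		for k2, v2 := range m { // the quadratic part: copy everything on each insert
			newM[k2] = v2
		}
		newM[k] = v
		c.value.Store(newM)
		c.mu.Unlock()
		return v
	}

	// newCache mimics the sync.Map approach: a miss is a single LoadOrStore
	// and steady-state hits never trigger a copy.
	var newCache sync.Map // map[string]int

	func cachedValue(k string, compute func() int) int {
		if v, ok := newCache.Load(k); ok {
			return v.(int)
		}
		v, _ := newCache.LoadOrStore(k, compute())
		return v.(int)
	}

	func main() {
		c := new(oldCache)
		for i := 0; i < 1000; i++ {
			k := fmt.Sprintf("key%d", i)
			c.get(k, func() int { return len(k) })       // copies ~i entries
			cachedValue(k, func() int { return len(k) }) // one LoadOrStore
		}
		fmt.Println(c.get("key42", nil), cachedValue("key42", nil))
	}

With LoadOrStore a racing miss may still compute the value more than once,
but only one result is kept; what goes away is copying the entire map on
every insert.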

benchmark                                     old ns/op      new ns/op     delta
BenchmarkTypeFieldsCache/MissTypes1-8         14817          16202         +9.35%
BenchmarkTypeFieldsCache/MissTypes10-8        70926          69144         -2.51%
BenchmarkTypeFieldsCache/MissTypes100-8       976467         208973        -78.60%
BenchmarkTypeFieldsCache/MissTypes1000-8      79520162       1750371       -97.80%
BenchmarkTypeFieldsCache/MissTypes10000-8     6873625837     16847806      -99.75%
BenchmarkTypeFieldsCache/HitTypes1000-8       7.51           8.80          +17.18%
BenchmarkTypeFieldsCache/HitTypes10000-8      7.58           8.68          +14.51%

The old implementation takes 12 minutes just to build a cache of size 1e5
due to the quadratic behavior, so I did not bother benchmarking sizes
above that.
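
As a back-of-the-envelope check on that claim (assuming, as in the removed
code below, that each miss copies the whole existing map):

	// Rough arithmetic sketch, not part of the change: total entries copied
	// while building a cache of n types under the copy-on-write scheme.
	package main

	import "fmt"

	func main() {
		const n = 1e5 // cache size from the paragraph above
		var copied int64
		for i := int64(0); i < n; i++ {
			copied += i // the i-th miss copies the i entries already cached
		}
		fmt.Printf("building a cache of %v types copies ~%.1e entries\n", int64(n), float64(copied))
		// With sync.Map each miss is a single LoadOrStore, so total insertion
		// work grows roughly linearly in n instead.
	}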

Change-Id: I5e6facc1eb8e1b80e5ca285e4dd2cc8815618dad
Reviewed-on: https://go-review.googlesource.com/76850
Run-TryBot: Joe Tsai <thebrokentoaster@gmail.com>
Reviewed-by: Bryan Mills <bcmills@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Joe Tsai 2017-11-09 17:33:12 -08:00 committed by Joe Tsai
parent e658b85f26
commit f0756ca2ea
2 changed files with 73 additions and 27 deletions

src/encoding/json/bench_test.go

@@ -13,9 +13,14 @@ package json
 import (
 	"bytes"
 	"compress/gzip"
+	"fmt"
+	"internal/testenv"
 	"io/ioutil"
 	"os"
+	"reflect"
+	"runtime"
 	"strings"
+	"sync"
 	"testing"
 )
 
@@ -265,3 +270,66 @@ func BenchmarkUnmapped(b *testing.B) {
 		}
 	})
 }
+
+func BenchmarkTypeFieldsCache(b *testing.B) {
+	var maxTypes int = 1e6
+	if testenv.Builder() != "" {
+		maxTypes = 1e3 // restrict cache sizes on builders
+	}
+
+	// Dynamically generate many new types.
+	types := make([]reflect.Type, maxTypes)
+	fs := []reflect.StructField{{
+		Type:  reflect.TypeOf(""),
+		Index: []int{0},
+	}}
+	for i := range types {
+		fs[0].Name = fmt.Sprintf("TypeFieldsCache%d", i)
+		types[i] = reflect.StructOf(fs)
+	}
+
+	// clearCache clears the cache. Other JSON operations must not be running.
+	clearCache := func() {
+		fieldCache = sync.Map{}
+	}
+
+	// MissTypes tests the performance of repeated cache misses.
+	// This measures the time to rebuild a cache of size nt.
+	for nt := 1; nt <= maxTypes; nt *= 10 {
+		ts := types[:nt]
+		b.Run(fmt.Sprintf("MissTypes%d", nt), func(b *testing.B) {
+			nc := runtime.GOMAXPROCS(0)
+			for i := 0; i < b.N; i++ {
+				clearCache()
+				var wg sync.WaitGroup
+				for j := 0; j < nc; j++ {
+					wg.Add(1)
+					go func(j int) {
+						for _, t := range ts[(j*len(ts))/nc : ((j+1)*len(ts))/nc] {
+							cachedTypeFields(t)
+						}
+						wg.Done()
+					}(j)
+				}
+				wg.Wait()
+			}
+		})
+	}
+
+	// HitTypes tests the performance of repeated cache hits.
+	// This measures the average time of each cache lookup.
+	for nt := 1; nt <= maxTypes; nt *= 10 {
+		// Pre-warm a cache of size nt.
+		clearCache()
+		for _, t := range types[:nt] {
+			cachedTypeFields(t)
+		}
+		b.Run(fmt.Sprintf("HitTypes%d", nt), func(b *testing.B) {
+			b.RunParallel(func(pb *testing.PB) {
+				for pb.Next() {
+					cachedTypeFields(types[0])
+				}
+			})
+		})
+	}
+}

src/encoding/json/encode.go

@@ -21,7 +21,6 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"unicode"
 	"unicode/utf8"
 )
@@ -1258,34 +1257,13 @@ func dominantField(fields []field) (field, bool) {
 	return fields[0], true
 }
 
-var fieldCache struct {
-	value atomic.Value // map[reflect.Type][]field
-	mu    sync.Mutex   // used only by writers
-}
+var fieldCache sync.Map // map[reflect.Type][]field
 
 // cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
 func cachedTypeFields(t reflect.Type) []field {
-	m, _ := fieldCache.value.Load().(map[reflect.Type][]field)
-	f := m[t]
-	if f != nil {
-		return f
+	if f, ok := fieldCache.Load(t); ok {
+		return f.([]field)
 	}
-
-	// Compute fields without lock.
-	// Might duplicate effort but won't hold other computations back.
-	f = typeFields(t)
-	if f == nil {
-		f = []field{}
-	}
-
-	fieldCache.mu.Lock()
-	m, _ = fieldCache.value.Load().(map[reflect.Type][]field)
-	newM := make(map[reflect.Type][]field, len(m)+1)
-	for k, v := range m {
-		newM[k] = v
-	}
-	newM[t] = f
-	fieldCache.value.Store(newM)
-	fieldCache.mu.Unlock()
-	return f
+	f, _ := fieldCache.LoadOrStore(t, typeFields(t))
+	return f.([]field)
 }
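
The added benchmark can be run from a Go source checkout with something like
(illustrative invocation; any -bench regexp matching the name works):

	go test -run=NONE -bench=TypeFieldsCache encoding/json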