// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package json implements encoding and decoding of JSON as defined in
// RFC 4627. The mapping between JSON and Go values is described
// in the documentation for the Marshal and Unmarshal functions.
//
// See "JSON and Go" for an introduction to this package:
// https://golang.org/doc/articles/json_and_go.html
package json

import (
	"bytes"
	"encoding"
	"encoding/base64"
	"fmt"
	"math"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"unicode"
	"unicode/utf8"
)

// Marshal returns the JSON encoding of v.
//
// Marshal traverses the value v recursively.
// If an encountered value implements the Marshaler interface
// and is not a nil pointer, Marshal calls its MarshalJSON method
// to produce JSON. If no MarshalJSON method is present but the
// value implements encoding.TextMarshaler instead, Marshal calls
// its MarshalText method.
// The nil pointer exception is not strictly necessary
// but mimics a similar, necessary exception in the behavior of
// UnmarshalJSON.
//
// Otherwise, Marshal uses the following type-dependent default encodings:
//
// Boolean values encode as JSON booleans.
//
// Floating point, integer, and Number values encode as JSON numbers.
//
// String values encode as JSON strings coerced to valid UTF-8,
// replacing invalid bytes with the Unicode replacement rune.
// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
// to keep some browsers from misinterpreting JSON output as HTML.
// Ampersand "&" is also escaped to "\u0026" for the same reason.
//
// Array and slice values encode as JSON arrays, except that
// []byte encodes as a base64-encoded string, and a nil slice
// encodes as the null JSON value.
//
// Struct values encode as JSON objects. Each exported struct field
// becomes a member of the object unless
//   - the field's tag is "-", or
//   - the field is empty and its tag specifies the "omitempty" option.
// The empty values are false, 0, any
// nil pointer or interface value, and any array, slice, map, or string of
// length zero. The object's default key string is the struct field name
// but can be specified in the struct field's tag value. The "json" key in
// the struct field's tag value is the key name, followed by an optional comma
// and options. Examples:
//
//   // Field is ignored by this package.
//   Field int `json:"-"`
//
//   // Field appears in JSON as key "myName".
//   Field int `json:"myName"`
//
//   // Field appears in JSON as key "myName" and
//   // the field is omitted from the object if its value is empty,
//   // as defined above.
//   Field int `json:"myName,omitempty"`
//
//   // Field appears in JSON as key "Field" (the default), but
//   // the field is skipped if empty.
//   // Note the leading comma.
//   Field int `json:",omitempty"`
//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// integer, or boolean types. This extra level of encoding is sometimes used
// when communicating with JavaScript programs:
//
//   Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, dollar signs, percent signs, hyphens,
// underscores and slashes.
//
// Anonymous struct fields are usually marshaled as if their inner exported fields
// were fields in the outer struct, subject to the usual Go visibility rules amended
// as described in the next paragraph.
// An anonymous struct field with a name given in its JSON tag is treated as
// having that name, rather than being anonymous.
// An anonymous struct field of interface type is treated the same as having
// that type as its name, rather than being anonymous.
//
// The Go visibility rules for struct fields are amended for JSON when
// deciding which field to marshal or unmarshal. If there are
// multiple fields at the same level, and that level is the least
// nested (and would therefore be the nesting level selected by the
// usual Go rules), the following extra rules apply:
//
// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
// even if there are multiple untagged fields that would otherwise conflict.
// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
//
// Handling of anonymous struct fields is new in Go 1.1.
// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
// an anonymous struct field in both current and earlier versions, give the field
// a JSON tag of "-".
//
// Map values encode as JSON objects. The map's key type must either be a string
// or implement encoding.TextMarshaler. The map keys are used as JSON object
// keys, subject to the UTF-8 coercion described for string values above.
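//
// For example, map keys are sorted in the output (see mapEncoder below);
// the names here are illustrative:
//
//   m := map[string]int{"b": 2, "a": 1}
//   b, _ := Marshal(m)
//   // b == []byte(`{"a":1,"b":2}`)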
//
// Pointer values encode as the value pointed to.
// A nil pointer encodes as the null JSON value.
//
// Interface values encode as the value contained in the interface.
// A nil interface value encodes as the null JSON value.
//
// Channel, complex, and function values cannot be encoded in JSON.
// Attempting to encode such a value causes Marshal to return
// an UnsupportedTypeError.
//
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an infinite recursion.
//
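// A minimal usage sketch; ColorGroup and its fields are illustrative:
//
//   type ColorGroup struct {
//       ID     int      `json:"id"`
//       Name   string   `json:"name,omitempty"`
//       Colors []string `json:"colors,omitempty"`
//   }
//   b, err := Marshal(ColorGroup{ID: 1, Name: "Reds"})
//   // b == []byte(`{"id":1,"name":"Reds"}`), err == nil
//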
func Marshal(v interface{}) ([]byte, error) {
	e := &encodeState{}
	err := e.marshal(v)
	if err != nil {
		return nil, err
	}
	return e.Bytes(), nil
}

// MarshalIndent is like Marshal but applies Indent to format the output.
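// For illustration (the value here is arbitrary):
//
//   b, _ := MarshalIndent(map[string]int{"a": 1}, "", "  ")
//   // string(b) == "{\n  \"a\": 1\n}"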
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
	b, err := Marshal(v)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	err = Indent(&buf, b, prefix, indent)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must
// be used.
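//
// A short sketch of the escaping:
//
//   var buf bytes.Buffer
//   HTMLEscape(&buf, []byte(`{"html":"<b>&</b>"}`))
//   // buf.String() == `{"html":"\u003cb\u003e\u0026\u003c/b\u003e"}`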
func HTMLEscape(dst *bytes.Buffer, src []byte) {
	// The characters can only appear in string literals,
	// so just scan the string one byte at a time.
	start := 0
	for i, c := range src {
		if c == '<' || c == '>' || c == '&' {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u00`)
			dst.WriteByte(hex[c>>4])
			dst.WriteByte(hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u202`)
			dst.WriteByte(hex[src[i+2]&0xF])
			start = i + 3
		}
	}
	if start < len(src) {
		dst.Write(src[start:])
	}
}

// Marshaler is the interface implemented by types that
// can marshal themselves into valid JSON.
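//
// For illustration, a sketch of an implementation; the Celsius type and
// its formatting are arbitrary:
//
//   type Celsius float64
//
//   func (c Celsius) MarshalJSON() ([]byte, error) {
//       // Emit the temperature as a JSON string such as "21.5 C".
//       return []byte(strconv.Quote(fmt.Sprintf("%.1f C", float64(c)))), nil
//   }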
type Marshaler interface {
	MarshalJSON() ([]byte, error)
}

// An UnsupportedTypeError is returned by Marshal when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
	Type reflect.Type
}

func (e *UnsupportedTypeError) Error() string {
	return "json: unsupported type: " + e.Type.String()
}

type UnsupportedValueError struct {
	Value reflect.Value
	Str   string
}

func (e *UnsupportedValueError) Error() string {
	return "json: unsupported value: " + e.Str
}

// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
// attempting to encode a string value with invalid UTF-8 sequences.
// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
// replacing invalid bytes with the Unicode replacement rune U+FFFD.
// This error is no longer generated but is kept for backwards compatibility
// with programs that might mention it.
type InvalidUTF8Error struct {
	S string // the whole string value that caused the error
}

func (e *InvalidUTF8Error) Error() string {
	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
}

type MarshalerError struct {
	Type reflect.Type
	Err  error
}

func (e *MarshalerError) Error() string {
	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
}

var hex = "0123456789abcdef"

// An encodeState encodes JSON into a bytes.Buffer.
type encodeState struct {
	bytes.Buffer // accumulated output
	scratch      [64]byte
}

var encodeStatePool sync.Pool

func newEncodeState() *encodeState {
	if v := encodeStatePool.Get(); v != nil {
		e := v.(*encodeState)
		e.Reset()
		return e
	}
	return new(encodeState)
}

func (e *encodeState) marshal(v interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			if s, ok := r.(string); ok {
				panic(s)
			}
			err = r.(error)
		}
	}()
	e.reflectValue(reflect.ValueOf(v))
	return nil
}

func (e *encodeState) error(err error) {
	panic(err)
}
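
// isEmptyValue reports whether v counts as empty for the ",omitempty"
// option: false, 0, a nil pointer or interface value, or an array,
// slice, map, or string of length zero.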
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	}
	return false
}

func (e *encodeState) reflectValue(v reflect.Value) {
	valueEncoder(v)(e, v, false)
}
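
// An encoderFunc writes the JSON encoding of v to e. The quoted flag
// reports whether the value should additionally be wrapped in a JSON
// string, as requested by the ",string" struct tag option.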
type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)

var encoderCache struct {
	sync.RWMutex
	m map[reflect.Type]encoderFunc
}

func valueEncoder(v reflect.Value) encoderFunc {
	if !v.IsValid() {
		return invalidValueEncoder
	}
	return typeEncoder(v.Type())
}

func typeEncoder(t reflect.Type) encoderFunc {
	encoderCache.RLock()
	f := encoderCache.m[t]
	encoderCache.RUnlock()
	if f != nil {
		return f
	}

	// To deal with recursive types, populate the map with an
	// indirect func before we build it. This type waits on the
	// real func (f) to be ready and then calls it. This indirect
	// func is only used for recursive types.
	encoderCache.Lock()
	if encoderCache.m == nil {
		encoderCache.m = make(map[reflect.Type]encoderFunc)
	}
	var wg sync.WaitGroup
	wg.Add(1)
	encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
		wg.Wait()
		f(e, v, quoted)
	}
	encoderCache.Unlock()

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = newTypeEncoder(t, true)
	wg.Done()
	encoderCache.Lock()
	encoderCache.m[t] = f
	encoderCache.Unlock()
	return f
}

var (
	marshalerType     = reflect.TypeOf(new(Marshaler)).Elem()
	textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
)

// newTypeEncoder constructs an encoderFunc for a type.
// The returned encoder only checks CanAddr when allowAddr is true.
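// Passing allowAddr=false when building the fallback encoder inside
// newCondAddrEncoder prevents infinite recursion for types whose pointer
// type implements Marshaler or encoding.TextMarshaler.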
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
	if t.Implements(marshalerType) {
		return marshalerEncoder
	}
	if t.Kind() != reflect.Ptr && allowAddr {
		if reflect.PtrTo(t).Implements(marshalerType) {
			return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
		}
	}

	if t.Implements(textMarshalerType) {
		return textMarshalerEncoder
	}
	if t.Kind() != reflect.Ptr && allowAddr {
		if reflect.PtrTo(t).Implements(textMarshalerType) {
			return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
		}
	}

	switch t.Kind() {
	case reflect.Bool:
		return boolEncoder
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return intEncoder
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return uintEncoder
	case reflect.Float32:
		return float32Encoder
	case reflect.Float64:
		return float64Encoder
	case reflect.String:
		return stringEncoder
	case reflect.Interface:
		return interfaceEncoder
	case reflect.Struct:
		return newStructEncoder(t)
	case reflect.Map:
		return newMapEncoder(t)
	case reflect.Slice:
		return newSliceEncoder(t)
	case reflect.Array:
		return newArrayEncoder(t)
	case reflect.Ptr:
		return newPtrEncoder(t)
	default:
		return unsupportedTypeEncoder
	}
}

func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
	e.WriteString("null")
}

func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.Kind() == reflect.Ptr && v.IsNil() {
		e.WriteString("null")
		return
	}
	m := v.Interface().(Marshaler)
	b, err := m.MarshalJSON()
	if err == nil {
		// copy JSON into buffer, checking validity.
		err = compact(&e.Buffer, b, true)
	}
	if err != nil {
		e.error(&MarshalerError{v.Type(), err})
	}
}

func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
	va := v.Addr()
	if va.IsNil() {
		e.WriteString("null")
		return
	}
	m := va.Interface().(Marshaler)
	b, err := m.MarshalJSON()
	if err == nil {
		// copy JSON into buffer, checking validity.
		err = compact(&e.Buffer, b, true)
	}
	if err != nil {
		e.error(&MarshalerError{v.Type(), err})
	}
}

func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.Kind() == reflect.Ptr && v.IsNil() {
		e.WriteString("null")
		return
	}
	m := v.Interface().(encoding.TextMarshaler)
	b, err := m.MarshalText()
	if err != nil {
		e.error(&MarshalerError{v.Type(), err})
	}
	e.stringBytes(b)
}

func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
	va := v.Addr()
	if va.IsNil() {
		e.WriteString("null")
		return
	}
	m := va.Interface().(encoding.TextMarshaler)
	b, err := m.MarshalText()
	if err != nil {
		e.error(&MarshalerError{v.Type(), err})
	}
	e.stringBytes(b)
}

func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if quoted {
		e.WriteByte('"')
	}
	if v.Bool() {
		e.WriteString("true")
	} else {
		e.WriteString("false")
	}
	if quoted {
		e.WriteByte('"')
	}
}

func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
	b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
	if quoted {
		e.WriteByte('"')
	}
	e.Write(b)
	if quoted {
		e.WriteByte('"')
	}
}

func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
	b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
	if quoted {
		e.WriteByte('"')
	}
	e.Write(b)
	if quoted {
		e.WriteByte('"')
	}
}

type floatEncoder int // number of bits

func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
	f := v.Float()
	if math.IsInf(f, 0) || math.IsNaN(f) {
		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
	}
	b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
	if quoted {
		e.WriteByte('"')
	}
	e.Write(b)
	if quoted {
		e.WriteByte('"')
	}
}

var (
	float32Encoder = (floatEncoder(32)).encode
	float64Encoder = (floatEncoder(64)).encode
)

func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.Type() == numberType {
		numStr := v.String()
		// In Go 1.5 an empty Number encoded to "0". The empty string is not
		// a valid number literal, but we keep that behavior for compatibility
		// and check validity only after this substitution.
		if numStr == "" {
			numStr = "0" // Number's zero-val
		}
		if !isValidNumber(numStr) {
			e.error(fmt.Errorf("json: invalid number literal %q", numStr))
		}
		e.WriteString(numStr)
		return
	}
	if quoted {
		sb, err := Marshal(v.String())
		if err != nil {
			e.error(err)
		}
		e.string(string(sb))
	} else {
		e.string(v.String())
	}
}

func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	e.reflectValue(v.Elem())
}

func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
	e.error(&UnsupportedTypeError{v.Type()})
}

type structEncoder struct {
	fields    []field
	fieldEncs []encoderFunc
}

func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
	e.WriteByte('{')
	first := true
	for i, f := range se.fields {
		fv := fieldByIndex(v, f.index)
		if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
			continue
		}
		if first {
			first = false
		} else {
			e.WriteByte(',')
		}
		e.string(f.name)
		e.WriteByte(':')
		se.fieldEncs[i](e, fv, f.quoted)
	}
	e.WriteByte('}')
}

func newStructEncoder(t reflect.Type) encoderFunc {
	fields := cachedTypeFields(t)
	se := &structEncoder{
		fields:    fields,
		fieldEncs: make([]encoderFunc, len(fields)),
	}
	for i, f := range fields {
		se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
	}
	return se.encode
}

type mapEncoder struct {
	elemEnc encoderFunc
}

func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	e.WriteByte('{')

	// Extract and sort the keys.
	keys := v.MapKeys()
	sv := make([]reflectWithString, len(keys))
	for i, v := range keys {
		sv[i].v = v
		if err := sv[i].resolve(); err != nil {
			e.error(&MarshalerError{v.Type(), err})
		}
	}
	sort.Sort(byString(sv))

	for i, kv := range sv {
		if i > 0 {
			e.WriteByte(',')
		}
		e.string(kv.s)
		e.WriteByte(':')
		me.elemEnc(e, v.MapIndex(kv.v), false)
	}
	e.WriteByte('}')
}
|
2010-04-21 17:40:53 -06:00
|
|
|
|
2013-09-23 20:57:19 -06:00
|
|
|
func newMapEncoder(t reflect.Type) encoderFunc {
|
2016-03-08 13:41:35 -07:00
|
|
|
if t.Key().Kind() != reflect.String && !t.Key().Implements(textMarshalerType) {
|
encoding/json: faster encoding
The old code was caching per-type struct field info. Instead,
cache type-specific encoding funcs, tailored for that
particular type to avoid unnecessary reflection at runtime.
Once the machine is built once, future encodings of that type
just run the func.
benchmark old ns/op new ns/op delta
BenchmarkCodeEncoder 48424939 36975320 -23.64%
benchmark old MB/s new MB/s speedup
BenchmarkCodeEncoder 40.07 52.48 1.31x
Additionally, the numbers seem stable now at ~52 MB/s, whereas
the numbers for the old code were all over the place: 11 MB/s,
40 MB/s, 13 MB/s, 39 MB/s, etc. In the benchmark above I compared
against the best I saw the old code do.
R=rsc, adg
CC=gobot, golang-dev, r
https://golang.org/cl/9129044
2013-08-09 10:46:47 -06:00
|
|
|
return unsupportedTypeEncoder
|
|
|
|
}
|
2013-09-23 20:57:19 -06:00
|
|
|
me := &mapEncoder{typeEncoder(t.Elem())}
|
encoding/json: faster encoding
The old code was caching per-type struct field info. Instead,
cache type-specific encoding funcs, tailored for that
particular type to avoid unnecessary reflection at runtime.
Once the machine is built once, future encodings of that type
just run the func.
benchmark old ns/op new ns/op delta
BenchmarkCodeEncoder 48424939 36975320 -23.64%
benchmark old MB/s new MB/s speedup
BenchmarkCodeEncoder 40.07 52.48 1.31x
Additionally, the numbers seem stable now at ~52 MB/s, whereas
the numbers for the old code were all over the place: 11 MB/s,
40 MB/s, 13 MB/s, 39 MB/s, etc. In the benchmark above I compared
against the best I saw the old code do.
R=rsc, adg
CC=gobot, golang-dev, r
https://golang.org/cl/9129044
2013-08-09 10:46:47 -06:00
|
|
|
return me.encode
|
|
|
|
}
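// Illustrative sketch, not part of the original file (the function name is
// hypothetical): because mapEncoder.encode resolves and sorts the keys before
// writing, object members always come out in lexical key order.
func exampleSortedMapKeys() string {
	b, _ := Marshal(map[string]int{"b": 2, "a": 1, "c": 3})
	return string(b) // `{"a":1,"b":2,"c":3}`
}
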
func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	s := v.Bytes()
	e.WriteByte('"')
	if len(s) < 1024 {
		// for small buffers, using Encode directly is much faster.
		dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
		base64.StdEncoding.Encode(dst, s)
		e.Write(dst)
	} else {
		// for large buffers, avoid unnecessary extra temporary
		// buffer space.
		enc := base64.NewEncoder(base64.StdEncoding, e)
		enc.Write(s)
		enc.Close()
	}
	e.WriteByte('"')
}

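// Illustrative sketch, not part of the original file (the function name is
// hypothetical): encodeByteSlice writes []byte as a single base64 string,
// while a nil byte slice is written as the JSON null literal.
func exampleByteSlice() (string, string) {
	b, _ := Marshal([]byte("hello")) // `"aGVsbG8="`
	n, _ := Marshal([]byte(nil))     // `null`
	return string(b), string(n)
}
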
// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
type sliceEncoder struct {
	arrayEnc encoderFunc
}

func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	se.arrayEnc(e, v, false)
}

func newSliceEncoder(t reflect.Type) encoderFunc {
	// Byte slices get special treatment; arrays don't.
	if t.Elem().Kind() == reflect.Uint8 &&
		!t.Elem().Implements(marshalerType) &&
		!t.Elem().Implements(textMarshalerType) {
		return encodeByteSlice
	}
	enc := &sliceEncoder{newArrayEncoder(t)}
	return enc.encode
}

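// Illustrative sketch, not part of the original file (the function name is
// hypothetical): sliceEncoder's nil check is what distinguishes a nil slice
// from an allocated but empty one.
func exampleNilVersusEmptySlice() (string, string) {
	nilSlice, _ := Marshal([]int(nil)) // `null`
	empty, _ := Marshal([]int{})       // `[]`
	return string(nilSlice), string(empty)
}
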
type arrayEncoder struct {
	elemEnc encoderFunc
}

func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
	e.WriteByte('[')
	n := v.Len()
	for i := 0; i < n; i++ {
		if i > 0 {
			e.WriteByte(',')
		}
		ae.elemEnc(e, v.Index(i), false)
	}
	e.WriteByte(']')
}

func newArrayEncoder(t reflect.Type) encoderFunc {
	enc := &arrayEncoder{typeEncoder(t.Elem())}
	return enc.encode
}

type ptrEncoder struct {
	elemEnc encoderFunc
}

func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
	if v.IsNil() {
		e.WriteString("null")
		return
	}
	pe.elemEnc(e, v.Elem(), quoted)
}

func newPtrEncoder(t reflect.Type) encoderFunc {
	enc := &ptrEncoder{typeEncoder(t.Elem())}
	return enc.encode
}

type condAddrEncoder struct {
	canAddrEnc, elseEnc encoderFunc
}

func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
	if v.CanAddr() {
		ce.canAddrEnc(e, v, quoted)
	} else {
		ce.elseEnc(e, v, quoted)
	}
}

// newCondAddrEncoder returns an encoder that checks whether its value
// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
	enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
	return enc.encode
}

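// Illustrative sketch, not part of the original file (the loud and box types
// and the function name are hypothetical, and this assumes the usual wiring in
// newTypeEncoder): condAddrEncoder is how a pointer-receiver MarshalJSON gets
// used only when the value being encoded is addressable, for example when it
// is reached through a pointer.
type loud struct{ s string }

func (l *loud) MarshalJSON() ([]byte, error) {
	return []byte(strconv.Quote(strings.ToUpper(l.s))), nil
}

func exampleCondAddr() (string, string) {
	type box struct{ L loud }
	byValue, _ := Marshal(box{loud{"hi"}})    // `{"L":{}}`: L is not addressable, so the default struct encoder runs (and s is unexported)
	byPointer, _ := Marshal(&box{loud{"hi"}}) // `{"L":"HI"}`: L is addressable through the pointer, so *loud's MarshalJSON runs
	return string(byValue), string(byPointer)
}
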
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range s {
		switch {
		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
			// Backslash and quote chars are reserved, but
			// otherwise any punctuation chars are allowed
			// in a tag name.
		default:
			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
				return false
			}
		}
	}
	return true
}

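// Illustrative sketch, not part of the original file (the function name is
// hypothetical): letters, digits, and most punctuation are accepted in a tag
// name, but quote and backslash characters and empty names are rejected, in
// which case the encoder falls back to the Go field name.
func exampleTagValidity() []bool {
	return []bool{
		isValidTag("player_name-v2"), // true: letters, digits, '_' and '-' are allowed
		isValidTag(""),               // false: empty names are never valid
		isValidTag(`bad"name`),       // false: quote characters are reserved
	}
}
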
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
	for _, i := range index {
		if v.Kind() == reflect.Ptr {
			if v.IsNil() {
				return reflect.Value{}
			}
			v = v.Elem()
		}
		v = v.Field(i)
	}
	return v
}

func typeByIndex(t reflect.Type, index []int) reflect.Type {
	for _, i := range index {
		if t.Kind() == reflect.Ptr {
			t = t.Elem()
		}
		t = t.Field(i).Type
	}
	return t
}

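// Illustrative sketch, not part of the original file (the types and function
// name are hypothetical): an index sequence such as []int{0, 1} means "field 1
// of field 0", which is how promoted fields of embedded structs are addressed.
func exampleFieldByIndex() string {
	type Inner struct{ A, B string }
	type Outer struct{ Inner }
	v := reflect.ValueOf(Outer{Inner{A: "a", B: "b"}})
	return fieldByIndex(v, []int{0, 1}).String() // "b"
}
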
type reflectWithString struct {
	v reflect.Value
	s string
}

func (w *reflectWithString) resolve() error {
	if w.v.Kind() == reflect.String {
		w.s = w.v.String()
		return nil
	}
	buf, err := w.v.Interface().(encoding.TextMarshaler).MarshalText()
	w.s = string(buf)
	return err
}

// byString is a slice of reflectWithString where the reflect.Value is either
// a string or an encoding.TextMarshaler.
// It implements the methods to sort by string.
type byString []reflectWithString

func (sv byString) Len() int           { return len(sv) }
func (sv byString) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
func (sv byString) Less(i, j int) bool { return sv[i].s < sv[j].s }

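// Illustrative sketch, not part of the original file (the dayKey type and
// function name are hypothetical): map keys that implement
// encoding.TextMarshaler are resolved to strings via MarshalText and then
// sorted exactly like ordinary string keys.
type dayKey int

func (d dayKey) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("day-%d", int(d))), nil
}

func exampleTextMarshalerKeys() string {
	b, _ := Marshal(map[dayKey]bool{2: true, 1: false})
	return string(b) // `{"day-1":false,"day-2":true}`
}
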
// NOTE: keep in sync with stringBytes below.
func (e *encodeState) string(s string) int {
	len0 := e.Len()
	e.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
				i++
				continue
			}
			if start < i {
				e.WriteString(s[start:i])
			}
			switch b {
			case '\\', '"':
				e.WriteByte('\\')
				e.WriteByte(b)
			case '\n':
				e.WriteByte('\\')
				e.WriteByte('n')
			case '\r':
				e.WriteByte('\\')
				e.WriteByte('r')
			case '\t':
				e.WriteByte('\\')
				e.WriteByte('t')
			default:
				// This encodes bytes < 0x20 except for \n and \r,
				// as well as <, > and &. The latter are escaped because they
				// can lead to security holes when user-controlled strings
				// are rendered into JSON and served to some browsers.
				e.WriteString(`\u00`)
				e.WriteByte(hex[b>>4])
				e.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError && size == 1 {
			if start < i {
				e.WriteString(s[start:i])
			}
			e.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				e.WriteString(s[start:i])
			}
			e.WriteString(`\u202`)
			e.WriteByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	if start < len(s) {
		e.WriteString(s[start:])
	}
	e.WriteByte('"')
	return e.Len() - len0
}

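// Illustrative sketch, not part of the original file (the function name is
// hypothetical): quotes, backslashes, control characters, and the
// HTML-sensitive <, > and & are all escaped by the writer above, so the output
// can be embedded safely in HTML pages.
func exampleStringEscaping() string {
	b, _ := Marshal("a<b>\t\"quoted\"\n")
	return string(b) // `"a\u003cb\u003e\t\"quoted\"\n"`
}
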
// NOTE: keep in sync with string above.
func (e *encodeState) stringBytes(s []byte) int {
	len0 := e.Len()
	e.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
				i++
				continue
			}
			if start < i {
				e.Write(s[start:i])
			}
			switch b {
			case '\\', '"':
				e.WriteByte('\\')
				e.WriteByte(b)
			case '\n':
				e.WriteByte('\\')
				e.WriteByte('n')
			case '\r':
				e.WriteByte('\\')
				e.WriteByte('r')
			case '\t':
				e.WriteByte('\\')
				e.WriteByte('t')
			default:
				// This encodes bytes < 0x20 except for \n and \r,
				// as well as <, >, and &. The latter are escaped because they
				// can lead to security holes when user-controlled strings
				// are rendered into JSON and served to some browsers.
				e.WriteString(`\u00`)
				e.WriteByte(hex[b>>4])
				e.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRune(s[i:])
		if c == utf8.RuneError && size == 1 {
			if start < i {
				e.Write(s[start:i])
			}
			e.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				e.Write(s[start:i])
			}
			e.WriteString(`\u202`)
			e.WriteByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	if start < len(s) {
		e.Write(s[start:])
	}
	e.WriteByte('"')
	return e.Len() - len0
}

// A field represents a single field found in a struct.
type field struct {
	name      string
	nameBytes []byte                 // []byte(name)
	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent

	tag       bool
	index     []int
	typ       reflect.Type
	omitEmpty bool
	quoted    bool
}

func fillField(f field) field {
	f.nameBytes = []byte(f.name)
	f.equalFold = foldFunc(f.nameBytes)
	return f
}

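// Illustrative sketch, not part of the original file (the user type and
// function name are hypothetical): the name, omitEmpty, and quoted settings
// recorded in field come straight from the struct tag and drive the output.
func exampleFieldOptions() string {
	type user struct {
		Name  string `json:"name"`
		Email string `json:"email,omitempty"`
		Age   int    `json:"age,string"`
	}
	b, _ := Marshal(user{Name: "gopher", Age: 12})
	return string(b) // `{"name":"gopher","age":"12"}`
}
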
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	if len(x[i].index) != len(x[j].index) {
		return len(x[i].index) < len(x[j].index)
	}
	if x[i].tag != x[j].tag {
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				tag := sf.Tag.Get("json")
				if tag == "-" {
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Only strings, floats, integers, and booleans can be quoted.
				quoted := false
				if opts.Contains("string") {
					switch ft.Kind() {
					case reflect.Bool,
						reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
						reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
						reflect.Float32, reflect.Float64,
						reflect.String:
						quoted = true
					}
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, fillField(field{
						name:      name,
						tag:       tagged,
						index:     index,
						typ:       ft,
						omitEmpty: opts.Contains("omitempty"),
						quoted:    quoted,
					}))
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with JSON tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}

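// Illustrative sketch, not part of the original file (the types and function
// name are hypothetical): typeFields promotes the fields of an embedded struct
// into the enclosing object, and a field at a shallower depth hides a deeper
// one with the same name.
func exampleFieldPromotion() string {
	type Inner struct {
		ID   int
		Note string
	}
	type Outer struct {
		Inner
		Note string // hides Inner.Note, because it sits at a shallower depth
	}
	b, _ := Marshal(Outer{Inner: Inner{ID: 7, Note: "inner"}, Note: "outer"})
	return string(b) // `{"ID":7,"Note":"outer"}`
}
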
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}

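// Illustrative sketch, not part of the original file (the types and function
// name are hypothetical): when two untagged fields with the same name sit at
// the same depth, dominantField reports no winner and the ambiguous name is
// silently dropped from the output.
func exampleFieldConflict() string {
	type A struct{ X, Y int }
	type B struct{ X int }
	type C struct {
		A
		B
	}
	b, _ := Marshal(C{A: A{X: 1, Y: 2}, B: B{X: 3}})
	return string(b) // `{"Y":2}`: the conflicting "X" fields are omitted
}
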
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}