
reflect: faster FieldByName, FieldByNameFunc

The old code was a depth first graph traversal that could, under the
right conditions, end up re-exploring the same subgraphs multiple
times, once for each way to arrive at that subgraph at a given depth.

The new code uses a breadth first search to make sure that it only
visits each reachable embedded struct once.

Also add a fast path for the trivial case.
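
To make the uniqueness rule concrete: a name that appears in two embedded
structs at the same depth is ambiguous, and both the old and the new code
must report it as not found. A minimal illustration using only the exported
reflect API (not part of this CL):

	package main

	import (
		"fmt"
		"reflect"
	)

	type A struct{ X int }
	type B struct{ X int }

	// X is present in both A and B at depth 1, so the two
	// occurrences annihilate and FieldByName reports no match.
	type S struct {
		A
		B
	}

	func main() {
		f, ok := reflect.TypeOf(S{}).FieldByName("X")
		fmt.Println(ok, f.Index) // false []
	}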

benchmark                old ns/op    new ns/op    delta
BenchmarkFieldByName1         1321          187  -85.84%
BenchmarkFieldByName2         6118         5186  -15.23%
BenchmarkFieldByName3      8218553        42112  -99.49%
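
The BenchmarkFieldByName3 delta follows from the shape of the R0 test type
added in this CL: six levels of embedding with four embedded structs per
level give 4^6 = 4096 distinct paths from R0 down to the X fields, but only
25 distinct struct types. The old depth-first walk could re-explore a
subgraph once per path to it; the new breadth-first scan visits each type at
most once per level. A back-of-the-envelope sketch of those two counts
(illustrative only, not from the CL):

	package main

	import "fmt"

	func main() {
		const branch, levels = 4, 6 // R0 embeds four structs per level, six levels deep
		paths, types := 1, 1
		for l := 0; l < levels; l++ {
			paths *= branch // ways to arrive at the next level: one DFS re-exploration each
			types += branch // distinct struct types added at the next level
		}
		fmt.Println(paths, types) // 4096 vs 25
	}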

R=gri, r
CC=golang-dev
https://golang.org/cl/6458090
Russ Cox 2012-09-05 09:35:53 -04:00
parent 845f4d6b47
commit 5e3224ce79
2 changed files with 270 additions and 73 deletions

src/pkg/reflect/all_test.go

@@ -1053,7 +1053,6 @@ func TestChan(t *testing.T) {
 	if l, m := cv.Len(), cv.Cap(); l != len(c) || m != cap(c) {
 		t.Errorf("Len/Cap = %d/%d want %d/%d", l, m, len(c), cap(c))
 	}
-
 }
 
 // Difficult test for function call because of
@@ -1225,7 +1224,7 @@ func TestAnonymousFields(t *testing.T) {
 	var t1 T1
 	type1 := TypeOf(t1)
 	if field, ok = type1.FieldByName("int"); !ok {
-		t.Error("no field 'int'")
+		t.Fatal("no field 'int'")
 	}
 	if field.Index[0] != 1 {
 		t.Error("field index should be 1; is", field.Index)
@@ -1282,6 +1281,47 @@ type S4 struct {
 	A int
 }
 
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+	S6
+	S7
+	S8
+}
+
+type S6 struct {
+	X int
+}
+
+type S7 S6
+
+type S8 struct {
+	S9
+}
+
+type S9 struct {
+	X int
+	Y int
+}
+
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+	S11
+	S12
+	S13
+}
+
+type S11 struct {
+	S6
+}
+
+type S12 struct {
+	S6
+}
+
+type S13 struct {
+	S8
+}
+
 var fieldTests = []FTest{
 	{struct{}{}, "", nil, 0},
 	{struct{}{}, "Foo", nil, 0},
@@ -1303,6 +1343,10 @@ var fieldTests = []FTest{
 	{S3{E: 'e'}, "E", []int{3}, 'e'},
 	{S4{A: 'a'}, "A", []int{1}, 'a'},
 	{S4{}, "B", nil, 0},
+	{S5{}, "X", nil, 0},
+	{S5{}, "Y", []int{2, 0, 1}, 0},
+	{S10{}, "X", nil, 0},
+	{S10{}, "Y", []int{2, 0, 0, 1}, 0},
 }
 
 func TestFieldByIndex(t *testing.T) {
@@ -1346,7 +1390,7 @@ func TestFieldByName(t *testing.T) {
 		if test.index != nil {
 			// Verify field depth and index.
 			if len(f.Index) != len(test.index) {
-				t.Errorf("%s.%s depth %d; want %d", s.Name(), test.name, len(f.Index), len(test.index))
+				t.Errorf("%s.%s depth %d; want %d: %v vs %v", s.Name(), test.name, len(f.Index), len(test.index), f.Index, test.index)
 			} else {
 				for i, x := range f.Index {
 					if x != test.index[i] {
@@ -1784,3 +1828,108 @@ func TestAlias(t *testing.T) {
 		t.Errorf("aliasing: old=%q new=%q, want hello, world", oldvalue, newvalue)
 	}
 }
+
+type B1 struct {
+	X int
+	Y int
+	Z int
+}
+
+func BenchmarkFieldByName1(b *testing.B) {
+	t := TypeOf(B1{})
+	for i := 0; i < b.N; i++ {
+		t.FieldByName("Z")
+	}
+}
+
+func BenchmarkFieldByName2(b *testing.B) {
+	t := TypeOf(S3{})
+	for i := 0; i < b.N; i++ {
+		t.FieldByName("B")
+	}
+}
+
+type R0 struct {
+	*R1
+	*R2
+	*R3
+	*R4
+}
+
+type R1 struct {
+	*R5
+	*R6
+	*R7
+	*R8
+}
+
+type R2 R1
+type R3 R1
+type R4 R1
+
+type R5 struct {
+	*R9
+	*R10
+	*R11
+	*R12
+}
+
+type R6 R5
+type R7 R5
+type R8 R5
+
+type R9 struct {
+	*R13
+	*R14
+	*R15
+	*R16
+}
+
+type R10 R9
+type R11 R9
+type R12 R9
+
+type R13 struct {
+	*R17
+	*R18
+	*R19
+	*R20
+}
+
+type R14 R13
+type R15 R13
+type R16 R13
+
+type R17 struct {
+	*R21
+	*R22
+	*R23
+	*R24
+}
+
+type R18 R17
+type R19 R17
+type R20 R17
+
+type R21 struct {
+	X int
+}
+
+type R22 R21
+type R23 R21
+type R24 R21
+
+func TestEmbed(t *testing.T) {
+	typ := TypeOf(R0{})
+	f, ok := typ.FieldByName("X")
+	if ok {
+		t.Fatalf(`FieldByName("X") should fail, returned %v`, f.Index)
+	}
+}
+
+func BenchmarkFieldByName3(b *testing.B) {
+	t := TypeOf(R0{})
+	for i := 0; i < b.N; i++ {
+		t.FieldByName("X")
+	}
+}
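
The S5 and S10 fixtures above pin down the subtler half of the rule: two
matches at one depth not only cancel each other, they also block a unique
match further down, while a different name can still be found deeper. The
same behavior, demonstrated through the exported API (a standalone rewrite
of the fixtures, not part of the CL):

	package main

	import (
		"fmt"
		"reflect"
	)

	type S6 struct{ X int }
	type S7 S6
	type S9 struct{ X, Y int }
	type S8 struct{ S9 }

	// X occurs in S6 and S7 at depth 1: those two annihilate and also
	// block the depth-2 X in S8.S9. Y is unique, so it is still found.
	type S5 struct {
		S6
		S7
		S8
	}

	func main() {
		t := reflect.TypeOf(S5{})
		_, okX := t.FieldByName("X")
		y, okY := t.FieldByName("Y")
		fmt.Println(okX)          // false
		fmt.Println(okY, y.Index) // true [2 0 1]
	}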

src/pkg/reflect/type.go

@@ -837,92 +837,140 @@ func (t *structType) FieldByIndex(index []int) (f StructField) {
 	return
 }
 
-const inf = 1 << 30 // infinity - no struct has that many nesting levels
-
-func (t *structType) fieldByNameFunc(match func(string) bool, mark map[*structType]bool, depth int) (ff StructField, fd int) {
-	fd = inf // field depth
-
-	if mark[t] {
-		// Struct already seen.
-		return
-	}
-	mark[t] = true
-
-	var fi int // field index
-	n := 0     // number of matching fields at depth fd
-L:
-	for i := range t.fields {
-		f := t.Field(i)
-		d := inf
-		switch {
-		case match(f.Name):
-			// Matching top-level field.
-			d = depth
-		case f.Anonymous:
-			ft := f.Type
-			if ft.Kind() == Ptr {
-				ft = ft.Elem()
-			}
-			switch {
-			case match(ft.Name()):
-				// Matching anonymous top-level field.
-				d = depth
-			case fd > depth:
-				// No top-level field yet; look inside nested structs.
-				if ft.Kind() == Struct {
-					st := (*structType)(unsafe.Pointer(ft.(*commonType)))
-					f, d = st.fieldByNameFunc(match, mark, depth+1)
-				}
-			}
-		}
-
-		switch {
-		case d < fd:
-			// Found field at shallower depth.
-			ff, fi, fd = f, i, d
-			n = 1
-		case d == fd:
-			// More than one matching field at the same depth (or d, fd == inf).
-			// Same as no field found at this depth.
-			n++
-			if d == depth {
-				// Impossible to find a field at lower depth.
-				break L
-			}
-		}
-	}
-
-	if n == 1 {
-		// Found matching field.
-		if depth >= len(ff.Index) {
-			ff.Index = make([]int, depth+1)
-		}
-		if len(ff.Index) > 1 {
-			ff.Index[depth] = fi
-		}
-	} else {
-		// None or more than one matching field found.
-		fd = inf
-	}
-
-	delete(mark, t)
-	return
-}
+// A fieldScan represents an item on the fieldByNameFunc scan work list.
+type fieldScan struct {
+	typ   *structType
+	index []int
+}
+
+// FieldByNameFunc returns the struct field with a name that satisfies the
+// match function and a boolean to indicate if the field was found.
+func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
+	// This uses the same condition that the Go language does: there must be a unique instance
+	// of the match at a given depth level. If there are multiple instances of a match at the
+	// same depth, they annihilate each other and inhibit any possible match at a lower level.
+	// The algorithm is breadth first search, one depth level at a time.
+
+	// The current and next slices are work queues:
+	// current lists the fields to visit on this depth level,
+	// and next lists the fields on the next lower level.
+	current := []fieldScan{}
+	next := []fieldScan{{typ: t}}
+
+	// nextCount records the number of times an embedded type has been
+	// encountered and considered for queueing in the 'next' slice.
+	// We only queue the first one, but we increment the count on each.
+	// If a struct type T can be reached more than once at a given depth level,
+	// then it annihilates itself and need not be considered at all when we
+	// process that next depth level.
+	var nextCount map[*structType]int
+
+	// visited records the structs that have been considered already.
+	// Embedded pointer fields can create cycles in the graph of
+	// reachable embedded types; visited avoids following those cycles.
+	// It also avoids duplicated effort: if we didn't find the field in an
+	// embedded type T at level 2, we won't find it in one at level 4 either.
+	visited := map[*structType]bool{}
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count := nextCount
+		nextCount = nil
+
+		// Process all the fields at this depth, now listed in 'current'.
+		// The loop queues embedded fields found in 'next', for processing during the next
+		// iteration. The multiplicity of the 'current' field counts is recorded
+		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
+		for _, scan := range current {
+			t := scan.typ
+			if visited[t] {
+				// We've looked through this type before, at a higher level.
+				// That higher level would shadow the lower level we're now at,
+				// so this one can't be useful to us. Ignore it.
+				continue
+			}
+			visited[t] = true
+			for i := range t.fields {
+				f := &t.fields[i]
+				// Find name and type for field f.
+				var fname string
+				var ntyp *commonType
+				if f.name != nil {
+					fname = *f.name
+				} else {
+					// Anonymous field of type T or *T.
+					// Name taken from type.
+					ntyp = toCommonType(f.typ)
+					if ntyp.Kind() == Ptr {
+						ntyp = ntyp.Elem().common()
+					}
+					fname = ntyp.Name()
+				}
+
+				// Does it match?
+				if match(fname) {
+					// Potential match
+					if count[t] > 1 || ok {
+						// Name appeared multiple times at this level: annihilate.
+						return StructField{}, false
+					}
+					result = t.Field(i)
+					result.Index = nil
+					result.Index = append(result.Index, scan.index...)
+					result.Index = append(result.Index, i)
+					ok = true
+					continue
+				}
+
+				// Queue embedded struct fields for processing with next level,
+				// but only if we haven't seen a match yet at this level and only
+				// if the embedded types haven't already been queued.
+				if ok || ntyp == nil || ntyp.Kind() != Struct {
+					continue
+				}
+				styp := (*structType)(unsafe.Pointer(ntyp))
+				if nextCount[styp] > 0 {
+					nextCount[styp]++
+					continue
+				}
+				if nextCount == nil {
+					nextCount = map[*structType]int{}
+				}
+				nextCount[styp] = 1
+				var index []int
+				index = append(index, scan.index...)
+				index = append(index, i)
+				next = append(next, fieldScan{styp, index})
+			}
+		}
+		if ok {
+			break
+		}
+	}
+	return
+}
 
 // FieldByName returns the struct field with the given name
 // and a boolean to indicate if the field was found.
 func (t *structType) FieldByName(name string) (f StructField, present bool) {
+	// Quick check for top-level name, or struct without anonymous fields.
+	hasAnon := false
+	if name != "" {
+		for i := range t.fields {
+			tf := &t.fields[i]
+			if tf.name == nil {
+				hasAnon = true
+				continue
+			}
+			if *tf.name == name {
+				return t.Field(i), true
+			}
+		}
+	}
+	if !hasAnon {
+		return
+	}
 	return t.FieldByNameFunc(func(s string) bool { return s == name })
 }
 
-// FieldByNameFunc returns the struct field with a name that satisfies the
-// match function and a boolean to indicate if the field was found.
-func (t *structType) FieldByNameFunc(match func(string) bool) (f StructField, present bool) {
-	if ff, fd := t.fieldByNameFunc(match, make(map[*structType]bool), 0); fd < inf {
-		ff.Index = ff.Index[0 : fd+1]
-		f, present = ff, true
-	}
-	return
-}
-
 // Convert runtime type to reflect type.
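
The new quick check in FieldByName accounts for the BenchmarkFieldByName1
improvement: for a struct with no anonymous fields, a single linear scan of
the top-level fields answers the query, and the breadth-first machinery
(work queues and maps) is never allocated. A small illustration of that
trivial case (not part of the commit):

	package main

	import (
		"fmt"
		"reflect"
	)

	// Like the B1 benchmark type: three plain fields, no embedding,
	// so the lookup is satisfied by the top-level scan alone.
	type B1 struct {
		X, Y, Z int
	}

	func main() {
		f, ok := reflect.TypeOf(B1{}).FieldByName("Z")
		fmt.Println(ok, f.Index) // true [2]
	}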