Mirror of https://github.com/golang/go, synced 2024-11-18 18:44:42 -07:00
3b5de067a1
Core: reflect.TypeOf, reflect.ValueOf, reflect.Zero, reflect.Value.Interface.

Maps: (reflect.Value).MapIndex, (reflect.Value).MapKeys, (reflect.Value).SetMapIndex, (*reflect.rtype).Elem, (*reflect.rtype).Key.

+ tests: pointer/testdata/mapreflect.go, oracle/testdata/src/main/reflection.go.

Interface objects (T, V...) have been renamed "tagged objects".

Abstraction: we model reflect.Value similarly to interface{}: as a pointer that points only to tagged objects. However, a reflect.Value may also point to an "indirect tagged object", one in which the payload V is of type *T rather than T. These are required because reflect.Values can hold lvalues, e.g. when derived via Field() or Elem(), though we won't use them until we get to structs and pointers.

Solving: each reflection intrinsic defines a new constraint and resolution rule. Because of the nature of reflection, which generalizes across types, the resolution rules dynamically create additional complex constraints during solving, where previously only simple (copy) constraints were created. This requires some solver changes: the work done before the main solver loop (attaching new constraints to the graph) is now done before each iteration, in processNewConstraints. Its loop over constraints is broken into two passes: the first handles base (addr-of) constraints, the second handles simple and complex constraints. constraint.init() has been inlined. The only behaviour that varies across constraints is ptr().

Sadly this will pessimize presolver optimisations, when we get there; such is the price of reflection.

Objects: reflection intrinsics create objects (i.e. cause memory allocations) with no SSA operation. We will represent them as the cgnode of the intrinsic (e.g. reflect.New), so we extend Labels and node.data to represent objects as a product (not a sum) of ssa.Value and cgnode, and pull this out into its own type, struct object. This simplifies a number of invariants and saves space. The ntObject flag is now represented by obj != nil; the other flags are moved into object. cgnodes are now always recorded in objects/Labels for which it is appropriate (all but those for globals, constants, and the shared contours for functions).

Also:
- Prepopulate the flattenMemo cache to consider reflect.Value a fake pointer, not a struct.
- Improve accessors and documentation on type Label.
- Rename @conctypes assertions to @types (since dynamic types needn't be concrete).
- Add an oracle 'describe' test on an interface (missing; an oversight).

R=crawshaw
CC=golang-dev
https://golang.org/cl/13418048
325 lines
8.1 KiB
Go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pointer

import (
	"bytes"
	"fmt"

	"code.google.com/p/go.tools/go/types"
)

// CanPoint reports whether the type T is pointerlike,
// for the purposes of this analysis.
func CanPoint(T types.Type) bool {
	switch T := T.(type) {
	case *types.Named:
		if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
			return true // treat reflect.Value like interface{}
		}
		return CanPoint(T.Underlying())

	case *types.Pointer, *types.Interface, *types.Map, *types.Chan, *types.Signature, *types.Slice:
		return true
	}

	return false // array struct tuple builtin basic
}
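
// canPointExamples is an illustrative sketch, not part of the original
// file: it shows how CanPoint classifies a few simple types built with
// the standard go/types constructors.
func canPointExamples() {
	_ = CanPoint(types.NewSlice(types.Typ[types.Int]))                // true: slices are pointerlike
	_ = CanPoint(types.NewChan(types.SendRecv, types.Typ[types.Int])) // true: channels are pointerlike
	_ = CanPoint(types.Typ[types.Int])                                // false: basic types cannot point
}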

// CanHaveDynamicTypes reports whether the type T can "hold" dynamic types,
// i.e. is an interface (incl. reflect.Type) or a reflect.Value.
//
func CanHaveDynamicTypes(T types.Type) bool {
	switch T := T.(type) {
	case *types.Named:
		if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
			return true // reflect.Value
		}
		return CanHaveDynamicTypes(T.Underlying())
	case *types.Interface:
		return true
	}
	return false
}
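
// dynTypesExamples is an illustrative sketch, not part of the original
// file: error (a named interface type) can hold dynamic types; int cannot.
func dynTypesExamples() {
	_ = CanHaveDynamicTypes(types.Universe.Lookup("error").Type()) // true
	_ = CanHaveDynamicTypes(types.Typ[types.Int])                  // false
}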

// mustDeref returns the element type of its argument, which must be a
// pointer; panic ensues otherwise.
func mustDeref(typ types.Type) types.Type {
	return typ.Underlying().(*types.Pointer).Elem()
}

// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
	if p, ok := typ.Underlying().(*types.Pointer); ok {
		return p.Elem()
	}
	return typ
}
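
// derefExamples is an illustrative sketch, not part of the original file,
// contrasting the forgiving deref with the strict mustDeref.
func derefExamples() {
	ptr := types.NewPointer(types.Typ[types.Int])
	_ = deref(ptr)                  // int
	_ = deref(types.Typ[types.Int]) // int: non-pointers pass through unchanged
	_ = mustDeref(ptr)              // int; mustDeref panics on a non-pointer
}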

// A fieldInfo describes one subelement (node) of the flattening-out
// of a type T: the subelement's type and its path from the root of T.
//
// For example, for this type:
//     type line struct{ points []struct{x, y int} }
// flatten() of the inner struct yields the following []fieldInfo:
//     struct{ x, y int }                      ""
//     int                                     ".x"
//     int                                     ".y"
// and flatten(line) yields:
//     struct{ points []struct{x, y int} }     ""
//     struct{ x, y int }                      ".points[*]"
//     int                                     ".points[*].x"
//     int                                     ".points[*].y"
//
type fieldInfo struct {
	typ types.Type

	// op and tail describe the path to the element (e.g. ".a#2.b[*].c").
	op   interface{} // *Array: true; *Tuple: int; *Struct: *types.Var; *Named: nil
	tail *fieldInfo
}

// path returns a user-friendly string describing the subelement path.
//
func (fi *fieldInfo) path() string {
	var buf bytes.Buffer
	for p := fi; p != nil; p = p.tail {
		switch op := p.op.(type) {
		case bool:
			fmt.Fprintf(&buf, "[*]")
		case int:
			fmt.Fprintf(&buf, "#%d", op)
		case *types.Var:
			fmt.Fprintf(&buf, ".%s", op.Name())
		}
	}
	return buf.String()
}
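
// pathExample is an illustrative sketch, not part of the original file:
// a two-element chain whose ops are an array step (bool) and a tuple
// index (int) prints outermost op first.
func pathExample() string {
	inner := &fieldInfo{typ: types.Typ[types.Int], op: 2}
	outer := &fieldInfo{typ: types.Typ[types.Int], op: true, tail: inner}
	return outer.path() // "[*]#2"
}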

// flatten returns a list of directly contained fields in the preorder
// traversal of the type tree of t.  The resulting elements are all
// scalars (basic types or pointerlike types), except for struct/array
// "identity" nodes, whose type is that of the aggregate.
//
// reflect.Value is considered pointerlike, similar to interface{}.
//
// Callers must not mutate the result.
//
func (a *analysis) flatten(t types.Type) []*fieldInfo {
	fl, ok := a.flattenMemo[t]
	if !ok {
		switch t := t.(type) {
		case *types.Named:
			u := t.Underlying()
			if _, ok := u.(*types.Interface); ok {
				// Debuggability hack: don't remove
				// the named type from interfaces as
				// they're very verbose.
				fl = append(fl, &fieldInfo{typ: t})
			} else {
				fl = a.flatten(u)
			}

		case *types.Basic,
			*types.Signature,
			*types.Chan,
			*types.Map,
			*types.Interface,
			*types.Slice,
			*types.Pointer:
			fl = append(fl, &fieldInfo{typ: t})

		case *types.Array:
			fl = append(fl, &fieldInfo{typ: t}) // identity node
			for _, fi := range a.flatten(t.Elem()) {
				fl = append(fl, &fieldInfo{typ: fi.typ, op: true, tail: fi})
			}

		case *types.Struct:
			fl = append(fl, &fieldInfo{typ: t}) // identity node
			for i, n := 0, t.NumFields(); i < n; i++ {
				f := t.Field(i)
				for _, fi := range a.flatten(f.Type()) {
					fl = append(fl, &fieldInfo{typ: fi.typ, op: f, tail: fi})
				}
			}

		case *types.Tuple:
			// No identity node: tuples are never address-taken.
			for i, n := 0, t.Len(); i < n; i++ {
				f := t.At(i)
				for _, fi := range a.flatten(f.Type()) {
					fl = append(fl, &fieldInfo{typ: fi.typ, op: i, tail: fi})
				}
			}

		case *types.Builtin:
			panic("flatten(*types.Builtin)") // not the type of any value

		default:
			panic(t)
		}

		a.flattenMemo[t] = fl
	}

	return fl
}
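
// flattenSketch is an illustrative sketch, not part of the original file.
// It assumes that the analysis struct (declared elsewhere in this package)
// has a flattenMemo field of type map[types.Type][]*fieldInfo and that
// flatten needs no other fields.
func flattenSketch() []*fieldInfo {
	a := &analysis{flattenMemo: make(map[types.Type][]*fieldInfo)}
	arr := types.NewArray(types.Typ[types.Int], 3)
	return a.flatten(arr) // 2 nodes: the [3]int identity node, then one int at "[*]"
}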

// sizeof returns the number of pointerlike abstractions (nodes) in the type t.
func (a *analysis) sizeof(t types.Type) uint32 {
	return uint32(len(a.flatten(t)))
}

// offsetOf returns the (abstract) offset of field index within struct
// or tuple typ.
func (a *analysis) offsetOf(typ types.Type, index int) uint32 {
	var offset uint32
	switch t := typ.Underlying().(type) {
	case *types.Tuple:
		for i := 0; i < index; i++ {
			offset += a.sizeof(t.At(i).Type())
		}
	case *types.Struct:
		offset++ // the node for the struct itself
		for i := 0; i < index; i++ {
			offset += a.sizeof(t.Field(i).Type())
		}
	default:
		panic(fmt.Sprintf("offsetOf(%s : %T)", typ, typ))
	}
	return offset
}
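
// An illustrative worked example, not in the original file: for
// S = struct{ x, y int }, flatten(S) is [S, int, int], so
// offsetOf(S, 1) = 1 (the identity node) + sizeof(x) = 2, whereas a
// tuple (x, y int) has no identity node, so offsetOf there yields 1.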

// sliceToArray returns the type representing the arrays to which
// slice type slice points.
func sliceToArray(slice types.Type) *types.Array {
	return types.NewArray(slice.Underlying().(*types.Slice).Elem(), 1)
}
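
// sliceToArraySketch is an illustrative sketch, not part of the original
// file: the analysis models a slice's entire backing array as a single
// element, hence the fixed length of 1.
func sliceToArraySketch() *types.Array {
	return sliceToArray(types.NewSlice(types.Typ[types.Byte])) // [1]byte
}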

// Node set -------------------------------------------------------------------

// NB, mutator methods are attached to *nodeset.
// nodeset may be a reference, but its address matters!
type nodeset map[nodeid]struct{}

// ---- Accessors ----

func (ns nodeset) String() string {
	var buf bytes.Buffer
	buf.WriteRune('{')
	var sep string
	for n := range ns {
		fmt.Fprintf(&buf, "%sn%d", sep, n)
		sep = ", "
	}
	buf.WriteRune('}')
	return buf.String()
}

// diff returns the set-difference x - y.  nil => empty.
//
// TODO(adonovan): opt: extremely inefficient.  BDDs do this in
// constant time.  Sparse bitvectors are linear but very fast.
func (x nodeset) diff(y nodeset) nodeset {
	var z nodeset
	for k := range x {
		if _, ok := y[k]; !ok {
			z.add(k)
		}
	}
	return z
}

// clone() returns an unaliased copy of x.
func (x nodeset) clone() nodeset {
	return x.diff(nil)
}

// ---- Mutators ----

func (ns *nodeset) add(n nodeid) bool {
	sz := len(*ns)
	if *ns == nil {
		*ns = make(nodeset)
	}
	(*ns)[n] = struct{}{}
	return len(*ns) > sz
}

func (x *nodeset) addAll(y nodeset) bool {
	if y == nil {
		return false
	}
	sz := len(*x)
	if *x == nil {
		*x = make(nodeset)
	}
	for n := range y {
		(*x)[n] = struct{}{}
	}
	return len(*x) > sz
}
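
// nodesetSketch is an illustrative sketch, not part of the original file:
// add reports whether the set grew, and a nil nodeset is a valid empty
// set that is allocated lazily on first mutation.
func nodesetSketch() {
	var x, y nodeset
	x.add(1) // true: lazily allocates the map and inserts n1
	x.add(2) // true
	y.add(2) // true
	d := x.diff(y) // {n1}
	_ = d.add(1)   // false: n1 is already present
}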

// Constraint set -------------------------------------------------------------

type constraintset map[constraint]struct{}

func (cs *constraintset) add(c constraint) bool {
	sz := len(*cs)
	if *cs == nil {
		*cs = make(constraintset)
	}
	(*cs)[c] = struct{}{}
	return len(*cs) > sz
}

// Worklist -------------------------------------------------------------------

// TODO(adonovan): interface may not be general enough for certain
// implementations, e.g. priority queue.
//
// Uses double-buffering so nodes can be added during iteration.
type worklist interface {
	empty() bool  // Reports whether active buffer is empty.
	swap() bool   // Switches to the shadow buffer if empty().
	add(nodeid)   // Adds a node to the shadow buffer.
	take() nodeid // Takes a node from the active buffer.  Precondition: !empty().
}

// Horribly naive (and nondeterministic) worklist
// based on two hash-sets.
type mapWorklist struct {
	active, shadow nodeset
}

func (w *mapWorklist) empty() bool {
	return len(w.active) == 0
}

func (w *mapWorklist) swap() bool {
	if w.empty() {
		w.shadow, w.active = w.active, w.shadow
		return true
	}
	return false
}

func (w *mapWorklist) add(n nodeid) {
	w.shadow[n] = struct{}{}
}

func (w *mapWorklist) take() nodeid {
	for k := range w.active {
		delete(w.active, k)
		return k
	}
	panic("worklist.take(): empty active buffer")
}

func makeMapWorklist() worklist {
	return &mapWorklist{make(nodeset), make(nodeset)}
}
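
// worklistSketch is an illustrative sketch, not part of the original file,
// showing the double-buffering protocol: add() fills the shadow buffer,
// take() drains the active one, and swap() promotes the shadow once the
// active buffer is dry.
func worklistSketch() {
	w := makeMapWorklist()
	w.add(1)
	w.add(2)
	for {
		if w.empty() && (!w.swap() || w.empty()) {
			return // both buffers drained
		}
		n := w.take()
		_ = n // a real solver would process n here, perhaps calling w.add
	}
}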