go/ssa/sanity.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

// An optional pass for sanity-checking invariants of the SSA representation.
// Currently it checks CFG invariants but little at the instruction level.

import (
	"fmt"
	"io"
	"os"
	"strings"

	"code.google.com/p/go.tools/go/types"
)

type sanity struct {
	reporter io.Writer
	fn       *Function
	block    *BasicBlock
	insane   bool
}

// sanityCheck performs integrity checking of the SSA representation
// of the function fn and returns true if it was valid. Diagnostics
// are written to reporter if non-nil, os.Stderr otherwise. Some
// diagnostics are only warnings and do not imply a negative result.
//
// Sanity-checking is intended to facilitate the debugging of code
// transformation passes.
//
func sanityCheck(fn *Function, reporter io.Writer) bool {
	if reporter == nil {
		reporter = os.Stderr
	}
	return (&sanity{reporter: reporter}).checkFunction(fn)
}

// mustSanityCheck is like sanityCheck but panics instead of returning
// a negative result.
//
func mustSanityCheck(fn *Function, reporter io.Writer) {
	if !sanityCheck(fn, reporter) {
		fn.DumpTo(os.Stderr)
		panic("SanityCheck failed")
	}
}
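
// Illustrative usage from within a transformation pass (a sketch only; fn and
// the bytes.Buffer-based reporter are assumptions for the example, not part of
// this file):
//
//	var buf bytes.Buffer
//	if !sanityCheck(fn, &buf) {
//		fmt.Fprintf(os.Stderr, "SSA invariants violated:\n%s", buf.String())
//	}
//	mustSanityCheck(fn, nil) // or fail hard while debugging a pass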

func (s *sanity) diagnostic(prefix, format string, args ...interface{}) {
	fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn)
	if s.block != nil {
		fmt.Fprintf(s.reporter, ", block %s", s.block)
	}
	io.WriteString(s.reporter, ": ")
	fmt.Fprintf(s.reporter, format, args...)
	io.WriteString(s.reporter, "\n")
}

func (s *sanity) errorf(format string, args ...interface{}) {
	s.insane = true
	s.diagnostic("Error", format, args...)
}

func (s *sanity) warnf(format string, args ...interface{}) {
	s.diagnostic("Warning", format, args...)
}
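
// A diagnostic emitted by the helpers above has this shape (the function name
// and block shown here are invented purely for illustration):
//
//	Error: function foo, block 2: phi node has 2 edges but 3 predecessors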

// findDuplicate returns an arbitrary basic block that appeared more
// than once in blocks, or nil if all were unique.
func findDuplicate(blocks []*BasicBlock) *BasicBlock {
	if len(blocks) < 2 {
		return nil
	}
	if blocks[0] == blocks[1] {
		return blocks[0]
	}
	// Slow path:
	m := make(map[*BasicBlock]bool)
	for _, b := range blocks {
		if m[b] {
			return b
		}
		m[b] = true
	}
	return nil
}

func (s *sanity) checkInstr(idx int, instr Instruction) {
	switch instr := instr.(type) {
	case *If, *Jump, *Return, *Panic:
		s.errorf("control flow instruction not at end of block")

	case *Phi:
		if idx == 0 {
			// It suffices to apply this check to just the first phi node.
			if dup := findDuplicate(s.block.Preds); dup != nil {
				s.errorf("phi node in block with duplicate predecessor %s", dup)
			}
		} else {
			prev := s.block.Instrs[idx-1]
			if _, ok := prev.(*Phi); !ok {
				s.errorf("Phi instruction follows a non-Phi: %T", prev)
			}
		}
		if ne, np := len(instr.Edges), len(s.block.Preds); ne != np {
			s.errorf("phi node has %d edges but %d predecessors", ne, np)
		} else {
			for i, e := range instr.Edges {
				if e == nil {
					s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i])
				}
			}
		}

	case *Alloc:
		if !instr.Heap {
			found := false
			for _, l := range s.fn.Locals {
				if l == instr {
					found = true
					break
				}
			}
			if !found {
				s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr)
			}
		}

	case *BinOp:
	case *Call:
	case *ChangeInterface:
	case *ChangeType:
	case *Convert:
		if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
			if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
				s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
			}
		}

	case *Defer:
	case *Extract:
	case *Field:
	case *FieldAddr:
	case *Go:
	case *Index:
	case *IndexAddr:
	case *Lookup:
	case *MakeChan:
	case *MakeClosure:
		numFree := len(instr.Fn.(*Function).FreeVars)
		numBind := len(instr.Bindings)
		if numFree != numBind {
			s.errorf("MakeClosure has %d Bindings for function %s with %d free vars",
				numBind, instr.Fn, numFree)
		}
		if recv := instr.Type().(*types.Signature).Recv(); recv != nil {
			s.errorf("MakeClosure's type includes receiver %s", recv.Type())
		}

	case *MakeInterface:
	case *MakeMap:
	case *MakeSlice:
	case *MapUpdate:
	case *Next:
	case *Range:
	case *RunDefers:
	case *Select:
	case *Send:
	case *Slice:
	case *Store:
	case *TypeAssert:
	case *UnOp:
	case *DebugRef:
		// TODO(adonovan): implement checks.

	default:
		panic(fmt.Sprintf("Unknown instruction type: %T", instr))
	}

	// Check that value-defining instructions have valid types.
	if v, ok := instr.(Value); ok {
		t := v.Type()
		if t == nil {
			s.errorf("no type: %s = %s", v.Name(), v)
		} else if t == tRangeIter {
			// not a proper type; ignore.
		} else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
			s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t)
		}
	}

	// TODO(adonovan): sanity-check Consts used as instruction Operands(),
	// e.g. reject Consts with "untyped" types.
	//
	// All other non-Instruction Values can be found via their
	// enclosing Function or Package.
}

func (s *sanity) checkFinalInstr(idx int, instr Instruction) {
	switch instr.(type) {
	case *If:
		if nsuccs := len(s.block.Succs); nsuccs != 2 {
			s.errorf("If-terminated block has %d successors; expected 2", nsuccs)
			return
		}
		if s.block.Succs[0] == s.block.Succs[1] {
			s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0])
			return
		}

	case *Jump:
		if nsuccs := len(s.block.Succs); nsuccs != 1 {
			s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs)
			return
		}

	case *Return:
		if nsuccs := len(s.block.Succs); nsuccs != 0 {
			s.errorf("Return-terminated block has %d successors; expected none", nsuccs)
			return
		}
		// TODO(adonovan): check number and types of results

	case *Panic:
		if nsuccs := len(s.block.Succs); nsuccs != 0 {
			s.errorf("Panic-terminated block has %d successors; expected none", nsuccs)
			return
		}

	default:
		s.errorf("non-control flow instruction at end of block")
	}
}

func (s *sanity) checkBlock(b *BasicBlock, index int) {
	s.block = b

	if b.Index != index {
		s.errorf("block has incorrect Index %d", b.Index)
	}
	if b.parent != s.fn {
		s.errorf("block has incorrect parent %s", b.parent)
	}

	// Check all blocks are reachable.
	// (The entry block is always implicitly reachable,
	// as is the Recover block, if any.)
	if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 {
		s.warnf("unreachable block")
		if b.Instrs == nil {
			// Since this block is about to be pruned,
			// tolerating transient problems in it
			// simplifies other optimizations.
			return
		}
	}

	// Check predecessor and successor relations are dual,
	// and that all blocks in CFG belong to same function.
	for _, a := range b.Preds {
		found := false
		for _, bb := range a.Succs {
			if bb == b {
				found = true
				break
			}
		}
		if !found {
			s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs)
		}
		if a.parent != s.fn {
			s.errorf("predecessor %s belongs to different function %s", a, a.parent)
		}
	}
	for _, c := range b.Succs {
		found := false
		for _, bb := range c.Preds {
			if bb == b {
				found = true
				break
			}
		}
		if !found {
			s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds)
		}
		if c.parent != s.fn {
			s.errorf("successor %s belongs to different function %s", c, c.parent)
		}
	}

	// Check each instruction is sane.
	n := len(b.Instrs)
	if n == 0 {
		s.errorf("basic block contains no instructions")
	}
	var rands [10]*Value // reuse storage
	for j, instr := range b.Instrs {
		if instr == nil {
			s.errorf("nil instruction at index %d", j)
			continue
		}
		if b2 := instr.Block(); b2 == nil {
			s.errorf("nil Block() for instruction at index %d", j)
			continue
		} else if b2 != b {
			s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j)
			continue
		}
		if j < n-1 {
			s.checkInstr(j, instr)
		} else {
			s.checkFinalInstr(j, instr)
		}

		// Check Instruction.Operands.
	operands:
		for i, op := range instr.Operands(rands[:0]) {
			if op == nil {
				s.errorf("nil operand pointer %d of %s", i, instr)
				continue
			}
			val := *op
			if val == nil {
				continue // a nil operand is ok
			}

			// Check that Operands that are also Instructions belong to same function.
			// TODO(adonovan): also check their block dominates block b.
			if val, ok := val.(Instruction); ok {
				if val.Parent() != s.fn {
					s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent())
				}
			}

			// Check that each function-local operand of
			// instr refers back to instr. (NB: quadratic)
			switch val := val.(type) {
			case *Const, *Global, *Builtin:
				continue // not local
			case *Function:
				if val.Enclosing == nil {
					continue // only anon functions are local
				}
			}

			if refs := val.Referrers(); refs != nil {
				for _, ref := range *refs {
					if ref == instr {
						continue operands
					}
				}
				s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val)
			} else {
				s.errorf("operand %d of %s (%s) has no referrers", i, instr, val)
			}
		}
	}
}

func (s *sanity) checkFunction(fn *Function) bool {
	// TODO(adonovan): check Function invariants:
	// - check params match signature
	// - check transient fields are nil
	// - warn if any fn.Locals do not appear among block instructions.
	s.fn = fn
	if fn.Prog == nil {
		s.errorf("nil Prog")
	}

	fn.String()               // must not crash
	fn.RelString(fn.pkgobj()) // must not crash

	// All functions have a package, except wrappers (which are
	// shared across packages, or duplicated as weak symbols in a
	// separate-compilation model), and error.Error.
	if fn.Pkg == nil {
		if strings.Contains(fn.Synthetic, "wrapper") ||
			strings.HasSuffix(fn.name, "Error") {
			// ok
		} else {
			s.errorf("nil Pkg")
		}
	}
	if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn {
		s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
	}
	for i, l := range fn.Locals {
		if l.Parent() != fn {
			s.errorf("Local %s at index %d has wrong parent", l.Name(), i)
		}
		if l.Heap {
			s.errorf("Local %s at index %d has Heap flag set", l.Name(), i)
		}
	}
	for i, p := range fn.Params {
		if p.Parent() != fn {
			s.errorf("Param %s at index %d has wrong parent", p.Name(), i)
		}
	}
	for i, fv := range fn.FreeVars {
		if fv.Parent() != fn {
			s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i)
		}
	}
	if fn.Blocks != nil && len(fn.Blocks) == 0 {
		// Function _had_ blocks (so it's not external) but
		// they were "optimized" away, even the entry block.
		s.errorf("Blocks slice is non-nil but empty")
	}
	for i, b := range fn.Blocks {
		if b == nil {
			s.warnf("nil *BasicBlock at f.Blocks[%d]", i)
			continue
		}
		s.checkBlock(b, i)
	}
	if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover {
		s.errorf("Recover block is not in Blocks slice")
	}

	s.block = nil
	for i, anon := range fn.AnonFuncs {
		if anon.Enclosing != fn {
			s.errorf("AnonFuncs[%d]=%s but %s.Enclosing=%s", i, anon, anon, anon.Enclosing)
		}
	}
	s.fn = nil
	return !s.insane
}

// sanityCheckPackage checks invariants of packages upon creation.
// It does not require that the package is built.
// Unlike sanityCheck (for functions), it just panics at the first error.
func sanityCheckPackage(pkg *Package) {
	if pkg.Object == nil {
		panic(fmt.Sprintf("Package %s has no Object", pkg))
	}
	pkg.String() // must not crash

	for name, mem := range pkg.Members {
		if name != mem.Name() {
			panic(fmt.Sprintf("%s: %T.Name() = %s, want %s",
				pkg.Object.Path(), mem, mem.Name(), name))
		}
		obj := mem.Object()
		if obj == nil {
			// This check is sound because fields
			// {Global,Function}.object have type
			// types.Object. (If they were declared as
			// *types.{Var,Func}, we'd have a non-empty
			// interface containing a nil pointer.)
			continue // not all members have typechecker objects
		}
		if obj.Name() != name {
			panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
				pkg.Object.Path(), mem, obj.Name(), name))
		}
		if obj.Pos() != mem.Pos() {
			panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos()))
		}
	}
}
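
// Illustrative call site (a sketch only; pkg is assumed to be a freshly
// created *Package, however it was obtained, and is not defined in this file):
//
//	sanityCheckPackage(pkg) // panics if any member is inconsistent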