
cmd/compile/internal/ssa: use reverse postorder traversal

Instead of the hand-written control flow analysis in debug info
generation, use a reverse postorder traversal, which is basically the
same thing. It should be slightly faster.

More importantly, the previous version simply gave up in the case of
non-reducible functions, and produced output that caused a later stage
to crash. It turns out that there's a non-reducible function in
compress/flate, so that wasn't a theoretical issue.

With this change, all blocks will be visited, even for non-reducible
functions.

Change-Id: Id47536764ee93203c6b4105a1a3013fe3265aa12
Reviewed-on: https://go-review.googlesource.com/73110
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Author: Heschi Kreinick <heschi@google.com>
Date:   2017-10-19 18:59:41 -04:00
Parent: 81ec725607
Commit: 73f1a1a1a7

2 changed files with 6 additions and 47 deletions
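The core of the change is visible in the second file below: the hand-rolled work queue and its predecessorsDone readiness check are replaced by a walk over f.Postorder() in reverse. As a toy illustration of why that handles the non-reducible case, here is a minimal, self-contained sketch (the block type, postorder helper, and example CFG are invented for this note, not taken from the compiler) that builds a small irreducible CFG and shows that a reverse-postorder walk still reaches every block:

// Toy illustration (not the compiler's code): reverse postorder visits every
// reachable block of a CFG exactly once, even when the CFG is irreducible.
package main

import "fmt"

// block is a toy CFG node; it stands in for cmd/compile/internal/ssa's Block.
type block struct {
	name  string
	succs []*block
}

// postorder returns the blocks reachable from entry in depth-first postorder.
func postorder(entry *block) []*block {
	var po []*block
	seen := map[*block]bool{}
	var visit func(b *block)
	visit = func(b *block) {
		if seen[b] {
			return
		}
		seen[b] = true
		for _, s := range b.succs {
			visit(s)
		}
		po = append(po, b)
	}
	visit(entry)
	return po
}

func main() {
	// An irreducible CFG: entry branches into both halves of a cycle between
	// a and b, so neither a nor b dominates the other and there is no single
	// loop header. A readiness check like the deleted predecessorsDone can
	// never declare either block ready, so a work queue would stall here.
	a := &block{name: "a"}
	b := &block{name: "b"}
	a.succs = []*block{b}
	b.succs = []*block{a}
	entry := &block{name: "entry", succs: []*block{a, b}}

	// Reverse postorder: iterate the postorder list backwards, visiting each
	// block after as many of its predecessors as the ordering allows.
	po := postorder(entry)
	for i := len(po) - 1; i >= 0; i-- {
		fmt.Println("visit", po[i].name) // prints entry, a, b — no block is skipped
	}
}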


@@ -606,7 +606,6 @@ var knownFormats = map[string]string{
 	"[16]byte %x": "",
 	"[]*cmd/compile/internal/gc.Node %v": "",
 	"[]*cmd/compile/internal/gc.Sig %#v": "",
-	"[]*cmd/compile/internal/ssa.Block %+v": "",
 	"[]*cmd/compile/internal/ssa.Value %v": "",
 	"[][]cmd/compile/internal/ssa.SlotID %v": "",
 	"[]byte %s": "",


@@ -274,30 +274,14 @@ func BuildFuncDebug(f *Func, loggingEnabled bool) *FuncDebug {
 	// Build up block states, starting with the first block, then
 	// processing blocks once their predecessors have been processed.
-	// TODO: use a reverse post-order traversal instead of the work queue.
 
 	// Location list entries for each block.
 	blockLocs := make([]*BlockDebug, f.NumBlocks())
 
-	// Work queue of blocks to visit. Some of them may already be processed.
-	work := []*Block{f.Entry}
-
-	for len(work) > 0 {
-		b := work[0]
-		work = work[1:]
-		if blockLocs[b.ID] != nil {
-			continue // already processed
-		}
-		if !state.predecessorsDone(b, blockLocs) {
-			continue // not ready yet
-		}
-		for _, edge := range b.Succs {
-			if blockLocs[edge.Block().ID] != nil {
-				continue
-			}
-			work = append(work, edge.Block())
-		}
-
+	// Reverse postorder: visit a block after as many as possible of its
+	// predecessors have been visited.
+	po := f.Postorder()
+	for i := len(po) - 1; i >= 0; i-- {
+		b := po[i]
 
 		// Build the starting state for the block from the final
 		// state of its predecessors.
@@ -351,7 +335,7 @@ func BuildFuncDebug(f *Func, loggingEnabled bool) *FuncDebug {
 			last.End = BlockEnd
 		}
 		if state.loggingEnabled {
-			f.Logf("Block done: locs %v, regs %v. work = %+v\n", state.BlockString(locs), state.registerContents, work)
+			f.Logf("Block done: locs %v, regs %v\n", state.BlockString(locs), state.registerContents)
 		}
 		blockLocs[b.ID] = locs
 	}
@@ -382,30 +366,6 @@ func isSynthetic(slot *LocalSlot) bool {
 	return c == '.' || c == '~'
 }
 
-// predecessorsDone reports whether block is ready to be processed.
-func (state *debugState) predecessorsDone(b *Block, blockLocs []*BlockDebug) bool {
-	f := b.Func
-	for _, edge := range b.Preds {
-		// Ignore back branches, e.g. the continuation of a for loop.
-		// This may not work for functions with mutual gotos, which are not
-		// reducible, in which case debug information will be missing for any
-		// code after that point in the control flow.
-		if f.sdom().isAncestorEq(b, edge.b) {
-			if state.loggingEnabled {
-				f.Logf("ignoring back branch from %v to %v\n", edge.b, b)
-			}
-			continue // back branch
-		}
-		if blockLocs[edge.b.ID] == nil {
-			if state.loggingEnabled {
-				f.Logf("%v is not ready because %v isn't done\n", b, edge.b)
-			}
-			return false
-		}
-	}
-	return true
-}
-
 // mergePredecessors takes the end state of each of b's predecessors and
 // intersects them to form the starting state for b.
 // The registers slice (the second return value) will be reused for each call to mergePredecessors.
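The mergePredecessors comment in the last hunk's context describes an intersection of predecessor end states. For orientation, here is a rough, hypothetical sketch of that intersection idea; the loc type and mergeStates function are invented for this note, and the real ssa code tracks registers and stack slots rather than strings:

// Toy sketch of intersecting predecessor end states: a variable keeps a
// location in the merged start state only if every predecessor agrees on it.
package main

import "fmt"

// loc stands in for a variable's location (a register or stack slot),
// simplified to a string for this sketch.
type loc string

func mergeStates(preds []map[string]loc) map[string]loc {
	merged := map[string]loc{}
	if len(preds) == 0 {
		return merged
	}
	for v, l := range preds[0] {
		agree := true
		for _, p := range preds[1:] {
			if p[v] != l {
				agree = false
				break
			}
		}
		if agree {
			merged[v] = l
		}
	}
	return merged
}

func main() {
	p1 := map[string]loc{"x": "AX", "y": "BX"}
	p2 := map[string]loc{"x": "AX", "y": "CX"}
	// Only x survives the merge; the predecessors disagree about y.
	fmt.Println(mergeStates([]map[string]loc{p1, p2})) // map[x:AX]
}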