mirror of https://github.com/golang/go
cmd/compile: reimplement location list generation
Completely redesign and reimplement location list generation to be more efficient, and hopefully not too hard to understand.

RegKills are gone. Instead of using the regalloc's liveness calculations, redo them using the Ops' clobber information. Besides saving a lot of Values, this avoids adding RegKills to blocks that would be empty otherwise, which was messing up optimizations. This does mean that it's much harder to tell whether the generation process is buggy (there's nothing to cross-check it with), and there may be disagreements with GC liveness. But the performance gain is significant, and it's nice not to be messing with earlier compiler phases.

The intermediate representations are gone. Instead of producing ssa.BlockDebugs, then dwarf.LocationLists, and then finally real location lists, go directly from the SSA to a (mostly) real location list. Because the SSA analysis happens before assembly, it stores encoded block/value IDs where PCs would normally go. It would be easier to do the SSA analysis after assembly, but I didn't want to retain the SSA just for that.

Generation proceeds in two phases: first, it traverses the function in CFG order, storing the state of the block at the beginning and end. End states are used to produce the start states of the successor blocks. In the second phase, it traverses in program text order and produces the location lists. The processing in the second phase is redundant, but much cheaper than storing the intermediate representation. It might be possible to combine the two phases somewhat to take advantage of cases where the CFG matches the block layout, but I haven't tried.

Location lists are finalized by adding a base address selection entry, translating each encoded block/value ID to a real PC, and adding the terminating zero entry. This probably won't work on OSX, where dsymutil will choke on the base address selection. I tried emitting CU-relative relocations for each address, and it was *very* bad for performance -- it uses more memory storing all the relocations than it does for the actual location list bytes. I think I'm going to end up synthesizing the relocations in the linker only on OSX, but TBD.

TestNexting needs updating: with more optimizations working, the debugger doesn't stop on the continue (line 88) any more, and the test's duplicate suppression kicks in. Also, dx and dy live a little longer now, but they have the correct values.

Change-Id: Ie772dfe23a4e389ca573624fac4d05401ae32307
Reviewed-on: https://go-review.googlesource.com/89356
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
commit 2075a9323d (parent 7d7af6106f)
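The finalization scheme described above can be illustrated with a minimal, self-contained sketch (hypothetical types and names, not the compiler's actual code): entries carry encoded block/value IDs while the SSA analysis runs, and only after assembly does a GetPC-style callback translate them into the start/end addresses of a DWARF location list, framed by a base-address-selection entry and terminated by a zero entry.

package main

import (
	"encoding/binary"
	"fmt"
)

// entry is one pending location-list entry whose endpoints are still
// encoded (block ID, value ID) pairs rather than PCs.
type entry struct {
	startBlock, startValue int64
	endBlock, endValue     int64
	expr                   []byte // DWARF location expression bytes
}

// finalize emits the list bytes: a base-address-selection entry (all-ones
// address followed by the function's start address), one start/end/expr
// record per entry with the encoded IDs translated by getPC, and the
// terminating zero pair.
func finalize(entries []entry, funcStart uint64, getPC func(block, value int64) uint64) []byte {
	var out []byte
	putAddr := func(x uint64) {
		var buf [8]byte
		binary.LittleEndian.PutUint64(buf[:], x)
		out = append(out, buf[:]...)
	}

	putAddr(^uint64(0)) // base address selection marker
	putAddr(funcStart)  // base added to the offsets that follow

	for _, e := range entries {
		putAddr(getPC(e.startBlock, e.startValue))
		putAddr(getPC(e.endBlock, e.endValue))
		var lenBuf [2]byte
		binary.LittleEndian.PutUint16(lenBuf[:], uint16(len(e.expr)))
		out = append(out, lenBuf[:]...)
		out = append(out, e.expr...)
	}

	putAddr(0) // end-of-list entry
	putAddr(0)
	return out
}

func main() {
	// Pretend assembly assigned these PCs to (block, value) endpoints.
	pcs := map[[2]int64]uint64{{1, 3}: 0x10, {1, 7}: 0x24}
	getPC := func(b, v int64) uint64 { return pcs[[2]int64{b, v}] }

	list := finalize([]entry{{1, 3, 1, 7, []byte{0x50 /* DW_OP_reg0 */}}}, 0x401000, getPC)
	fmt.Printf("% x\n", list)
}

In the real change the translation is supplied by the DebugInfo.GetPC closure installed in genssa (visible in the diff below); the byte layout here just mirrors the standard .debug_loc framing.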
@@ -572,13 +572,12 @@ var knownFormats = map[string]string{
"*cmd/compile/internal/ssa.Block %v": "",
"*cmd/compile/internal/ssa.Func %s": "",
"*cmd/compile/internal/ssa.Func %v": "",
"*cmd/compile/internal/ssa.LocalSlot %+v": "",
"*cmd/compile/internal/ssa.LocalSlot %v": "",
"*cmd/compile/internal/ssa.Register %s": "",
"*cmd/compile/internal/ssa.Register %v": "",
"*cmd/compile/internal/ssa.SparseTreeNode %v": "",
"*cmd/compile/internal/ssa.Value %s": "",
"*cmd/compile/internal/ssa.Value %v": "",
"*cmd/compile/internal/ssa.VarLoc %v": "",
"*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "",
"*cmd/compile/internal/types.Field %p": "",
"*cmd/compile/internal/types.Field %v": "",
@@ -597,7 +596,6 @@ var knownFormats = map[string]string{
"*cmd/compile/internal/types.Type %p": "",
"*cmd/compile/internal/types.Type %s": "",
"*cmd/compile/internal/types.Type %v": "",
"*cmd/internal/dwarf.Location %#v": "",
"*cmd/internal/obj.Addr %v": "",
"*cmd/internal/obj.LSym %v": "",
"*math/big.Int %#x": "",
@@ -605,13 +603,12 @@ var knownFormats = map[string]string{
"[16]byte %x": "",
"[]*cmd/compile/internal/gc.Node %v": "",
"[]*cmd/compile/internal/gc.Sig %#v": "",
"[]*cmd/compile/internal/ssa.Block %v": "",
"[]*cmd/compile/internal/ssa.Value %v": "",
"[][]cmd/compile/internal/ssa.SlotID %v": "",
"[]byte %s": "",
"[]byte %x": "",
"[]cmd/compile/internal/ssa.Edge %v": "",
"[]cmd/compile/internal/ssa.ID %v": "",
"[]cmd/compile/internal/ssa.VarLocList %v": "",
"[]cmd/compile/internal/syntax.token %s": "",
"[]string %v": "",
"bool %v": "",
@@ -637,18 +634,17 @@ var knownFormats = map[string]string{
"cmd/compile/internal/gc.Val %v": "",
"cmd/compile/internal/gc.fmtMode %d": "",
"cmd/compile/internal/gc.initKind %d": "",
"cmd/compile/internal/gc.locID %v": "",
"cmd/compile/internal/ssa.BranchPrediction %d": "",
"cmd/compile/internal/ssa.Edge %v": "",
"cmd/compile/internal/ssa.GCNode %v": "",
"cmd/compile/internal/ssa.ID %d": "",
"cmd/compile/internal/ssa.ID %v": "",
"cmd/compile/internal/ssa.LocalSlot %s": "",
"cmd/compile/internal/ssa.LocalSlot %v": "",
"cmd/compile/internal/ssa.Location %s": "",
"cmd/compile/internal/ssa.Op %s": "",
"cmd/compile/internal/ssa.Op %v": "",
"cmd/compile/internal/ssa.ValAndOff %s": "",
"cmd/compile/internal/ssa.VarLocList %v": "",
"cmd/compile/internal/ssa.rbrank %d": "",
"cmd/compile/internal/ssa.regMask %d": "",
"cmd/compile/internal/ssa.register %d": "",
@@ -663,7 +659,6 @@ var knownFormats = map[string]string{
"cmd/compile/internal/types.EType %d": "",
"cmd/compile/internal/types.EType %s": "",
"cmd/compile/internal/types.EType %v": "",
"cmd/internal/dwarf.Location %#v": "",
"cmd/internal/src.Pos %s": "",
"cmd/internal/src.Pos %v": "",
"error %v": "",
@@ -13,7 +13,6 @@ import (
"cmd/internal/src"
"cmd/internal/sys"
"fmt"
"math"
"math/rand"
"sort"
"strings"
@@ -304,8 +303,6 @@ func compileFunctions() {

func debuginfo(fnsym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*Node)
debugInfo := fn.Func.DebugInfo
fn.Func.DebugInfo = nil
if fn.Func.Nname != nil {
if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect {
Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
@@ -344,7 +341,7 @@ func debuginfo(fnsym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCall
})
}

decls, dwarfVars := createDwarfVars(fnsym, debugInfo, automDecls)
decls, dwarfVars := createDwarfVars(fnsym, fn.Func, automDecls)

var varScopes []ScopeID
for _, decl := range decls {
@@ -437,65 +434,24 @@ func createSimpleVars(automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool
return decls, vars, selected
}

type varPart struct {
varOffset int64
slot ssa.SlotID
}

func createComplexVars(fnsym *obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
for _, blockDebug := range debugInfo.Blocks {
for _, locList := range blockDebug.Variables {
for _, loc := range locList.Locations {
if loc.StartProg != nil {
loc.StartPC = loc.StartProg.Pc
}
if loc.EndProg != nil {
loc.EndPC = loc.EndProg.Pc
} else {
loc.EndPC = fnsym.Size
}
if Debug_locationlist == 0 {
loc.EndProg = nil
loc.StartProg = nil
}
}
}
}

// Group SSA variables by the user variable they were decomposed from.
varParts := map[*Node][]varPart{}
ssaVars := make(map[*Node]bool)
for slotID, slot := range debugInfo.VarSlots {
for slot.SplitOf != nil {
slot = slot.SplitOf
}
n := slot.N.(*Node)
ssaVars[n] = true
varParts[n] = append(varParts[n], varPart{varOffset(slot), ssa.SlotID(slotID)})
}
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *Func, automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) {
debugInfo := fn.DebugInfo

// Produce a DWARF variable entry for each user variable.
// Don't iterate over the map -- that's nondeterministic, and
// createComplexVar has side effects. Instead, go by slot.
var decls []*Node
var vars []*dwarf.Var
for _, slot := range debugInfo.VarSlots {
for slot.SplitOf != nil {
slot = slot.SplitOf
}
n := slot.N.(*Node)
parts := varParts[n]
if parts == nil {
continue
}
// Don't work on this variable again, no matter how many slots it has.
delete(varParts, n)
ssaVars := make(map[*Node]bool)

// Get the order the parts need to be in to represent the memory
// of the decomposed user variable.
sort.Sort(partsByVarOffset(parts))
for varID := range debugInfo.Vars {
n := debugInfo.Vars[varID].(*Node)
ssaVars[n] = true
for _, slot := range debugInfo.VarSlots[varID] {
ssaVars[debugInfo.Slots[slot].N.(*Node)] = true
}

if dvar := createComplexVar(debugInfo, n, parts); dvar != nil {
if dvar := createComplexVar(fn, ssa.VarID(varID)); dvar != nil {
decls = append(decls, n)
vars = append(vars, dvar)
}
@@ -504,13 +460,15 @@ func createComplexVars(fnsym *obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*
return decls, vars, ssaVars
}

func createDwarfVars(fnsym *obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*Node) ([]*Node, []*dwarf.Var) {
// createDwarfVars process fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, fn *Func, automDecls []*Node) ([]*Node, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*Node
var selected map[*Node]bool
if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && debugInfo != nil {
decls, vars, selected = createComplexVars(fnsym, debugInfo, automDecls)
if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && fn.DebugInfo != nil {
decls, vars, selected = createComplexVars(fnsym, fn, automDecls)
} else {
decls, vars, selected = createSimpleVars(automDecls)
}
@@ -635,22 +593,6 @@ func (s byNodeName) Len() int { return len(s) }
func (s byNodeName) Less(i, j int) bool { return cmpNodeName(s[i], s[j]) }
func (s byNodeName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// varOffset returns the offset of slot within the user variable it was
// decomposed from. This has nothing to do with its stack offset.
func varOffset(slot *ssa.LocalSlot) int64 {
offset := slot.Off
for ; slot.SplitOf != nil; slot = slot.SplitOf {
offset += slot.SplitOffset
}
return offset
}

type partsByVarOffset []varPart

func (a partsByVarOffset) Len() int { return len(a) }
func (a partsByVarOffset) Less(i, j int) bool { return a[i].varOffset < a[j].varOffset }
func (a partsByVarOffset) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// stackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
@@ -671,24 +613,17 @@ func stackOffset(slot *ssa.LocalSlot) int32 {
return int32(base + n.Xoffset + slot.Off)
}

// createComplexVar builds a DWARF variable entry and location list representing n.
func createComplexVar(debugInfo *ssa.FuncDebug, n *Node, parts []varPart) *dwarf.Var {
slots := debugInfo.Slots
var offs int64 // base stack offset for this kind of variable
// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fn *Func, varID ssa.VarID) *dwarf.Var {
debug := fn.DebugInfo
n := debug.Vars[varID].(*Node)

var abbrev int
switch n.Class() {
case PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
if Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
}
if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) {
offs -= int64(Widthptr)
}

case PPARAM, PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
offs += Ctxt.FixedFrameSize()
default:
return nil
}
@@ -712,196 +647,20 @@ func createComplexVar(debugInfo *ssa.FuncDebug, n *Node, parts []varPart) *dwarf
Abbrev: abbrev,
Type: Ctxt.Lookup(typename),
// The stack offset is used as a sorting key, so for decomposed
// variables just give it the lowest one. It's not used otherwise.
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
// location, but it's not obvious how to do better.
StackOffset: int32(stackOffset(slots[parts[0].slot])),
StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
DeclFile: declpos.Base().SymFilename(),
DeclLine: declpos.Line(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}

if Debug_locationlist != 0 {
Ctxt.Logf("Building location list for %+v. Parts:\n", n)
for _, part := range parts {
Ctxt.Logf("\t%v => %v\n", debugInfo.Slots[part.slot], debugInfo.SlotLocsString(part.slot))
}
}

// Given a variable that's been decomposed into multiple parts,
// its location list may need a new entry after the beginning or
// end of every location entry for each of its parts. For example:
//
// [variable] [pc range]
// string.ptr |----|-----| |----|
// string.len |------------| |--|
// ... needs a location list like:
// string |----|-----|-| |--|-|
//
// Note that location entries may or may not line up with each other,
// and some of the result will only have one or the other part.
//
// To build the resulting list:
// - keep a "current" pointer for each part
// - find the next transition point
// - advance the current pointer for each part up to that transition point
// - build the piece for the range between that transition point and the next
// - repeat

type locID struct {
block int
loc int
}
findLoc := func(part varPart, id locID) *ssa.VarLoc {
if id.block >= len(debugInfo.Blocks) {
return nil
}
return debugInfo.Blocks[id.block].Variables[part.slot].Locations[id.loc]
}
nextLoc := func(part varPart, id locID) (locID, *ssa.VarLoc) {
// Check if there's another loc in this block
id.loc++
if b := debugInfo.Blocks[id.block]; b != nil && id.loc < len(b.Variables[part.slot].Locations) {
return id, findLoc(part, id)
}
// Find the next block that has a loc for this part.
id.loc = 0
id.block++
for ; id.block < len(debugInfo.Blocks); id.block++ {
if b := debugInfo.Blocks[id.block]; b != nil && len(b.Variables[part.slot].Locations) != 0 {
return id, findLoc(part, id)
}
}
return id, nil
}
curLoc := make([]locID, len(slots))
// Position each pointer at the first entry for its slot.
for _, part := range parts {
if b := debugInfo.Blocks[0]; b != nil && len(b.Variables[part.slot].Locations) != 0 {
// Block 0 has an entry; no need to advance.
continue
}
curLoc[part.slot], _ = nextLoc(part, curLoc[part.slot])
}

// findBoundaryAfter finds the next beginning or end of a piece after currentPC.
findBoundaryAfter := func(currentPC int64) int64 {
min := int64(math.MaxInt64)
for _, part := range parts {
// For each part, find the first PC greater than current. Doesn't
// matter if it's a start or an end, since we're looking for any boundary.
// If it's the new winner, save it.
onePart:
for i, loc := curLoc[part.slot], findLoc(part, curLoc[part.slot]); loc != nil; i, loc = nextLoc(part, i) {
for _, pc := range [2]int64{loc.StartPC, loc.EndPC} {
if pc > currentPC {
if pc < min {
min = pc
}
break onePart
}
}
}
}
return min
}
var start int64
end := findBoundaryAfter(0)
for {
// Advance to the next chunk.
start = end
end = findBoundaryAfter(start)
if end == math.MaxInt64 {
break
}

dloc := dwarf.Location{StartPC: start, EndPC: end}
if Debug_locationlist != 0 {
Ctxt.Logf("Processing range %x -> %x\n", start, end)
}

// Advance curLoc to the last location that starts before/at start.
// After this loop, if there's a location that covers [start, end), it will be current.
// Otherwise the current piece will be too early.
for _, part := range parts {
choice := locID{-1, -1}
for i, loc := curLoc[part.slot], findLoc(part, curLoc[part.slot]); loc != nil; i, loc = nextLoc(part, i) {
if loc.StartPC > start {
break //overshot
}
choice = i // best yet
}
if choice.block != -1 {
curLoc[part.slot] = choice
}
if Debug_locationlist != 0 {
Ctxt.Logf("\t %v => %v", slots[part.slot], curLoc[part.slot])
}
}
if Debug_locationlist != 0 {
Ctxt.Logf("\n")
}
// Assemble the location list entry for this chunk.
present := 0
for _, part := range parts {
dpiece := dwarf.Piece{
Length: slots[part.slot].Type.Size(),
}
loc := findLoc(part, curLoc[part.slot])
if loc == nil || start >= loc.EndPC || end <= loc.StartPC {
if Debug_locationlist != 0 {
Ctxt.Logf("\t%v: missing", slots[part.slot])
}
dpiece.Missing = true
dloc.Pieces = append(dloc.Pieces, dpiece)
continue
}
present++
if Debug_locationlist != 0 {
Ctxt.Logf("\t%v: %v", slots[part.slot], debugInfo.Blocks[curLoc[part.slot].block].LocString(loc))
}
if loc.OnStack {
dpiece.OnStack = true
dpiece.StackOffset = stackOffset(slots[loc.StackLocation])
} else {
for reg := 0; reg < len(debugInfo.Registers); reg++ {
if loc.Registers&(1<<uint8(reg)) != 0 {
dpiece.RegNum = Ctxt.Arch.DWARFRegisters[debugInfo.Registers[reg].ObjNum()]
}
}
}
dloc.Pieces = append(dloc.Pieces, dpiece)
}
if present == 0 {
if Debug_locationlist != 0 {
Ctxt.Logf(" -> totally missing\n")
}
continue
}
// Extend the previous entry if possible.
if len(dvar.LocationList) > 0 {
prev := &dvar.LocationList[len(dvar.LocationList)-1]
if prev.EndPC == dloc.StartPC && len(prev.Pieces) == len(dloc.Pieces) {
equal := true
for i := range prev.Pieces {
if prev.Pieces[i] != dloc.Pieces[i] {
equal = false
}
}
if equal {
prev.EndPC = end
if Debug_locationlist != 0 {
Ctxt.Logf("-> merged with previous, now %#v\n", prev)
}
continue
}
}
}
dvar.LocationList = append(dvar.LocationList, dloc)
if Debug_locationlist != 0 {
Ctxt.Logf("-> added: %#v\n", dloc)
list := debug.LocationLists[varID]
if len(list) != 0 {
dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
debug.PutLocationList(list, Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
}
}
return dvar
@@ -4652,15 +4652,14 @@ func genssa(f *ssa.Func, pp *Progs) {

s.ScratchFpMem = e.scratchFpMem

logLocationLists := Debug_locationlist != 0
if Ctxt.Flag_locationlists {
e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(f, logLocationLists)
valueToProgAfter = make([]*obj.Prog, f.NumValues())
}

// Emit basic blocks
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.next

// Emit values in block
thearch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
@@ -4698,8 +4697,6 @@ func genssa(f *ssa.Func, pp *Progs) {
}
case ssa.OpPhi:
CheckLoweredPhi(v)
case ssa.OpRegKill:
// nothing to do
default:
// let the backend handle it
thearch.SSAGenValue(&s, v)
@@ -4708,12 +4705,14 @@ func genssa(f *ssa.Func, pp *Progs) {
if Ctxt.Flag_locationlists {
valueToProgAfter[v.ID] = s.pp.next
}

if logProgs {
for ; x != s.pp.next; x = x.Link {
progToValue[x] = v
}
}
}

// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && Debug['N'] == 0 {
@@ -4734,41 +4733,19 @@ func genssa(f *ssa.Func, pp *Progs) {
}

if Ctxt.Flag_locationlists {
for i := range f.Blocks {
blockDebug := e.curfn.Func.DebugInfo.Blocks[i]
for _, locList := range blockDebug.Variables {
for _, loc := range locList.Locations {
if loc.Start == ssa.BlockStart {
loc.StartProg = s.bstart[f.Blocks[i].ID]
} else {
loc.StartProg = valueToProgAfter[loc.Start.ID]
}
if loc.End == nil {
Fatalf("empty loc %v compiling %v", loc, f.Name)
}

if loc.End == ssa.BlockEnd {
// If this variable was live at the end of the block, it should be
// live over the control flow instructions. Extend it up to the
// beginning of the next block.
// If this is the last block, then there's no Prog to use for it, and
// EndProg is unset.
if i < len(f.Blocks)-1 {
loc.EndProg = s.bstart[f.Blocks[i+1].ID]
}
} else {
// Advance the "end" forward by one; the end-of-range doesn't take effect
// until the instruction actually executes.
loc.EndProg = valueToProgAfter[loc.End.ID].Link
if loc.EndProg == nil {
Fatalf("nil loc.EndProg compiling %v, loc=%v", f.Name, loc)
}
}
if !logLocationLists {
loc.Start = nil
loc.End = nil
}
}
e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset)
bstart := s.bstart
// Note that at this moment, Prog.Pc is a sequence number; it's
// not a real PC until after assembly, so this mapping has to
// be done later.
e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
switch v {
case ssa.BlockStart.ID:
return int64(bstart[b].Pc)
case ssa.BlockEnd.ID:
return int64(e.curfn.Func.lsym.Size)
default:
return int64(valueToProgAfter[v].Pc)
}
}
}
@@ -14,11 +14,6 @@ type Cache struct {
blocks [200]Block
locs [2000]Location

// Storage for DWARF variable locations. Lazily allocated
// since location lists are off by default.
varLocs []VarLoc
curVarLoc int

// Reusable stackAllocState.
// See stackalloc.go's {new,put}StackAllocState.
stackAllocState *stackAllocState
@@ -43,21 +38,4 @@ func (c *Cache) Reset() {
for i := range xl {
xl[i] = nil
}
xvl := c.varLocs[:c.curVarLoc]
for i := range xvl {
xvl[i] = VarLoc{}
}
c.curVarLoc = 0
}

func (c *Cache) NewVarLoc() *VarLoc {
if c.varLocs == nil {
c.varLocs = make([]VarLoc, 4000)
}
if c.curVarLoc == len(c.varLocs) {
return &VarLoc{}
}
vl := &c.varLocs[c.curVarLoc]
c.curVarLoc++
return vl
}

@@ -465,10 +465,6 @@ func memCheck(f *Func) {
if seenNonPhi {
f.Fatalf("phi after non-phi @ %s: %s", b, v)
}
case OpRegKill:
if f.RegAlloc == nil {
f.Fatalf("RegKill seen before register allocation @ %s: %s", b, v)
}
default:
seenNonPhi = true
}
File diff suppressed because it is too large
@@ -450,7 +450,6 @@ var genericOps = []opData{
{name: "VarKill", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
{name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read"}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
{name: "KeepAlive", argLength: 2, typ: "Mem"}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
{name: "RegKill"}, // regalloc has determined that the value in this register is dead

// Ops for breaking 64-bit operations on 32-bit architectures
{name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo
@@ -2017,7 +2017,6 @@ const (
OpVarKill
OpVarLive
OpKeepAlive
OpRegKill
OpInt64Make
OpInt64Hi
OpInt64Lo
@@ -24081,11 +24080,6 @@ var opcodeTable = [...]opInfo{
argLen: 2,
generic: true,
},
{
name: "RegKill",
argLen: 0,
generic: true,
},
{
name: "Int64Make",
argLen: 2,
@@ -242,9 +242,6 @@ type regAllocState struct {
// current state of each (preregalloc) Value
values []valState

// names associated with each Value
valueNames [][]LocalSlot

// ID of SP, SB values
sp, sb ID

@@ -303,13 +300,6 @@ type startReg struct {

// freeReg frees up register r. Any current user of r is kicked out.
func (s *regAllocState) freeReg(r register) {
s.freeOrResetReg(r, false)
}

// freeOrResetReg frees up register r. Any current user of r is kicked out.
// resetting indicates that the operation is only for bookkeeping,
// e.g. when clearing out state upon entry to a new block.
func (s *regAllocState) freeOrResetReg(r register, resetting bool) {
v := s.regs[r].v
if v == nil {
s.f.Fatalf("tried to free an already free register %d\n", r)
@@ -319,16 +309,6 @@ func (s *regAllocState) freeOrResetReg(r register, resetting bool) {
if s.f.pass.debug > regDebug {
fmt.Printf("freeReg %s (dump %s/%s)\n", &s.registers[r], v, s.regs[r].c)
}
if !resetting && s.f.Config.ctxt.Flag_locationlists && len(s.valueNames[v.ID]) != 0 {
kill := s.curBlock.NewValue0(src.NoXPos, OpRegKill, types.TypeVoid)
for int(kill.ID) >= len(s.orig) {
s.orig = append(s.orig, nil)
}
for _, name := range s.valueNames[v.ID] {
s.f.NamedValues[name] = append(s.f.NamedValues[name], kill)
}
s.f.setHome(kill, &s.registers[r])
}
s.regs[r] = regState{}
s.values[v.ID].regs &^= regMask(1) << r
s.used &^= regMask(1) << r
@@ -613,17 +593,6 @@ func (s *regAllocState) init(f *Func) {
s.values = make([]valState, f.NumValues())
s.orig = make([]*Value, f.NumValues())
s.copies = make(map[*Value]bool)
if s.f.Config.ctxt.Flag_locationlists {
s.valueNames = make([][]LocalSlot, f.NumValues())
for slot, values := range f.NamedValues {
if isSynthetic(&slot) {
continue
}
for _, value := range values {
s.valueNames[value.ID] = append(s.valueNames[value.ID], slot)
}
}
}
for _, b := range f.Blocks {
for _, v := range b.Values {
if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
@@ -717,9 +686,7 @@ func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {

// Sets the state of the registers to that encoded in regs.
func (s *regAllocState) setState(regs []endReg) {
for s.used != 0 {
s.freeOrResetReg(pickReg(s.used), true)
}
s.freeRegs(s.used)
for _, x := range regs {
s.assignReg(x.r, x.v, x.c)
}
@@ -1035,7 +1002,7 @@ func (s *regAllocState) regalloc(f *Func) {
pidx := e.i
for _, v := range succ.Values {
if v.Op != OpPhi {
continue
break
}
if !s.values[v.ID].needReg {
continue
@@ -1598,9 +1565,6 @@ func (s *regAllocState) placeSpills() {
for _, b := range f.Blocks {
var m regMask
for _, v := range b.Values {
if v.Op == OpRegKill {
continue
}
if v.Op != OpPhi {
break
}
@@ -1711,7 +1675,7 @@ func (s *regAllocState) placeSpills() {
for _, b := range f.Blocks {
nphi := 0
for _, v := range b.Values {
if v.Op != OpRegKill && v.Op != OpPhi {
if v.Op != OpPhi {
break
}
nphi++
@@ -1832,9 +1796,6 @@ func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive
}
// Phis need their args to end up in a specific location.
for _, v := range e.b.Values {
if v.Op == OpRegKill {
continue
}
if v.Op != OpPhi {
break
}
@@ -2094,16 +2055,6 @@ func (e *edgeState) erase(loc Location) {
fmt.Printf("v%d no longer available in %s:%s\n", vid, loc, c)
}
a[i], a = a[len(a)-1], a[:len(a)-1]
if e.s.f.Config.ctxt.Flag_locationlists {
if _, isReg := loc.(*Register); isReg && int(c.ID) < len(e.s.valueNames) && len(e.s.valueNames[c.ID]) != 0 {
kill := e.p.NewValue0(src.NoXPos, OpRegKill, types.TypeVoid)
e.s.f.setHome(kill, loc)
for _, name := range e.s.valueNames[c.ID] {
e.s.f.NamedValues[name] = append(e.s.f.NamedValues[name], kill)
}
}
}

break
}
}
@@ -14,7 +14,7 @@ dy = <Optimized out, as expected>
dx = 2
dy = 2
63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
dx = <Optimized out, as expected>
dx = 2
dy = <Optimized out, as expected>
64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
65: if len(os.Args) > 1 {
@@ -116,11 +116,6 @@ scanner = (struct bufio.Scanner *) <A>
a = 0
n = 0
t = 0
88: continue
87: if a == 0 { //gdb-opt=(a,n,t)
a = 3
n = 0
t = 0
92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
91: n += a
92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
@@ -147,11 +142,6 @@ t = 3
a = 0
n = 6
t = 9
88: continue
87: if a == 0 { //gdb-opt=(a,n,t)
a = 2
n = 6
t = 9
92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
91: n += a
92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
@@ -178,5 +168,4 @@ t = 17
a = 0
n = 9
t = 22
88: continue
98: }
@@ -44,23 +44,6 @@ type Sym interface {
Len() int64
}

// A Location represents a variable's location at a particular PC range.
// It becomes a location list entry in the DWARF.
type Location struct {
StartPC, EndPC int64
Pieces []Piece
}

// A Piece represents the location of a particular part of a variable.
// It becomes part of a location list entry (a DW_OP_piece) in the DWARF.
type Piece struct {
Length int64
StackOffset int32
RegNum int16
Missing bool
OnStack bool // if true, RegNum is unset.
}

// A Var represents a local variable or a function parameter.
type Var struct {
Name string
@@ -68,15 +51,17 @@ type Var struct {
IsReturnValue bool
IsInlFormal bool
StackOffset int32
LocationList []Location
Scope int32
Type Sym
DeclFile string
DeclLine uint
DeclCol uint
InlIndex int32 // subtract 1 to form real index into InlTree
ChildIndex int32 // child DIE index in abstract function
IsInAbstract bool // variable exists in abstract function
// This package can't use the ssa package, so it can't mention ssa.FuncDebug,
// so indirect through a closure.
PutLocationList func(listSym, startPC Sym)
Scope int32
Type Sym
DeclFile string
DeclLine uint
DeclCol uint
InlIndex int32 // subtract 1 to form real index into InlTree
ChildIndex int32 // child DIE index in abstract function
IsInAbstract bool // variable exists in abstract function
}
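The PutLocationList field above replaces the materialized LocationList slice: the dwarf package cannot import the ssa package, so the producer that owns the SSA-side data installs a closure on the variable. A rough, self-contained illustration of that pattern (hypothetical names, not the real packages):

package main

import "fmt"

// Stand-ins for dwarf.Sym and dwarf.Var (names hypothetical).
type Sym interface{ Name() string }

type Var struct {
	Name            string
	PutLocationList func(listSym, startPC Sym) // installed by the producer
}

type lsym string

func (s lsym) Name() string { return string(s) }

// Stand-in for the ssa-side data the dwarf package must not know about.
type funcDebug struct{ lists map[string][]byte }

func (d *funcDebug) putLocationList(v string, listSym, startPC Sym) {
	fmt.Printf("emit %d bytes for %s into %s (base %s)\n",
		len(d.lists[v]), v, listSym.Name(), startPC.Name())
}

func main() {
	debug := &funcDebug{lists: map[string][]byte{"x": {0x50}}}
	v := &Var{Name: "x"}
	// Producer side: capture the ssa data in a closure.
	v.PutLocationList = func(listSym, startPC Sym) {
		debug.putLocationList(v.Name, listSym, startPC)
	}
	// Consumer side (the DWARF writer) only ever sees the closure.
	if v.PutLocationList != nil {
		v.PutLocationList(lsym(".debug_loc"), lsym("main.f"))
	}
}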

// A Scope represents a lexical scope. All variables declared within a
@@ -1360,10 +1345,10 @@ func determineVarAbbrev(v *Var, fnabbrev int) (int, bool, bool) {
// convert to an inline abbreviation and emit an empty location.
missing := false
switch {
case abbrev == DW_ABRV_AUTO_LOCLIST && len(v.LocationList) == 0:
case abbrev == DW_ABRV_AUTO_LOCLIST && v.PutLocationList == nil:
missing = true
abbrev = DW_ABRV_AUTO
case abbrev == DW_ABRV_PARAM_LOCLIST && len(v.LocationList) == 0:
case abbrev == DW_ABRV_PARAM_LOCLIST && v.PutLocationList == nil:
missing = true
abbrev = DW_ABRV_PARAM
}
@@ -1470,7 +1455,7 @@ func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int,

if abbrevUsesLoclist(abbrev) {
putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, int64(s.Loc.Len()), s.Loc)
addLocList(ctxt, s.Loc, s.StartPC, v, encbuf)
v.PutLocationList(s.Loc, s.StartPC)
} else {
loc := encbuf[:0]
switch {
@@ -1488,45 +1473,6 @@ func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int,
// Var has no children => no terminator
}

func addLocList(ctxt Context, listSym, startPC Sym, v *Var, encbuf []byte) {
// Base address entry: max ptr followed by the base address.
ctxt.AddInt(listSym, ctxt.PtrSize(), ^0)
ctxt.AddAddress(listSym, startPC, 0)
for _, entry := range v.LocationList {
ctxt.AddInt(listSym, ctxt.PtrSize(), entry.StartPC)
ctxt.AddInt(listSym, ctxt.PtrSize(), entry.EndPC)
locBuf := encbuf[:0]
for _, piece := range entry.Pieces {
if !piece.Missing {
if piece.OnStack {
if piece.StackOffset == 0 {
locBuf = append(locBuf, DW_OP_call_frame_cfa)
} else {
locBuf = append(locBuf, DW_OP_fbreg)
locBuf = AppendSleb128(locBuf, int64(piece.StackOffset))
}
} else {
if piece.RegNum < 32 {
locBuf = append(locBuf, DW_OP_reg0+byte(piece.RegNum))
} else {
locBuf = append(locBuf, DW_OP_regx)
locBuf = AppendUleb128(locBuf, uint64(piece.RegNum))
}
}
}
if len(entry.Pieces) > 1 {
locBuf = append(locBuf, DW_OP_piece)
locBuf = AppendUleb128(locBuf, uint64(piece.Length))
}
}
ctxt.AddInt(listSym, 2, int64(len(locBuf)))
ctxt.AddBytes(listSym, locBuf)
}
// End list
ctxt.AddInt(listSym, ctxt.PtrSize(), 0)
ctxt.AddInt(listSym, ctxt.PtrSize(), 0)
}
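For reference, a standalone sketch of the piece encoding that addLocList performed (and which now happens on the ssa side): stack pieces become DW_OP_call_frame_cfa or DW_OP_fbreg plus an SLEB128 offset, register pieces become DW_OP_reg0+n or DW_OP_regx plus a ULEB128 register number, and multi-piece variables append DW_OP_piece with the piece length. The opcode values follow the DWARF spec; the helper and type names below are hypothetical, not the toolchain's.

package main

import "fmt"

// DWARF expression opcodes used below (values from the DWARF spec).
const (
	DW_OP_reg0           = 0x50
	DW_OP_regx           = 0x90
	DW_OP_fbreg          = 0x91
	DW_OP_piece          = 0x93
	DW_OP_call_frame_cfa = 0x9c
)

type piece struct {
	length      int64 // size of the piece in bytes
	stackOffset int32
	regNum      uint16
	onStack     bool
	missing     bool
}

func appendUleb128(b []byte, v uint64) []byte {
	for {
		c := byte(v & 0x7f)
		v >>= 7
		if v != 0 {
			c |= 0x80
		}
		b = append(b, c)
		if c&0x80 == 0 {
			return b
		}
	}
}

func appendSleb128(b []byte, v int64) []byte {
	for {
		c := byte(v & 0x7f)
		s := byte(v & 0x40)
		v >>= 7 // arithmetic shift keeps the sign
		if (v != -1 || s == 0) && (v != 0 || s != 0) {
			c |= 0x80
		}
		b = append(b, c)
		if c&0x80 == 0 {
			return b
		}
	}
}

// encodePieces builds the location expression for one location-list entry.
func encodePieces(pieces []piece) []byte {
	var buf []byte
	for _, p := range pieces {
		if !p.missing {
			if p.onStack {
				if p.stackOffset == 0 {
					buf = append(buf, DW_OP_call_frame_cfa)
				} else {
					buf = append(buf, DW_OP_fbreg)
					buf = appendSleb128(buf, int64(p.stackOffset))
				}
			} else if p.regNum < 32 {
				buf = append(buf, DW_OP_reg0+byte(p.regNum))
			} else {
				buf = append(buf, DW_OP_regx)
				buf = appendUleb128(buf, uint64(p.regNum))
			}
		}
		// With more than one piece, each piece (even a missing one)
		// is delimited by DW_OP_piece and its byte length.
		if len(pieces) > 1 {
			buf = append(buf, DW_OP_piece)
			buf = appendUleb128(buf, uint64(p.length))
		}
	}
	return buf
}

func main() {
	// A string decomposed into a register pointer piece and a stack length piece.
	expr := encodePieces([]piece{
		{length: 8, regNum: 0},                       // ptr in register 0
		{length: 8, onStack: true, stackOffset: -16}, // len on the stack
	})
	fmt.Printf("% x\n", expr)
}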

// VarsByOffset attaches the methods of sort.Interface to []*Var,
// sorting in increasing StackOffset.
type VarsByOffset []*Var