
cmd/compile: stop using VARKILL

With the introduction of stack objects, VARKILL information is
no longer needed.

With stack objects, an object is dead when there are no more static
references to it, and the stack scanner can't find any live pointers
to it. VARKILL information isn't used to establish live ranges for
address-taken variables any more. In effect, the last static reference
*is* the VARKILL, and there's an additional dynamic liveness check
during stack scanning.
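
To make the model concrete, here is a hedged sketch in Go (hypothetical code, not
from this CL; the function and variable names are illustrative). The map key type
below has no fast-path runtime variant, so the lookup takes the key's address:

	// Hypothetical illustration of the liveness model, not code from this CL.
	func lookup(m map[[16]byte]int, k [16]byte) int {
		tmp := k    // stand-in for a compiler-introduced, addressable key temp
		v := m[tmp] // the runtime lookup takes &tmp; last static reference to tmp
		// Old model: the compiler emitted a VARKILL of tmp here so that later
		// call sites would not treat tmp's stack slot as live.
		// New model: tmp is recorded as a stack object; it is dead once no
		// static references remain and the stack scanner finds no live
		// pointer to it.
		return v
	}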

Next CL will actually rip out the VARKILL opcodes.

Change-Id: I030a2ab867445cf4e0e69397911f8a2e2f0ed07b
Reviewed-on: https://go-review.googlesource.com/c/go/+/419234
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: David Chase <drchase@google.com>
Run-TryBot: Keith Randall <khr@golang.org>
This commit is contained in:
Keith Randall 2022-07-22 14:58:40 -07:00
parent 661146bc0b
commit 908499adec
8 changed files with 50 additions and 122 deletions

src/cmd/compile/internal/ssa/writebarrier.go

@@ -320,12 +320,6 @@ func writebarrier(f *Func) {
 		}
 	}
 
-	// mark volatile temps dead
-	for _, c := range volatiles {
-		tmpNode := c.tmp.Aux
-		memThen = bThen.NewValue1A(memThen.Pos, OpVarKill, types.TypeMem, tmpNode, memThen)
-	}
-
 	// merge memory
 	// Splice memory Phi into the last memory of the original sequence,
 	// which may be used in subsequent blocks. Other memories in the

src/cmd/compile/internal/ssagen/ssa.go

@@ -1958,15 +1958,6 @@ func (s *state) stmt(n ir.Node) {
 		if !s.canSSA(n.X) {
 			s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
 		}
-	case ir.OVARKILL:
-		// Insert a varkill op to record that a variable is no longer live.
-		// We only care about liveness info at call sites, so putting the
-		// varkill in the store chain is enough to keep it correctly ordered
-		// with respect to call ops.
-		n := n.(*ir.UnaryExpr)
-		if !s.canSSA(n.X) {
-			s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
-		}
 	case ir.OVARLIVE:
 		// Insert a varlive op to record that a variable is still live.
@@ -6464,7 +6455,6 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, targ
 			delete(s.vars, valVar)
 		} else {
 			res = s.load(dst, addr)
-			s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
 		}
 		resok = s.variable(okVar, types.Types[types.TBOOL])
 		delete(s.vars, okVar)

src/cmd/compile/internal/walk/complit.go

@@ -494,6 +494,7 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
 	// Build list of var[c] = expr.
 	// Use temporaries so that mapassign1 can have addressable key, elem.
 	// TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
+	// TODO(khr): assign these temps in order phase so we can reuse them across multiple maplits?
 	tmpkey := typecheck.Temp(m.Type().Key())
 	tmpelem := typecheck.Temp(m.Type().Elem())
@@ -519,9 +520,6 @@ func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
 		a = orderStmtInPlace(a, map[string][]*ir.Name{})
 		appendWalkStmt(init, a)
 	}
-
-	appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpkey))
-	appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpelem))
 }
 
 func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {

src/cmd/compile/internal/walk/order.go

@@ -36,18 +36,6 @@ import (
 // Arrange that receive expressions only appear in direct assignments
 // x = <-c or as standalone statements <-c, never in larger expressions.
 
-// TODO(rsc): The temporary introduction during multiple assignments
-// should be moved into this file, so that the temporaries can be cleaned
-// and so that conversions implicit in the OAS2FUNC and OAS2RECV
-// nodes can be made explicit and then have their temporaries cleaned.
-
-// TODO(rsc): Goto and multilevel break/continue can jump over
-// inserted VARKILL annotations. Work out a way to handle these.
-// The current implementation is safe, in that it will execute correctly.
-// But it won't reuse temporaries as aggressively as it might, and
-// it can result in unnecessary zeroing of those variables in the function
-// prologue.
-
 // orderState holds state during the ordering process.
 type orderState struct {
 	out []ir.Node // list of generated statements
@@ -223,16 +211,6 @@ func (o *orderState) safeExpr(n ir.Node) ir.Node {
 	}
 }
 
-// isaddrokay reports whether it is okay to pass n's address to runtime routines.
-// Taking the address of a variable makes the liveness and optimization analyses
-// lose track of where the variable's lifetime ends. To avoid hurting the analyses
-// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
-// because we emit explicit VARKILL instructions marking the end of those
-// temporaries' lifetimes.
-func isaddrokay(n ir.Node) bool {
-	return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class == ir.PEXTERN || ir.IsAutoTmp(n))
-}
-
 // addrTemp ensures that n is okay to pass by address to runtime routines.
 // If the original argument n is not okay, addrTemp creates a tmp, emits
 // tmp = n, and then returns tmp.
@@ -253,7 +231,7 @@ func (o *orderState) addrTemp(n ir.Node) ir.Node {
 		vstat = typecheck.Expr(vstat).(*ir.Name)
 		return vstat
 	}
-	if isaddrokay(n) {
+	if ir.IsAddressable(n) {
 		return n
 	}
 	return o.copyExpr(n)
@@ -380,25 +358,6 @@ func (o *orderState) popTemp(mark ordermarker) {
 	o.temp = o.temp[:mark]
 }
 
-// cleanTempNoPop emits VARKILL instructions to *out
-// for each temporary above the mark on the temporary stack.
-// It does not pop the temporaries from the stack.
-func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node {
-	var out []ir.Node
-	for i := len(o.temp) - 1; i >= int(mark); i-- {
-		n := o.temp[i]
-		out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
-	}
-	return out
-}
-
-// cleanTemp emits VARKILL instructions for each temporary above the
-// mark on the temporary stack and removes them from the stack.
-func (o *orderState) cleanTemp(top ordermarker) {
-	o.out = append(o.out, o.cleanTempNoPop(top)...)
-	o.popTemp(top)
-}
-
 // stmtList orders each of the statements in the list.
 func (o *orderState) stmtList(l ir.Nodes) {
 	s := l
@@ -494,7 +453,7 @@ func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
 	mark := order.markTemp()
 	order.edge()
 	order.stmtList(*n)
-	order.cleanTemp(mark)
+	order.popTemp(mark)
 	*n = order.out
 }
@@ -527,7 +486,7 @@ func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
 	order.free = free
 	mark := order.markTemp()
 	order.stmt(n)
-	order.cleanTemp(mark)
+	order.popTemp(mark)
 	return ir.NewBlockStmt(src.NoXPos, order.out)
 }
@@ -626,8 +585,6 @@ func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
 }
 
 // stmt orders the statement n, appending to o.out.
-// Temporaries created during the statement are cleaned
-// up using VARKILL instructions as possible.
 func (o *orderState) stmt(n ir.Node) {
 	if n == nil {
 		return
@@ -649,7 +606,7 @@ func (o *orderState) stmt(n ir.Node) {
 		n.X = o.expr(n.X, nil)
 		n.Y = o.expr(n.Y, n.X)
 		o.mapAssign(n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	case ir.OASOP:
 		n := n.(*ir.AssignOpStmt)
@@ -674,12 +631,12 @@ func (o *orderState) stmt(n ir.Node) {
 			r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
 			as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
 			o.mapAssign(as)
-			o.cleanTemp(t)
+			o.popTemp(t)
 			return
 		}
 
 		o.mapAssign(n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	case ir.OAS2:
 		n := n.(*ir.AssignListStmt)
@@ -687,7 +644,7 @@ func (o *orderState) stmt(n ir.Node) {
 		o.exprList(n.Lhs)
 		o.exprList(n.Rhs)
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	// Special: avoid copy of func call n.Right
 	case ir.OAS2FUNC:
@@ -708,7 +665,7 @@ func (o *orderState) stmt(n ir.Node) {
 			o.call(call)
 			o.as2func(n)
 		}
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	// Special: use temporary variables to hold result,
 	// so that runtime can take address of temporary.
@@ -745,7 +702,7 @@ func (o *orderState) stmt(n ir.Node) {
 		}
 
 		o.as2ok(n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	// Special: does not save n onto out.
 	case ir.OBLOCK:
@@ -770,7 +727,7 @@ func (o *orderState) stmt(n ir.Node) {
 		t := o.markTemp()
 		o.call(n)
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	case ir.OINLCALL:
 		n := n.(*ir.InlinedCallExpr)
@@ -788,7 +745,7 @@ func (o *orderState) stmt(n ir.Node) {
 		t := o.markTemp()
 		n.X = o.expr(n.X, nil)
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	case ir.OCOPY:
 		n := n.(*ir.BinaryExpr)
@@ -796,14 +753,14 @@ func (o *orderState) stmt(n ir.Node) {
 		n.X = o.expr(n.X, nil)
 		n.Y = o.expr(n.Y, nil)
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
 		n := n.(*ir.CallExpr)
 		t := o.markTemp()
 		o.call(n)
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	// Special: order arguments to inner call but not call itself.
 	case ir.ODEFER, ir.OGO:
@@ -812,7 +769,7 @@ func (o *orderState) stmt(n ir.Node) {
 		o.init(n.Call)
 		o.call(n.Call)
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	case ir.ODELETE:
 		n := n.(*ir.CallExpr)
@@ -821,7 +778,7 @@ func (o *orderState) stmt(n ir.Node) {
 		n.Args[1] = o.expr(n.Args[1], nil)
 		n.Args[1] = o.mapKeyTemp(n.Pos(), n.Args[0].Type(), n.Args[1])
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	// Clean temporaries from condition evaluation at
 	// beginning of loop body and after for statement.
@@ -829,11 +786,10 @@ func (o *orderState) stmt(n ir.Node) {
 		n := n.(*ir.ForStmt)
 		t := o.markTemp()
 		n.Cond = o.exprInPlace(n.Cond)
-		n.Body.Prepend(o.cleanTempNoPop(t)...)
 		orderBlock(&n.Body, o.free)
 		n.Post = orderStmtInPlace(n.Post, o.free)
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	// Clean temporaries from condition at
 	// beginning of both branches.
@@ -841,8 +797,6 @@ func (o *orderState) stmt(n ir.Node) {
 		n := n.(*ir.IfStmt)
 		t := o.markTemp()
 		n.Cond = o.exprInPlace(n.Cond)
-		n.Body.Prepend(o.cleanTempNoPop(t)...)
-		n.Else.Prepend(o.cleanTempNoPop(t)...)
 		o.popTemp(t)
 		orderBlock(&n.Body, o.free)
 		orderBlock(&n.Else, o.free)
@@ -922,7 +876,7 @@ func (o *orderState) stmt(n ir.Node) {
 			orderBlock(&n.Body, o.free)
 		}
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	case ir.ORETURN:
 		n := n.(*ir.ReturnStmt)
@@ -1029,7 +983,6 @@ func (o *orderState) stmt(n ir.Node) {
 		// (The temporary cleaning must follow that ninit work.)
 		for _, cas := range n.Cases {
 			orderBlock(&cas.Body, o.free)
-			cas.Body.Prepend(o.cleanTempNoPop(t)...)
 
 			// TODO(mdempsky): Is this actually necessary?
 			// walkSelect appears to walk Ninit.
@@ -1053,7 +1006,7 @@ func (o *orderState) stmt(n ir.Node) {
 			n.Value = o.addrTemp(n.Value)
 		}
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 	// TODO(rsc): Clean temporaries more aggressively.
 	// Note that because walkSwitch will rewrite some of the
@@ -1077,7 +1030,7 @@ func (o *orderState) stmt(n ir.Node) {
 		}
 
 		o.out = append(o.out, n)
-		o.cleanTemp(t)
+		o.popTemp(t)
 	}
 
 	base.Pos = lno
@@ -1265,7 +1218,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
 		o.edge()
 		rhs := o.expr(n.Y, nil)
 		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
-		o.cleanTemp(t)
+		o.popTemp(t)
 
 		gen := o.out
 		o.out = saveout

src/cmd/compile/internal/walk/select.go

@@ -230,12 +230,7 @@ func walkSelectCases(cases []*ir.CommClause) []ir.Node {
 	init = append(init, fnInit...)
 	init = append(init, typecheck.Stmt(r))
 
-	// selv and order are no longer alive after selectgo.
-	init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, selv))
-	init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, order))
-	if base.Flag.Race {
-		init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, pcs))
-	}
+	// selv, order, and pcs (if race) are no longer alive after selectgo.
 
 	// dispatch cases
 	dispatch := func(cond ir.Node, cas *ir.CommClause) {

test/codegen/mapaccess.go

@@ -234,29 +234,28 @@ func mapCompoundAssignmentString() {
 var sinkAppend bool
 
-// TODO: optimization is not applied because of mapslow flag.
 func mapAppendAssignmentInt8() {
 	m := make(map[int8][]int8, 0)
 	var k int8 = 0
 
-	// 386:".*mapaccess"
-	// amd64:".*mapaccess"
-	// arm:".*mapaccess"
-	// arm64:".*mapaccess"
+	// 386:-".*mapaccess"
+	// amd64:-".*mapaccess"
+	// arm:-".*mapaccess"
+	// arm64:-".*mapaccess"
 	m[k] = append(m[k], 1)
 
-	// 386:".*mapaccess"
-	// amd64:".*mapaccess"
-	// arm:".*mapaccess"
-	// arm64:".*mapaccess"
+	// 386:-".*mapaccess"
+	// amd64:-".*mapaccess"
+	// arm:-".*mapaccess"
+	// arm64:-".*mapaccess"
 	m[k] = append(m[k], 1, 2, 3)
 
 	a := []int8{7, 8, 9, 0}
-	// 386:".*mapaccess"
-	// amd64:".*mapaccess"
-	// arm:".*mapaccess"
-	// arm64:".*mapaccess"
+	// 386:-".*mapaccess"
+	// amd64:-".*mapaccess"
+	// arm:-".*mapaccess"
+	// arm64:-".*mapaccess"
 	m[k] = append(m[k], a...)
 
 	// Exceptions
@@ -394,29 +393,28 @@ func mapAppendAssignmentInt64() {
 	m[k] = append(m[k+1], 100)
 }
 
-// TODO: optimization is not applied because of mapslow flag.
 func mapAppendAssignmentComplex128() {
 	m := make(map[complex128][]complex128, 0)
 	var k complex128 = 0
 
-	// 386:".*mapaccess"
-	// amd64:".*mapaccess"
-	// arm:".*mapaccess"
-	// arm64:".*mapaccess"
+	// 386:-".*mapaccess"
+	// amd64:-".*mapaccess"
+	// arm:-".*mapaccess"
+	// arm64:-".*mapaccess"
 	m[k] = append(m[k], 1)
 
-	// 386:".*mapaccess"
-	// amd64:".*mapaccess"
-	// arm:".*mapaccess"
-	// arm64:".*mapaccess"
+	// 386:-".*mapaccess"
+	// amd64:-".*mapaccess"
+	// arm:-".*mapaccess"
+	// arm64:-".*mapaccess"
 	m[k] = append(m[k], 1, 2, 3)
 
 	a := []complex128{7, 8, 9, 0}
-	// 386:".*mapaccess"
-	// amd64:".*mapaccess"
-	// arm:".*mapaccess"
-	// arm64:".*mapaccess"
+	// 386:-".*mapaccess"
+	// amd64:-".*mapaccess"
+	// arm:-".*mapaccess"
+	// arm64:-".*mapaccess"
 	m[k] = append(m[k], a...)
 
 	// Exceptions

test/live.go

@@ -719,7 +719,7 @@ func f44(f func() [2]*int) interface{} { // ERROR "live at entry to f44: f"
 	type T struct {
 		s [1][2]*int
 	}
-	ret := T{}
+	ret := T{} // ERROR "stack object ret T"
 	ret.s[0] = f()
-	return ret // ERROR "stack object .autotmp_[0-9]+ T"
+	return ret
 }

test/live_regabi.go

@@ -714,7 +714,7 @@ func f44(f func() [2]*int) interface{} { // ERROR "live at entry to f44: f"
 	type T struct {
 		s [1][2]*int
 	}
-	ret := T{}
+	ret := T{} // ERROR "stack object ret T"
 	ret.s[0] = f()
-	return ret // ERROR "stack object .autotmp_[0-9]+ T"
+	return ret
 }