
cmd/compile: compute faulting args before writing args to stack

When compiling f(a, b, c), we do something like:
  *(SP+0) = eval(a)
  *(SP+8) = eval(b)
  *(SP+16) = eval(c)
  call f

If one of those evaluations is later determined to unconditionally panic
(say eval(b) in this example), then the call is deadcode eliminated. But
any previous argument write (*(SP+0)=... here) is still around. Because
we only compute the size of the outarg area for calls which are still
around at the end of optimization, the space needed for the *(SP+0)=...
write is not accounted for and thus the outarg area may be too small.
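
Concretely, a call like the one below hits this case. This is a minimal,
illustrative sketch (the real regression test is the new file added at
the end of this change); the index expression is determined to always
panic, so the call becomes dead code:

  package main

  func sink(a, b, c int) {}

  func main() {
      defer func() { recover() }()
      var xs []int
      // The middle argument always panics (index out of range on a nil
      // slice), so the call to sink is eliminated as dead code, yet the
      // stores of the other arguments may already have been emitted.
      sink(1, xs[5], 2)
  }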

The fix is to make sure that we evaluate any potentially panicking
operation before we write any of the args to the stack. It turns out
the fix is pretty easy, as we already have such a mechanism available
for function args. We just need to extend it to possibly panicking args
as well.

The resulting code (if b and c can panic, but a can't) is:
  tmpb = eval(b)
  *(SP+16) = eval(c)
  *(SP+0) = eval(a)
  *(SP+8) = tmpb
  call f

This change tickled a bug in how we find the arguments for intrinsic
calls, so that latent bug is fixed up as well.

Update #16760.

Change-Id: I0bf5edf370220f82bc036cf2085ecc24f356d166
Reviewed-on: https://go-review.googlesource.com/32551
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Keith Randall 2016-11-01 15:28:10 -07:00
parent 688995d1e9
commit cf28e5cc9d
3 changed files with 148 additions and 58 deletions


@@ -9,6 +9,7 @@ import (
"fmt"
"html"
"os"
"sort"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
@@ -2532,8 +2533,8 @@ type intrinsicInfo struct {
}
// An intrinsicBuilder converts a call node n into an ssa value that
// implements that call as an intrinsic.
type intrinsicBuilder func(s *state, n *Node) *ssa.Value
// implements that call as an intrinsic. args is a list of arguments to the func.
type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
type intrinsicKey struct {
pkg string
@@ -2569,104 +2570,104 @@ func intrinsicInit() {
// initial set of intrinsics.
i.std = map[intrinsicKey]intrinsicBuilder{
/******** runtime ********/
intrinsicKey{"runtime", "slicebytetostringtmp"}: disableForInstrumenting(func(s *state, n *Node) *ssa.Value {
intrinsicKey{"runtime", "slicebytetostringtmp"}: disableForInstrumenting(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
// Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes
// for the backend instead of slicebytetostringtmp calls
// when not instrumenting.
slice := s.intrinsicFirstArg(n)
slice := args[0]
ptr := s.newValue1(ssa.OpSlicePtr, ptrto(Types[TUINT8]), slice)
len := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
}),
intrinsicKey{"runtime", "KeepAlive"}: func(s *state, n *Node) *ssa.Value {
data := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), s.intrinsicFirstArg(n))
intrinsicKey{"runtime", "KeepAlive"}: func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, ptrto(Types[TUINT8]), args[0])
s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, ssa.TypeMem, data, s.mem())
return nil
},
/******** runtime/internal/sys ********/
intrinsicKey{"runtime/internal/sys", "Ctz32"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
return s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n))
intrinsicKey{"runtime/internal/sys", "Ctz32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz32, Types[TUINT32], args[0])
}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
intrinsicKey{"runtime/internal/sys", "Ctz64"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
return s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n))
intrinsicKey{"runtime/internal/sys", "Ctz64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpCtz64, Types[TUINT64], args[0])
}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
intrinsicKey{"runtime/internal/sys", "Bswap32"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
return s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n))
intrinsicKey{"runtime/internal/sys", "Bswap32"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, Types[TUINT32], args[0])
}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
intrinsicKey{"runtime/internal/sys", "Bswap64"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
return s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n))
intrinsicKey{"runtime/internal/sys", "Bswap64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap64, Types[TUINT64], args[0])
}, sys.AMD64, sys.ARM64, sys.ARM, sys.S390X),
/******** runtime/internal/atomic ********/
intrinsicKey{"runtime/internal/atomic", "Load"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
intrinsicKey{"runtime/internal/atomic", "Load"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Load64"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
intrinsicKey{"runtime/internal/atomic", "Load64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Loadp"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(ptrto(Types[TUINT8]), ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
intrinsicKey{"runtime/internal/atomic", "Loadp"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(ptrto(Types[TUINT8]), ssa.TypeMem), args[0], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, ptrto(Types[TUINT8]), v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Store"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "Store"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, args[0], args[1], s.mem())
return nil
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Store64"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "Store64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, args[0], args[1], s.mem())
return nil
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "StorepNoWB"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "StorepNoWB"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, args[0], args[1], s.mem())
return nil
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Xchg"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "Xchg"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Xchg64"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "Xchg64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Xadd"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "Xadd"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Xadd64"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "Xadd64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), args[0], args[1], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Cas"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem())
intrinsicKey{"runtime/internal/atomic", "Cas"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "Cas64"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem())
intrinsicKey{"runtime/internal/atomic", "Cas64"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
}, sys.AMD64, sys.ARM64, sys.S390X),
intrinsicKey{"runtime/internal/atomic", "And8"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "And8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, args[0], args[1], s.mem())
return nil
}, sys.AMD64, sys.ARM64),
intrinsicKey{"runtime/internal/atomic", "Or8"}: enableOnArch(func(s *state, n *Node) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
intrinsicKey{"runtime/internal/atomic", "Or8"}: enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, args[0], args[1], s.mem())
return nil
}, sys.AMD64, sys.ARM64),
}
@@ -2774,12 +2775,12 @@ func intrinsicInit() {
/******** math/big ********/
i.intSized[sizedIntrinsicKey{"math/big", "mulWW", 8}] =
enableOnArch(func(s *state, n *Node) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1))
enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpMul64uhilo, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1])
}, sys.AMD64)
i.intSized[sizedIntrinsicKey{"math/big", "divWW", 8}] =
enableOnArch(func(s *state, n *Node) *ssa.Value {
return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2))
enableOnArch(func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue3(ssa.OpDiv128u, ssa.MakeTuple(Types[TUINT64], Types[TUINT64]), args[0], args[1], args[2])
}, sys.AMD64)
}
@@ -2820,7 +2821,7 @@ func isIntrinsicCall(n *Node) bool {
// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
func (s *state) intrinsicCall(n *Node) *ssa.Value {
v := findIntrinsic(n.Left.Sym)(s, n)
v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
if ssa.IntrinsicsDebug > 0 {
x := v
if x == nil {
@@ -2834,16 +2835,59 @@ func (s *state) intrinsicCall(n *Node) *ssa.Value {
return v
}
// intrinsicArg extracts the ith arg from n.List and returns its value.
func (s *state) intrinsicArg(n *Node, i int) *ssa.Value {
x := n.List.Slice()[i]
if x.Op == OAS {
x = x.Right
}
return s.expr(x)
type callArg struct {
offset int64
v *ssa.Value
}
func (s *state) intrinsicFirstArg(n *Node) *ssa.Value {
return s.intrinsicArg(n, 0)
type byOffset []callArg
func (x byOffset) Len() int { return len(x) }
func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byOffset) Less(i, j int) bool {
return x[i].offset < x[j].offset
}
// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
// This code is complicated because of how walk transforms calls. For a call node,
// each entry in n.List is either an assignment to OINDREGSP which actually
// stores an arg, or an assignment to a temporary which computes an arg
// which is later assigned.
// The args can also be out of order.
// TODO: when walk goes away someday, this code can go away also.
var args []callArg
temps := map[*Node]*ssa.Value{}
for _, a := range n.List.Slice() {
if a.Op != OAS {
s.Fatalf("non-assignment as a function argument %s", opnames[a.Op])
}
l, r := a.Left, a.Right
switch l.Op {
case ONAME:
// Evaluate and store to "temporary".
// Walk ensures these temporaries are dead outside of n.
temps[l] = s.expr(r)
case OINDREGSP:
// Store a value to an argument slot.
var v *ssa.Value
if x, ok := temps[r]; ok {
// This is a previously computed temporary.
v = x
} else {
// This is an explicit value; evaluate it.
v = s.expr(r)
}
args = append(args, callArg{l.Xoffset, v})
default:
s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op])
}
}
sort.Sort(byOffset(args))
res := make([]*ssa.Value, len(args))
for i, a := range args {
res[i] = a.v
}
return res
}
// Calls the function n using the specified call type.
@@ -3281,8 +3325,6 @@ func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
// If returns is true, the block is marked as a call block. A new block
// is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
// Write args to the stack
off := Ctxt.FixedFrameSize()


@@ -1182,6 +1182,12 @@ func ullmancalc(n *Node) {
ul = UINF
goto out
}
case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR,
OIND, ODOTPTR, ODOTTYPE, ODIV:
// These ops might panic, make sure they are done
// before we start marshaling args for a call. See issue 16760.
ul = UINF
goto out
}
ul = 1
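
The ops added above correspond to ordinary source constructs that can
panic at run time; ranking them UINF forces them to be evaluated (into
temporaries if necessary) before any argument stores are written. An
illustrative program touching several of them (not part of this change;
the names are arbitrary):

  package main

  func use(vs ...interface{}) {}

  func main() {
      defer func() { recover() }()
      var (
          s []int
          p *struct{ f int }
          i interface{} = 3
          d int
      )
      use(
          s[0],       // OINDEX: index out of range
          s[1:2],     // OSLICE: slice bounds out of range
          *p,         // OIND: nil pointer dereference
          p.f,        // ODOTPTR: nil pointer dereference
          i.(string), // ODOTTYPE: failed type assertion
          1/d,        // ODIV: integer division by zero
      )
  }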


@@ -0,0 +1,42 @@
// run
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Make sure we don't start marshaling (writing to the stack)
// arguments until those arguments are evaluated and known
// not to unconditionally panic. If they unconditionally panic,
// we write some args but never do the call. That messes up
// the logic which decides how big the argout section needs to be.
package main
type W interface {
Write([]byte)
}
type F func(W)
func foo(f F) {
defer func() {
if r := recover(); r != nil {
usestack(1000)
}
}()
f(nil)
}
func main() {
foo(func(w W) {
var x []string
w.Write([]byte(x[5]))
})
}
func usestack(n int) {
if n == 0 {
return
}
usestack(n - 1)
}