cmd/compile: don't short-circuit copies whose source is volatile
Current optimization: When we copy a->b and then b->c, we might as well
copy a->c instead of b->c (then b might be dead and go away).

*Except* if a is a volatile location (might be clobbered by a call). In
that case, we really do want to copy a immediately, because there might
be a call before we can do the a->c copy.

User calls can't happen in between, because the rule matches up the
memory states. But calls inserted for write barriers, particularly
runtime.typedmemmove, can. (I guess we could introduce a
register-calling-convention version of runtime.typedmemmove, but that
seems a bigger change than this one.)

Fixes #43570

Change-Id: Ifa518bb1a6f3a8dd46c352d4fd54ea9713b3eb1a
Reviewed-on: https://go-review.googlesource.com/c/go/+/282492
Trust: Keith Randall <khr@golang.org>
Trust: Josh Bleecher Snyder <josharian@gmail.com>
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
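To make the a->b->c chain concrete, here is a minimal sketch (illustrative only, not code from this commit) of the shape the optimization targets: a is the stack slot holding f's result, which later calls may reuse; b is the compiler's temporary; and c is a pointer-containing global, so the b->c store needs a write barrier that the compiler may emit as a call to runtime.typedmemmove.

package main

// Minimal sketch of the copy chain described above (illustrative only).
// T contains pointers, so copying it into a global needs a write
// barrier; the compiler may emit that as a runtime.typedmemmove call,
// exactly the kind of call that can clobber the volatile source slot
// if the a->b and b->c copies are short-circuited into a->c.

type T [8]*int

//go:noinline
func f() T { return T{} } // result sits in a stack slot: the volatile "a"

var c T // pointer-containing global: stores to it get a write barrier

func main() {
	b := f() // copy a -> b
	c = b    // copy b -> c; the buggy rewrite turned this into a -> c
}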
parent ae97717133
commit 304f769ffc
src/cmd/compile/internal/ssa/gen/generic.rules
@@ -2512,7 +2512,7 @@
 (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
 	&& t1.Compare(t2) == types.CMPeq
 	&& isSamePtr(tmp1, tmp2)
-	&& isStackPtr(src)
+	&& isStackPtr(src) && !isVolatile(src)
 	&& disjoint(src, s, tmp2, s)
 	&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
 	=> (Move {t1} [s] dst src midmem)
@@ -2521,7 +2521,7 @@
 (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
 	&& t1.Compare(t2) == types.CMPeq
 	&& isSamePtr(tmp1, tmp2)
-	&& isStackPtr(src)
+	&& isStackPtr(src) && !isVolatile(src)
 	&& disjoint(src, s, tmp2, s)
 	&& (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
 	=> (Move {t1} [s] dst src midmem)
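The new guard hinges on the isVolatile predicate from the SSA rewrite helpers. Below is a hedged, standalone sketch of its logic with stub Op/Value types (the real helper lives in cmd/compile/internal/ssa/rewrite.go and may differ in detail): a pointer counts as volatile when, after peeling off pointer arithmetic, its base is SP, i.e. it addresses the outgoing-argument area that any call is free to overwrite.

package main

import "fmt"

// Stand-in stubs for the compiler's SSA types, just to make the sketch
// runnable; they are not the real cmd/compile definitions.
type Op int

const (
	OpSP       Op = iota // stack pointer: base of the outgoing-argument area
	OpOffPtr             // pointer + constant offset
	OpAddPtr             // pointer + variable offset
	OpPtrIndex           // pointer indexing
	OpCopy               // value copy
	OpAddr               // address of a global
)

type Value struct {
	Op   Op
	Args []*Value
}

// isVolatile (paraphrased): walk pointer arithmetic back to the base;
// anything based on SP may be clobbered by the next call.
func isVolatile(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	return v.Op == OpSP
}

func main() {
	sp := &Value{Op: OpSP}
	argSlot := &Value{Op: OpOffPtr, Args: []*Value{sp}} // an argument slot
	global := &Value{Op: OpAddr}
	fmt.Println(isVolatile(argSlot), isVolatile(global)) // true false
}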
src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -13637,7 +13637,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
-	// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+	// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
 	// result: (Move {t1} [s] dst src midmem)
 	for {
 		s := auxIntToInt64(v.AuxInt)
@@ -13651,7 +13651,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
 		t2 := auxToType(midmem.Aux)
 		src := midmem.Args[1]
 		tmp2 := midmem.Args[0]
-		if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+		if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
 			break
 		}
 		v.reset(OpMove)
@@ -13661,7 +13661,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
 		return true
 	}
 	// match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
-	// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+	// cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
 	// result: (Move {t1} [s] dst src midmem)
 	for {
 		s := auxIntToInt64(v.AuxInt)
@@ -13679,7 +13679,7 @@ func rewriteValuegeneric_OpMove(v *Value) bool {
 		t2 := auxToType(midmem_0.Aux)
 		src := midmem_0.Args[1]
 		tmp2 := midmem_0.Args[0]
-		if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+		if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
 			break
 		}
 		v.reset(OpMove)
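For context, rewritegeneric.go is machine-generated from generic.rules, which is why the Go hunks mirror the rule hunks line for line. After editing the rules, one regenerates the matcher by running the generator in src/cmd/compile/internal/ssa/gen (at the time of this commit, go run *.go in that directory, though the exact invocation may have changed since).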
test/fixedbugs/issue43570.go (new file)
@@ -0,0 +1,40 @@
+// run
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+type T [8]*int
+
+//go:noinline
+func f(x int) T {
+	return T{}
+}
+
+//go:noinline
+func g(x int, t T) {
+	if t != (T{}) {
+		panic(fmt.Sprintf("bad: %v", t))
+	}
+}
+
+func main() {
+	const N = 10000
+	var q T
+	func() {
+		for i := 0; i < N; i++ {
+			q = f(0)
+			g(0, q)
+			sink = make([]byte, 1024)
+		}
+	}()
+	// Note that the closure is a trick to get the write to q to be a
+	// write to a pointer that is known to be non-nil and requires
+	// a write barrier.
+}
+
+var sink []byte
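As a usage note: the test is self-checking. Before the fix, the short-circuited copy could read q's value out of the volatile argument area after a write-barrier-related call had clobbered it, so g would panic with a non-zero T; with the fix, the program runs silently. To see whether the Move rewrite fires on a given build, one can dump the SSA for main with the compiler's GOSSAFUNC knob (e.g. GOSSAFUNC=main go tool compile issue43570.go) and inspect the Move ops.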