go/test/live_regabi.go
Russ Cox c29444ef39 math/rand, math/rand/v2: use ChaCha8 for global rand
Move ChaCha8 code into internal/chacha8rand and use it to implement
runtime.rand, which is used for the unseeded global source for
both math/rand and math/rand/v2. This also affects the calculation of
the start point for iteration over very, very large maps (when the
32-bit fastrand is not big enough).
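
(Illustrative only, not part of this change: a minimal sketch of the
user-visible surface in Go 1.22's math/rand/v2, showing the unseeded
global source that runtime.rand now backs, next to an explicitly
seeded ChaCha8 Source.)

package main

import (
	"fmt"
	"math/rand/v2"
)

func main() {
	// The package-level functions draw from the unseeded global source;
	// after this change that source is the runtime's per-thread ChaCha8
	// state rather than wyrand.
	fmt.Println(rand.Uint64(), rand.IntN(100))

	// The same algorithm is available as an explicit, seedable Source
	// when a reproducible stream is needed.
	var seed [32]byte // fixed all-zero seed, for illustration only
	r := rand.New(rand.NewChaCha8(seed))
	fmt.Println(r.Uint64(), r.IntN(100))
}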

The benefit is that misuse of the global random number generators
in math/rand and math/rand/v2, in contexts where unpredictable
randomness matters for security, is no longer itself a security
problem; this removes a common mistake among programmers who are
unaware of the different kinds of randomness.
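
(Illustrative only: a sketch of the kind of mistake meant here, with
invented helper names. Deriving a secret from the global generator
was an outright vulnerability when the source was predictable wyrand;
with ChaCha8 it is no longer trivially predictable, though crypto/rand
remains the right tool for secrets.)

package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"math/rand/v2"
)

// tokenFromGlobal builds a token from the math/rand/v2 global source,
// the pattern this change is defending against.
func tokenFromGlobal() string {
	var b [16]byte
	binary.LittleEndian.PutUint64(b[:8], rand.Uint64())
	binary.LittleEndian.PutUint64(b[8:], rand.Uint64())
	return hex.EncodeToString(b[:])
}

// tokenFromCryptoRand is the intended way to generate secrets.
func tokenFromCryptoRand() string {
	var b [16]byte
	if _, err := crand.Read(b[:]); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b[:])
}

func main() {
	fmt.Println(tokenFromGlobal(), tokenFromCryptoRand())
}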

The cost is an extra 304 bytes per thread stored in the m struct
plus 2-3ns more per random uint64 due to the more sophisticated
algorithm. Using PCG looks like it would cost about the same,
although I haven't benchmarked that.
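
(Illustrative only: a minimal external benchmark sketch, not the
package's own benchmarks reported below, of what the GlobalUint64-
and PCG_DXSM-style measurements compare: the package-level generator
versus an explicit local Source. Package and function names are
hypothetical; the file would be a *_test.go run with go test -bench.)

package bench

import (
	"math/rand/v2"
	"testing"
)

// BenchmarkGlobalUint64 exercises the package-level generator, which
// now routes through the runtime's per-thread ChaCha8 state.
func BenchmarkGlobalUint64(b *testing.B) {
	var s uint64
	for i := 0; i < b.N; i++ {
		s += rand.Uint64()
	}
	_ = s
}

// BenchmarkLocalPCG exercises an explicit PCG source for comparison.
func BenchmarkLocalPCG(b *testing.B) {
	r := rand.New(rand.NewPCG(1, 2))
	var s uint64
	for i := 0; i < b.N; i++ {
		s += r.Uint64()
	}
	_ = s
}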

Before this, the math/rand and math/rand/v2 global generator
was wyrand (https://github.com/wangyi-fudan/wyhash).
For math/rand, using wyrand instead of the Mitchell/Reeds/Thompson
ALFG was justifiable, since the latter was not any better.
But for math/rand/v2, the global generator really should be
at least as good as one of the well-studied, specific algorithms
provided directly by the package, and it's not.

(Wyrand is still reasonable for scheduling and cache decisions.)

Good randomness does have a cost: about twice that of wyrand.

Also rationalize the various runtime rand references.

goos: linux
goarch: amd64
pkg: math/rand/v2
cpu: AMD Ryzen 9 7950X 16-Core Processor
                        │ bbb48afeb7.amd64 │           5cf807d1ea.amd64           │
                        │      sec/op      │    sec/op     vs base                │
ChaCha8-32                     1.862n ± 2%    1.861n ± 2%        ~ (p=0.825 n=20)
PCG_DXSM-32                    1.471n ± 1%    1.460n ± 2%        ~ (p=0.153 n=20)
SourceUint64-32                1.636n ± 2%    1.582n ± 1%   -3.30% (p=0.000 n=20)
GlobalInt64-32                 2.087n ± 1%    3.663n ± 1%  +75.54% (p=0.000 n=20)
GlobalInt64Parallel-32        0.1042n ± 1%   0.2026n ± 1%  +94.48% (p=0.000 n=20)
GlobalUint64-32                2.263n ± 2%    3.724n ± 1%  +64.57% (p=0.000 n=20)
GlobalUint64Parallel-32       0.1019n ± 1%   0.1973n ± 1%  +93.67% (p=0.000 n=20)
Int64-32                       1.771n ± 1%    1.774n ± 1%        ~ (p=0.449 n=20)
Uint64-32                      1.863n ± 2%    1.866n ± 1%        ~ (p=0.364 n=20)
GlobalIntN1000-32              3.134n ± 3%    4.730n ± 2%  +50.95% (p=0.000 n=20)
IntN1000-32                    2.489n ± 1%    2.489n ± 1%        ~ (p=0.683 n=20)
Int64N1000-32                  2.521n ± 1%    2.516n ± 1%        ~ (p=0.394 n=20)
Int64N1e8-32                   2.479n ± 1%    2.478n ± 2%        ~ (p=0.743 n=20)
Int64N1e9-32                   2.530n ± 2%    2.514n ± 2%        ~ (p=0.193 n=20)
Int64N2e9-32                   2.501n ± 1%    2.494n ± 1%        ~ (p=0.616 n=20)
Int64N1e18-32                  3.227n ± 1%    3.205n ± 1%        ~ (p=0.101 n=20)
Int64N2e18-32                  3.647n ± 1%    3.599n ± 1%        ~ (p=0.019 n=20)
Int64N4e18-32                  5.135n ± 1%    5.069n ± 2%        ~ (p=0.034 n=20)
Int32N1000-32                  2.657n ± 1%    2.637n ± 1%        ~ (p=0.180 n=20)
Int32N1e8-32                   2.636n ± 1%    2.636n ± 1%        ~ (p=0.763 n=20)
Int32N1e9-32                   2.660n ± 2%    2.638n ± 1%        ~ (p=0.358 n=20)
Int32N2e9-32                   2.662n ± 2%    2.618n ± 2%        ~ (p=0.064 n=20)
Float32-32                     2.272n ± 2%    2.239n ± 2%        ~ (p=0.194 n=20)
Float64-32                     2.272n ± 1%    2.286n ± 2%        ~ (p=0.763 n=20)
ExpFloat64-32                  3.762n ± 1%    3.744n ± 1%        ~ (p=0.171 n=20)
NormFloat64-32                 3.706n ± 1%    3.655n ± 2%        ~ (p=0.066 n=20)
Perm3-32                       32.93n ± 3%    34.62n ± 1%   +5.13% (p=0.000 n=20)
Perm30-32                      202.9n ± 1%    204.0n ± 1%        ~ (p=0.482 n=20)
Perm30ViaShuffle-32            115.0n ± 1%    114.9n ± 1%        ~ (p=0.358 n=20)
ShuffleOverhead-32             112.8n ± 1%    112.7n ± 1%        ~ (p=0.692 n=20)
Concurrent-32                  2.107n ± 0%    3.725n ± 1%  +76.75% (p=0.000 n=20)

goos: darwin
goarch: arm64
pkg: math/rand/v2
                       │ bbb48afeb7.arm64 │           5cf807d1ea.arm64            │
                       │      sec/op      │    sec/op     vs base                 │
ChaCha8-8                     2.480n ± 0%    2.429n ± 0%    -2.04% (p=0.000 n=20)
PCG_DXSM-8                    2.531n ± 0%    2.530n ± 0%         ~ (p=0.877 n=20)
SourceUint64-8                2.534n ± 0%    2.533n ± 0%         ~ (p=0.732 n=20)
GlobalInt64-8                 2.172n ± 1%    4.794n ± 0%  +120.67% (p=0.000 n=20)
GlobalInt64Parallel-8        0.4320n ± 0%   0.9605n ± 0%  +122.32% (p=0.000 n=20)
GlobalUint64-8                2.182n ± 0%    4.770n ± 0%  +118.58% (p=0.000 n=20)
GlobalUint64Parallel-8       0.4307n ± 0%   0.9583n ± 0%  +122.51% (p=0.000 n=20)
Int64-8                       4.107n ± 0%    4.104n ± 0%         ~ (p=0.416 n=20)
Uint64-8                      4.080n ± 0%    4.080n ± 0%         ~ (p=0.052 n=20)
GlobalIntN1000-8              2.814n ± 2%    5.643n ± 0%  +100.50% (p=0.000 n=20)
IntN1000-8                    4.141n ± 0%    4.139n ± 0%         ~ (p=0.140 n=20)
Int64N1000-8                  4.140n ± 0%    4.140n ± 0%         ~ (p=0.313 n=20)
Int64N1e8-8                   4.140n ± 0%    4.139n ± 0%         ~ (p=0.103 n=20)
Int64N1e9-8                   4.139n ± 0%    4.140n ± 0%         ~ (p=0.761 n=20)
Int64N2e9-8                   4.140n ± 0%    4.140n ± 0%         ~ (p=0.636 n=20)
Int64N1e18-8                  5.266n ± 0%    5.326n ± 1%    +1.14% (p=0.001 n=20)
Int64N2e18-8                  6.052n ± 0%    6.167n ± 0%    +1.90% (p=0.000 n=20)
Int64N4e18-8                  8.826n ± 0%    9.051n ± 0%    +2.55% (p=0.000 n=20)
Int32N1000-8                  4.127n ± 0%    4.132n ± 0%    +0.12% (p=0.000 n=20)
Int32N1e8-8                   4.126n ± 0%    4.131n ± 0%    +0.12% (p=0.000 n=20)
Int32N1e9-8                   4.127n ± 0%    4.132n ± 0%    +0.12% (p=0.000 n=20)
Int32N2e9-8                   4.132n ± 0%    4.131n ± 0%         ~ (p=0.017 n=20)
Float32-8                     4.109n ± 0%    4.105n ± 0%         ~ (p=0.379 n=20)
Float64-8                     4.107n ± 0%    4.106n ± 0%         ~ (p=0.867 n=20)
ExpFloat64-8                  5.339n ± 0%    5.383n ± 0%    +0.82% (p=0.000 n=20)
NormFloat64-8                 5.735n ± 0%    5.737n ± 1%         ~ (p=0.856 n=20)
Perm3-8                       26.65n ± 0%    26.80n ± 1%    +0.58% (p=0.000 n=20)
Perm30-8                      194.8n ± 1%    197.0n ± 0%    +1.18% (p=0.000 n=20)
Perm30ViaShuffle-8            156.6n ± 0%    157.6n ± 1%    +0.61% (p=0.000 n=20)
ShuffleOverhead-8             124.9n ± 0%    125.5n ± 0%    +0.52% (p=0.000 n=20)
Concurrent-8                  2.434n ± 3%    5.066n ± 0%  +108.09% (p=0.000 n=20)

goos: linux
goarch: 386
pkg: math/rand/v2
cpu: AMD Ryzen 9 7950X 16-Core Processor
                        │ bbb48afeb7.386 │            5cf807d1ea.386             │
                        │     sec/op     │    sec/op     vs base                 │
ChaCha8-32                  11.295n ± 1%    4.748n ± 2%   -57.96% (p=0.000 n=20)
PCG_DXSM-32                  7.693n ± 1%    7.738n ± 2%         ~ (p=0.542 n=20)
SourceUint64-32              7.658n ± 2%    7.622n ± 2%         ~ (p=0.344 n=20)
GlobalInt64-32               3.473n ± 2%    7.526n ± 2%  +116.73% (p=0.000 n=20)
GlobalInt64Parallel-32      0.3198n ± 0%   0.5444n ± 0%   +70.22% (p=0.000 n=20)
GlobalUint64-32              3.612n ± 0%    7.575n ± 1%  +109.69% (p=0.000 n=20)
GlobalUint64Parallel-32     0.3168n ± 0%   0.5403n ± 0%   +70.51% (p=0.000 n=20)
Int64-32                     7.673n ± 2%    7.789n ± 1%         ~ (p=0.122 n=20)
Uint64-32                    7.773n ± 1%    7.827n ± 2%         ~ (p=0.920 n=20)
GlobalIntN1000-32            6.268n ± 1%    9.581n ± 1%   +52.87% (p=0.000 n=20)
IntN1000-32                  10.33n ± 2%    10.45n ± 1%         ~ (p=0.233 n=20)
Int64N1000-32                10.98n ± 2%    11.01n ± 1%         ~ (p=0.401 n=20)
Int64N1e8-32                 11.19n ± 2%    10.97n ± 1%         ~ (p=0.033 n=20)
Int64N1e9-32                 11.06n ± 1%    11.08n ± 1%         ~ (p=0.498 n=20)
Int64N2e9-32                 11.10n ± 1%    11.01n ± 2%         ~ (p=0.995 n=20)
Int64N1e18-32                15.23n ± 2%    15.04n ± 1%         ~ (p=0.973 n=20)
Int64N2e18-32                15.89n ± 1%    15.85n ± 1%         ~ (p=0.409 n=20)
Int64N4e18-32                18.96n ± 2%    19.34n ± 2%         ~ (p=0.048 n=20)
Int32N1000-32                10.46n ± 2%    10.44n ± 2%         ~ (p=0.480 n=20)
Int32N1e8-32                 10.46n ± 2%    10.49n ± 2%         ~ (p=0.951 n=20)
Int32N1e9-32                 10.28n ± 2%    10.26n ± 1%         ~ (p=0.431 n=20)
Int32N2e9-32                 10.50n ± 2%    10.44n ± 2%         ~ (p=0.249 n=20)
Float32-32                   13.80n ± 2%    13.80n ± 2%         ~ (p=0.751 n=20)
Float64-32                   23.55n ± 2%    23.87n ± 0%         ~ (p=0.408 n=20)
ExpFloat64-32                15.36n ± 1%    15.29n ± 2%         ~ (p=0.316 n=20)
NormFloat64-32               13.57n ± 1%    13.79n ± 1%    +1.66% (p=0.005 n=20)
Perm3-32                     45.70n ± 2%    46.99n ± 2%    +2.81% (p=0.001 n=20)
Perm30-32                    399.0n ± 1%    403.8n ± 1%    +1.19% (p=0.006 n=20)
Perm30ViaShuffle-32          349.0n ± 1%    350.4n ± 1%         ~ (p=0.909 n=20)
ShuffleOverhead-32           322.3n ± 1%    323.8n ± 1%         ~ (p=0.410 n=20)
Concurrent-32                3.331n ± 1%    7.312n ± 1%  +119.50% (p=0.000 n=20)

For #61716.

Change-Id: Ibdddeed85c34d9ae397289dc899e04d4845f9ed2
Reviewed-on: https://go-review.googlesource.com/c/go/+/516860
Reviewed-by: Michael Pratt <mpratt@google.com>
Reviewed-by: Filippo Valsorda <filippo@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
2023-12-05 20:34:30 +00:00

// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off

//go:build (amd64 && goexperiment.regabiargs) || (arm64 && goexperiment.regabiargs)

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// liveness tests with inlining disabled.
// see also live2.go.

package main

import "runtime"

func printnl()

//go:noescape
func printpointer(**int)

//go:noescape
func printintpointer(*int)

//go:noescape
func printstringpointer(*string)

//go:noescape
func printstring(string)

//go:noescape
func printbytepointer(*byte)

func printint(int)
func f1() {
	var x *int       // ERROR "stack object x \*int$"
	printpointer(&x) // ERROR "live at call to printpointer: x$"
	printpointer(&x)
}

func f2(b bool) {
	if b {
		printint(0) // nothing live here
		return
	}
	var x *int       // ERROR "stack object x \*int$"
	printpointer(&x) // ERROR "live at call to printpointer: x$"
	printpointer(&x)
}

func f3(b1, b2 bool) {
	// Here x and y are ambiguously live. In previous go versions they
	// were marked as live throughout the function to avoid being
	// poisoned in GODEBUG=gcdead=1 mode; this is now no longer the
	// case.
	printint(0)
	if b1 == false {
		printint(0)
		return
	}
	if b2 {
		var x *int       // ERROR "stack object x \*int$"
		printpointer(&x) // ERROR "live at call to printpointer: x$"
		printpointer(&x)
	} else {
		var y *int       // ERROR "stack object y \*int$"
		printpointer(&y) // ERROR "live at call to printpointer: y$"
		printpointer(&y)
	}
	printint(0) // nothing is live here
}
// The old algorithm treated x as live on all code that
// could flow to a return statement, so it included the
// function entry and code above the declaration of x
// but would not include an indirect use of x in an infinite loop.
// Check that these cases are handled correctly.
func f4(b1, b2 bool) { // x not live here
	if b2 {
		printint(0) // x not live here
		return
	}
	var z **int
	x := new(int) // ERROR "stack object x \*int$"
	*x = 42
	z = &x
	printint(**z) // ERROR "live at call to printint: x$"
	if b2 {
		printint(1) // x not live here
		return
	}
	for {
		printint(**z) // ERROR "live at call to printint: x$"
	}
}

func f5(b1 bool) {
	var z **int
	if b1 {
		x := new(int) // ERROR "stack object x \*int$"
		*x = 42
		z = &x
	} else {
		y := new(int) // ERROR "stack object y \*int$"
		*y = 54
		z = &y
	}
	printint(**z) // nothing live here
}
// confusion about the _ result used to cause spurious "live at entry to f6: _".
func f6() (_, y string) {
	y = "hello"
	return
}

// confusion about addressed results used to cause "live at entry to f7: x".
func f7() (x string) { // ERROR "stack object x string"
	_ = &x
	x = "hello"
	return
}

// ignoring block returns used to cause "live at entry to f8: x, y".
func f8() (x, y string) {
	return g8()
}

func g8() (string, string)

// ignoring block assignments used to cause "live at entry to f9: x"
// issue 7205
var i9 interface{}

func f9() bool {
	g8()
	x := i9
	y := interface{}(g18()) // ERROR "live at call to convT: x.data$" "live at call to g18: x.data$" "stack object .autotmp_[0-9]+ \[2\]string$"
	i9 = y                  // make y escape so the line above has to call convT
	return x != y
}

// liveness formerly confused by UNDEF followed by RET,
// leading to "live at entry to f10: ~r1" (unnamed result).
func f10() string {
	panic(1)
}
// liveness formerly confused by select, thinking runtime.selectgo
// can return to next instruction; it always jumps elsewhere.
// note that you have to use at least two cases in the select
// to get a true select; smaller selects compile to optimized helper functions.
var c chan *int
var b bool

// this used to have a spurious "live at entry to f11a: ~r0"
func f11a() *int {
	select { // ERROR "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
	case <-c:
		return nil
	case <-c:
		return nil
	}
}

func f11b() *int {
	p := new(int)
	if b {
		// At this point p is dead: the code here cannot
		// get to the bottom of the function.
		// This used to have a spurious "live at call to printint: p".
		printint(1) // nothing live here!
		select {    // ERROR "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
		case <-c:
			return nil
		case <-c:
			return nil
		}
	}
	println(*p)
	return nil
}

var sink *int

func f11c() *int {
	p := new(int)
	sink = p // prevent stack allocation, otherwise p is rematerializeable
	if b {
		// Unlike previous, the cases in this select fall through,
		// so we can get to the println, so p is not dead.
		printint(1) // ERROR "live at call to printint: p$"
		select {    // ERROR "live at call to selectgo: p$" "stack object .autotmp_[0-9]+ \[2\]runtime.scase$"
		case <-c:
		case <-c:
		}
	}
	println(*p)
	return nil
}

// similarly, select{} does not fall through.
// this used to have a spurious "live at entry to f12: ~r0".
func f12() *int {
	if b {
		select {}
	} else {
		return nil
	}
}
// incorrectly placed VARDEF annotations can cause missing liveness annotations.
// this used to be missing the fact that s is live during the call to g13 (because it is
// needed for the call to h13).
func f13() {
	s := g14()
	s = h13(s, g13(s)) // ERROR "live at call to g13: s.ptr$"
}

func g13(string) string
func h13(string, string) string

// more incorrectly placed VARDEF.
func f14() {
	x := g14() // ERROR "stack object x string$"
	printstringpointer(&x)
}

func g14() string
// Checking that various temporaries do not persist or cause
// ambiguously live values that must be zeroed.
// The exact temporary names are inconsequential but we are
// trying to check that there is only one at any given site,
// and also that none show up in "ambiguously live" messages.

var m map[string]int
var mi map[interface{}]int

// str and iface are used to ensure that a temp is required for runtime calls below.
func str() string
func iface() interface{}

func f16() {
	if b {
		delete(mi, iface()) // ERROR "stack object .autotmp_[0-9]+ interface \{\}$"
	}
	delete(mi, iface())
	delete(mi, iface())
}

var m2s map[string]*byte
var m2 map[[2]string]*byte
var x2 [2]string
var bp *byte
func f17a(p *byte) { // ERROR "live at entry to f17a: p$"
	if b {
		m2[x2] = p // ERROR "live at call to mapassign: p$"
	}
	m2[x2] = p // ERROR "live at call to mapassign: p$"
	m2[x2] = p // ERROR "live at call to mapassign: p$"
}

func f17b(p *byte) { // ERROR "live at entry to f17b: p$"
	// key temporary
	if b {
		m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	}
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
	m2s[str()] = p // ERROR "live at call to mapassign_faststr: p$" "live at call to str: p$"
}

func f17c() {
	// key and value temporaries
	if b {
		m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	}
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
	m2s[str()] = f17d() // ERROR "live at call to f17d: .autotmp_[0-9]+$" "live at call to mapassign_faststr: .autotmp_[0-9]+$"
}

func f17d() *byte

func g18() [2]string

func f18() {
	// key temporary for mapaccess.
	// temporary introduced by orderexpr.
	var z *byte
	if b {
		z = m2[g18()] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
	}
	z = m2[g18()]
	z = m2[g18()]
	printbytepointer(z)
}
var ch chan *byte

// byteptr is used to ensure that a temp is required for runtime calls below.
func byteptr() *byte

func f19() {
	// dest temporary for channel receive.
	var z *byte
	if b {
		z = <-ch // ERROR "stack object .autotmp_[0-9]+ \*byte$"
	}
	z = <-ch
	z = <-ch // ERROR "live at call to chanrecv1: .autotmp_[0-9]+$"
	printbytepointer(z)
}

func f20() {
	// src temporary for channel send
	if b {
		ch <- byteptr() // ERROR "stack object .autotmp_[0-9]+ \*byte$"
	}
	ch <- byteptr()
	ch <- byteptr()
}

func f21() {
	// key temporary for mapaccess using array literal key.
	var z *byte
	if b {
		z = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
	}
	z = m2[[2]string{"x", "y"}]
	z = m2[[2]string{"x", "y"}]
	printbytepointer(z)
}

func f23() {
	// key temporary for two-result map access using array literal key.
	var z *byte
	var ok bool
	if b {
		z, ok = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
	}
	z, ok = m2[[2]string{"x", "y"}]
	z, ok = m2[[2]string{"x", "y"}]
	printbytepointer(z)
	print(ok)
}

func f24() {
	// key temporary for map access using array literal key.
	// value temporary too.
	if b {
		m2[[2]string{"x", "y"}] = nil // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
	}
	m2[[2]string{"x", "y"}] = nil
	m2[[2]string{"x", "y"}] = nil
}
// Non-open-coded defers should not cause autotmps. (Open-coded defers do create extra autotmps).
func f25(b bool) {
	for i := 0; i < 2; i++ {
		// Put in loop to make sure defer is not open-coded
		defer g25()
	}
	if b {
		return
	}
	var x string
	x = g14()
	printstring(x)
	return
}

func g25()

// non-escaping ... slices passed to function call should die on return,
// so that the temporaries do not stack and do not cause ambiguously
// live variables.
func f26(b bool) {
	if b {
		print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "stack object .autotmp_[0-9]+ \[3\]interface \{\}$"
	}
	print26((*int)(nil), (*int)(nil), (*int)(nil))
	print26((*int)(nil), (*int)(nil), (*int)(nil))
	printnl()
}

//go:noescape
func print26(...interface{})

// non-escaping closures passed to function call should die on return
func f27(b bool) {
	x := 0
	if b {
		call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
	}
	call27(func() { x++ })
	call27(func() { x++ })
	printnl()
}

// but defer does escape to later execution in the function
func f27defer(b bool) {
	x := 0
	if b {
		defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
	}
	defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
	printnl()                    // ERROR "live at call to printnl: .autotmp_[0-9]+ .autotmp_[0-9]+"
	return                       // ERROR "live at indirect call: .autotmp_[0-9]+"
}

// and newproc (go) escapes to the heap
func f27go(b bool) {
	x := 0
	if b {
		go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go
	}
	go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go
	printnl()
}
//go:noescape
func call27(func())

// concatstring slice should die on return
var s1, s2, s3, s4, s5, s6, s7, s8, s9, s10 string

func f28(b bool) {
	if b {
		printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "stack object .autotmp_[0-9]+ \[10\]string$"
	}
	printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10)
	printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10)
}

// map iterator should die on end of range loop
func f29(b bool) {
	if b {
		for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hiter$"
			printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
		}
	}
	for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$"
		printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
	}
	for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$"
		printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$"
	}
}

// copy of array of pointers should die at end of range loop
var pstructarr [10]pstruct

// Struct size chosen to make pointer to element in pstructarr
// not computable by strength reduction.
type pstruct struct {
	intp *int
	_    [8]byte
}

func f30(b bool) {
	// live temp during printintpointer(p):
	// the internal iterator pointer if a pointer to pstruct in pstructarr
	// can not be easily computed by strength reduction.
	if b {
		for _, p := range pstructarr { // ERROR "stack object .autotmp_[0-9]+ \[10\]pstruct$"
			printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$"
		}
	}
	for _, p := range pstructarr {
		printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$"
	}
	for _, p := range pstructarr {
		printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$"
	}
}
// conversion to interface should not leave temporary behind
func f31(b1, b2, b3 bool) {
	if b1 {
		g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$"
	}
	if b2 {
		h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$"
	}
	if b3 {
		panic(g18())
	}
	print(b3)
}

func g31(interface{})
func h31(...interface{})

// non-escaping partial functions passed to function call should die on return
type T32 int

func (t *T32) Inc() { // ERROR "live at entry to \(\*T32\).Inc: t$"
	*t++
}

var t32 T32

func f32(b bool) {
	if b {
		call32(t32.Inc) // ERROR "stack object .autotmp_[0-9]+ struct \{"
	}
	call32(t32.Inc)
	call32(t32.Inc)
}

//go:noescape
func call32(func())

// temporaries introduced during if conditions and && || expressions
// should die once the condition has been acted upon.
var m33 map[interface{}]int

func f33() {
	if m33[byteptr()] == 0 { // ERROR "stack object .autotmp_[0-9]+ interface \{\}$"
		printnl()
		return
	} else {
		printnl()
	}
	printnl()
}

func f34() {
	if m33[byteptr()] == 0 { // ERROR "stack object .autotmp_[0-9]+ interface \{\}$"
		printnl()
		return
	}
	printnl()
}

func f35() {
	if m33[byteptr()] == 0 && // ERROR "stack object .autotmp_[0-9]+ interface \{\}"
		m33[byteptr()] == 0 { // ERROR "stack object .autotmp_[0-9]+ interface \{\}"
		printnl()
		return
	}
	printnl()
}

func f36() {
	if m33[byteptr()] == 0 || // ERROR "stack object .autotmp_[0-9]+ interface \{\}"
		m33[byteptr()] == 0 { // ERROR "stack object .autotmp_[0-9]+ interface \{\}"
		printnl()
		return
	}
	printnl()
}

func f37() {
	if (m33[byteptr()] == 0 || // ERROR "stack object .autotmp_[0-9]+ interface \{\}"
		m33[byteptr()] == 0) && // ERROR "stack object .autotmp_[0-9]+ interface \{\}"
		m33[byteptr()] == 0 {
		printnl()
		return
	}
	printnl()
}
// select temps should disappear in the case bodies
var c38 chan string

func fc38() chan string
func fi38(int) *string
func fb38() *bool

func f38(b bool) {
	// we don't care what temps are printed on the lines with output.
	// we care that the println lines have no live variables
	// and therefore no output.
	if b {
		select { // ERROR "live at call to selectgo:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ \[4\]runtime.scase$"
		case <-fc38():
			printnl()
		case fc38() <- *fi38(1): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ string$"
			printnl()
		case *fi38(2) = <-fc38(): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ string$"
			printnl()
		case *fi38(3), *fb38() = <-fc38(): // ERROR "stack object .autotmp_[0-9]+ string$" "live at call to f[ibc]38:( .autotmp_[0-9]+)+$"
			printnl()
		}
		printnl()
	}
	printnl()
}

// issue 8097: mishandling of x = x during return.
func f39() (x []int) {
	x = []int{1}
	printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+$"
	return x
}

func f39a() (x []int) {
	x = []int{1}
	printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+$"
	return
}

func f39b() (x [10]*int) {
	x = [10]*int{}
	x[0] = new(int) // ERROR "live at call to newobject: x$"
	printnl()       // ERROR "live at call to printnl: x$"
	return x
}

func f39c() (x [10]*int) {
	x = [10]*int{}
	x[0] = new(int) // ERROR "live at call to newobject: x$"
	printnl()       // ERROR "live at call to printnl: x$"
	return
}
// issue 8142: lost 'addrtaken' bit on inlined variables.
// no inlining in this test, so just checking that non-inlined works.
type T40 struct {
	m map[int]int
}

//go:noescape
func useT40(*T40)

func newT40() *T40 {
	ret := T40{}
	ret.m = make(map[int]int, 42) // ERROR "live at call to makemap: &ret$"
	return &ret
}

func bad40() {
	t := newT40()
	_ = t
	printnl()
}

func good40() {
	ret := T40{}              // ERROR "stack object ret T40$"
	ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$"
	t := &ret
	printnl() // ERROR "live at call to printnl: ret$"
	// Note: ret is live at the printnl because the compiler moves &ret
	// from before the printnl to after.
	useT40(t)
}

func ddd1(x, y *int) { // ERROR "live at entry to ddd1: x y$"
	ddd2(x, y) // ERROR "stack object .autotmp_[0-9]+ \[2\]\*int$"
	printnl()
	// Note: no .?autotmp live at printnl. See issue 16996.
}

func ddd2(a ...*int) { // ERROR "live at entry to ddd2: a$"
	sink = a[0]
}

// issue 16016: autogenerated wrapper should have arguments live
type T struct{}

func (*T) Foo(ptr *int) {}

type R struct{ *T }

// issue 18860: output arguments must be live all the time if there is a defer.
// In particular, at printint r must be live.
func f41(p, q *int) (r *int) { // ERROR "live at entry to f41: p q$"
	r = p
	defer func() {
		recover()
	}()
	printint(0) // ERROR "live at call to printint: .autotmp_[0-9]+ q r$"
	r = q
	return // ERROR "live at call to f41.func1: .autotmp_[0-9]+ r$"
}

func f42() {
	var p, q, r int
	f43([]*int{&p, &q, &r}) // ERROR "stack object .autotmp_[0-9]+ \[3\]\*int$"
	f43([]*int{&p, &r, &q})
	f43([]*int{&q, &p, &r})
}

//go:noescape
func f43(a []*int)
// Assigning to a sub-element that makes up an entire local variable
// should clobber that variable.
func f44(f func() [2]*int) interface{} { // ERROR "live at entry to f44: f"
	type T struct {
		s [1][2]*int
	}
	ret := T{} // ERROR "stack object ret T"
	ret.s[0] = f()
	return ret
}

func f45(a, b, c, d, e, f, g, h, i, j, k, l *byte) { // ERROR "live at entry to f45: a b c d e f g h i j k l"
	f46(a, b, c, d, e, f, g, h, i, j, k, l) // ERROR "live at call to f46: a b c d e f g h i j k l"
	runtime.KeepAlive(a)
	runtime.KeepAlive(b)
	runtime.KeepAlive(c)
	runtime.KeepAlive(d)
	runtime.KeepAlive(e)
	runtime.KeepAlive(f)
	runtime.KeepAlive(g)
	runtime.KeepAlive(h)
	runtime.KeepAlive(i)
	runtime.KeepAlive(j)
	runtime.KeepAlive(k)
	runtime.KeepAlive(l)
}

//go:noinline
func f46(a, b, c, d, e, f, g, h, i, j, k, l *byte) {
}