go/test/nosplit.go

// +build !nacl,!js,!aix,!gcflags_noopt,gc
// run

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
)
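
// debug, if set, dumps each stanza together with the generated
// main.go and asm.s before building them.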
const debug = false

var tests = `
# These are test cases for the linker analysis that detects chains of
# nosplit functions that would cause a stack overflow.
#
# Lines beginning with # are comments.
#
# Each test case describes a sequence of functions, one per line.
# Each function definition is the function name, then the frame size,
# then optionally the keyword 'nosplit', then the body of the function.
# The body is assembly code, with some shorthands.
# The shorthand 'call x' stands for CALL x(SB).
# The shorthand 'callind' stands for 'CALL R0', where R0 is a register.
# Each test case must define a function named start, and it must be first.
# That is, a line beginning "start " indicates the start of a new test case.
# Within a stanza, ; can be used instead of \n to separate lines.
#
# After the function definition, the test case ends with an optional
# REJECT line, specifying the architectures on which the case should
# be rejected. "REJECT" without any architectures means reject on all architectures.
# The linker should accept the test case on systems not explicitly rejected.
#
# 64-bit systems do not attempt to execute test cases with frame sizes
# that are only 32-bit aligned.
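#
# For example, the stanza
#
#   start 0 call f1
#   f1 80 nosplit call f2
#   f2 80 nosplit
#   REJECT
#
# defines start -> f1 -> f2, where f1 and f2 are nosplit with 80-byte
# frames. Together they exceed the 128-byte nosplit limit, so the
# linker must reject the chain on every architecture. (This same case
# appears again below.)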
# Ordinary function should work
start 0
# Large frame marked nosplit is always wrong.
# Frame is so large it overflows cmd/link's int16.
start 100000 nosplit
REJECT
# Calling a large frame is okay.
start 0 call big
big 10000
# But not if the frame is nosplit.
start 0 call big
big 10000 nosplit
REJECT
# Recursion is okay.
start 0 call start
# Recursive nosplit runs out of space.
start 0 nosplit call start
REJECT
# Non-trivial recursion runs out of space.
start 0 call f1
f1 0 nosplit call f2
f2 0 nosplit call f1
REJECT
# Same but cycle starts below nosplit entry.
start 0 call f1
f1 0 nosplit call f2
f2 0 nosplit call f3
f3 0 nosplit call f2
REJECT
# Chains of ordinary functions okay.
start 0 call f1
f1 80 call f2
f2 80
# Chains of nosplit must fit in the stack limit, 128 bytes.
start 0 call f1
f1 80 nosplit call f2
f2 80 nosplit
REJECT
# Larger chains.
start 0 call f1
f1 16 call f2
f2 16 call f3
f3 16 call f4
f4 16 call f5
f5 16 call f6
f6 16 call f7
f7 16 call f8
f8 16 call end
end 1000
start 0 call f1
f1 16 nosplit call f2
f2 16 nosplit call f3
f3 16 nosplit call f4
f4 16 nosplit call f5
f5 16 nosplit call f6
f6 16 nosplit call f7
f7 16 nosplit call f8
f8 16 nosplit call end
end 1000
REJECT
# Two paths both go over the stack limit.
start 0 call f1
f1 80 nosplit call f2 call f3
f2 40 nosplit call f4
f3 96 nosplit
f4 40 nosplit
REJECT
# Test cases near the 128-byte limit.
# Ordinary stack split frame is always okay.
start 112
start 116
start 120
start 124
start 128
start 132
start 136
# A nosplit leaf can use the whole 128-CallSize bytes available on entry.
# (CallSize is 32 on ppc64, 8 on amd64 for frame pointer.)
start 96 nosplit
start 100 nosplit; REJECT ppc64 ppc64le
start 104 nosplit; REJECT ppc64 ppc64le arm64
start 108 nosplit; REJECT ppc64 ppc64le
start 112 nosplit; REJECT ppc64 ppc64le arm64
start 116 nosplit; REJECT ppc64 ppc64le
start 120 nosplit; REJECT ppc64 ppc64le amd64 arm64
start 124 nosplit; REJECT ppc64 ppc64le amd64
start 128 nosplit; REJECT
start 132 nosplit; REJECT
start 136 nosplit; REJECT
# Calling a nosplit function from a nosplit function requires
# having room for the saved caller PC and the called frame.
# Because ARM doesn't save LR in the leaf, it gets an extra 4 bytes.
# Because arm64 doesn't save LR in the leaf, it gets an extra 8 bytes.
# ppc64 doesn't save LR in the leaf, but CallSize is 32, so it gets 24 bytes.
# Because AMD64 uses frame pointer, it has 8 fewer bytes.
start 96 nosplit call f; f 0 nosplit
start 100 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
start 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le arm64
start 108 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
start 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 arm64
start 116 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64
start 120 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 arm64
start 124 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 386
start 128 nosplit call f; f 0 nosplit; REJECT
start 132 nosplit call f; f 0 nosplit; REJECT
start 136 nosplit call f; f 0 nosplit; REJECT
# Calling a splitting function from a nosplit function requires
# having room for the saved caller PC of the call but also the
# saved caller PC for the call to morestack.
# Architectures differ in the same way as before.
start 96 nosplit call f; f 0 call f
start 100 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
start 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 arm64
start 108 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
start 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 arm64
start 116 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
start 120 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386 arm64
start 124 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
start 128 nosplit call f; f 0 call f; REJECT
start 132 nosplit call f; f 0 call f; REJECT
start 136 nosplit call f; f 0 call f; REJECT
# Indirect calls are assumed to be splitting functions.
start 96 nosplit callind
start 100 nosplit callind; REJECT ppc64 ppc64le
start 104 nosplit callind; REJECT ppc64 ppc64le amd64 arm64
start 108 nosplit callind; REJECT ppc64 ppc64le amd64
start 112 nosplit callind; REJECT ppc64 ppc64le amd64 arm64
start 116 nosplit callind; REJECT ppc64 ppc64le amd64
start 120 nosplit callind; REJECT ppc64 ppc64le amd64 386 arm64
start 124 nosplit callind; REJECT ppc64 ppc64le amd64 386
start 128 nosplit callind; REJECT
start 132 nosplit callind; REJECT
start 136 nosplit callind; REJECT
# Issue 7623
start 0 call f; f 112
start 0 call f; f 116
start 0 call f; f 120
start 0 call f; f 124
start 0 call f; f 128
start 0 call f; f 132
start 0 call f; f 136
`

var (
	commentRE = regexp.MustCompile(`(?m)^#.*`)
	rejectRE  = regexp.MustCompile(`(?s)\A(.+?)((\n|; *)REJECT(.*))?\z`)
	lineRE    = regexp.MustCompile(`(\w+) (\d+)( nosplit)?(.*)`)
	callRE    = regexp.MustCompile(`\bcall (\w+)\b`)
	callindRE = regexp.MustCompile(`\bcallind\b`)
)
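
// For a function line like "f1 80 nosplit call f2", lineRE yields
// m[1]="f1" (name), m[2]="80" (frame size), m[3]=" nosplit", and
// m[4]=" call f2" (the body, whose shorthands callRE and callindRE
// expand). rejectRE splits a stanza into its function lines (m[1])
// and an optional trailing REJECT clause (m[2]) with an architecture
// list (m[4]).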

func main() {
	goarch := os.Getenv("GOARCH")
	if goarch == "" {
		goarch = runtime.GOARCH
	}

	dir, err := ioutil.TempDir("", "go-test-nosplit")
	if err != nil {
		bug()
		fmt.Printf("creating temp dir: %v\n", err)
		return
	}
	defer os.RemoveAll(dir)
	os.Setenv("GOPATH", filepath.Join(dir, "_gopath"))

	if err := ioutil.WriteFile(filepath.Join(dir, "go.mod"), []byte("module go-test-nosplit\n"), 0666); err != nil {
		log.Panic(err)
	}

	tests = strings.Replace(tests, "\t", " ", -1)
	tests = commentRE.ReplaceAllString(tests, "")

	nok := 0
	nfail := 0
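
	// Each test case is a stanza running from one line that begins
	// "start " to the next such line, as the format comment in tests
	// describes.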
TestCases:
	for len(tests) > 0 {
		var stanza string
		i := strings.Index(tests, "\nstart ")
		if i < 0 {
			stanza, tests = tests, ""
		} else {
			stanza, tests = tests[:i], tests[i+1:]
		}

		m := rejectRE.FindStringSubmatch(stanza)
		if m == nil {
			bug()
			fmt.Printf("invalid stanza:\n\t%s\n", indent(stanza))
			continue
		}
		lines := strings.TrimSpace(m[1])

		reject := false
		if m[2] != "" {
			if strings.TrimSpace(m[4]) == "" {
				reject = true
			} else {
				for _, rej := range strings.Fields(m[4]) {
					if rej == goarch {
						reject = true
					}
				}
			}
		}
		if lines == "" && !reject {
			continue
		}

		var gobuf bytes.Buffer
		fmt.Fprintf(&gobuf, "package main\n")

		var buf bytes.Buffer

		ptrSize := 4
		switch goarch {
		case "mips", "mipsle":
			fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
		case "mips64", "mips64le":
			ptrSize = 8
			fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
		case "loong64":
			ptrSize = 8
			fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
		case "ppc64", "ppc64le":
			ptrSize = 8
			fmt.Fprintf(&buf, "#define REGISTER (CTR)\n")
		case "arm":
			fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
		case "arm64":
			ptrSize = 8
			fmt.Fprintf(&buf, "#define REGISTER (R0)\n")
		case "amd64":
			ptrSize = 8
			fmt.Fprintf(&buf, "#define REGISTER AX\n")
		case "riscv64":
			ptrSize = 8
			fmt.Fprintf(&buf, "#define REGISTER A0\n")
		case "s390x":
			ptrSize = 8
			fmt.Fprintf(&buf, "#define REGISTER R10\n")
		default:
			fmt.Fprintf(&buf, "#define REGISTER AX\n")
		}
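
		// REGISTER is substituted into the "CALL REGISTER" instruction
		// that the 'callind' shorthand expands to below.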

		// Since all of the functions we're generating are
		// ABI0, first enter ABI0 via a splittable function
		// and then go to the chain we're testing. This way we
		// don't have to account for ABI wrappers in the chain.
		fmt.Fprintf(&gobuf, "func main0()\n")
		fmt.Fprintf(&gobuf, "func main() { main0() }\n")
		fmt.Fprintf(&buf, "TEXT ·main0(SB),0,$0-0\n\tCALL ·start(SB)\n")

		adjusted := false
		for _, line := range strings.Split(lines, "\n") {
			line = strings.TrimSpace(line)
			if line == "" {
				continue
			}
			for _, subline := range strings.Split(line, ";") {
				subline = strings.TrimSpace(subline)
				if subline == "" {
					continue
				}
				m := lineRE.FindStringSubmatch(subline)
				if m == nil {
					bug()
					fmt.Printf("invalid function line: %s\n", subline)
					continue TestCases
				}
				name := m[1]
				size, _ := strconv.Atoi(m[2])
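				// Frame sizes that are only 32-bit aligned are not
				// attempted on 64-bit systems (see the note at the
				// top of tests), so skip those cases there.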
				if size%ptrSize == 4 {
					continue TestCases
				}
				nosplit := m[3]
				body := m[4]

				// The limit was originally 128 but is now 800 (928-128).
				// Instead of rewriting the test cases above, adjust
				// the first nosplit frame to use up the extra bytes.
				// This isn't exactly right because we could have
				// nosplit -> split -> nosplit, but it's good enough.
				if !adjusted && nosplit != "" {
					adjusted = true
					size += (928 - 128) - 128
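					// That adds 672 bytes: nosplit chains now get
					// 928-128 = 800 bytes of headroom, of which the
					// cases above already assume 128.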

					// Noopt builds have a larger stackguard.
					// See ../src/cmd/dist/buildruntime.go:stackGuardMultiplier.
					// This increase is included in objabi.StackGuard.
					for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
						if s == "-N" {
							size += 928
						}
					}
				}

				if nosplit != "" {
					nosplit = ",7"
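					// 7 is NOPROF|DUPOK|NOSPLIT (runtime/textflag.h);
					// NOSPLIT is the bit that matters here.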
				} else {
					nosplit = ",0"
				}
				body = callRE.ReplaceAllString(body, "CALL ·$1(SB);")
				body = callindRE.ReplaceAllString(body, "CALL REGISTER;")

				fmt.Fprintf(&gobuf, "func %s()\n", name)
				fmt.Fprintf(&buf, "TEXT ·%s(SB)%s,$%d-0\n\t%s\n\tRET\n\n", name, nosplit, size, body)
			}
		}
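
		// For example, on amd64 the stanza "start 0 nosplit call start"
		// generates roughly this asm.s (the $672 frame comes from the
		// adjustment above):
		//
		//	#define REGISTER AX
		//	TEXT ·main0(SB),0,$0-0
		//		CALL ·start(SB)
		//	TEXT ·start(SB),7,$672-0
		//		CALL ·start(SB);
		//		RET
		//
		// while main.go declares func start() alongside main and main0.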

		if debug {
			fmt.Printf("===\n%s\n", strings.TrimSpace(stanza))
			fmt.Printf("-- main.go --\n%s", gobuf.String())
			fmt.Printf("-- asm.s --\n%s", buf.String())
		}

		if err := ioutil.WriteFile(filepath.Join(dir, "asm.s"), buf.Bytes(), 0666); err != nil {
			log.Fatal(err)
		}
		if err := ioutil.WriteFile(filepath.Join(dir, "main.go"), gobuf.Bytes(), 0666); err != nil {
			log.Fatal(err)
		}

		cmd := exec.Command("go", "build")
		cmd.Dir = dir
		output, err := cmd.CombinedOutput()
		if err == nil {
			nok++
			if reject {
				bug()
				fmt.Printf("accepted incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
			}
		} else {
			nfail++
			if !reject {
				bug()
				fmt.Printf("rejected incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
				fmt.Printf("\n\tlinker output:\n\t%s\n", indent(string(output)))
			}
		}
	}

	if !bugged && (nok == 0 || nfail == 0) {
		bug()
		fmt.Printf("not enough test cases run\n")
	}
}

func indent(s string) string {
	return strings.Replace(s, "\n", "\n\t", -1)
}

var bugged = false

func bug() {
	if !bugged {
		bugged = true
		fmt.Printf("BUG\n")
	}
}