mirror of
https://github.com/golang/go
synced 2024-11-14 07:50:21 -07:00
c2c1822b12
A new pass run after ssa building (before any other optimization) identifies the "first" ssa node for each statement. Other "noise" nodes are tagged as being never appropriate for a statement boundary (e.g., VarKill, VarDef, Phi). Rewrite, deadcode, cse, and nilcheck are modified to move the statement boundaries forward whenever possible if a boundary-tagged ssa value is removed; never-boundary nodes are ignored in this search (some operations involving constants are also tagged as never-boundary and also ignored because they are likely to be moved or removed during optimization). Code generation treats all nodes except those explicitly marked as statement boundaries as "not statement" nodes, and floats statement boundaries to the beginning of each same-line run of instructions found within a basic block. Line number html conversion was modified to make statement boundary nodes a bit more obvious by prepending a "+". The code in fuse.go that glued together the value slices of two blocks produced a result that depended on the former capacities (not lengths) of the two slices. This causes differences in the 386 bootstrap, and also can sometimes put values into an order that does a worse job of preserving statement boundaries when values are removed. Portions of two delve tests that had caught problems were incorporated into ssa/debug_test.go. There are some opportunities to do better with optimized code, but the next-ing is not lying or overly jumpy. Over 4 CLs, compilebench geomean measured binary size increase of 3.5% and compile user time increase of 3.8% (this is after optimization to reuse a sparse map instead of creating multiple maps.) This CL worsens the optimized-debugging experience with Delve; we need to work with the delve team so that they can use the is_stmt marks that we're emitting now. The reference output changes from time to time depending on other changes in the compiler, sometimes better, sometimes worse. 
This CL now includes a test ensuring that 99+% of the lines in the Go command itself (a handy optimized binary) include is_stmt markers. Change-Id: I359c94e06843f1eb41f9da437bd614885aa9644a Reviewed-on: https://go-review.googlesource.com/102435 Run-TryBot: David Chase <drchase@google.com> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Austin Clements <austin@google.com>
96 lines
2.3 KiB
Go
96 lines
2.3 KiB
Go
// asmcheck
|
|
|
|
// Copyright 2018 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
package codegen
|
|
|
|
import "runtime"
|
|
|
|
// This file contains code generation tests related to the use of the
|
|
// stack.
|
|
|
|
// Check that stack stores are optimized away.
//
// Each arch-prefixed comment below is an asmcheck directive: the test
// harness matches it against the function's generated assembly. Here
// they assert the TEXT line declares a zero-size frame (or, on arches
// with a software link register, only the return-address slot), which
// shows x never received a stack slot.

// 386:"TEXT\t.*, [$]0-"
// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]-4-"
// arm64:"TEXT\t.*, [$]-8-"
// mips:"TEXT\t.*, [$]-4-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func StackStore() int {
	var x int
	// Taking x's address and immediately dereferencing it must not
	// force x onto the stack; the store/load pair should be elided.
	return *(&x)
}
|
|
|
|
// T is the struct used by the clearing and partial-initialization
// tests below (ZeroLargeStruct, KeepWanted). The split between
// exported and unexported fields matters to KeepWanted: the exported
// ones are copied, the unexported ones are zeroed.
type T struct {
	A, B, C, D int // keep exported fields
	x, y, z    int // reset unexported fields
}
|
|
|
|
// Check that large structs are cleared directly (issue #24416).
//
// A zero frame size in the TEXT directives below shows the zero value
// t is never materialized on the stack; *x is zeroed in place.

// 386:"TEXT\t.*, [$]0-"
// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]-8-"
// mips:"TEXT\t.*, [$]-4-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ZeroLargeStruct(x *T) {
	// Assigning a freshly zeroed composite through a pointer is the
	// pattern under test; do not simplify to *x = T{}.
	t := T{}
	*x = t
}
|
|
|
|
// Check that structs are partially initialised directly (issue #24386).
//
// Only the exported fields of *t are preserved; the unexported ones
// are reset by the composite literal. The zero-frame TEXT checks show
// the literal is built in place rather than on the stack.

// Notes:
// - 386 fails due to spilling a register

// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]-8-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
// Note: that 386 currently has to spill a register.
func KeepWanted(t *T) {
	*t = T{A: t.A, B: t.B, C: t.C, D: t.D}
}
|
|
|
|
// Check that small array operations avoid using the stack (issue #15925).
//
// The element-wise add of two [4]float64 values should stay entirely
// in registers; the zero-frame TEXT checks confirm no stack temporary
// is allocated for the result literal.

// Notes:
// - 386 fails due to spilling a register
// - arm & mips fail due to softfloat calls

// amd64:"TEXT\t.*, [$]0-"
// arm64:"TEXT\t.*, [$]-8-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ArrayAdd64(a, b [4]float64) [4]float64 {
	return [4]float64{a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]}
}
|
|
|
|
// Check that small array initialization avoids using the stack.
//
// The mixed constant/variable [4]int literal should be assembled
// directly into the result without a stack-allocated temporary, as
// asserted by the zero-frame TEXT checks.

// 386:"TEXT\t.*, [$]0-"
// amd64:"TEXT\t.*, [$]0-"
// arm:"TEXT\t.*, [$]0-" (spills return address)
// arm64:"TEXT\t.*, [$]-8-"
// mips:"TEXT\t.*, [$]-4-"
// ppc64le:"TEXT\t.*, [$]0-"
// s390x:"TEXT\t.*, [$]0-"
func ArrayInit(i, j int) [4]int {
	return [4]int{i, 0, j, 0}
}
|
|
|
|
// Check that assembly output has matching offset and base register
// (issue #21064).
//
// The in-body directives match the assembly generated for the load of
// b: the frame offset printed for b must agree with the base register
// used (SP on amd64, FP on arm). Parameter a is deliberately unused —
// its presence fixes b's offset within the argument area.
//
// NOTE(review): the underscore name is non-idiomatic Go, but it is
// presumably kept for grep-ability with the original issue; renaming
// would need a check that nothing references it.

func check_asmout(a, b int) int {
	runtime.GC() // use some frame
	// amd64:`.*b\+24\(SP\)`
	// arm:`.*b\+4\(FP\)`
	return b
}
|