mirror of
https://github.com/golang/go
synced 2024-11-07 11:46:17 -07:00
6d00e8c478
Move ops can be faster than memmove calls because the number of bytes to be moved is fixed and they don't incur the overhead of a call. This change allows memmove to be converted into a Move op when the arguments are disjoint. The optimization is only enabled on s390x at the moment, however other architectures may also benefit from it in the future. The memmove inlining rule triggers an extra 12 times when compiling the standard library. It will most likely make more of a difference as the disjoint function is improved over time (to recognize fresh heap allocations for example). Change-Id: I9af570dcfff28257b8e59e0ff584a46d8e248310 Reviewed-on: https://go-review.googlesource.com/110064 Run-TryBot: Michael Munday <mike.munday@ibm.com> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Ilya Tocar <ilya.tocar@intel.com>
81 lines
1.5 KiB
Go
81 lines
1.5 KiB
Go
// asmcheck

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codegen

import "runtime"

// Check small copies are replaced with moves.
// movesmall4 checks that a fixed-size 4-byte overlapping copy is
// compiled to inline move instructions rather than a call to memmove.
// The directive comments are asmcheck assertions: a leading '-' means
// the pattern must NOT appear in the generated assembly for that arch.
func movesmall4() {
	x := [...]byte{1, 2, 3, 4}
	// 386:-".*memmove"
	// amd64:-".*memmove"
	// arm:-".*memmove"
	// arm64:-".*memmove"
	copy(x[1:], x[:])
}
// movesmall7 checks that a fixed-size 7-byte overlapping copy avoids a
// memmove call on the listed architectures (asmcheck '-' = pattern must
// not appear in the generated assembly).
func movesmall7() {
	x := [...]byte{1, 2, 3, 4, 5, 6, 7}
	// 386:-".*memmove"
	// amd64:-".*memmove"
	// arm64:-".*memmove"
	copy(x[1:], x[:])
}
// movesmall16 checks that a fixed-size 16-byte overlapping copy avoids
// a memmove call on amd64 (asmcheck '-' = pattern must not appear in
// the generated assembly).
func movesmall16() {
	x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
	// amd64:-".*memmove"
	copy(x[1:], x[:])
}
// x is a package-level 256-byte array used as the source/destination
// in the disjoint-copy and pointer-offset checks below.
var x [256]byte

// Check that large disjoint copies are replaced with moves.

// moveDisjointStack checks that copying the global x into a disjoint
// stack buffer is lowered to move instructions (no memmove call) on
// s390x. KeepAlive stops the compiler from eliminating s — and with it
// the copy — as dead.
func moveDisjointStack() {
	var s [256]byte
	// s390x:-".*memmove"
	copy(s[:], x[:])
	runtime.KeepAlive(&s)
}
// moveDisjointArg checks that copying from a pointer argument into a
// local stack buffer — provably disjoint regions — avoids a memmove
// call on s390x. KeepAlive stops the compiler from eliminating s, and
// with it the copy, as dead.
func moveDisjointArg(b *[256]byte) {
	var s [256]byte
	// s390x:-".*memmove"
	copy(s[:], b[:])
	runtime.KeepAlive(&s)
}
// moveDisjointNoOverlap checks that a copy within one array whose
// source and destination ranges cannot overlap (the copy length is
// capped at 128 by the shorter source slice, so dst a[0:128] and
// src a[128:256] are disjoint) avoids a memmove call on s390x.
func moveDisjointNoOverlap(a *[256]byte) {
	// s390x:-".*memmove"
	copy(a[:], a[128:])
}

// Check that no branches are generated when the pointers are [not] equal.

// ptrEqual checks that no conditional branches (overlap/equality
// checks) are emitted when the source and destination pointers are
// statically identical.
func ptrEqual() {
	// amd64:-"JEQ",-"JNE"
	// ppc64le:-"BEQ",-"BNE"
	// s390x:-"BEQ",-"BNE"
	copy(x[:], x[:])
}
// ptrOneOffset checks that no conditional branches are emitted when
// one pointer is at a constant offset from the other, so their
// (in)equality is known at compile time.
func ptrOneOffset() {
	// amd64:-"JEQ",-"JNE"
	// ppc64le:-"BEQ",-"BNE"
	// s390x:-"BEQ",-"BNE"
	copy(x[1:], x[:])
}
// ptrBothOffset checks that no conditional branches are emitted when
// both pointers are at distinct constant offsets into the same array,
// so their inequality is known at compile time.
func ptrBothOffset() {
	// amd64:-"JEQ",-"JNE"
	// ppc64le:-"BEQ",-"BNE"
	// s390x:-"BEQ",-"BNE"
	copy(x[1:], x[2:])
}