go/test/codegen/mathbits.go

// asmcheck

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
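
// Note on the asmcheck comments used throughout this file (informal
// summary): a comment of the form arch[/variant]:"regexp" asserts that the
// generated assembly for the enclosing function on that architecture
// matches the regexp, and a leading '-' (as in -"BSRQ") asserts that it
// must not match.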

package codegen

import "math/bits"

// ----------------------- //
// bits.LeadingZeros //
// ----------------------- //
func LeadingZeros(n uint) int {
// amd64/v1,amd64/v2:"BSRQ"
// amd64/v3:"LZCNTQ", -"BSRQ"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZV",-"SUB"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"CNTLZD"
return bits.LeadingZeros(n)
}
func LeadingZeros64(n uint64) int {
// amd64/v1,amd64/v2:"BSRQ"
// amd64/v3:"LZCNTQ", -"BSRQ"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZV",-"SUB"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"CNTLZD"
return bits.LeadingZeros64(n)
}
func LeadingZeros32(n uint32) int {
// amd64/v1,amd64/v2:"BSRQ","LEAQ",-"CMOVQEQ"
// amd64/v3: "LZCNTL",- "BSRL"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZW"
// loong64:"CLZW",-"SUB"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"CNTLZW"
return bits.LeadingZeros32(n)
}
func LeadingZeros16(n uint16) int {
// amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ"
// amd64/v3: "LZCNTL",- "BSRL"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZV"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"CNTLZD"
return bits.LeadingZeros16(n)
}
func LeadingZeros8(n uint8) int {
// amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ"
// amd64/v3: "LZCNTL",- "BSRL"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZV"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"CNTLZD"
return bits.LeadingZeros8(n)
}
// --------------- //
// bits.Len* //
// --------------- //
func Len(n uint) int {
// amd64/v1,amd64/v2:"BSRQ"
// amd64/v3: "LZCNTQ"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZV"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"SUBC","CNTLZD"
return bits.Len(n)
}
func Len64(n uint64) int {
// amd64/v1,amd64/v2:"BSRQ"
// amd64/v3: "LZCNTQ"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZV"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"SUBC","CNTLZD"
return bits.Len64(n)
}
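// SubFromLen64 checks that 64-bits.Len64(n) is recognized as a plain
// leading-zero count, with no leftover add/sub of the constant 64
// (hence the negative -"ADD" and -"SUBC" checks below).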
func SubFromLen64(n uint64) int {
// loong64:"CLZV",-"ADD"
// ppc64x:"CNTLZD",-"SUBC"
return 64 - bits.Len64(n)
}
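// CompareWithLen64 checks that comparing a bits.Len64 result against a small
// constant folds into the leading-zero count itself, so neither the 64 nor
// the 9 appears as a literal in the generated code (see the -"[$]64" and
// -"[$]9" checks).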
func CompareWithLen64(n uint64) bool {
// loong64:"CLZV",-"ADD",-"[$]64",-"[$]9"
return bits.Len64(n) < 9
}
func Len32(n uint32) int {
// amd64/v1,amd64/v2:"BSRQ","LEAQ",-"CMOVQEQ"
// amd64/v3: "LZCNTL"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZW"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x: "CNTLZW"
return bits.Len32(n)
}
func Len16(n uint16) int {
// amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ"
// amd64/v3: "LZCNTL"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZV"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"SUBC","CNTLZD"
return bits.Len16(n)
}
func Len8(n uint8) int {
// amd64/v1,amd64/v2:"BSRL","LEAL",-"CMOVQEQ"
// amd64/v3: "LZCNTL"
// s390x:"FLOGR"
// arm:"CLZ" arm64:"CLZ"
// loong64:"CLZV"
// mips:"CLZ"
// wasm:"I64Clz"
// ppc64x:"SUBC","CNTLZD"
return bits.Len8(n)
}
// -------------------- //
// bits.OnesCount //
// -------------------- //
// TODO(register args) Restore a m d 6 4 / v 1 :.*x86HasPOPCNT when only one ABI is tested.
func OnesCount(n uint) int {
// amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT"
// amd64:"POPCNTQ"
// arm64:"VCNT","VUADDLV"
// s390x:"POPCNT"
// ppc64x:"POPCNTD"
// wasm:"I64Popcnt"
return bits.OnesCount(n)
}
func OnesCount64(n uint64) int {
// amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT"
// amd64:"POPCNTQ"
// arm64:"VCNT","VUADDLV"
// s390x:"POPCNT"
// ppc64x:"POPCNTD"
// wasm:"I64Popcnt"
return bits.OnesCount64(n)
}
func OnesCount32(n uint32) int {
// amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT"
// amd64:"POPCNTL"
// arm64:"VCNT","VUADDLV"
// s390x:"POPCNT"
// ppc64x:"POPCNTW"
// wasm:"I64Popcnt"
return bits.OnesCount32(n)
}
func OnesCount16(n uint16) int {
// amd64/v2:-".*x86HasPOPCNT" amd64/v3:-".*x86HasPOPCNT"
// amd64:"POPCNTL"
// arm64:"VCNT","VUADDLV"
// s390x:"POPCNT"
// ppc64x:"POPCNTW"
// wasm:"I64Popcnt"
return bits.OnesCount16(n)
}
func OnesCount8(n uint8) int {
// s390x:"POPCNT"
// ppc64x:"POPCNTB"
// wasm:"I64Popcnt"
return bits.OnesCount8(n)
}
// ----------------------- //
// bits.ReverseBytes //
// ----------------------- //
func ReverseBytes(n uint) uint {
// amd64:"BSWAPQ"
// 386:"BSWAPL"
// s390x:"MOVDBR"
// arm64:"REV"
return bits.ReverseBytes(n)
}
func ReverseBytes64(n uint64) uint64 {
// amd64:"BSWAPQ"
// 386:"BSWAPL"
// s390x:"MOVDBR"
// arm64:"REV"
// ppc64x/power10: "BRD"
return bits.ReverseBytes64(n)
}
func ReverseBytes32(n uint32) uint32 {
// amd64:"BSWAPL"
// 386:"BSWAPL"
// s390x:"MOVWBR"
// arm64:"REVW"
// ppc64x/power10: "BRW"
return bits.ReverseBytes32(n)
}
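// ReverseBytes16 is just a two-byte swap, so amd64 can use a 16-bit
// rotate by 8 (ROLW) instead of a byte-swap instruction.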
func ReverseBytes16(n uint16) uint16 {
// amd64:"ROLW"
// arm64:"REV16W",-"UBFX",-"ORR"
// arm/5:"SLL","SRL","ORR"
// arm/6:"REV16"
// arm/7:"REV16"
// ppc64x/power10: "BRH"
return bits.ReverseBytes16(n)
}
// --------------------- //
// bits.RotateLeft //
// --------------------- //
func RotateLeft64(n uint64) uint64 {
// amd64:"ROLQ"
// arm64:"ROR"
// loong64:"ROTRV"
// ppc64x:"ROTL"
// riscv64:"RORI"
// s390x:"RISBGZ\t[$]0, [$]63, [$]37, "
// wasm:"I64Rotl"
return bits.RotateLeft64(n, 37)
}
func RotateLeft32(n uint32) uint32 {
// amd64:"ROLL" 386:"ROLL"
// arm:`MOVW\tR[0-9]+@>23`
// arm64:"RORW"
// loong64:"ROTR\t"
// ppc64x:"ROTLW"
// riscv64:"RORIW"
// s390x:"RLL"
// wasm:"I32Rotl"
return bits.RotateLeft32(n, 9)
}
func RotateLeft16(n uint16, s int) uint16 {
// amd64:"ROLW" 386:"ROLW"
// arm64:"RORW",-"CSEL"
// loong64:"ROTR\t","SLLV"
return bits.RotateLeft16(n, s)
}
func RotateLeft8(n uint8, s int) uint8 {
// amd64:"ROLB" 386:"ROLB"
// arm64:"LSL","LSR",-"CSEL"
// loong64:"OR","SLLV","SRLV"
return bits.RotateLeft8(n, s)
}
func RotateLeftVariable(n uint, m int) uint {
// amd64:"ROLQ"
// arm64:"ROR"
// loong64:"ROTRV"
// ppc64x:"ROTL"
// riscv64:"ROL"
// s390x:"RLLG"
// wasm:"I64Rotl"
return bits.RotateLeft(n, m)
}
func RotateLeftVariable64(n uint64, m int) uint64 {
// amd64:"ROLQ"
// arm64:"ROR"
// loong64:"ROTRV"
// ppc64x:"ROTL"
// riscv64:"ROL"
// s390x:"RLLG"
// wasm:"I64Rotl"
return bits.RotateLeft64(n, m)
}
func RotateLeftVariable32(n uint32, m int) uint32 {
// arm:`MOVW\tR[0-9]+@>R[0-9]+`
// amd64:"ROLL"
// arm64:"RORW"
// loong64:"ROTR\t"
// ppc64x:"ROTLW"
// riscv64:"ROLW"
// s390x:"RLL"
// wasm:"I32Rotl"
return bits.RotateLeft32(n, m)
}
// ------------------------ //
// bits.TrailingZeros //
// ------------------------ //
func TrailingZeros(n uint) int {
// amd64/v1,amd64/v2:"BSFQ","MOVL\t\\$64","CMOVQEQ"
// amd64/v3:"TZCNTQ"
// 386:"BSFL"
// arm:"CLZ"
// arm64:"RBIT","CLZ"
// s390x:"FLOGR"
// ppc64x/power8:"ANDN","POPCNTD"
// ppc64x/power9: "CNTTZD"
// wasm:"I64Ctz"
return bits.TrailingZeros(n)
}
func TrailingZeros64(n uint64) int {
// amd64/v1,amd64/v2:"BSFQ","MOVL\t\\$64","CMOVQEQ"
// amd64/v3:"TZCNTQ"
// 386:"BSFL"
// arm64:"RBIT","CLZ"
// s390x:"FLOGR"
// ppc64x/power8:"ANDN","POPCNTD"
// ppc64x/power9: "CNTTZD"
// wasm:"I64Ctz"
return bits.TrailingZeros64(n)
}
func TrailingZeros64Subtract(n uint64) int {
// ppc64x/power8:"NEG","SUBC","ANDN","POPCNTD"
// ppc64x/power9:"SUBC","CNTTZD"
return bits.TrailingZeros64(1 - n)
}
func TrailingZeros32(n uint32) int {
// amd64/v1,amd64/v2:"BTSQ\\t\\$32","BSFQ"
// amd64/v3:"TZCNTL"
// 386:"BSFL"
// arm:"CLZ"
// arm64:"RBITW","CLZW"
// s390x:"FLOGR","MOVWZ"
// ppc64x/power8:"ANDN","POPCNTW"
// ppc64x/power9: "CNTTZW"
// wasm:"I64Ctz"
return bits.TrailingZeros32(n)
}
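// TrailingZeros16 and TrailingZeros8 OR in a sentinel bit just above the
// operand width (0x10000 and 0x100 respectively) so the hardware count
// never exceeds 16 or 8, which also lets the compiler drop the zero
// extension (the -"MOVHU" and -"MOVBU" checks).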
func TrailingZeros16(n uint16) int {
// amd64:"BSFL","ORL\\t\\$65536"
// 386:"BSFL\t"
// arm:"ORR\t\\$65536","CLZ",-"MOVHU\tR"
// arm64:"ORR\t\\$65536","RBITW","CLZW",-"MOVHU\tR",-"RBIT\t",-"CLZ\t"
// s390x:"FLOGR","OR\t\\$65536"
// ppc64x/power8:"POPCNTD","ORIS\\t\\$1"
// ppc64x/power9:"CNTTZD","ORIS\\t\\$1"
// wasm:"I64Ctz"
return bits.TrailingZeros16(n)
}
func TrailingZeros8(n uint8) int {
// amd64:"BSFL","ORL\\t\\$256"
// 386:"BSFL"
// arm:"ORR\t\\$256","CLZ",-"MOVBU\tR"
// arm64:"ORR\t\\$256","RBITW","CLZW",-"MOVBU\tR",-"RBIT\t",-"CLZ\t"
// s390x:"FLOGR","OR\t\\$256"
// wasm:"I64Ctz"
return bits.TrailingZeros8(n)
}
// IterateBitsNN checks special handling of TrailingZerosNN when the input is known to be non-zero.
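// When the loop condition guarantees a non-zero input, the compiler can drop
// the usual zero-input fixups (for example the CMOVEQ of a constant on amd64,
// or the ORR of a sentinel bit on arm64) and emit the bare
// count-trailing-zeros sequence.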
func IterateBits(n uint) int {
i := 0
for n != 0 {
// amd64/v1,amd64/v2:"BSFQ",-"CMOVEQ"
// amd64/v3:"TZCNTQ"
i += bits.TrailingZeros(n)
n &= n - 1
}
return i
}
func IterateBits64(n uint64) int {
i := 0
for n != 0 {
// amd64/v1,amd64/v2:"BSFQ",-"CMOVEQ"
// amd64/v3:"TZCNTQ"
i += bits.TrailingZeros64(n)
n &= n - 1
}
return i
}
func IterateBits32(n uint32) int {
i := 0
for n != 0 {
// amd64/v1,amd64/v2:"BSFL",-"BTSQ"
// amd64/v3:"TZCNTL"
i += bits.TrailingZeros32(n)
n &= n - 1
}
return i
}
func IterateBits16(n uint16) int {
i := 0
for n != 0 {
// amd64/v1,amd64/v2:"BSFL",-"BTSL"
// amd64/v3:"TZCNTL"
// arm64:"RBITW","CLZW",-"ORR"
i += bits.TrailingZeros16(n)
n &= n - 1
}
return i
}
func IterateBits8(n uint8) int {
i := 0
for n != 0 {
// amd64/v1,amd64/v2:"BSFL",-"BTSL"
// amd64/v3:"TZCNTL"
// arm64:"RBITW","CLZW",-"ORR"
i += bits.TrailingZeros8(n)
n &= n - 1
}
return i
}
// --------------- //
// bits.Add* //
// --------------- //
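// bits.Add(x, y, carry) returns the sum x+y+carry together with the carry-out;
// carry-in and carry-out are both 0 or 1. The functions below verify that the
// intrinsic lowers to the target's native carry-chain instructions (ADCS on
// arm64, ADCQ on amd64, ADDE on ppc64x and s390x, and so on) rather than
// explicit comparisons.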
func Add(x, y, ci uint) (r, co uint) {
// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP"
// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
// ppc64x: "ADDC", "ADDE", "ADDZE"
// s390x:"ADDE","ADDC\t[$]-1,"
// riscv64: "ADD","SLTU"
return bits.Add(x, y, ci)
}
func AddC(x, ci uint) (r, co uint) {
// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP"
// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
// loong64: "ADDV", "SGTU"
// ppc64x: "ADDC", "ADDE", "ADDZE"
// s390x:"ADDE","ADDC\t[$]-1,"
// mips64:"ADDV","SGTU"
// riscv64: "ADD","SLTU"
return bits.Add(x, 7, ci)
}
func AddZ(x, y uint) (r, co uint) {
// arm64:"ADDS","ADC",-"ADCS",-"ADD\t",-"CMP"
// amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ"
// loong64: "ADDV", "SGTU"
// ppc64x: "ADDC", -"ADDE", "ADDZE"
// s390x:"ADDC",-"ADDC\t[$]-1,"
// mips64:"ADDV","SGTU"
// riscv64: "ADD","SLTU"
return bits.Add(x, y, 0)
}
func AddR(x, y, ci uint) uint {
// arm64:"ADDS","ADCS",-"ADD\t",-"CMP"
// amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ"
// loong64: "ADDV", -"SGTU"
// ppc64x: "ADDC", "ADDE", -"ADDZE"
// s390x:"ADDE","ADDC\t[$]-1,"
// mips64:"ADDV",-"SGTU"
// riscv64: "ADD",-"SLTU"
r, _ := bits.Add(x, y, ci)
return r
}
func AddM(p, q, r *[3]uint) {
var c uint
r[0], c = bits.Add(p[0], q[0], c)
// arm64:"ADCS",-"ADD\t",-"CMP"
// amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ"
// s390x:"ADDE",-"ADDC\t[$]-1,"
r[1], c = bits.Add(p[1], q[1], c)
r[2], c = bits.Add(p[2], q[2], c)
}
func Add64(x, y, ci uint64) (r, co uint64) {
// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP"
// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
// loong64: "ADDV", "SGTU"
// ppc64x: "ADDC", "ADDE", "ADDZE"
// s390x:"ADDE","ADDC\t[$]-1,"
// mips64:"ADDV","SGTU"
// riscv64: "ADD","SLTU"
return bits.Add64(x, y, ci)
}
func Add64C(x, ci uint64) (r, co uint64) {
// arm64:"ADDS","ADCS","ADC",-"ADD\t",-"CMP"
// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
// loong64: "ADDV", "SGTU"
// ppc64x: "ADDC", "ADDE", "ADDZE"
// s390x:"ADDE","ADDC\t[$]-1,"
// mips64:"ADDV","SGTU"
// riscv64: "ADD","SLTU"
return bits.Add64(x, 7, ci)
}
func Add64Z(x, y uint64) (r, co uint64) {
// arm64:"ADDS","ADC",-"ADCS",-"ADD\t",-"CMP"
// amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ"
// loong64: "ADDV", "SGTU"
// ppc64x: "ADDC", -"ADDE", "ADDZE"
// s390x:"ADDC",-"ADDC\t[$]-1,"
// mips64:"ADDV","SGTU"
// riscv64: "ADD","SLTU"
return bits.Add64(x, y, 0)
}
func Add64R(x, y, ci uint64) uint64 {
// arm64:"ADDS","ADCS",-"ADD\t",-"CMP"
// amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ"
// loong64: "ADDV", -"SGTU"
// ppc64x: "ADDC", "ADDE", -"ADDZE"
// s390x:"ADDE","ADDC\t[$]-1,"
// mips64:"ADDV",-"SGTU"
// riscv64: "ADD",-"SLTU"
r, _ := bits.Add64(x, y, ci)
return r
}
func Add64M(p, q, r *[3]uint64) {
var c uint64
r[0], c = bits.Add64(p[0], q[0], c)
// arm64:"ADCS",-"ADD\t",-"CMP"
// amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ"
// ppc64x: -"ADDC", "ADDE", -"ADDZE"
// s390x:"ADDE",-"ADDC\t[$]-1,"
r[1], c = bits.Add64(p[1], q[1], c)
r[2], c = bits.Add64(p[2], q[2], c)
}
func Add64M0(p, q, r *[3]uint64) {
var c uint64
r[0], c = bits.Add64(p[0], q[0], 0)
// ppc64x: -"ADDC", -"ADDE", "ADDZE\tR[1-9]"
r[1], c = bits.Add64(p[1], 0, c)
// ppc64x: -"ADDC", "ADDE", -"ADDZE"
r[2], c = bits.Add64(p[2], p[2], c)
}
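// Add64MSaveC verifies that storing the carry-out to memory still uses the
// carry chain: on ppc64x the CA bit is materialized into a register with
// ADDZE instead of being recomputed.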
func Add64MSaveC(p, q, r, c *[2]uint64) {
// ppc64x: "ADDC\tR", "ADDZE"
r[0], c[0] = bits.Add64(p[0], q[0], 0)
// ppc64x: "ADDC\t[$]-1", "ADDE", "ADDZE"
r[1], c[1] = bits.Add64(p[1], q[1], c[0])
}
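// The PanicOnOverflow variants verify that branching on the carry-out
// compiles to a branch on the condition code (BRC on s390x) rather than
// materializing the carry into a register first.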
func Add64PanicOnOverflowEQ(a, b uint64) uint64 {
r, c := bits.Add64(a, b, 0)
// s390x:"BRC\t[$]3,",-"ADDE"
if c == 1 {
panic("overflow")
}
return r
}
func Add64PanicOnOverflowNE(a, b uint64) uint64 {
r, c := bits.Add64(a, b, 0)
// s390x:"BRC\t[$]3,",-"ADDE"
if c != 0 {
panic("overflow")
}
return r
}
func Add64PanicOnOverflowGT(a, b uint64) uint64 {
r, c := bits.Add64(a, b, 0)
// s390x:"BRC\t[$]3,",-"ADDE"
if c > 0 {
panic("overflow")
}
return r
}
func Add64MPanicOnOverflowEQ(a, b [2]uint64) [2]uint64 {
var r [2]uint64
var c uint64
r[0], c = bits.Add64(a[0], b[0], c)
r[1], c = bits.Add64(a[1], b[1], c)
// s390x:"BRC\t[$]3,"
if c == 1 {
panic("overflow")
}
return r
}
func Add64MPanicOnOverflowNE(a, b [2]uint64) [2]uint64 {
var r [2]uint64
var c uint64
r[0], c = bits.Add64(a[0], b[0], c)
r[1], c = bits.Add64(a[1], b[1], c)
// s390x:"BRC\t[$]3,"
if c != 0 {
panic("overflow")
}
return r
}
func Add64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 {
var r [2]uint64
var c uint64
r[0], c = bits.Add64(a[0], b[0], c)
r[1], c = bits.Add64(a[1], b[1], c)
// s390x:"BRC\t[$]3,"
if c > 0 {
panic("overflow")
}
return r
}
// Verify independent carry chain operations are scheduled efficiently
// and do not cause unnecessary save/restore of the CA bit.
//
// This is an example of why CarryChainTail priority must be lower
// (earlier in the block) than Memory. The store d[0] = d1 could be scheduled
// after the first two lower 64-bit limb adds, but before either of the
// high 64-bit limbs is added.
//
// This is what happened on PPC64 when compiling
// crypto/internal/edwards25519/field.feMulGeneric.
func Add64MultipleChains(a, b, c, d [2]uint64) {
var cx, d1, d2 uint64
a1, a2 := a[0], a[1]
b1, b2 := b[0], b[1]
c1, c2 := c[0], c[1]
// ppc64x: "ADDC\tR\\d+,", -"ADDE", -"MOVD\tXER"
d1, cx = bits.Add64(a1, b1, 0)
// ppc64x: "ADDE", -"ADDC", -"MOVD\t.*, XER"
d2, _ = bits.Add64(a2, b2, cx)
// ppc64x: "ADDC\tR\\d+,", -"ADDE", -"MOVD\tXER"
d1, cx = bits.Add64(c1, d1, 0)
// ppc64x: "ADDE", -"ADDC", -"MOVD\t.*, XER"
d2, _ = bits.Add64(c2, d2, cx)
d[0] = d1
d[1] = d2
}
// --------------- //
// bits.Sub* //
// --------------- //
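// bits.Sub(x, y, borrow) returns the difference x-y-borrow together with the
// borrow-out; borrow-in and borrow-out are both 0 or 1. These tests mirror the
// Add* ones, expecting subtract-with-borrow instructions (SBCS, SBBQ, SUBE,
// ...) in place of explicit compares.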
func Sub(x, y, ci uint) (r, co uint) {
// amd64:"NEGL","SBBQ","NEGQ"
// arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP"
// loong64:"SUBV","SGTU"
// ppc64x:"SUBC", "SUBE", "SUBZE", "NEG"
// s390x:"SUBE"
// mips64:"SUBV","SGTU"
// riscv64: "SUB","SLTU"
return bits.Sub(x, y, ci)
}
func SubC(x, ci uint) (r, co uint) {
// amd64:"NEGL","SBBQ","NEGQ"
// arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP"
// loong64:"SUBV","SGTU"
// ppc64x:"SUBC", "SUBE", "SUBZE", "NEG"
// s390x:"SUBE"
// mips64:"SUBV","SGTU"
// riscv64: "SUB","SLTU"
return bits.Sub(x, 7, ci)
}
func SubZ(x, y uint) (r, co uint) {
// amd64:"SUBQ","SBBQ","NEGQ",-"NEGL"
// arm64:"SUBS","NGC","NEG",-"SBCS",-"ADD",-"SUB\t",-"CMP"
// loong64:"SUBV","SGTU"
// ppc64x:"SUBC", -"SUBE", "SUBZE", "NEG"
// s390x:"SUBC"
// mips64:"SUBV","SGTU"
// riscv64: "SUB","SLTU"
return bits.Sub(x, y, 0)
}
func SubR(x, y, ci uint) uint {
// amd64:"NEGL","SBBQ",-"NEGQ"
// arm64:"NEGS","SBCS",-"NGC",-"NEG\t",-"ADD",-"SUB",-"CMP"
// loong64:"SUBV",-"SGTU"
// ppc64x:"SUBC", "SUBE", -"SUBZE", -"NEG"
// s390x:"SUBE"
// riscv64: "SUB",-"SLTU"
r, _ := bits.Sub(x, y, ci)
return r
}
func SubM(p, q, r *[3]uint) {
var c uint
r[0], c = bits.Sub(p[0], q[0], c)
// amd64:"SBBQ",-"NEGL",-"NEGQ"
// arm64:"SBCS",-"NEGS",-"NGC",-"NEG",-"ADD",-"SUB",-"CMP"
// ppc64x:-"SUBC", "SUBE", -"SUBZE", -"NEG"
// s390x:"SUBE"
r[1], c = bits.Sub(p[1], q[1], c)
r[2], c = bits.Sub(p[2], q[2], c)
}
func Sub64(x, y, ci uint64) (r, co uint64) {
// amd64:"NEGL","SBBQ","NEGQ"
// arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP"
// loong64:"SUBV","SGTU"
// ppc64x:"SUBC", "SUBE", "SUBZE", "NEG"
// s390x:"SUBE"
// mips64:"SUBV","SGTU"
// riscv64: "SUB","SLTU"
return bits.Sub64(x, y, ci)
}
func Sub64C(x, ci uint64) (r, co uint64) {
// amd64:"NEGL","SBBQ","NEGQ"
// arm64:"NEGS","SBCS","NGC","NEG",-"ADD",-"SUB",-"CMP"
// loong64:"SUBV","SGTU"
// ppc64x:"SUBC", "SUBE", "SUBZE", "NEG"
// s390x:"SUBE"
// mips64:"SUBV","SGTU"
// riscv64: "SUB","SLTU"
return bits.Sub64(x, 7, ci)
}
func Sub64Z(x, y uint64) (r, co uint64) {
// amd64:"SUBQ","SBBQ","NEGQ",-"NEGL"
// arm64:"SUBS","NGC","NEG",-"SBCS",-"ADD",-"SUB\t",-"CMP"
// loong64:"SUBV","SGTU"
// ppc64x:"SUBC", -"SUBE", "SUBZE", "NEG"
// s390x:"SUBC"
// mips64:"SUBV","SGTU"
// riscv64: "SUB","SLTU"
return bits.Sub64(x, y, 0)
}
func Sub64R(x, y, ci uint64) uint64 {
// amd64:"NEGL","SBBQ",-"NEGQ"
// arm64:"NEGS","SBCS",-"NGC",-"NEG\t",-"ADD",-"SUB",-"CMP"
// loong64:"SUBV",-"SGTU"
// ppc64x:"SUBC", "SUBE", -"SUBZE", -"NEG"
// s390x:"SUBE"
// riscv64: "SUB",-"SLTU"
r, _ := bits.Sub64(x, y, ci)
return r
}
func Sub64M(p, q, r *[3]uint64) {
var c uint64
r[0], c = bits.Sub64(p[0], q[0], c)
// amd64:"SBBQ",-"NEGL",-"NEGQ"
// arm64:"SBCS",-"NEGS",-"NGC",-"NEG",-"ADD",-"SUB",-"CMP"
// s390x:"SUBE"
r[1], c = bits.Sub64(p[1], q[1], c)
r[2], c = bits.Sub64(p[2], q[2], c)
}
func Sub64MSaveC(p, q, r, c *[2]uint64) {
// ppc64x:"SUBC\tR\\d+, R\\d+,", "SUBZE", "NEG"
r[0], c[0] = bits.Sub64(p[0], q[0], 0)
// ppc64x:"SUBC\tR\\d+, [$]0,", "SUBE", "SUBZE", "NEG"
r[1], c[1] = bits.Sub64(p[1], q[1], c[0])
}
func Sub64PanicOnOverflowEQ(a, b uint64) uint64 {
r, b := bits.Sub64(a, b, 0)
// s390x:"BRC\t[$]12,",-"ADDE",-"SUBE"
if b == 1 {
panic("overflow")
}
return r
}
func Sub64PanicOnOverflowNE(a, b uint64) uint64 {
r, b := bits.Sub64(a, b, 0)
// s390x:"BRC\t[$]12,",-"ADDE",-"SUBE"
if b != 0 {
panic("overflow")
}
return r
}
func Sub64PanicOnOverflowGT(a, b uint64) uint64 {
r, b := bits.Sub64(a, b, 0)
// s390x:"BRC\t[$]12,",-"ADDE",-"SUBE"
if b > 0 {
panic("overflow")
}
return r
}
func Sub64MPanicOnOverflowEQ(a, b [2]uint64) [2]uint64 {
var r [2]uint64
var c uint64
r[0], c = bits.Sub64(a[0], b[0], c)
r[1], c = bits.Sub64(a[1], b[1], c)
// s390x:"BRC\t[$]12,"
if c == 1 {
panic("overflow")
}
return r
}

func Sub64MPanicOnOverflowNE(a, b [2]uint64) [2]uint64 {
var r [2]uint64
var c uint64
r[0], c = bits.Sub64(a[0], b[0], c)
r[1], c = bits.Sub64(a[1], b[1], c)
// s390x:"BRC\t[$]12,"
if c != 0 {
panic("overflow")
}
return r
}

func Sub64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 {
var r [2]uint64
var c uint64
r[0], c = bits.Sub64(a[0], b[0], c)
r[1], c = bits.Sub64(a[1], b[1], c)
// s390x:"BRC\t[$]12,"
if c > 0 {
panic("overflow")
}
return r
}

// --------------- //
// bits.Mul* //
// --------------- //
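
// Mul and Mul64 should be intrinsified into the target's full-width multiply:
// MULQ on amd64, MUL/UMULH on arm64, MULLD/MULHDU on ppc64x, MLGR on s390x,
// MULVU on mips64, and MUL/MULHU on riscv64.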
func Mul(x, y uint) (hi, lo uint) {
// amd64:"MULQ"
// arm64:"UMULH","MUL"
// ppc64x:"MULHDU","MULLD"
// s390x:"MLGR"
// mips64: "MULVU"
// riscv64:"MULHU","MUL"
return bits.Mul(x, y)
}

func Mul64(x, y uint64) (hi, lo uint64) {
// amd64:"MULQ"
// arm64:"UMULH","MUL"
// ppc64x:"MULHDU","MULLD"
// s390x:"MLGR"
// mips64: "MULVU"
// riscv64:"MULHU","MUL"
return bits.Mul64(x, y)
}
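
// Mul64HiOnly and Mul64LoOnly use only one half of the 128-bit product, so
// only the corresponding instruction should be emitted (UMULH or MUL on
// arm64, MULHU or MUL on riscv64) and the unused half's instruction dropped.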
func Mul64HiOnly(x, y uint64) uint64 {
// arm64:"UMULH",-"MUL"
// riscv64:"MULHU",-"MUL\t"
hi, _ := bits.Mul64(x, y)
return hi
}

func Mul64LoOnly(x, y uint64) uint64 {
// arm64:"MUL",-"UMULH"
// riscv64:"MUL\t",-"MULHU"
_, lo := bits.Mul64(x, y)
return lo
}

// --------------- //
// bits.Div* //
// --------------- //
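
// Div and Div64 should use the two-word divide instruction where one exists
// (DIVQ on amd64). Div32 on arm64 should merge hi and lo with ORR and derive
// the remainder with UDIV/MSUB rather than a separate UREM.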
func Div(hi, lo, x uint) (q, r uint) {
// amd64:"DIVQ"
return bits.Div(hi, lo, x)
}

func Div32(hi, lo, x uint32) (q, r uint32) {
// arm64:"ORR","UDIV","MSUB",-"UREM"
return bits.Div32(hi, lo, x)
}

func Div64(hi, lo, x uint64) (q, r uint64) {
// amd64:"DIVQ"
return bits.Div64(hi, lo, x)
}
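
// Div64degenerate passes a zero high word and a constant divisor, so amd64
// should not need a full 128-by-64-bit DIVQ.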
func Div64degenerate(x uint64) (q, r uint64) {
// amd64:-"DIVQ"
return bits.Div64(0, x, 5)
}