Mirror of https://github.com/golang/go, synced 2024-11-18 11:04:42 -07:00
Commit ddc7d2a80c:

Usually optimization rules have corresponding priorities: some need to run
first, some next, and some last, in order to produce the best code. But
currently our optimization rules have no priorities, so this CL adds a late
lower pass that runs the rules which need to run last, such as splitting
unreasonable constant folding. This pass can be seen as a second round of the
lower pass.

For example:

	func foo(a, b uint64) uint64 {
		d := a + 0x1234568
		d1 := b + 0x1234568
		return d & d1
	}

The code generated by the master branch:

	0x0004 00004  ADD  $19088744, R0, R2   // movz+movk+add
	0x0010 00016  ADD  $19088744, R1, R1   // movz+movk+add
	0x001c 00028  AND  R1, R2, R0

This is because the current constant folding rules do not take the range of
constants into account, causing the constant to be loaded repeatedly. This CL
splits these unreasonable constant foldings in the late lower pass. With this
CL the generated code is:

	0x0004 00004  MOVD $19088744, R2       // movz+movk
	0x000c 00012  ADD  R0, R2, R3
	0x0010 00016  ADD  R1, R2, R1
	0x0014 00020  AND  R1, R3, R0

This CL also adds constant folding optimization for the ADDS instruction. In
addition, in order not to introduce codegen regressions, an optimization rule
is added to turn the addition of a negative number into the subtraction of a
positive number.

go1 benchmarks:

	name                     old time/op    new time/op    delta
	BinaryTree17-8            1.22s ± 1%     1.24s ± 0%   +1.56%  (p=0.008 n=5+5)
	Fannkuch11-8              1.54s ± 0%     1.53s ± 0%   -0.69%  (p=0.016 n=4+5)
	FmtFprintfEmpty-8        14.1ns ± 0%    14.1ns ± 0%   ~       (p=0.079 n=4+5)
	FmtFprintfString-8       26.0ns ± 0%    26.1ns ± 0%   +0.23%  (p=0.008 n=5+5)
	FmtFprintfInt-8          32.3ns ± 0%    32.9ns ± 1%   +1.72%  (p=0.008 n=5+5)
	FmtFprintfIntInt-8       54.5ns ± 0%    55.5ns ± 0%   +1.83%  (p=0.008 n=5+5)
	FmtFprintfPrefixedInt-8  61.5ns ± 0%    62.0ns ± 0%   +0.93%  (p=0.008 n=5+5)
	FmtFprintfFloat-8        72.0ns ± 0%    73.6ns ± 0%   +2.24%  (p=0.008 n=5+5)
	FmtManyArgs-8             221ns ± 0%     224ns ± 0%   +1.22%  (p=0.008 n=5+5)
	GobDecode-8              1.91ms ± 0%    1.93ms ± 0%   +0.98%  (p=0.008 n=5+5)
	GobEncode-8              1.40ms ± 1%    1.39ms ± 0%   -0.79%  (p=0.032 n=5+5)
	Gzip-8                    115ms ± 0%     117ms ± 1%   +1.17%  (p=0.008 n=5+5)
	Gunzip-8                 19.4ms ± 1%    19.3ms ± 0%   -0.71%  (p=0.016 n=5+4)
	HTTPClientServer-8       27.0µs ± 0%    27.3µs ± 0%   +0.80%  (p=0.008 n=5+5)
	JSONEncode-8             3.36ms ± 1%    3.33ms ± 0%   ~       (p=0.056 n=5+5)
	JSONDecode-8             17.5ms ± 2%    17.8ms ± 0%   +1.71%  (p=0.016 n=5+4)
	Mandelbrot200-8          2.29ms ± 0%    2.29ms ± 0%   ~       (p=0.151 n=5+5)
	GoParse-8                1.35ms ± 1%    1.36ms ± 1%   ~       (p=0.056 n=5+5)
	RegexpMatchEasy0_32-8    24.5ns ± 0%    24.5ns ± 0%   ~       (p=0.444 n=4+5)
	RegexpMatchEasy0_1K-8     131ns ±11%     118ns ± 6%   ~       (p=0.056 n=5+5)
	RegexpMatchEasy1_32-8    22.9ns ± 0%    22.9ns ± 0%   ~       (p=0.905 n=4+5)
	RegexpMatchEasy1_1K-8     126ns ± 0%     127ns ± 0%   ~       (p=0.063 n=4+5)
	RegexpMatchMedium_32-8    486ns ± 5%     483ns ± 0%   ~       (p=0.381 n=5+4)
	RegexpMatchMedium_1K-8   15.4µs ± 1%    15.5µs ± 0%   ~       (p=0.151 n=5+5)
	RegexpMatchHard_32-8      687ns ± 0%     686ns ± 0%   ~       (p=0.103 n=5+5)
	RegexpMatchHard_1K-8     20.7µs ± 0%    20.7µs ± 1%   ~       (p=0.151 n=5+5)
	Revcomp-8                 175ms ± 2%     176ms ± 3%   ~       (p=1.000 n=5+5)
	Template-8               20.4ms ± 6%    20.1ms ± 2%   ~       (p=0.151 n=5+5)
	TimeParse-8               112ns ± 0%     113ns ± 0%   +0.97%  (p=0.016 n=5+4)
	TimeFormat-8              156ns ± 0%     145ns ± 0%   -7.14%  (p=0.029 n=4+4)

Change-Id: I3ced26e89041f873ac989586514ccc5ee09f13da
Reviewed-on: https://go-review.googlesource.com/c/go/+/425134
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Eric Fang <eric.fang@arm.com>
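As a rough illustration, a change like this could be pinned down by a codegen
test in the style of the file below. The following sketch is hypothetical
(the function name and the exact check patterns are assumptions, not part of
the upstream test suite): it asserts that the large constant is materialized
once with MOVD and is not folded into two ADD-immediate instructions.

	// addSplitConst mirrors the foo example from the commit message above.
	// With the late lower pass, 0x1234568 should be loaded once and reused.
	func addSplitConst(a, b uint64) uint64 {
		// arm64:`MOVD\t[$]19088744`,-`ADD\t[$]19088744`
		d := a + 0x1234568
		d1 := b + 0x1234568
		return d & d1
	}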
// asmcheck

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codegen

import "unsafe"

// This file contains code generation tests related to the comparison
// operators.
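
// Each function below carries asmcheck annotations: a comment of the form
//	// arch:`pattern`,-`pattern`
// placed in a function requires that every plain regexp pattern matches a
// line of the assembly the compiler generates for that function on the named
// architecture, and that every pattern prefixed with '-' does not match any
// line. See the README in this directory for the full annotation syntax and
// for how to run these checks.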

// -------------- //
//    Equality    //
// -------------- //

// Check that comparisons with a constant string use 2/4/8 byte compares

func CompareString1(s string) bool {
	// amd64:`CMPW\t\(.*\), [$]`
	// arm64:`MOVHU\t\(.*\), [R]`,`MOVD\t[$]`,`CMPW\tR`
	// ppc64le:`MOVHZ\t\(.*\), [R]`,`CMPW\t.*, [$]`
	// s390x:`MOVHBR\t\(.*\), [R]`,`CMPW\t.*, [$]`
	return s == "xx"
}

func CompareString2(s string) bool {
	// amd64:`CMPL\t\(.*\), [$]`
	// arm64:`MOVWU\t\(.*\), [R]`,`CMPW\t.*, [R]`
	// ppc64le:`MOVWZ\t\(.*\), [R]`,`CMPW\t.*, [R]`
	// s390x:`MOVWBR\t\(.*\), [R]`,`CMPW\t.*, [$]`
	return s == "xxxx"
}

func CompareString3(s string) bool {
	// amd64:`CMPQ\t\(.*\), [A-Z]`
	// arm64:-`CMPW\t`
	// ppc64:-`CMPW\t`
	// ppc64le:-`CMPW\t`
	// s390x:-`CMPW\t`
	return s == "xxxxxxxx"
}

// Check that array comparisons use 2/4/8 byte compares

func CompareArray1(a, b [2]byte) bool {
	// amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// arm64:-`MOVBU\t`
	// ppc64le:-`MOVBZ\t`
	// s390x:-`MOVBZ\t`
	return a == b
}

func CompareArray2(a, b [3]uint16) bool {
	// amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	return a == b
}

func CompareArray3(a, b [3]int16) bool {
	// amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CMPW\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	return a == b
}

func CompareArray4(a, b [12]int8) bool {
	// amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CMPL\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	return a == b
}

func CompareArray5(a, b [15]byte) bool {
	// amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	return a == b
}

// This was a TODO in mapaccess1_faststr
func CompareArray6(a, b unsafe.Pointer) bool {
	// amd64:`CMPL\t\(.*\), [A-Z]`
	// arm64:`MOVWU\t\(.*\), [R]`,`CMPW\t.*, [R]`
	// ppc64le:`MOVWZ\t\(.*\), [R]`,`CMPW\t.*, [R]`
	// s390x:`MOVWBR\t\(.*\), [R]`,`CMPW\t.*, [R]`
	return *((*[4]byte)(a)) != *((*[4]byte)(b))
}

// Check that some structs generate 2/4/8 byte compares.

type T1 struct {
	a [8]byte
}

func CompareStruct1(s1, s2 T1) bool {
	// amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:-`CALL`
	return s1 == s2
}

type T2 struct {
	a [16]byte
}

func CompareStruct2(s1, s2 T2) bool {
	// amd64:`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:-`CALL`
	return s1 == s2
}

// Assert that a memequal call is still generated when
// inlining would increase binary size too much.

type T3 struct {
	a [24]byte
}

func CompareStruct3(s1, s2 T3) bool {
	// amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CALL`
	return s1 == s2
}

type T4 struct {
	a [32]byte
}

func CompareStruct4(s1, s2 T4) bool {
	// amd64:-`CMPQ\tcommand-line-arguments[.+_a-z0-9]+\(SP\), [A-Z]`
	// amd64:`CALL`
	return s1 == s2
}

// -------------- //
//    Ordering    //
// -------------- //

// Test that LEAQ/ADDQconst are folded into SETx ops

var r bool

func CmpFold(x uint32) {
	// amd64:`SETHI\t.*\(SB\)`
	r = x > 4
}

// Test that direct comparisons with memory are generated when
// possible

func CmpMem1(p int, q *int) bool {
	// amd64:`CMPQ\t\(.*\), [A-Z]`
	return p < *q
}

func CmpMem2(p *int, q int) bool {
	// amd64:`CMPQ\t\(.*\), [A-Z]`
	return *p < q
}

func CmpMem3(p *int) bool {
	// amd64:`CMPQ\t\(.*\), [$]7`
	return *p < 7
}

func CmpMem4(p *int) bool {
	// amd64:`CMPQ\t\(.*\), [$]7`
	return 7 < *p
}

func CmpMem5(p **int) {
	// amd64:`CMPL\truntime.writeBarrier\(SB\), [$]0`
	*p = nil
}

func CmpMem6(a []int) int {
	// 386:`CMPL\s8\([A-Z]+\),`
	// amd64:`CMPQ\s16\([A-Z]+\),`
	if a[1] > a[2] {
		return 1
	} else {
		return 2
	}
}

// Check tbz/tbnz are generated when comparing against zero on arm64

func CmpZero1(a int32, ptr *int) {
	if a < 0 { // arm64:"TBZ"
		*ptr = 0
	}
}

func CmpZero2(a int64, ptr *int) {
	if a < 0 { // arm64:"TBZ"
		*ptr = 0
	}
}

func CmpZero3(a int32, ptr *int) {
	if a >= 0 { // arm64:"TBNZ"
		*ptr = 0
	}
}

func CmpZero4(a int64, ptr *int) {
	if a >= 0 { // arm64:"TBNZ"
		*ptr = 0
	}
}

func CmpToZero(a, b, d int32, e, f int64, deOptC0, deOptC1 bool) int32 {
	// arm:`TST`,-`AND`
	// arm64:`TSTW`,-`AND`
	// 386:`TESTL`,-`ANDL`
	// amd64:`TESTL`,-`ANDL`
	c0 := a&b < 0
	// arm:`CMN`,-`ADD`
	// arm64:`CMNW`,-`ADD`
	c1 := a+b < 0
	// arm:`TEQ`,-`XOR`
	c2 := a^b < 0
	// arm64:`TST`,-`AND`
	// amd64:`TESTQ`,-`ANDQ`
	c3 := e&f < 0
	// arm64:`CMN`,-`ADD`
	c4 := e+f < 0
	// not optimized to single CMNW/CMN due to further use of b+d
	// arm64:`ADD`,-`CMNW`
	// arm:`ADD`,-`CMN`
	c5 := b+d == 0
	// not optimized to single TSTW/TST due to further use of a&d
	// arm64:`AND`,-`TSTW`
	// arm:`AND`,-`TST`
	// 386:`ANDL`
	c6 := a&d >= 0
	// arm64:`TST\sR[0-9]+<<3,\sR[0-9]+`
	c7 := e&(f<<3) < 0
	// arm64:`CMN\sR[0-9]+<<3,\sR[0-9]+`
	c8 := e+(f<<3) < 0
	if c0 {
		return 1
	} else if c1 {
		return 2
	} else if c2 {
		return 3
	} else if c3 {
		return 4
	} else if c4 {
		return 5
	} else if c5 {
		return 6
	} else if c6 {
		return 7
	} else if c7 {
		return 9
	} else if c8 {
		return 10
	} else if deOptC0 {
		return b + d
	} else if deOptC1 {
		return a & d
	} else {
		return 0
	}
}

func CmpLogicalToZero(a, b, c uint32, d, e uint64) uint64 {

	// ppc64:"ANDCC",-"CMPW"
	// ppc64le:"ANDCC",-"CMPW"
	// wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if a&63 == 0 {
		return 1
	}

	// ppc64:"ANDCC",-"CMP"
	// ppc64le:"ANDCC",-"CMP"
	// wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if d&255 == 0 {
		return 1
	}

	// ppc64:"ANDCC",-"CMP"
	// ppc64le:"ANDCC",-"CMP"
	// wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if d&e == 0 {
		return 1
	}
	// ppc64:"ORCC",-"CMP"
	// ppc64le:"ORCC",-"CMP"
	// wasm:"I64Eqz",-"I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if d|e == 0 {
		return 1
	}

	// ppc64:"XORCC",-"CMP"
	// ppc64le:"XORCC",-"CMP"
	// wasm:"I64Eqz","I32Eqz",-"I64ExtendI32U",-"I32WrapI64"
	if e^d == 0 {
		return 1
	}
	return 0
}

// The following CmpToZero_ex* check that cmp|cmn with bmi|bpl are generated for
// 'comparing to zero' expressions

// var + const
// 'x-const' might be canonicalized to 'x+(-const)', so we check both
// CMN and CMP for subtraction expressions to make the pattern robust.
func CmpToZero_ex1(a int64, e int32) int {
	// arm64:`CMN`,-`ADD`,`(BMI|BPL)`
	if a+3 < 0 {
		return 1
	}

	// arm64:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)`
	if a+5 <= 0 {
		return 1
	}

	// arm64:`CMN`,-`ADD`,`(BMI|BPL)`
	if a+13 >= 0 {
		return 2
	}

	// arm64:`CMP|CMN`,-`(ADD|SUB)`,`(BMI|BPL)`
	if a-7 < 0 {
		return 3
	}

	// arm64:`SUB`,`TBZ`
	if a-11 >= 0 {
		return 4
	}

	// arm64:`SUB`,`CMP`,`BGT`
	if a-19 > 0 {
		return 4
	}

	// arm64:`CMNW`,-`ADDW`,`(BMI|BPL)`
	// arm:`CMN`,-`ADD`,`(BMI|BPL)`
	if e+3 < 0 {
		return 5
	}

	// arm64:`CMNW`,-`ADDW`,`(BMI|BPL)`
	// arm:`CMN`,-`ADD`,`(BMI|BPL)`
	if e+13 >= 0 {
		return 6
	}

	// arm64:`CMPW|CMNW`,`(BMI|BPL)`
	// arm:`CMP|CMN`, -`(ADD|SUB)`, `(BMI|BPL)`
	if e-7 < 0 {
		return 7
	}

	// arm64:`SUB`,`TBNZ`
	// arm:`CMP|CMN`, -`(ADD|SUB)`, `(BMI|BPL)`
	if e-11 >= 0 {
		return 8
	}

	return 0
}

// var + var
// TODO: optimize 'var - var'
func CmpToZero_ex2(a, b, c int64, e, f, g int32) int {
	// arm64:`CMN`,-`ADD`,`(BMI|BPL)`
	if a+b < 0 {
		return 1
	}

	// arm64:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)`
	if a+c <= 0 {
		return 1
	}

	// arm64:`CMN`,-`ADD`,`(BMI|BPL)`
	if b+c >= 0 {
		return 2
	}

	// arm64:`CMNW`,-`ADDW`,`(BMI|BPL)`
	// arm:`CMN`,-`ADD`,`(BMI|BPL)`
	if e+f < 0 {
		return 5
	}

	// arm64:`CMNW`,-`ADDW`,`(BMI|BPL)`
	// arm:`CMN`,-`ADD`,`(BMI|BPL)`
	if f+g >= 0 {
		return 6
	}
	return 0
}

// var + var*var
func CmpToZero_ex3(a, b, c, d int64, e, f, g, h int32) int {
	// arm64:`CMN`,-`MADD`,`MUL`,`(BMI|BPL)`
	if a+b*c < 0 {
		return 1
	}

	// arm64:`CMN`,-`MADD`,`MUL`,`(BMI|BPL)`
	if b+c*d >= 0 {
		return 2
	}

	// arm64:`CMNW`,-`MADDW`,`MULW`,`BEQ`,`(BMI|BPL)`
	// arm:`CMN`,-`MULA`,`MUL`,`BEQ`,`(BMI|BPL)`
	if e+f*g > 0 {
		return 5
	}

	// arm64:`CMNW`,-`MADDW`,`MULW`,`BEQ`,`(BMI|BPL)`
	// arm:`CMN`,-`MULA`,`MUL`,`BEQ`,`(BMI|BPL)`
	if f+g*h <= 0 {
		return 6
	}
	return 0
}

// var - var*var
func CmpToZero_ex4(a, b, c, d int64, e, f, g, h int32) int {
	// arm64:`CMP`,-`MSUB`,`MUL`,`BEQ`,`(BMI|BPL)`
	if a-b*c > 0 {
		return 1
	}

	// arm64:`CMP`,-`MSUB`,`MUL`,`(BMI|BPL)`
	if b-c*d >= 0 {
		return 2
	}

	// arm64:`CMPW`,-`MSUBW`,`MULW`,`(BMI|BPL)`
	if e-f*g < 0 {
		return 5
	}

	// arm64:`CMPW`,-`MSUBW`,`MULW`,`(BMI|BPL)`
	if f-g*h >= 0 {
		return 6
	}
	return 0
}

func CmpToZero_ex5(e, f int32, u uint32) int {
	// arm:`CMN`,-`ADD`,`BEQ`,`(BMI|BPL)`
	if e+f<<1 > 0 {
		return 1
	}

	// arm:`CMP`,-`SUB`,`(BMI|BPL)`
	if f-int32(u>>2) >= 0 {
		return 2
	}
	return 0
}
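
// Check that unsigned comparisons with 0 and 1 whose result is known at
// compile time are eliminated (x < 0 is always false and x >= 0 always true
// for unsigned x), and that x > 0, x <= 0, x < 1 and x >= 1 reduce to a
// plain test against zero (CBZ/CBNZ on arm64).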
func UintLtZero(a uint8, b uint16, c uint32, d uint64) int {
	// amd64: -`(TESTB|TESTW|TESTL|TESTQ|JCC|JCS)`
	// arm64: -`(CMPW|CMP|BHS|BLO)`
	if a < 0 || b < 0 || c < 0 || d < 0 {
		return 1
	}
	return 0
}

func UintGeqZero(a uint8, b uint16, c uint32, d uint64) int {
	// amd64: -`(TESTB|TESTW|TESTL|TESTQ|JCS|JCC)`
	// arm64: -`(CMPW|CMP|BLO|BHS)`
	if a >= 0 || b >= 0 || c >= 0 || d >= 0 {
		return 1
	}
	return 0
}

func UintGtZero(a uint8, b uint16, c uint32, d uint64) int {
	// arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BLS|BHI)`
	if a > 0 || b > 0 || c > 0 || d > 0 {
		return 1
	}
	return 0
}

func UintLeqZero(a uint8, b uint16, c uint32, d uint64) int {
	// arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BHI|BLS)`
	if a <= 0 || b <= 0 || c <= 0 || d <= 0 {
		return 1
	}
	return 0
}

func UintLtOne(a uint8, b uint16, c uint32, d uint64) int {
	// arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BHS|BLO)`
	if a < 1 || b < 1 || c < 1 || d < 1 {
		return 1
	}
	return 0
}

func UintGeqOne(a uint8, b uint16, c uint32, d uint64) int {
	// arm64: `(CBN?ZW)`, `(CBN?Z[^W])`, -`(CMPW|CMP|BLO|BHS)`
	if a >= 1 || b >= 1 || c >= 1 || d >= 1 {
		return 1
	}
	return 0
}
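
// Check that on wasm, unsigned comparisons against the constants 0 and 1
// are lowered to I64Eqz instead of a full I64LtU/I64LeU comparison.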
func CmpToZeroU_ex1(a uint8, b uint16, c uint32, d uint64) int {
	// wasm:"I64Eqz"-"I64LtU"
	if 0 < a {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if 0 < b {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if 0 < c {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if 0 < d {
		return 1
	}
	return 0
}

func CmpToZeroU_ex2(a uint8, b uint16, c uint32, d uint64) int {
	// wasm:"I64Eqz"-"I64LeU"
	if a <= 0 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if b <= 0 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if c <= 0 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if d <= 0 {
		return 1
	}
	return 0
}

func CmpToOneU_ex1(a uint8, b uint16, c uint32, d uint64) int {
	// wasm:"I64Eqz"-"I64LtU"
	if a < 1 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if b < 1 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if c < 1 {
		return 1
	}
	// wasm:"I64Eqz"-"I64LtU"
	if d < 1 {
		return 1
	}
	return 0
}

func CmpToOneU_ex2(a uint8, b uint16, c uint32, d uint64) int {
	// wasm:"I64Eqz"-"I64LeU"
	if 1 <= a {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if 1 <= b {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if 1 <= c {
		return 1
	}
	// wasm:"I64Eqz"-"I64LeU"
	if 1 <= d {
		return 1
	}
	return 0
}

// Check that small memequals are replaced with eq instructions

func equalConstString1() bool {
	a := string("A")
	b := string("Z")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64:-".*memequal"
	// ppc64le:-".*memequal"
	return a == b
}

func equalVarString1(a string) bool {
	b := string("Z")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64:-".*memequal"
	// ppc64le:-".*memequal"
	return a[:1] == b
}

func equalConstString2() bool {
	a := string("AA")
	b := string("ZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64:-".*memequal"
	// ppc64le:-".*memequal"
	return a == b
}

func equalVarString2(a string) bool {
	b := string("ZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64:-".*memequal"
	// ppc64le:-".*memequal"
	return a[:2] == b
}

func equalConstString4() bool {
	a := string("AAAA")
	b := string("ZZZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64:-".*memequal"
	// ppc64le:-".*memequal"
	return a == b
}

func equalVarString4(a string) bool {
	b := string("ZZZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64:-".*memequal"
	// ppc64le:-".*memequal"
	return a[:4] == b
}

func equalConstString8() bool {
	a := string("AAAAAAAA")
	b := string("ZZZZZZZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64:-".*memequal"
	// ppc64le:-".*memequal"
	return a == b
}

func equalVarString8(a string) bool {
	b := string("ZZZZZZZZ")
	// amd64:-".*memequal"
	// arm64:-".*memequal"
	// ppc64:-".*memequal"
	// ppc64le:-".*memequal"
	return a[:8] == b
}
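
// Check that comparisons which can be restated as an addition compared
// against zero (x+const, x+y, x*y+z, or x against -y) are compiled to
// arm64 CMN (compare negative) instead of CMP, while the corresponding
// subtraction forms (x*y-z) still use CMP.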
func cmpToCmn(a, b, c, d int) int {
	var c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 int
	// arm64:`CMN`,-`CMP`
	if a < -8 {
		c1 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a+1 == 0 {
		c2 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a+3 != 0 {
		c3 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a+b == 0 {
		c4 = 1
	}
	// arm64:`CMN`,-`CMP`
	if b+c != 0 {
		c5 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a == -c {
		c6 = 1
	}
	// arm64:`CMN`,-`CMP`
	if b != -d {
		c7 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a*b+c == 0 {
		c8 = 1
	}
	// arm64:`CMN`,-`CMP`
	if a*c+b != 0 {
		c9 = 1
	}
	// arm64:`CMP`,-`CMN`
	if b*c-a == 0 {
		c10 = 1
	}
	// arm64:`CMP`,-`CMN`
	if a*d-b != 0 {
		c11 = 1
	}
	return c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 + c10 + c11
}