From 7f7184686b79e3361aa514ff2d387add97f6d06b Mon Sep 17 00:00:00 2001 From: Constantin Konstantinidis Date: Sun, 10 May 2020 08:10:47 +0200 Subject: [PATCH] cmd/compile: enforce strongly typed rules for ARM (2) Toolstash-check successful from L0 until L268 Change-Id: Ifc55ea1e4177c21107c521fc72da2da7b507b8ba Reviewed-on: https://go-review.googlesource.com/c/go/+/232811 Reviewed-by: Keith Randall Trust: Giovanni Bajo --- src/cmd/compile/internal/ssa/gen/ARM.rules | 276 ++++++++++----------- src/cmd/compile/internal/ssa/rewriteARM.go | 130 +++++----- 2 files changed, 203 insertions(+), 203 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 983f8848497..3564a81518d 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -2,66 +2,66 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -(Add(Ptr|32|16|8) ...) -> (ADD ...) -(Add(32|64)F ...) -> (ADD(F|D) ...) -(Add32carry ...) -> (ADDS ...) -(Add32withcarry ...) -> (ADC ...) +(Add(Ptr|32|16|8) ...) => (ADD ...) +(Add(32|64)F ...) => (ADD(F|D) ...) +(Add32carry ...) => (ADDS ...) +(Add32withcarry ...) => (ADC ...) -(Sub(Ptr|32|16|8) ...) -> (SUB ...) -(Sub(32|64)F ...) -> (SUB(F|D) ...) -(Sub32carry ...) -> (SUBS ...) -(Sub32withcarry ...) -> (SBC ...) +(Sub(Ptr|32|16|8) ...) => (SUB ...) +(Sub(32|64)F ...) => (SUB(F|D) ...) +(Sub32carry ...) => (SUBS ...) +(Sub32withcarry ...) => (SBC ...) -(Mul(32|16|8) ...) -> (MUL ...) -(Mul(32|64)F ...) -> (MUL(F|D) ...) -(Hmul(32|32u) ...) -> (HMU(L|LU) ...) -(Mul32uhilo ...) -> (MULLU ...) +(Mul(32|16|8) ...) => (MUL ...) +(Mul(32|64)F ...) => (MUL(F|D) ...) +(Hmul(32|32u) ...) => (HMU(L|LU) ...) +(Mul32uhilo ...) => (MULLU ...) -(Div32 x y) -> +(Div32 x y) => (SUB (XOR // negate the result if one operand is negative (Select0 (CALLudiv (SUB (XOR x (Signmask x)) (Signmask x)) // negate x if negative (SUB (XOR y (Signmask y)) (Signmask y)))) // negate y if negative (Signmask (XOR x y))) (Signmask (XOR x y))) -(Div32u x y) -> (Select0 (CALLudiv x y)) -(Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y)) -(Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y)) -(Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y)) -(Div8u x y) -> (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y)) -(Div(32|64)F ...) -> (DIV(F|D) ...) +(Div32u x y) => (Select0 (CALLudiv x y)) +(Div16 x y) => (Div32 (SignExt16to32 x) (SignExt16to32 y)) +(Div16u x y) => (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Div8 x y) => (Div32 (SignExt8to32 x) (SignExt8to32 y)) +(Div8u x y) => (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y)) +(Div(32|64)F ...) => (DIV(F|D) ...) 
-(Mod32 x y) -> +(Mod32 x y) => (SUB (XOR // negate the result if x is negative (Select1 (CALLudiv (SUB (XOR x (Signmask x)) (Signmask x)) // negate x if negative (SUB (XOR y (Signmask y)) (Signmask y)))) // negate y if negative (Signmask x)) (Signmask x)) -(Mod32u x y) -> (Select1 (CALLudiv x y)) -(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y)) -(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) -(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y)) -(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) +(Mod32u x y) => (Select1 (CALLudiv x y)) +(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y)) +(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y)) +(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) // (x + y) / 2 with x>=y -> (x - y) / 2 + y -(Avg32u x y) -> (ADD (SRLconst (SUB x y) [1]) y) +(Avg32u x y) => (ADD (SRLconst (SUB x y) [1]) y) -(And(32|16|8) ...) -> (AND ...) -(Or(32|16|8) ...) -> (OR ...) -(Xor(32|16|8) ...) -> (XOR ...) +(And(32|16|8) ...) => (AND ...) +(Or(32|16|8) ...) => (OR ...) +(Xor(32|16|8) ...) => (XOR ...) // unary ops -(Neg(32|16|8) x) -> (RSBconst [0] x) -(Neg(32|64)F ...) -> (NEG(F|D) ...) +(Neg(32|16|8) x) => (RSBconst [0] x) +(Neg(32|64)F ...) => (NEG(F|D) ...) -(Com(32|16|8) ...) -> (MVN ...) +(Com(32|16|8) ...) => (MVN ...) -(Sqrt ...) -> (SQRTD ...) -(Abs ...) -> (ABSD ...) +(Sqrt ...) => (SQRTD ...) +(Abs ...) => (ABSD ...) // TODO: optimize this for ARMv5 and ARMv6 -(Ctz32NonZero ...) -> (Ctz32 ...) -(Ctz16NonZero ...) -> (Ctz32 ...) -(Ctz8NonZero ...) -> (Ctz32 ...) +(Ctz32NonZero ...) => (Ctz32 ...) +(Ctz16NonZero ...) => (Ctz32 ...) +(Ctz8NonZero ...) => (Ctz32 ...) // count trailing zero for ARMv5 and ARMv6 // 32 - CLZ(x&-x - 1) @@ -78,7 +78,7 @@ (Ctz8 x) && objabi.GOARM==7 -> (CLZ (RBIT (ORconst [0x100] x))) // bit length -(BitLen32 x) -> (RSBconst [32] (CLZ x)) +(BitLen32 x) => (RSBconst [32] (CLZ x)) // byte swap for ARMv5 // let (a, b, c, d) be the bytes of x from high to low @@ -98,50 +98,50 @@ (Bswap32 x) && objabi.GOARM>=6 -> (REV x) // boolean ops -- booleans are represented with 0=false, 1=true -(AndB ...) -> (AND ...) -(OrB ...) -> (OR ...) -(EqB x y) -> (XORconst [1] (XOR x y)) -(NeqB ...) -> (XOR ...) -(Not x) -> (XORconst [1] x) +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (XORconst [1] (XOR x y)) +(NeqB ...) => (XOR ...) 
+(Not x) => (XORconst [1] x) // shifts // hardware instruction uses only the low byte of the shift // we compare to 256 to ensure Go semantics for large shifts -(Lsh32x32 x y) -> (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) -(Lsh32x16 x y) -> (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) -(Lsh32x8 x y) -> (SLL x (ZeroExt8to32 y)) +(Lsh32x32 x y) => (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) +(Lsh32x16 x y) => (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Lsh32x8 x y) => (SLL x (ZeroExt8to32 y)) -(Lsh16x32 x y) -> (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) -(Lsh16x16 x y) -> (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) -(Lsh16x8 x y) -> (SLL x (ZeroExt8to32 y)) +(Lsh16x32 x y) => (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) +(Lsh16x16 x y) => (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Lsh16x8 x y) => (SLL x (ZeroExt8to32 y)) -(Lsh8x32 x y) -> (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) -(Lsh8x16 x y) -> (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) -(Lsh8x8 x y) -> (SLL x (ZeroExt8to32 y)) +(Lsh8x32 x y) => (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) +(Lsh8x16 x y) => (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Lsh8x8 x y) => (SLL x (ZeroExt8to32 y)) -(Rsh32Ux32 x y) -> (CMOVWHSconst (SRL x y) (CMPconst [256] y) [0]) -(Rsh32Ux16 x y) -> (CMOVWHSconst (SRL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) -(Rsh32Ux8 x y) -> (SRL x (ZeroExt8to32 y)) +(Rsh32Ux32 x y) => (CMOVWHSconst (SRL x y) (CMPconst [256] y) [0]) +(Rsh32Ux16 x y) => (CMOVWHSconst (SRL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Rsh32Ux8 x y) => (SRL x (ZeroExt8to32 y)) -(Rsh16Ux32 x y) -> (CMOVWHSconst (SRL (ZeroExt16to32 x) y) (CMPconst [256] y) [0]) -(Rsh16Ux16 x y) -> (CMOVWHSconst (SRL (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) -(Rsh16Ux8 x y) -> (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) +(Rsh16Ux32 x y) => (CMOVWHSconst (SRL (ZeroExt16to32 x) y) (CMPconst [256] y) [0]) +(Rsh16Ux16 x y) => (CMOVWHSconst (SRL (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Rsh16Ux8 x y) => (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) -(Rsh8Ux32 x y) -> (CMOVWHSconst (SRL (ZeroExt8to32 x) y) (CMPconst [256] y) [0]) -(Rsh8Ux16 x y) -> (CMOVWHSconst (SRL (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) -(Rsh8Ux8 x y) -> (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) +(Rsh8Ux32 x y) => (CMOVWHSconst (SRL (ZeroExt8to32 x) y) (CMPconst [256] y) [0]) +(Rsh8Ux16 x y) => (CMOVWHSconst (SRL (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Rsh8Ux8 x y) => (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) -(Rsh32x32 x y) -> (SRAcond x y (CMPconst [256] y)) -(Rsh32x16 x y) -> (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) -(Rsh32x8 x y) -> (SRA x (ZeroExt8to32 y)) +(Rsh32x32 x y) => (SRAcond x y (CMPconst [256] y)) +(Rsh32x16 x y) => (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) +(Rsh32x8 x y) => (SRA x (ZeroExt8to32 y)) -(Rsh16x32 x y) -> (SRAcond (SignExt16to32 x) y (CMPconst [256] y)) -(Rsh16x16 x y) -> (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) -(Rsh16x8 x y) -> (SRA (SignExt16to32 x) (ZeroExt8to32 y)) +(Rsh16x32 x y) => (SRAcond (SignExt16to32 x) y (CMPconst [256] y)) +(Rsh16x16 x y) => (SRAcond 
(SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) +(Rsh16x8 x y) => (SRA (SignExt16to32 x) (ZeroExt8to32 y)) -(Rsh8x32 x y) -> (SRAcond (SignExt8to32 x) y (CMPconst [256] y)) -(Rsh8x16 x y) -> (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) -(Rsh8x8 x y) -> (SRA (SignExt8to32 x) (ZeroExt8to32 y)) +(Rsh8x32 x y) => (SRAcond (SignExt8to32 x) y (CMPconst [256] y)) +(Rsh8x16 x y) => (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) +(Rsh8x8 x y) => (SRA (SignExt8to32 x) (ZeroExt8to32 y)) // constant shifts // generic opt rewrites all constant shifts to shift by Const64 @@ -156,108 +156,108 @@ (Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst x [24]) [c+24]) // large constant shifts -(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0]) -(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0]) -(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0]) -(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (Const16 [0]) -(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0]) -(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (Const8 [0]) +(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0]) +(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0]) +(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0]) +(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0]) +(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0]) +(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0]) // large constant signed right shift, we leave the sign bit -(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAconst x [31]) -(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst x [16]) [31]) -(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst x [24]) [31]) +(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SRAconst x [31]) +(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SRAconst (SLLconst x [16]) [31]) +(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst x [24]) [31]) // constants (Const(8|16|32) ...) -> (MOVWconst ...) (Const(32F|64F) ...) -> (MOV(F|D)const ...) -(ConstNil) -> (MOVWconst [0]) +(ConstNil) => (MOVWconst [0]) (ConstBool ...) -> (MOVWconst ...) // truncations // Because we ignore high parts of registers, truncates are just copies. -(Trunc16to8 ...) -> (Copy ...) -(Trunc32to8 ...) -> (Copy ...) -(Trunc32to16 ...) -> (Copy ...) +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) // Zero-/Sign-extensions -(ZeroExt8to16 ...) -> (MOVBUreg ...) -(ZeroExt8to32 ...) -> (MOVBUreg ...) -(ZeroExt16to32 ...) -> (MOVHUreg ...) +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) -(SignExt8to16 ...) -> (MOVBreg ...) -(SignExt8to32 ...) -> (MOVBreg ...) -(SignExt16to32 ...) -> (MOVHreg ...) +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) -(Signmask x) -> (SRAconst x [31]) -(Zeromask x) -> (SRAconst (RSBshiftRL x x [1]) [31]) // sign bit of uint32(x)>>1 - x -(Slicemask x) -> (SRAconst (RSBconst [0] x) [31]) +(Signmask x) => (SRAconst x [31]) +(Zeromask x) => (SRAconst (RSBshiftRL x x [1]) [31]) // sign bit of uint32(x)>>1 - x +(Slicemask x) => (SRAconst (RSBconst [0] x) [31]) // float <-> int conversion -(Cvt32to32F ...) -> (MOVWF ...) -(Cvt32to64F ...) -> (MOVWD ...) -(Cvt32Uto32F ...) -> (MOVWUF ...) -(Cvt32Uto64F ...) -> (MOVWUD ...) 
-(Cvt32Fto32 ...) -> (MOVFW ...) -(Cvt64Fto32 ...) -> (MOVDW ...) -(Cvt32Fto32U ...) -> (MOVFWU ...) -(Cvt64Fto32U ...) -> (MOVDWU ...) -(Cvt32Fto64F ...) -> (MOVFD ...) -(Cvt64Fto32F ...) -> (MOVDF ...) +(Cvt32to32F ...) => (MOVWF ...) +(Cvt32to64F ...) => (MOVWD ...) +(Cvt32Uto32F ...) => (MOVWUF ...) +(Cvt32Uto64F ...) => (MOVWUD ...) +(Cvt32Fto32 ...) => (MOVFW ...) +(Cvt64Fto32 ...) => (MOVDW ...) +(Cvt32Fto32U ...) => (MOVFWU ...) +(Cvt64Fto32U ...) => (MOVDWU ...) +(Cvt32Fto64F ...) => (MOVFD ...) +(Cvt64Fto32F ...) => (MOVDF ...) -(Round(32|64)F ...) -> (Copy ...) +(Round(32|64)F ...) => (Copy ...) -(CvtBoolToUint8 ...) -> (Copy ...) +(CvtBoolToUint8 ...) => (Copy ...) // fused-multiply-add -(FMA x y z) -> (FMULAD z x y) +(FMA x y z) => (FMULAD z x y) // comparisons -(Eq8 x y) -> (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) -(Eq16 x y) -> (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) -(Eq32 x y) -> (Equal (CMP x y)) -(EqPtr x y) -> (Equal (CMP x y)) -(Eq(32|64)F x y) -> (Equal (CMP(F|D) x y)) +(Eq8 x y) => (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Eq16 x y) => (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Eq32 x y) => (Equal (CMP x y)) +(EqPtr x y) => (Equal (CMP x y)) +(Eq(32|64)F x y) => (Equal (CMP(F|D) x y)) -(Neq8 x y) -> (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) -(Neq16 x y) -> (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) -(Neq32 x y) -> (NotEqual (CMP x y)) -(NeqPtr x y) -> (NotEqual (CMP x y)) -(Neq(32|64)F x y) -> (NotEqual (CMP(F|D) x y)) +(Neq8 x y) => (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Neq16 x y) => (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Neq32 x y) => (NotEqual (CMP x y)) +(NeqPtr x y) => (NotEqual (CMP x y)) +(Neq(32|64)F x y) => (NotEqual (CMP(F|D) x y)) -(Less8 x y) -> (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y))) -(Less16 x y) -> (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y))) -(Less32 x y) -> (LessThan (CMP x y)) -(Less(32|64)F x y) -> (GreaterThan (CMP(F|D) y x)) // reverse operands to work around NaN +(Less8 x y) => (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y))) +(Less16 x y) => (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y))) +(Less32 x y) => (LessThan (CMP x y)) +(Less(32|64)F x y) => (GreaterThan (CMP(F|D) y x)) // reverse operands to work around NaN -(Less8U x y) -> (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) -(Less16U x y) -> (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) -(Less32U x y) -> (LessThanU (CMP x y)) +(Less8U x y) => (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Less16U x y) => (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Less32U x y) => (LessThanU (CMP x y)) -(Leq8 x y) -> (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) -(Leq16 x y) -> (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) -(Leq32 x y) -> (LessEqual (CMP x y)) -(Leq(32|64)F x y) -> (GreaterEqual (CMP(F|D) y x)) // reverse operands to work around NaN +(Leq8 x y) => (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) +(Leq16 x y) => (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) +(Leq32 x y) => (LessEqual (CMP x y)) +(Leq(32|64)F x y) => (GreaterEqual (CMP(F|D) y x)) // reverse operands to work around NaN -(Leq8U x y) -> (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) -(Leq16U x y) -> (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) -(Leq32U x y) -> (LessEqualU (CMP x y)) +(Leq8U x y) => (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Leq32U x y) => (LessEqualU 
(CMP x y)) (OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr) (OffPtr [off] ptr) -> (ADDconst [off] ptr) (Addr ...) -> (MOVWaddr ...) -(LocalAddr {sym} base _) -> (MOVWaddr {sym} base) +(LocalAddr {sym} base _) => (MOVWaddr {sym} base) // loads -(Load ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem) -(Load ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem) -(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem) -(Load ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem) -(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem) -(Load ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVWload ptr mem) -(Load ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem) -(Load ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem) +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) // stores (Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index eaf20e27b79..7209d364df8 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -12912,7 +12912,7 @@ func rewriteValueARM_OpAvg32u(v *Value) bool { y := v_1 v.reset(OpARMADD) v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpARMSUB, t) v1.AddArg2(x, y) v0.AddArg(v1) @@ -12929,7 +12929,7 @@ func rewriteValueARM_OpBitLen32(v *Value) bool { t := v.Type x := v_0 v.reset(OpARMRSBconst) - v.AuxInt = 32 + v.AuxInt = int32ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpARMCLZ, t) v0.AddArg(x) v.AddArg(v0) @@ -12986,7 +12986,7 @@ func rewriteValueARM_OpConstNil(v *Value) bool { // result: (MOVWconst [0]) for { v.reset(OpARMMOVWconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } } @@ -13371,7 +13371,7 @@ func rewriteValueARM_OpEqB(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMXORconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool) v0.AddArg2(x, y) v.AddArg(v0) @@ -13874,10 +13874,10 @@ func rewriteValueARM_OpLocalAddr(v *Value) bool { // match: (LocalAddr {sym} base _) // result: (MOVWaddr {sym} base) for { - sym := v.Aux + sym := auxToSym(v.Aux) base := v_0 v.reset(OpARMMOVWaddr) - v.Aux = sym + v.Aux = symToAux(sym) v.AddArg(base) return true } @@ -13893,13 +13893,13 @@ func rewriteValueARM_OpLsh16x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2.AuxInt = int32ToAuxInt(256) v2.AddArg(v1) v.AddArg2(v0, v2) return true @@ -13915,11 +13915,11 @@ func rewriteValueARM_OpLsh16x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - 
v1.AuxInt = 256 + v1.AuxInt = int32ToAuxInt(256) v1.AddArg(y) v.AddArg2(v0, v1) return true @@ -13952,12 +13952,12 @@ func rewriteValueARM_OpLsh16x64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 16) { break } v.reset(OpConst16) - v.AuxInt = 0 + v.AuxInt = int16ToAuxInt(0) return true } return false @@ -13990,13 +13990,13 @@ func rewriteValueARM_OpLsh32x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2.AuxInt = int32ToAuxInt(256) v2.AddArg(v1) v.AddArg2(v0, v2) return true @@ -14012,11 +14012,11 @@ func rewriteValueARM_OpLsh32x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 + v1.AuxInt = int32ToAuxInt(256) v1.AddArg(y) v.AddArg2(v0, v1) return true @@ -14049,12 +14049,12 @@ func rewriteValueARM_OpLsh32x64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 32) { break } v.reset(OpConst32) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -14087,13 +14087,13 @@ func rewriteValueARM_OpLsh8x16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2.AuxInt = int32ToAuxInt(256) v2.AddArg(v1) v.AddArg2(v0, v2) return true @@ -14109,11 +14109,11 @@ func rewriteValueARM_OpLsh8x32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 + v1.AuxInt = int32ToAuxInt(256) v1.AddArg(y) v.AddArg2(v0, v1) return true @@ -14146,12 +14146,12 @@ func rewriteValueARM_OpLsh8x64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 8) { break } v.reset(OpConst8) - v.AuxInt = 0 + v.AuxInt = int8ToAuxInt(0) return true } return false @@ -14525,7 +14525,7 @@ func rewriteValueARM_OpNeg16(v *Value) bool { for { x := v_0 v.reset(OpARMRSBconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v.AddArg(x) return true } @@ -14537,7 +14537,7 @@ func rewriteValueARM_OpNeg32(v *Value) bool { for { x := v_0 v.reset(OpARMRSBconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v.AddArg(x) return true } @@ -14549,7 +14549,7 @@ func rewriteValueARM_OpNeg8(v *Value) bool { for { x := v_0 v.reset(OpARMRSBconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v.AddArg(x) return true } @@ -14667,7 +14667,7 @@ func rewriteValueARM_OpNot(v *Value) bool { for { x := v_0 v.reset(OpARMXORconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) v.AddArg(x) return true } @@ -14907,7 +14907,7 @@ func rewriteValueARM_OpRsh16Ux16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) 
v1.AddArg(x) @@ -14915,7 +14915,7 @@ func rewriteValueARM_OpRsh16Ux16(v *Value) bool { v2.AddArg(y) v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v3.AuxInt = 256 + v3.AuxInt = int32ToAuxInt(256) v3.AddArg(v2) v.AddArg2(v0, v3) return true @@ -14932,13 +14932,13 @@ func rewriteValueARM_OpRsh16Ux32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2.AuxInt = int32ToAuxInt(256) v2.AddArg(y) v.AddArg2(v0, v2) return true @@ -14976,12 +14976,12 @@ func rewriteValueARM_OpRsh16Ux64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 16) { break } v.reset(OpConst16) - v.AuxInt = 0 + v.AuxInt = int16ToAuxInt(0) return true } return false @@ -15021,7 +15021,7 @@ func rewriteValueARM_OpRsh16x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2.AuxInt = int32ToAuxInt(256) v2.AddArg(v1) v.AddArg3(v0, v1, v2) return true @@ -15041,7 +15041,7 @@ func rewriteValueARM_OpRsh16x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 + v1.AuxInt = int32ToAuxInt(256) v1.AddArg(y) v.AddArg3(v0, y, v1) return true @@ -15080,14 +15080,14 @@ func rewriteValueARM_OpRsh16x64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 16) { break } v.reset(OpARMSRAconst) - v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 16 + v0.AuxInt = int32ToAuxInt(16) v0.AddArg(x) v.AddArg(v0) return true @@ -15124,13 +15124,13 @@ func rewriteValueARM_OpRsh32Ux16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) v0.AddArg2(x, v1) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2.AuxInt = int32ToAuxInt(256) v2.AddArg(v1) v.AddArg2(v0, v2) return true @@ -15146,11 +15146,11 @@ func rewriteValueARM_OpRsh32Ux32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v0.AddArg2(x, y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 + v1.AuxInt = int32ToAuxInt(256) v1.AddArg(y) v.AddArg2(v0, v1) return true @@ -15183,12 +15183,12 @@ func rewriteValueARM_OpRsh32Ux64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 32) { break } v.reset(OpConst32) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -15224,7 +15224,7 @@ func rewriteValueARM_OpRsh32x16(v *Value) bool { v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v0.AddArg(y) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 + v1.AuxInt = int32ToAuxInt(256) v1.AddArg(v0) v.AddArg3(x, v0, v1) return true @@ -15241,7 +15241,7 @@ func rewriteValueARM_OpRsh32x32(v *Value) bool { y := v_1 v.reset(OpARMSRAcond) v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - 
v0.AuxInt = 256 + v0.AuxInt = int32ToAuxInt(256) v0.AddArg(y) v.AddArg3(x, y, v0) return true @@ -15275,12 +15275,12 @@ func rewriteValueARM_OpRsh32x64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 32) { break } v.reset(OpARMSRAconst) - v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v.AddArg(x) return true } @@ -15314,7 +15314,7 @@ func rewriteValueARM_OpRsh8Ux16(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) @@ -15322,7 +15322,7 @@ func rewriteValueARM_OpRsh8Ux16(v *Value) bool { v2.AddArg(y) v0.AddArg2(v1, v2) v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v3.AuxInt = 256 + v3.AuxInt = int32ToAuxInt(256) v3.AddArg(v2) v.AddArg2(v0, v3) return true @@ -15339,13 +15339,13 @@ func rewriteValueARM_OpRsh8Ux32(v *Value) bool { x := v_0 y := v_1 v.reset(OpARMCMOVWHSconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) v1.AddArg(x) v0.AddArg2(v1, y) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2.AuxInt = int32ToAuxInt(256) v2.AddArg(y) v.AddArg2(v0, v2) return true @@ -15383,12 +15383,12 @@ func rewriteValueARM_OpRsh8Ux64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 8) { break } v.reset(OpConst8) - v.AuxInt = 0 + v.AuxInt = int8ToAuxInt(0) return true } return false @@ -15428,7 +15428,7 @@ func rewriteValueARM_OpRsh8x16(v *Value) bool { v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) v1.AddArg(y) v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v2.AuxInt = 256 + v2.AuxInt = int32ToAuxInt(256) v2.AddArg(v1) v.AddArg3(v0, v1, v2) return true @@ -15448,7 +15448,7 @@ func rewriteValueARM_OpRsh8x32(v *Value) bool { v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) v0.AddArg(x) v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v1.AuxInt = 256 + v1.AuxInt = int32ToAuxInt(256) v1.AddArg(y) v.AddArg3(v0, y, v1) return true @@ -15487,14 +15487,14 @@ func rewriteValueARM_OpRsh8x64(v *Value) bool { if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) >= 8) { break } v.reset(OpARMSRAconst) - v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 24 + v0.AuxInt = int32ToAuxInt(24) v0.AddArg(x) v.AddArg(v0) return true @@ -15651,7 +15651,7 @@ func rewriteValueARM_OpSignmask(v *Value) bool { for { x := v_0 v.reset(OpARMSRAconst) - v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v.AddArg(x) return true } @@ -15665,9 +15665,9 @@ func rewriteValueARM_OpSlicemask(v *Value) bool { t := v.Type x := v_0 v.reset(OpARMSRAconst) - v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v0 := b.NewValue0(v.Pos, OpARMRSBconst, t) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v0.AddArg(x) v.AddArg(v0) return true @@ -15958,9 +15958,9 @@ func rewriteValueARM_OpZeromask(v *Value) bool { for { x := v_0 v.reset(OpARMSRAconst) - v.AuxInt = 31 + v.AuxInt = int32ToAuxInt(31) v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v0.AddArg2(x, x) v.AddArg(v0) return true
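
Note on the mechanics, for readers of this patch: changing a rule's arrow
from "->" to "=>" in ARM.rules opts that rule into the strongly typed
rulegen path, so the regenerated rewriteARM.go stops assigning raw
constants to v.AuxInt and v.Aux and instead goes through typed conversion
helpers (auxIntToInt64, int8/int16/int32ToAuxInt, auxToSym, symToAux, all
visible in the hunks above). A minimal sketch of what those helpers look
like, assuming the shapes in src/cmd/compile/internal/ssa/rewrite.go; the
bodies below are illustrative, not copied from the tree:

package main

import "fmt"

// Typed AuxInt helpers, per the scheme used by the generated code in
// this patch. AuxInt is stored as an int64 on every *Value; the typed
// wrappers document, and let the Go compiler enforce, the width an op
// actually carries.
func auxIntToInt64(i int64) int64 { return i }
func int8ToAuxInt(i int8) int64   { return int64(i) }
func int16ToAuxInt(i int16) int64 { return int64(i) }
func int32ToAuxInt(i int32) int64 { return int64(i) }

func main() {
	// Old style (untyped rule): v.AuxInt = 0 said nothing about width.
	// New style: a (Const16 [0]) result must be written through
	// int16ToAuxInt, as in the Rsh16Ux64 rewrite above, while a
	// CMPconst shift bound goes through int32ToAuxInt(256).
	fmt.Println(int16ToAuxInt(0), int32ToAuxInt(256))
}

The point of the exercise is that width and kind mismatches in a rule
become build errors in the generated rewriter (an int32 constant fed to
an op declared with an int16 aux no longer truncates silently), while
the emitted compiler stays byte-identical, which is what the
toolstash-check line in the commit message is attesting.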